changeset 11471:994e2a93a8e2

Use uppercase 'A' to refer to matrix inputs in m-files.
author Rik <octave@nomad.inbox5.com>
date Sun, 09 Jan 2011 16:01:05 -0800
parents eb9e0b597d61
children 1740012184f9
files scripts/ChangeLog scripts/general/arrayfun.m scripts/general/blkdiag.m scripts/general/structfun.m scripts/image/imagesc.m scripts/io/dlmwrite.m scripts/io/textscan.m scripts/linear-algebra/cond.m scripts/linear-algebra/condest.m scripts/linear-algebra/expm.m scripts/linear-algebra/logm.m scripts/linear-algebra/onenormest.m scripts/linear-algebra/qzhess.m scripts/optimization/glpk.m scripts/optimization/glpkmex.m scripts/polynomial/poly.m scripts/signal/unwrap.m scripts/sparse/gplot.m scripts/sparse/pcg.m scripts/sparse/pcr.m scripts/sparse/spaugment.m scripts/sparse/spdiags.m scripts/sparse/svds.m
diffstat 23 files changed, 255 insertions(+), 237 deletions(-)
--- a/scripts/ChangeLog
+++ b/scripts/ChangeLog
@@ -1,3 +1,21 @@
+2011-01-09  Rik  <octave@nomad.inbox5.com>
+
+	* general/arrayfun.m, general/blkdiag.m, general/structfun.m,
+	image/imagesc.m, linear-algebra/cond.m, linear-algebra/condest.m,
+	linear-algebra/expm.m, linear-algebra/logm.m,
+	linear-algebra/onenormest.m, linear-algebra/qzhess.m,
+	optimization/glpk.m, optimization/glpkmex.m, polynomial/poly.m,
+	sparse/gplot.m, sparse/pcg.m, sparse/pcr.m, sparse/spaugment.m,
+	sparse/spdiags.m, sparse/svds.m: Use uppercase 'A' to refer to matrix
+	argument.
+
+	* io/dlmwrite.m: Use uppercase 'M' to refer to matrix argument.
+
+	* io/textscan.m: Use uppercase 'C' to refer to cell array output.
+	Improve docstring.
+
+	* signal/unwrap.m: Use 'x' instead of 'a' for vector input argument.
+
 2011-01-09  Rik  <octave@nomad.inbox5.com>
 
 	* general/bicubic.m, general/nargchk.m, general/nargoutchk.m,
--- a/scripts/general/arrayfun.m
+++ b/scripts/general/arrayfun.m
@@ -18,10 +18,10 @@
 ## <http://www.gnu.org/licenses/>.
 
 ## -*- texinfo -*-
-## @deftypefn  {Function File} {} arrayfun (@var{func}, @var{a})
-## @deftypefnx {Function File} {@var{x} =} arrayfun (@var{func}, @var{a})
-## @deftypefnx {Function File} {@var{x} =} arrayfun (@var{func}, @var{a}, @var{b}, @dots{})
-## @deftypefnx {Function File} {[@var{x}, @var{y}, @dots{}] =} arrayfun (@var{func}, @var{a}, @dots{})
+## @deftypefn  {Function File} {} arrayfun (@var{func}, @var{A})
+## @deftypefnx {Function File} {@var{x} =} arrayfun (@var{func}, @var{A})
+## @deftypefnx {Function File} {@var{x} =} arrayfun (@var{func}, @var{A}, @var{b}, @dots{})
+## @deftypefnx {Function File} {[@var{x}, @var{y}, @dots{}] =} arrayfun (@var{func}, @var{A}, @dots{})
 ## @deftypefnx {Function File} {} arrayfun (@dots{}, "UniformOutput", @var{val})
 ## @deftypefnx {Function File} {} arrayfun (@dots{}, "ErrorHandler", @var{errfunc})
 ##
@@ -31,9 +31,9 @@
 ##
 ## The first input argument @var{func} can be a string, a function
 ## handle, an inline function or an anonymous function.  The input
-## argument @var{a} can be a logic array, a numeric array, a string
+## argument @var{A} can be a logic array, a numeric array, a string
 ## array, a structure array or a cell array.  By a call of the function
-## @command{arrayfun} all elements of @var{a} are passed on to the named
+## @command{arrayfun} all elements of @var{A} are passed on to the named
 ## function @var{func} individually.
 ## 
 ## The named function can also take more than two input arguments, with
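For reference, a minimal illustrative sketch of the calling forms documented above (values are arbitrary):

  A = [1, 2, 3];
  x = arrayfun (@(v) v^2, A)                                  ## -> [1, 4, 9]
  c = arrayfun (@(v) v * [1, 1], A, "UniformOutput", false)   ## cell array of row vectors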
--- a/scripts/general/blkdiag.m
+++ b/scripts/general/blkdiag.m
@@ -17,8 +17,8 @@
 ## <http://www.gnu.org/licenses/>.
 
 ## -*- texinfo -*-
-## @deftypefn {Function File} {} blkdiag (@var{a}, @var{b}, @var{c}, @dots{})
-## Build a block diagonal matrix from @var{a}, @var{b}, @var{c}, @dots{}.
+## @deftypefn {Function File} {} blkdiag (@var{A}, @var{B}, @var{C}, @dots{})
+## Build a block diagonal matrix from @var{A}, @var{B}, @var{C}, @dots{}.
 ## All the arguments must be numeric and are two-dimensional matrices or
 ## scalars.
 ## @seealso{diag, horzcat, vertcat}
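An illustrative sketch of the documented behavior, with arbitrary numeric blocks:

  A = [1, 2; 3, 4];  B = 5;
  blkdiag (A, B)     ## -> [1, 2, 0; 3, 4, 0; 0, 0, 5]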
--- a/scripts/general/structfun.m
+++ b/scripts/general/structfun.m
@@ -19,7 +19,7 @@
 
 ## -*- texinfo -*-
 ## @deftypefn  {Function File} {} structfun (@var{func}, @var{S})
-## @deftypefnx {Function File} {[@var{a}, @dots{}] =} structfun (@dots{})
+## @deftypefnx {Function File} {[@var{A}, @dots{}] =} structfun (@dots{})
 ## @deftypefnx {Function File} {} structfun (@dots{}, "ErrorHandler", @var{errfunc})
 ## @deftypefnx {Function File} {} structfun (@dots{}, "UniformOutput", @var{val})
 ## 
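A minimal illustrative sketch of the structfun interface (the field names are arbitrary):

  s.a = 1:3;  s.b = 1:5;
  structfun (@numel, s)     ## -> 3 and 5, one scalar result per field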
--- a/scripts/image/imagesc.m
+++ b/scripts/image/imagesc.m
@@ -18,12 +18,12 @@
 ## <http://www.gnu.org/licenses/>.
 
 ## -*- texinfo -*-
-## @deftypefn  {Function File} {} imagesc (@var{a})
-## @deftypefnx {Function File} {} imagesc (@var{x}, @var{y}, @var{a})
+## @deftypefn  {Function File} {} imagesc (@var{A})
+## @deftypefnx {Function File} {} imagesc (@var{x}, @var{y}, @var{A})
 ## @deftypefnx {Function File} {} imagesc (@dots{}, @var{limits})
 ## @deftypefnx {Function File} {} imagesc (@var{h}, @dots{})
 ## @deftypefnx {Function File} {@var{h} =} imagesc (@dots{})
-## Display a scaled version of the matrix @var{a} as a color image.  The
+## Display a scaled version of the matrix @var{A} as a color image.  The
 ## colormap is scaled so that the entries of the matrix occupy the entire
 ## colormap.  If @var{limits} = [@var{lo}, @var{hi}] are given, then that
 ## range is set to the 'clim' of the current axes.
@@ -31,7 +31,7 @@
 ## The axis values corresponding to the matrix elements are specified in
 ## @var{x} and @var{y}, either as pairs giving the minimum and maximum
 ## values for the respective axes, or as values for each row and column
-## of the matrix @var{a}.
+## of the matrix @var{A}.
 ##
 ## @seealso{image, imshow, caxis}
 ## @end deftypefn
--- a/scripts/io/dlmwrite.m
+++ b/scripts/io/dlmwrite.m
@@ -17,11 +17,11 @@
 ## <http://www.gnu.org/licenses/>.
 
 ## -*- texinfo -*-
-## @deftypefn  {Function File} {} dlmwrite (@var{file}, @var{a})
-## @deftypefnx {Function File} {} dlmwrite (@var{file}, @var{a}, @var{delim}, @var{r}, @var{c})
-## @deftypefnx {Function File} {} dlmwrite (@var{file}, @var{a}, @var{key}, @var{val} @dots{})
-## @deftypefnx {Function File} {} dlmwrite (@var{file}, @var{a}, "-append", @dots{})
-## Write the matrix @var{a} to the named file using delimiters.
+## @deftypefn  {Function File} {} dlmwrite (@var{file}, @var{M})
+## @deftypefnx {Function File} {} dlmwrite (@var{file}, @var{M}, @var{delim}, @var{r}, @var{c})
+## @deftypefnx {Function File} {} dlmwrite (@var{file}, @var{M}, @var{key}, @var{val} @dots{})
+## @deftypefnx {Function File} {} dlmwrite (@var{file}, @var{M}, "-append", @dots{})
+## Write the matrix @var{M} to the named file using delimiters.
 ##
 ## @var{file} should be a file name or writable file ID given by @code{fopen}.
 ##
@@ -84,7 +84,7 @@
 ## * Significant modifications of the input arguements for additional
 ## functionality.
 
-function dlmwrite (file, a, varargin)
+function dlmwrite (file, M, varargin)
 
   if (nargin < 2 || ! ischar (file))
     print_usage ();
@@ -95,7 +95,7 @@
   r = 0;
   c = 0;
   newline = "\n";
-  if (ischar (a))
+  if (ischar (M))
     precision = "%c";
   else
     precision = "%.16g";
@@ -169,28 +169,28 @@
   else
     if (r > 0)
       fprintf (fid, "%s",
-               repmat ([repmat(delim, 1, c + columns(a)-1), newline], 1, r));
+               repmat ([repmat(delim, 1, c + columns(M)-1), newline], 1, r));
     endif
-    if (iscomplex (a))
+    if (iscomplex (M))
       cprecision = regexprep (precision, '^%([-.0-9])','%+$1');
       template = [precision, cprecision, "i", ...
                   repmat([delim, precision, cprecision, "i"], 1, ...
-                  columns(a) - 1), newline ];
+                  columns(M) - 1), newline ];
     else
-      template = [precision, repmat([delim, precision], 1, columns(a)-1),...
+      template = [precision, repmat([delim, precision], 1, columns(M)-1),...
                   newline];
     endif
     if (c > 0)
       template = [repmat(delim, 1, c), template];
     endif
-    if (iscomplex (a))
-      a = a.';
-      b = zeros (2*rows(a), columns (a));
-      b(1: 2 : end, :) = real (a);
-      b(2: 2 : end, :) = imag (a);
+    if (iscomplex (M))
+      M = M.';
+      b = zeros (2*rows(M), columns (M));
+      b(1: 2 : end, :) = real (M);
+      b(2: 2 : end, :) = imag (M);
       fprintf (fid, template, b);
     else
-      fprintf (fid, template, a.');
+      fprintf (fid, template, M.');
     endif
     if (! isscalar (file))
       fclose (fid);
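A minimal usage sketch of the renamed dlmwrite signature; the file name here is hypothetical:

  M = magic (3);
  dlmwrite ("example.csv", M, ",");        ## write M as comma-delimited text
  dlmwrite ("example.csv", M, "-append");  ## append another copy below it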
--- a/scripts/io/textscan.m
+++ b/scripts/io/textscan.m
@@ -17,15 +17,15 @@
 ## <http://www.gnu.org/licenses/>.
 
 ## -*- texinfo -*-
-## @deftypefn  {Function File} {@var{c} =} textscan (@var{fid}, @var{format})
-## @deftypefnx {Function File} {@var{c} =} textscan (@var{fid}, @var{format}, @
+## @deftypefn  {Function File} {@var{C} =} textscan (@var{fid}, @var{format})
+## @deftypefnx {Function File} {@var{C} =} textscan (@var{fid}, @var{format}, @
 ## @var{n})
-## @deftypefnx {Function File} {@var{c} =} textscan (@var{fid}, @var{format}, @
+## @deftypefnx {Function File} {@var{C} =} textscan (@var{fid}, @var{format}, @
 ## @var{param}, @var{value}, @dots{})
-## @deftypefnx {Function File} {@var{c} =} textscan (@var{fid}, @var{format}, @
+## @deftypefnx {Function File} {@var{C} =} textscan (@var{fid}, @var{format}, @
 ## @var{n}, @var{param}, @var{value}, @dots{})
-## @deftypefnx {Function File} {@var{a} =} textscan (@var{str}, @dots{})
-## @deftypefnx {Function File} {[@var{a}, @var{position}] =} textscan (@dots{})
+## @deftypefnx {Function File} {@var{C} =} textscan (@var{str}, @dots{})
+## @deftypefnx {Function File} {[@var{C}, @var{position}] =} textscan (@dots{})
 ## Read data from a text file.
 ##
 ## The file associated with @var{fid} is read and parsed according to @var{format}.
@@ -41,7 +41,7 @@
 ## The optional input, @var{n}, specifes the number of lines to be read from
 ## the file, associated with @var{fid}.
 ##
-## The output, @var{c}, is a cell array whose length is given by the number
+## The output, @var{C}, is a cell array whose length is given by the number
 ## of format specifiers.
 ##
 ## The second output, @var{position}, provides the position, in characters,
@@ -50,7 +50,7 @@
 ## @seealso{dlmread, fscanf, load, strread, textread}
 ## @end deftypefn
 
-function [c, p] = textscan (fid, format, varargin)
+function [C, p] = textscan (fid, format, varargin)
 
   ## Check input
   if (nargin < 1)
@@ -102,11 +102,11 @@
                    numel (idx_star = strfind (format, "%*"));
 
       ## Call strread to make it do the real work
-      c = cell (1, num_fields);
-      [c{:}] = strread (str, format, args{:});
+      C = cell (1, num_fields);
+      [C{:}] = strread (str, format, args{:});
 
       if (ischar (fid) && isfinite (nlines))
-        c = cellfun (@(x) x(1:nlines), c, "uniformoutput", false);
+        C = cellfun (@(x) x(1:nlines), C, "uniformoutput", false);
       endif
 
       if (nargout == 2)
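An illustrative sketch of the cell-array output C when reading from a string, assuming the strread-backed implementation above handles whitespace-delimited numeric fields as usual (numbers are arbitrary):

  C = textscan ("1 2\n3 4", "%f %f");
  C{1}    ## -> [1; 3]
  C{2}    ## -> [2; 4]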
--- a/scripts/linear-algebra/cond.m
+++ b/scripts/linear-algebra/cond.m
@@ -18,16 +18,16 @@
 ## <http://www.gnu.org/licenses/>.
 
 ## -*- texinfo -*-
-## @deftypefn  {Function File} {} cond (@var{a})
-## @deftypefnx {Function File} {} cond (@var{a},@var{p})
+## @deftypefn  {Function File} {} cond (@var{A})
+## @deftypefnx {Function File} {} cond (@var{A},@var{p})
 ## Compute the @var{p}-norm condition number of a matrix.  @code{cond
-## (@var{a})} is
+## (@var{A})} is
 ## defined as 
 ## @tex
-## $ {\parallel a \parallel_p * \parallel a^{-1} \parallel_p .} $
+## $ {\parallel A \parallel_p * \parallel A^{-1} \parallel_p .} $
 ## @end tex
 ## @ifnottex
-## @code{norm (@var{a}, @var{p}) * norm (inv (@var{a}), @var{p})}.
+## @code{norm (@var{A}, @var{p}) * norm (inv (@var{A}), @var{p})}.
 ## @end ifnottex
 ##
 ## By default @code{@var{p} = 2} is used which implies a (relatively slow)
@@ -39,10 +39,10 @@
 
 ## Author: jwe
 
-function retval = cond (a, p)
+function retval = cond (A, p)
 
   if (nargin && nargin < 3)
-    if (ndims (a) > 2)
+    if (ndims (A) > 2)
       error ("cond: only valid on 2-D objects");
     endif
 
@@ -51,13 +51,13 @@
     endif
 
     if (! ischar (p) && p == 2)
-      [nr, nc] = size (a);
+      [nr, nc] = size (A);
       if (nr == 0 || nc == 0)
         retval = 0.0;
-      elseif (any (any (isinf (a) | isnan (a))))
+      elseif (any (any (isinf (A) | isnan (A))))
         error ("cond: argument must not contain Inf or NaN values");
       else
-        sigma   = svd (a);
+        sigma   = svd (A);
         sigma_1 = sigma(1);
         sigma_n = sigma(end);
         if (sigma_1 == 0 || sigma_n == 0)
@@ -67,7 +67,7 @@
         endif
       endif
     else
-      retval = norm (a, p) * norm (inv (a), p);  
+      retval = norm (A, p) * norm (inv (A), p);  
     endif
   else
     print_usage ();
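The definition quoted above can be checked with a small illustrative matrix:

  A = [1, 0; 0, 10];
  cond (A)                          ## -> 10, the ratio of largest to smallest singular value
  norm (A, 1) * norm (inv (A), 1)   ## -> 10 as well for this diagonal example, i.e. cond (A, 1)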
--- a/scripts/linear-algebra/condest.m
+++ b/scripts/linear-algebra/condest.m
@@ -17,10 +17,10 @@
 ## <http://www.gnu.org/licenses/>.
 
 ## -*- texinfo -*-
-## @deftypefn  {Function File} {} condest (@var{a}) 
-## @deftypefnx {Function File} {} condest (@var{a}, @var{t}) 
+## @deftypefn  {Function File} {} condest (@var{A}) 
+## @deftypefnx {Function File} {} condest (@var{A}, @var{t}) 
 ## @deftypefnx {Function File} {[@var{est}, @var{v}] =} condest (@dots{}) 
-## @deftypefnx {Function File} {[@var{est}, @var{v}] =} condest (@var{a}, @var{solve}, @var{solve_t}, @var{t})
+## @deftypefnx {Function File} {[@var{est}, @var{v}] =} condest (@var{A}, @var{solve}, @var{solve_t}, @var{t})
 ## @deftypefnx {Function File} {[@var{est}, @var{v}] =} condest (@var{apply}, @var{apply_t}, @var{solve}, @var{solve_t}, @var{n}, @var{t})
 ##
 ## Estimate the 1-norm condition number of a matrix @var{A}
@@ -28,7 +28,7 @@
 ## If @var{t} exceeds 5, then only 5 test vectors are used.
 ##
 ## If the matrix is not explicit, e.g., when estimating the condition 
-## number of @var{a} given an LU factorization, @code{condest} uses the 
+## number of @var{A} given an LU factorization, @code{condest} uses the 
 ## following functions:
 ##
 ## @table @var
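A minimal sketch comparing the estimate with the exact 1-norm condition number (the matrix is illustrative):

  A = [4, 1; 2, 3];
  est = condest (A)    ## randomized 1-norm estimate
  cond (A, 1)          ## exact value for comparison; est should be close to it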
--- a/scripts/linear-algebra/expm.m
+++ b/scripts/linear-algebra/expm.m
@@ -17,7 +17,7 @@
 ## <http://www.gnu.org/licenses/>.
 
 ## -*- texinfo -*-
-## @deftypefn {Function File} {} expm (@var{a})
+## @deftypefn {Function File} {} expm (@var{A})
 ## Return the exponential of a matrix, defined as the infinite Taylor
 ## series
 ## @tex
@@ -28,7 +28,7 @@
 ## @ifnottex
 ## 
 ## @example
-## expm(a) = I + a + a^2/2! + a^3/3! + @dots{}
+## expm(A) = I + A + A^2/2! + A^3/3! + @dots{}
 ## @end example
 ## 
 ## @end ifnottex
@@ -39,14 +39,14 @@
 ## preconditioning (SIAM Journal on Numerical Analysis, 1977).  Diagonal
 ## Pad@'e approximations are rational polynomials of matrices
 ## @tex
-## $D_q(a)^{-1}N_q(a)$
+## $D_q(A)^{-1}N_q(A)$
 ## @end tex
 ## @ifnottex
 ## 
 ## @example
 ## @group
 ##      -1
-## D (a)   N (a)
+## D (A)   N (A)
 ## @end group
 ## @end example
 ## 
@@ -62,41 +62,41 @@
 ## (with the same preconditioning steps) may be desirable in lieu of the
 ## Pad@'e approximation when
 ## @tex
-## $D_q(a)$
+## $D_q(A)$
 ## @end tex
 ## @ifnottex
-## @code{Dq(a)}
+## @code{Dq(A)}
 ## @end ifnottex
 ## is ill-conditioned.
 ## @end deftypefn
 
-function r = expm (a)
+function r = expm (A)
 
-  if (! ismatrix (a) || ! issquare (a))
+  if (! ismatrix (A) || ! issquare (A))
     error ("expm: input must be a square matrix");
   endif
 
-  if (isscalar (a))
-    r = exp (a);
+  if (isscalar (A))
+    r = exp (A);
     return
-  elseif (strfind (typeinfo (a), "diagonal matrix"))
-    r = diag (exp (diag (a)));
+  elseif (strfind (typeinfo (A), "diagonal matrix"))
+    r = diag (exp (diag (A)));
     return
   endif
 
-  n = rows (a);
+  n = rows (A);
   ## Trace reduction.
-  a(a == -Inf) = -realmax;
-  trshift = trace (a) / length (a);
+  A(A == -Inf) = -realmax;
+  trshift = trace (A) / length (A);
   if (trshift > 0)
-    a -= trshift*eye (n);
+    A -= trshift*eye (n);
   endif
   ## Balancing.
-  [d, p, aa] = balance (a);
+  [d, p, aa] = balance (A);
   ## FIXME: can we both permute and scale at once? Or should we rather do
   ## this:
   ##
-  ##   [d, xx, aa] = balance (a, "noperm");
+  ##   [d, xx, aa] = balance (A, "noperm");
   ##   [xx, p, aa] = balance (aa, "noscal");
   [f, e] = log2 (norm (aa, "inf"));
   s = max (0, e);
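The series quoted in the docstring is exact for nilpotent matrices, which gives a compact illustrative check:

  N = [0, 1; 0, 0];    ## N^2 = 0, so expm (N) = I + N
  expm (N)             ## -> [1, 1; 0, 1]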
--- a/scripts/linear-algebra/logm.m
+++ b/scripts/linear-algebra/logm.m
@@ -19,14 +19,14 @@
 ## <http://www.gnu.org/licenses/>.
 
 ## -*- texinfo -*-
-## @deftypefn  {Function File} {@var{s} =} logm (@var{a})
-## @deftypefnx {Function File} {@var{s} =} logm (@var{a}, @var{opt_iters})
+## @deftypefn  {Function File} {@var{s} =} logm (@var{A})
+## @deftypefnx {Function File} {@var{s} =} logm (@var{A}, @var{opt_iters})
 ## @deftypefnx {Function File} {[@var{s}, @var{iters}] =} logm (@dots{})
-## Compute the matrix logarithm of the square matrix @var{a}.  The
+## Compute the matrix logarithm of the square matrix @var{A}.  The
 ## implementation utilizes a Pad@'e approximant and the identity
 ##
 ## @example
-## logm(@var{a}) = 2^k * logm(@var{a}^(1 / 2^k))
+## logm(@var{A}) = 2^k * logm(@var{A}^(1 / 2^k))
 ## @end example
 ##
 ## The optional argument @var{opt_iters} is the maximum number of square roots
@@ -39,19 +39,19 @@
 ##            (SIAM, 2008.)
 ##
 
-function [s, iters] = logm (a, opt_iters = 100)
+function [s, iters] = logm (A, opt_iters = 100)
  
   if (nargin == 0 || nargin > 2)
     print_usage ();
   endif
 
-  if (! issquare (a))
+  if (! issquare (A))
     error ("logm: argument must be a square matrix");
   endif
 
-  [u, s] = schur (a);
+  [u, s] = schur (A);
 
-  if (isreal (a))
+  if (isreal (A))
     [u, s] = rsf2csf (u, s);
   endif
 
@@ -102,19 +102,19 @@
 #######################################################################
 
 ##LOGM_PADE_PF   Evaluate Pade approximant to matrix log by partial fractions.
-##   Y = LOGM_PADE_PF(a,M) evaluates the [M/M] Pade approximation to
-##   LOG(EYE(SIZE(a))+a) using a partial fraction expansion.
+##   Y = LOGM_PADE_PF(A,M) evaluates the [M/M] Pade approximation to
+##   LOG(EYE(SIZE(A))+A) using a partial fraction expansion.
 
-function s = logm_pade_pf (a, m)
+function s = logm_pade_pf (A, m)
   [nodes, wts] = gauss_legendre (m);
   ## Convert from [-1,1] to [0,1].
   nodes = (nodes+1)/2;
   wts = wts/2;
 
-  n = length (a);
+  n = length (A);
   s = zeros (n);
   for j = 1:m
-    s += wts(j)*(a/(eye (n) + nodes(j)*a));
+    s += wts(j)*(A/(eye (n) + nodes(j)*A));
   endfor
 endfunction
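Since logm inverts expm up to rounding error for well-behaved inputs, a small illustrative round trip:

  A = [0, 1; 0, 0];
  logm (expm (A))      ## -> A, up to rounding error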
 
--- a/scripts/linear-algebra/onenormest.m
+++ b/scripts/linear-algebra/onenormest.m
@@ -17,11 +17,11 @@
 ## <http://www.gnu.org/licenses/>.
 
 ## -*- texinfo -*-
-## @deftypefn  {Function File} {[@var{est}, @var{v}, @var{w}, @var{iter}] =} onenormest (@var{a}, @var{t}) 
+## @deftypefn  {Function File} {[@var{est}, @var{v}, @var{w}, @var{iter}] =} onenormest (@var{A}, @var{t}) 
 ## @deftypefnx {Function File} {[@var{est}, @var{v}, @var{w}, @var{iter}] =} onenormest (@var{apply}, @var{apply_t}, @var{n}, @var{t})
 ##
 ## Apply Higham and Tisseur's randomized block 1-norm estimator to
-## matrix @var{a} using @var{t} test vectors.  If @var{t} exceeds 5, then
+## matrix @var{A} using @var{t} test vectors.  If @var{t} exceeds 5, then
 ## only 5 test vectors are used.
 ##
 ## If the matrix is not explicit, e.g., when estimating the norm of 
--- a/scripts/linear-algebra/qzhess.m
+++ b/scripts/linear-algebra/qzhess.m
@@ -18,11 +18,11 @@
 ## <http://www.gnu.org/licenses/>.
 
 ## -*- texinfo -*-
-## @deftypefn {Function File} {[@var{aa}, @var{bb}, @var{q}, @var{z}] =} qzhess (@var{a}, @var{b})
+## @deftypefn {Function File} {[@var{aa}, @var{bb}, @var{q}, @var{z}] =} qzhess (@var{A}, @var{B})
 ## Compute the Hessenberg-triangular decomposition of the matrix pencil
-## @code{(@var{a}, @var{b})}, returning
-## @code{@var{aa} = @var{q} * @var{a} * @var{z}},
-## @code{@var{bb} = @var{q} * @var{b} * @var{z}}, with @var{q} and @var{z}
+## @code{(@var{A}, @var{B})}, returning
+## @code{@var{aa} = @var{q} * @var{A} * @var{z}},
+## @code{@var{bb} = @var{q} * @var{B} * @var{z}}, with @var{q} and @var{z}
 ## orthogonal.  For example:
 ##
 ## @example
@@ -46,22 +46,22 @@
 ## Created: August 1993
 ## Adapted-By: jwe
 
-function [aa, bb, q, z] = qzhess (a, b)
+function [aa, bb, q, z] = qzhess (A, B)
 
   if (nargin != 2)
     print_usage ();
   endif
 
-  [na, ma] = size (a);
-  [nb, mb] = size (b);
+  [na, ma] = size (A);
+  [nb, mb] = size (B);
   if (na != ma || na != nb || nb != mb)
     error ("qzhess: incompatible dimensions");
   endif
 
   ## Reduce to hessenberg-triangular form.
 
-  [q, bb] = qr (b);
-  aa = q' * a;
+  [q, bb] = qr (B);
+  aa = q' * A;
   q = q';
   z = eye (na);
   for j = 1:(na-2)
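An illustrative check of the relations stated in the docstring (the matrices are arbitrary):

  A = [1, 2; 3, 4];  B = [5, 6; 7, 8];
  [aa, bb, q, z] = qzhess (A, B);
  norm (q * A * z - aa)    ## ~ 0
  norm (q * B * z - bb)    ## ~ 0, with bb upper triangular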
--- a/scripts/optimization/glpk.m
+++ b/scripts/optimization/glpk.m
@@ -17,7 +17,7 @@
 ## <http://www.gnu.org/licenses/>.
 
 ## -*- texinfo -*-
-## @deftypefn {Function File} {[@var{xopt}, @var{fmin}, @var{status}, @var{extra}] =} glpk (@var{c}, @var{a}, @var{b}, @var{lb}, @var{ub}, @var{ctype}, @var{vartype}, @var{sense}, @var{param})
+## @deftypefn {Function File} {[@var{xopt}, @var{fmin}, @var{status}, @var{extra}] =} glpk (@var{c}, @var{A}, @var{b}, @var{lb}, @var{ub}, @var{ctype}, @var{vartype}, @var{sense}, @var{param})
 ## Solve a linear program using the GNU @sc{glpk} library.  Given three
 ## arguments, @code{glpk} solves the following standard LP:
 ## @tex
@@ -85,7 +85,7 @@
 ## @item c
 ## A column array containing the objective function coefficients.
 ## 
-## @item a
+## @item A
 ## A matrix containing the constraints coefficients.
 ## 
 ## @item b
@@ -423,7 +423,7 @@
 ## @example
 ## @group
 ## c = [10, 6, 4]';
-## a = [ 1, 1, 1;
+## A = [ 1, 1, 1;
 ##      10, 4, 5;
 ##       2, 2, 6];
 ## b = [100, 600, 300]';
@@ -437,7 +437,7 @@
 ## param.itlim = 100;
 ## 
 ## [xmin, fmin, status, extra] = ...
-##    glpk (c, a, b, lb, ub, ctype, vartype, s, param);
+##    glpk (c, A, b, lb, ub, ctype, vartype, s, param);
 ## @end group
 ## @end example
 ## @end deftypefn
@@ -445,7 +445,7 @@
 ## Author: Nicolo' Giorgetti <giorgetti@dii.unisi.it>
 ## Adapted-by: jwe
 
-function [xopt, fmin, status, extra] = glpk (c, a, b, lb, ub, ctype, vartype, sense, param)
+function [xopt, fmin, status, extra] = glpk (c, A, b, lb, ub, ctype, vartype, sense, param)
 
   ## If there is no input output the version and syntax
   if (nargin < 3 || nargin > 9)
@@ -463,12 +463,12 @@
 
   ## 2) Matrix constraint
 
-  if (isempty (a))
+  if (isempty (A))
     error ("glpk: A cannot be an empty matrix");
     return;
   endif
-  [nc, nxa] = size(a);
-  if (! isreal (a) || nxa != nx)
+  [nc, nxa] = size(A);
+  if (! isreal (A) || nxa != nx)
     error ("glpk: A must be a real valued %d by %d matrix", nc, nx);
     return;
   endif
@@ -573,6 +573,6 @@
   endif
 
   [xopt, fmin, status, extra] = ...
-    __glpk__ (c, a, b, lb, ub, ctype, vartype, sense, param);
+    __glpk__ (c, A, b, lb, ub, ctype, vartype, sense, param);
 
 endfunction
--- a/scripts/optimization/glpkmex.m
+++ b/scripts/optimization/glpkmex.m
@@ -17,7 +17,7 @@
 ## <http://www.gnu.org/licenses/>.
 
 ## -*- texinfo -*-
-## @deftypefn {Function File} {[@var{xopt}, @var{fmin}, @var{status}, @var{extra}] =} glpkmex (@var{sense}, @var{c}, @var{a}, @var{b}, @var{ctype}, @var{lb}, @var{ub}, @var{vartype}, @var{param}, @var{lpsolver}, @var{save_pb})
+## @deftypefn {Function File} {[@var{xopt}, @var{fmin}, @var{status}, @var{extra}] =} glpkmex (@var{sense}, @var{c}, @var{A}, @var{b}, @var{ctype}, @var{lb}, @var{ub}, @var{vartype}, @var{param}, @var{lpsolver}, @var{save_pb})
 ## This function is provided for compatibility with the old @sc{matlab}
 ## interface to the GNU @sc{glpk} library.  For Octave code, you should use
 ## the @code{glpk} function instead.
--- a/scripts/polynomial/poly.m
+++ b/scripts/polynomial/poly.m
@@ -18,13 +18,13 @@
 ## <http://www.gnu.org/licenses/>.
 
 ## -*- texinfo -*-
-## @deftypefn  {Function File} {} poly (@var{a})
+## @deftypefn  {Function File} {} poly (@var{A})
 ## @deftypefnx {Function File} {} poly (@var{x})
-## If @var{a} is a square @math{N}-by-@math{N} matrix, @code{poly (@var{a})}
-## is the row vector of the coefficients of @code{det (z * eye (N) - a)},
-## the characteristic polynomial of @var{a}.  For example, 
-## the following code finds the eigenvalues of @var{a} which are the roots of 
-## @code{poly (@var{a})}.
+## If @var{A} is a square @math{N}-by-@math{N} matrix, @code{poly (@var{A})}
+## is the row vector of the coefficients of @code{det (z * eye (N) - A)},
+## the characteristic polynomial of @var{A}.  For example, 
+## the following code finds the eigenvalues of @var{A} which are the roots of 
+## @code{poly (@var{A})}.
 ##
 ## @example
 ## @group
--- a/scripts/signal/unwrap.m
+++ b/scripts/signal/unwrap.m
@@ -17,9 +17,9 @@
 ## <http://www.gnu.org/licenses/>.
 
 ## -*- texinfo -*-
-## @deftypefn  {Function File} {@var{b} =} unwrap (@var{a})
-## @deftypefnx {Function File} {@var{b} =} unwrap (@var{a}, @var{tol})
-## @deftypefnx {Function File} {@var{b} =} unwrap (@var{a}, @var{tol}, @var{dim})
+## @deftypefn  {Function File} {@var{b} =} unwrap (@var{x})
+## @deftypefnx {Function File} {@var{b} =} unwrap (@var{x}, @var{tol})
+## @deftypefnx {Function File} {@var{b} =} unwrap (@var{x}, @var{tol}, @var{dim})
 ## 
 ## Unwrap radian phases by adding multiples of 2*pi as appropriate to
 ## remove jumps greater than @var{tol}.  @var{tol} defaults to pi.
@@ -30,14 +30,14 @@
 
 ## Author: Bill Lash <lash@tellabs.com>
 
-function retval = unwrap (a, tol, dim)
+function retval = unwrap (x, tol, dim)
         
   if (nargin < 1 || nargin > 3)
     print_usage ();
   endif
 
-  if (!isnumeric(a))
-    error ("unwrap: A must be a numeric matrix or vector");
+  if (!isnumeric(x))
+    error ("unwrap: x must be a numeric matrix or vector");
   endif
 
   if (nargin < 2 || isempty (tol))
@@ -47,8 +47,8 @@
   ## Don't let anyone use a negative value for TOL.
   tol = abs (tol);
 
-  nd = ndims (a);
-  sz = size (a);
+  nd = ndims (x);
+  sz = size (x);
   if (nargin == 3)
     if (!(isscalar (dim) && dim == fix (dim))
         || !(1 <= dim && dim <= nd))
@@ -68,7 +68,7 @@
   ## Handle case where we are trying to unwrap a scalar, or only have
   ## one sample in the specified dimension.
   if (m == 1)       
-    retval = a;     
+    retval = x;     
     return;         
   endif
 
@@ -79,7 +79,7 @@
     idx{i} = 1:sz(i);
   endfor
   idx{dim} = [1,1:m-1];
-  d = a(idx{:}) - a;
+  d = x(idx{:}) - x;
 
   ## Find only the peaks, and multiply them by the range so that there
   ## are kronecker deltas at each wrap point multiplied by the range
@@ -91,7 +91,7 @@
 
   ## Now add the "steps" to the original data and put output in the
   ## same shape as originally.
-  retval = a + r;
+  retval = x + r;
 
 endfunction
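A one-line illustrative example of the renamed input: a jump larger than pi between consecutive samples is removed by subtracting 2*pi:

  unwrap ([0.1, 6.2])    ## -> [0.1, 6.2 - 2*pi], approximately [0.1, -0.0832]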
 
--- a/scripts/sparse/gplot.m
+++ b/scripts/sparse/gplot.m
@@ -17,9 +17,9 @@
 ## <http://www.gnu.org/licenses/>.
 
 ## -*- texinfo -*-
-## @deftypefn  {Function File} {} gplot (@var{a}, @var{xy})
-## @deftypefnx {Function File} {} gplot (@var{a}, @var{xy}, @var{line_style})
-## @deftypefnx {Function File} {[@var{x}, @var{y}] =} gplot (@var{a}, @var{xy})
+## @deftypefn  {Function File} {} gplot (@var{A}, @var{xy})
+## @deftypefnx {Function File} {} gplot (@var{A}, @var{xy}, @var{line_style})
+## @deftypefnx {Function File} {[@var{x}, @var{y}] =} gplot (@var{A}, @var{xy})
 ## Plot a graph defined by @var{A} and @var{xy} in the graph theory
 ## sense.  @var{A} is the adjacency matrix of the array to be plotted
 ## and @var{xy} is an @var{n}-by-2 matrix containing the coordinates of
@@ -32,7 +32,7 @@
 ## @seealso{treeplot, etreeplot, spy}
 ## @end deftypefn
 
-function [x, y] = gplot (a, xy, line_style)
+function [x, y] = gplot (A, xy, line_style)
 
   if (nargin < 2 || nargin > 3 || nargout > 2)
     print_usage ();
@@ -42,7 +42,7 @@
     line_style = "-";
   endif
 
-  [i, j] = find (a);
+  [i, j] = find (A);
   xcoord = [xy(i,1), xy(j,1), NaN(length(i),1) ]'(:);
   ycoord = [xy(i,2), xy(j,2), NaN(length(i),1) ]'(:);
 
--- a/scripts/sparse/pcg.m
+++ b/scripts/sparse/pcg.m
@@ -17,20 +17,20 @@
 ## <http://www.gnu.org/licenses/>.
 
 ## -*- texinfo -*-
-## @deftypefn  {Function File} {@var{x} =} pcg (@var{a}, @var{b}, @var{tol}, @var{maxit}, @var{m1}, @var{m2}, @var{x0}, @dots{})
+## @deftypefn  {Function File} {@var{x} =} pcg (@var{A}, @var{b}, @var{tol}, @var{maxit}, @var{m1}, @var{m2}, @var{x0}, @dots{})
 ## @deftypefnx {Function File} {[@var{x}, @var{flag}, @var{relres}, @var{iter}, @var{resvec}, @var{eigest}] =} pcg (@dots{})
 ##
-## Solves the linear system of equations @code{@var{a} * @var{x} =
+## Solves the linear system of equations @code{@var{A} * @var{x} =
 ## @var{b}} by means of the Preconditioned Conjugate Gradient iterative
 ## method.  The input arguments are
 ##
 ## @itemize
 ## @item
-## @var{a} can be either a square (preferably sparse) matrix or a
+## @var{A} can be either a square (preferably sparse) matrix or a
 ## function handle, inline function or string containing the name
-## of a function which computes @code{@var{a} * @var{x}}.  In principle
-## @var{a} should be symmetric and positive definite; if @code{pcg}
-## finds @var{a} to not be positive definite, you will get a warning
+## of a function which computes @code{@var{A} * @var{x}}.  In principle
+## @var{A} should be symmetric and positive definite; if @code{pcg}
+## finds @var{A} to not be positive definite, you will get a warning
 ## message and the @var{flag} output parameter will be set.
 ## 
 ## @item
@@ -38,8 +38,8 @@
 ## 
 ## @item
 ## @var{tol} is the required relative tolerance for the residual error,
-## @code{@var{b} - @var{a} * @var{x}}.  The iteration stops if @code{norm
-## (@var{b} - @var{a} * @var{x}) <= @var{tol} * norm (@var{b} - @var{a} *
+## @code{@var{b} - @var{A} * @var{x}}.  The iteration stops if @code{norm
+## (@var{b} - @var{A} * @var{x}) <= @var{tol} * norm (@var{b} - @var{A} *
 ## @var{x0})}.  If @var{tol} is empty or is omitted, the function sets
 ## @code{@var{tol} = 1e-6} by default.
 ## 
@@ -52,7 +52,7 @@
 ## @var{m} = @var{m1} * @var{m2} is the (left) preconditioning matrix, so that
 ## the iteration is (theoretically) equivalent to solving by @code{pcg}
 ## @code{@var{P} *
-## @var{x} = @var{m} \ @var{b}}, with @code{@var{P} = @var{m} \ @var{a}}.
+## @var{x} = @var{m} \ @var{b}}, with @code{@var{P} = @var{m} \ @var{A}}.
 ## Note that a proper choice of the preconditioner may dramatically
 ## improve the overall performance of the method.  Instead of matrices
 ## @var{m1} and @var{m2}, the user may pass two functions which return 
@@ -68,14 +68,14 @@
 ## @end itemize
 ## 
 ## The arguments which follow @var{x0} are treated as parameters, and
-## passed in a proper way to any of the functions (@var{a} or @var{m})
+## passed in a proper way to any of the functions (@var{A} or @var{m})
 ## which are passed to @code{pcg}.  See the examples below for further
 ## details.  The output arguments are
 ##
 ## @itemize
 ## @item
 ## @var{x} is the computed approximation to the solution of
-## @code{@var{a} * @var{x} = @var{b}}.
+## @code{@var{A} * @var{x} = @var{b}}.
 ## 
 ## @item
 ## @var{flag} reports on the convergence.  @code{@var{flag} = 0} means
@@ -99,22 +99,22 @@
 ## 1, 2, @dots{}, @var{iter}+1}.  The preconditioned residual norm
 ## is defined as
 ## @code{norm (@var{r}) ^ 2 = @var{r}' * (@var{m} \ @var{r})} where
-## @code{@var{r} = @var{b} - @var{a} * @var{x}}, see also the
+## @code{@var{r} = @var{b} - @var{A} * @var{x}}, see also the
 ## description of @var{m}.  If @var{eigest} is not required, only
 ## @code{@var{resvec} (:,1)} is returned.
 ## 
 ## @item
 ## @var{eigest} returns the estimate for the smallest @code{@var{eigest}
 ## (1)} and largest @code{@var{eigest} (2)} eigenvalues of the
-## preconditioned matrix @code{@var{P} = @var{m} \ @var{a}}.  In 
+## preconditioned matrix @code{@var{P} = @var{m} \ @var{A}}.  In 
 ## particular, if no preconditioning is used, the estimates for the
-## extreme eigenvalues of @var{a} are returned.  @code{@var{eigest} (1)}
+## extreme eigenvalues of @var{A} are returned.  @code{@var{eigest} (1)}
 ## is an overestimate and @code{@var{eigest} (2)} is an underestimate, 
 ## so that @code{@var{eigest} (2) / @var{eigest} (1)} is a lower bound
 ## for @code{cond (@var{P}, 2)}, which nevertheless in the limit should
 ## theoretically be equal to the actual value of the condition number. 
 ## The method which computes @var{eigest} works only for symmetric positive
-## definite @var{a} and @var{m}, and the user is responsible for
+## definite @var{A} and @var{m}, and the user is responsible for
 ## verifying this assumption. 
 ## @end itemize
 ## 
@@ -124,9 +124,9 @@
 ## @example
 ## @group
 ##      n = 10; 
-##      a = diag (sparse (1:n));
+##      A = diag (sparse (1:n));
 ##      b = rand (n, 1);
-##      [l, u, p, q] = luinc (a, 1.e-3);
+##      [l, u, p, q] = luinc (A, 1.e-3);
 ## @end group
 ## @end example
 ## 
@@ -137,7 +137,7 @@
 ## @end example
 ## 
 ## @sc{Example 2:} @code{pcg} with a function which computes
-## @code{@var{a} * @var{x}}
+## @code{@var{A} * @var{x}}
 ## 
 ## @example
 ## @group
@@ -152,7 +152,7 @@
 ## @sc{Example 3:} @code{pcg} with a preconditioner: @var{l} * @var{u}
 ##
 ## @example
-## x = pcg (a, b, 1.e-6, 500, l*u);
+## x = pcg (A, b, 1.e-6, 500, l*u);
 ## @end example
 ##
 ## @sc{Example 4:} @code{pcg} with a preconditioner: @var{l} * @var{u}.
@@ -160,12 +160,12 @@
 ## are easier to invert
 ##
 ## @example
-## x = pcg (a, b, 1.e-6, 500, l, u);
+## x = pcg (A, b, 1.e-6, 500, l, u);
 ## @end example
 ##
 ## @sc{Example 5:} Preconditioned iteration, with full diagnostics.  The
 ## preconditioner (quite strange, because even the original matrix
-## @var{a} is trivial) is defined as a function
+## @var{A} is trivial) is defined as a function
 ## 
 ## @example
 ## @group
@@ -176,7 +176,7 @@
 ##   endfunction
 ## 
 ##   [x, flag, relres, iter, resvec, eigest] = ...
-##                      pcg (a, b, [], [], "apply_m");
+##                      pcg (A, b, [], [], "apply_m");
 ##   semilogy (1:iter+1, resvec);
 ## @end group
 ## @end example
@@ -218,7 +218,7 @@
 ##    - Add the ability to provide the pre-conditioner as two separate
 ## matrices
 
-function [x, flag, relres, iter, resvec, eigest] = pcg (a, b, tol, maxit, m1, m2, x0, varargin)
+function [x, flag, relres, iter, resvec, eigest] = pcg (A, b, tol, maxit, m1, m2, x0, varargin)
 
   ## M = M1*M2
 
@@ -261,12 +261,12 @@
 
   p = zeros (size (b));
   oldtau = 1; 
-  if (isnumeric (a))
+  if (isnumeric (A))
     ## A is a matrix.
-    r = b - a*x; 
+    r = b - A*x; 
   else
     ## A should be a function.
-    r = b - feval (a, x, varargin{:});
+    r = b - feval (A, x, varargin{:});
   endif
 
   resvec(1,1) = norm (r);
@@ -297,12 +297,12 @@
     beta = tau / oldtau;
     oldtau = tau;
     p = z + beta * p;
-    if (isnumeric (a))
+    if (isnumeric (A))
       ## A is a matrix.
-      w = a * p;
+      w = A * p;
     else
       ## A should be a function.
-      w = feval (a, p, varargin{:});
+      w = feval (A, p, varargin{:});
     endif
     ## Needed only for eigest.
     oldalpha = alpha;
--- a/scripts/sparse/pcr.m
+++ b/scripts/sparse/pcr.m
@@ -17,20 +17,20 @@
 ## <http://www.gnu.org/licenses/>.
 
 ## -*- texinfo -*-
-## @deftypefn  {Function File} {@var{x} =} pcr (@var{a}, @var{b}, @var{tol}, @var{maxit}, @var{m}, @var{x0}, @dots{})
+## @deftypefn  {Function File} {@var{x} =} pcr (@var{A}, @var{b}, @var{tol}, @var{maxit}, @var{m}, @var{x0}, @dots{})
 ## @deftypefnx {Function File} {[@var{x}, @var{flag}, @var{relres}, @var{iter}, @var{resvec}] =} pcr (@dots{})
 ## 
-## Solves the linear system of equations @code{@var{a} * @var{x} =
+## Solves the linear system of equations @code{@var{A} * @var{x} =
 ## @var{b}} by means of the Preconditioned Conjugate Residuals iterative
 ## method.  The input arguments are
 ##
 ## @itemize
 ## @item
-## @var{a} can be either a square (preferably sparse) matrix or a
+## @var{A} can be either a square (preferably sparse) matrix or a
 ## function handle, inline function or string containing the name
-## of a function which computes @code{@var{a} * @var{x}}.  In principle
-## @var{a} should be symmetric and non-singular; if @code{pcr}
-## finds @var{a} to be numerically singular, you will get a warning
+## of a function which computes @code{@var{A} * @var{x}}.  In principle
+## @var{A} should be symmetric and non-singular; if @code{pcr}
+## finds @var{A} to be numerically singular, you will get a warning
 ## message and the @var{flag} output parameter will be set.
 ## 
 ## @item
@@ -38,8 +38,8 @@
 ## 
 ## @item
 ## @var{tol} is the required relative tolerance for the residual error,
-## @code{@var{b} - @var{a} * @var{x}}.  The iteration stops if @code{norm
-## (@var{b} - @var{a} * @var{x}) <= @var{tol} * norm (@var{b} - @var{a} *
+## @code{@var{b} - @var{A} * @var{x}}.  The iteration stops if @code{norm
+## (@var{b} - @var{A} * @var{x}) <= @var{tol} * norm (@var{b} - @var{A} *
 ## @var{x0})}.  If @var{tol} is empty or is omitted, the function sets
 ## @code{@var{tol} = 1e-6} by default.
 ## 
@@ -51,7 +51,7 @@
 ## @item
 ## @var{m} is the (left) preconditioning matrix, so that the iteration is
 ## (theoretically) equivalent to solving by @code{pcr} @code{@var{P} *
-## @var{x} = @var{m} \ @var{b}}, with @code{@var{P} = @var{m} \ @var{a}}.
+## @var{x} = @var{m} \ @var{b}}, with @code{@var{P} = @var{m} \ @var{A}}.
 ## Note that a proper choice of the preconditioner may dramatically
 ## improve the overall performance of the method.  Instead of matrix
 ## @var{m}, the user may pass a function which returns the results of 
@@ -65,14 +65,14 @@
 ## @end itemize
 ## 
 ## The arguments which follow @var{x0} are treated as parameters, and
-## passed in a proper way to any of the functions (@var{a} or @var{m})
+## passed in a proper way to any of the functions (@var{A} or @var{m})
 ## which are passed to @code{pcr}.  See the examples below for further
 ## details.  The output arguments are
 ##
 ## @itemize
 ## @item
 ## @var{x} is the computed approximation to the solution of
-## @code{@var{a} * @var{x} = @var{b}}.
+## @code{@var{A} * @var{x} = @var{b}}.
 ## 
 ## @item
 ## @var{flag} reports on the convergence.  @code{@var{flag} = 0} means
@@ -101,7 +101,7 @@
 ## @example
 ## @group
 ##      n = 10; 
-##      a = sparse (diag (1:n));
+##      A = sparse (diag (1:n));
 ##      b = rand (N, 1);
 ## @end group
 ## @end example
@@ -113,7 +113,7 @@
 ## @end example
 ## 
 ## @sc{Example 2:} @code{pcr} with a function which computes
-## @code{@var{a} * @var{x}}.
+## @code{@var{A} * @var{x}}.
 ##
 ## @example
 ## @group
@@ -127,7 +127,7 @@
 ## 
 ## @sc{Example 3:}  Preconditioned iteration, with full diagnostics.  The
 ## preconditioner (quite strange, because even the original matrix
-## @var{a} is trivial) is defined as a function
+## @var{A} is trivial) is defined as a function
 ## 
 ## @example
 ## @group
@@ -138,7 +138,7 @@
 ##   endfunction
 ## 
 ##   [x, flag, relres, iter, resvec] = ...
-##                      pcr (a, b, [], [], "apply_m")
+##                      pcr (A, b, [], [], "apply_m")
 ##   semilogy([1:iter+1], resvec);
 ## @end group
 ## @end example
@@ -154,7 +154,7 @@
 ##   endfunction
 ## 
 ##   [x, flag, relres, iter, resvec] = ...
-##                      pcr (a, b, [], [], "apply_m"', [], 3)
+##                      pcr (A, b, [], [], "apply_m"', [], 3)
 ## @end group
 ## @end example
 ## 
@@ -168,7 +168,7 @@
 
 ## Author: Piotr Krzyzanowski <piotr.krzyzanowski@mimuw.edu.pl>
 
-function [x, flag, relres, iter, resvec] = pcr (a, b, tol, maxit, m, x0, varargin)
+function [x, flag, relres, iter, resvec] = pcr (A, b, tol, maxit, m, x0, varargin)
 
   breakdown = false;
 
@@ -197,10 +197,10 @@
   endif
 
   ##  init
-  if (isnumeric (a))            # is A a matrix?
-    r = b - a*x;
+  if (isnumeric (A))            # is A a matrix?
+    r = b - A*x;
   else                          # then A should be a function!
-    r = b - feval (a, x, varargin{:});
+    r = b - feval (A, x, varargin{:});
   endif
 
   if (isnumeric (m))            # is M a matrix?
@@ -218,10 +218,10 @@
   b_bot_old = 1;
   q_old = p_old = s_old = zeros (size (x));
 
-  if (isnumeric (a))            # is A a matrix?
-    q = a * p;
+  if (isnumeric (A))            # is A a matrix?
+    q = A * p;
   else                          # then A should be a function!
-    q = feval (a, p, varargin{:});
+    q = feval (A, p, varargin{:});
   endif
         
   resvec(1) = abs (norm (r)); 
@@ -250,10 +250,10 @@
     x += lambda*p;
     r -= lambda*q;
         
-    if (isnumeric(a))           # is A a matrix?
-      t = a*s;
+    if (isnumeric(A))           # is A a matrix?
+      t = A*s;
     else                        # then A should be a function!
-      t = feval (a, s, varargin{:});
+      t = feval (A, s, varargin{:});
     endif
         
     alpha0 = (t'*s) / b_bot;
--- a/scripts/sparse/spaugment.m
+++ b/scripts/sparse/spaugment.m
@@ -17,19 +17,19 @@
 ## <http://www.gnu.org/licenses/>.
 
 ## -*- texinfo -*-
-## @deftypefn {Function File} {@var{s} =} spaugment (@var{a}, @var{c})
-## Creates the augmented matrix of @var{a}.  This is given by
+## @deftypefn {Function File} {@var{s} =} spaugment (@var{A}, @var{c})
+## Creates the augmented matrix of @var{A}.  This is given by
 ##
 ## @example
 ## @group
-## [@var{c} * eye(@var{m}, @var{m}),@var{a}; @var{a}', zeros(@var{n},
+## [@var{c} * eye(@var{m}, @var{m}),@var{A}; @var{A}', zeros(@var{n},
 ## @var{n})]
 ## @end group
 ## @end example
 ##
 ## @noindent
 ## This is related to the least squares solution of 
-## @code{@var{a} \\ @var{b}}, by
+## @code{@var{A} \\ @var{b}}, by
 ## 
 ## @example
 ## @group
@@ -42,7 +42,7 @@
 ## where @var{r} is the residual error
 ##
 ## @example
-## @var{r} = @var{b} - @var{a} * @var{x}
+## @var{r} = @var{b} - @var{A} * @var{x}
 ## @end example
 ##
 ## As the matrix @var{s} is symmetric indefinite it can be factorized
@@ -54,10 +54,10 @@
 ## @example
 ## @group
 ## m = 11; n = 10; mn = max(m ,n);
-## a = spdiags ([ones(mn,1), 10*ones(mn,1), -ones(mn,1)],
+## A = spdiags ([ones(mn,1), 10*ones(mn,1), -ones(mn,1)],
 ##              [-1, 0, 1], m, n);
-## x0 = a \ ones (m,1);
-## s = spaugment (a);
+## x0 = A \ ones (m,1);
+## s = spaugment (A);
 ## [L, U, P, Q] = lu (s);
 ## x1 = Q * (U \ (L \ (P  * [ones(m,1); zeros(n,1)])));
 ## x1 = x1(end - n + 1 : end);
@@ -72,30 +72,30 @@
 ## using the @code{spaugment} function.
 ## @end deftypefn
 
-function s = spaugment (a, c)
+function s = spaugment (A, c)
   if (nargin < 2)
-    if (issparse (a))
-      c = max (max (abs (a))) / 1000;
+    if (issparse (A))
+      c = max (max (abs (A))) / 1000;
     else
-      if (ndims (a) != 2)
+      if (ndims (A) != 2)
         error ("spaugment: expecting 2-dimenisional matrix")
       else
-        c = max (abs (a(:))) / 1000;
+        c = max (abs (A(:))) / 1000;
       endif
     endif
   elseif (!isscalar (c))
     error ("spaugment: c must be a scalar");
   endif
 
-  [m, n] = size (a);
-  s = [ c * speye(m, m), a; a', sparse(n, n)];
+  [m, n] = size (A);
+  s = [ c * speye(m, m), A; A', sparse(n, n)];
 endfunction
 
 %!testif HAVE_UMFPACK
 %! m = 11; n = 10; mn = max(m ,n);
-%! a = spdiags ([ones(mn,1), 10*ones(mn,1), -ones(mn,1)],[-1,0,1], m, n);
-%! x0 = a \ ones (m,1);
-%! s = spaugment (a);
+%! A = spdiags ([ones(mn,1), 10*ones(mn,1), -ones(mn,1)],[-1,0,1], m, n);
+%! x0 = A \ ones (m,1);
+%! s = spaugment (A);
 %! [L, U, P, Q] = lu (s);
 %! x1 = Q * (U \ (L \ (P  * [ones(m,1); zeros(n,1)])));
 %! x1 = x1(end - n + 1 : end);
--- a/scripts/sparse/spdiags.m
+++ b/scripts/sparse/spdiags.m
@@ -17,9 +17,9 @@
 ## <http://www.gnu.org/licenses/>.
 
 ## -*- texinfo -*-
-## @deftypefn  {Function File} {[@var{b}, @var{c}] =} spdiags (@var{a})
-## @deftypefnx {Function File} {@var{b} =} spdiags (@var{a}, @var{c})
-## @deftypefnx {Function File} {@var{b} =} spdiags (@var{v}, @var{c}, @var{a})
+## @deftypefn  {Function File} {[@var{b}, @var{c}] =} spdiags (@var{A})
+## @deftypefnx {Function File} {@var{b} =} spdiags (@var{A}, @var{c})
+## @deftypefnx {Function File} {@var{b} =} spdiags (@var{v}, @var{c}, @var{A})
 ## @deftypefnx {Function File} {@var{b} =} spdiags (@var{v}, @var{c}, @var{m}, @var{n})
 ## A generalization of the function @code{diag}.  Called with a single
 ## input argument, the non-zero diagonals @var{c} of @var{A} are extracted.
@@ -29,7 +29,7 @@
 ## The other two forms of @code{spdiags} modify the input matrix by
 ## replacing the diagonals.  They use the columns of @var{v} to replace
 ## the columns represented by the vector @var{c}.  If the sparse matrix
-## @var{a} is defined then the diagonals of this matrix are replaced.
+## @var{A} is defined then the diagonals of this matrix are replaced.
 ## Otherwise a matrix of @var{m} by @var{n} is created with the
 ## diagonals given by @var{v}.
 ##
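An illustrative sketch of the construction form spdiags (v, c, m, n), building a tridiagonal matrix:

  e = ones (5, 1);
  T = spdiags ([e, -2*e, e], [-1, 0, 1], 5, 5);
  full (T)    ## -2 on the main diagonal, 1 on the sub- and super-diagonals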
--- a/scripts/sparse/svds.m
+++ b/scripts/sparse/svds.m
@@ -17,31 +17,31 @@
 ## <http://www.gnu.org/licenses/>.
 
 ## -*- texinfo -*-
-## @deftypefn  {Function File} {@var{s} =} svds (@var{a})
-## @deftypefnx {Function File} {@var{s} =} svds (@var{a}, @var{k})
-## @deftypefnx {Function File} {@var{s} =} svds (@var{a}, @var{k}, @var{sigma})
-## @deftypefnx {Function File} {@var{s} =} svds (@var{a}, @var{k}, @var{sigma}, @var{opts})
+## @deftypefn  {Function File} {@var{s} =} svds (@var{A})
+## @deftypefnx {Function File} {@var{s} =} svds (@var{A}, @var{k})
+## @deftypefnx {Function File} {@var{s} =} svds (@var{A}, @var{k}, @var{sigma})
+## @deftypefnx {Function File} {@var{s} =} svds (@var{A}, @var{k}, @var{sigma}, @var{opts})
 ## @deftypefnx {Function File} {[@var{u}, @var{s}, @var{v}] =} svds (@dots{})
 ## @deftypefnx {Function File} {[@var{u}, @var{s}, @var{v}, @var{flag}] =} svds (@dots{})
 ##
-## Find a few singular values of the matrix @var{a}.  The singular values
+## Find a few singular values of the matrix @var{A}.  The singular values
 ## are calculated using 
 ##
 ## @example
 ## @group
-## [@var{m}, @var{n}] = size(@var{a})
-## @var{s} = eigs([sparse(@var{m}, @var{m}), @var{a};
-##                     @var{a}', sparse(@var{n}, @var{n})])
+## [@var{m}, @var{n}] = size(@var{A})
+## @var{s} = eigs([sparse(@var{m}, @var{m}), @var{A};
+##                     @var{A}', sparse(@var{n}, @var{n})])
 ## @end group
 ## @end example
 ##
 ## The eigenvalues returned by @code{eigs} correspond to the singular values 
-## of @var{a}.  The number of singular values to calculate is given by @var{k}
+## of @var{A}.  The number of singular values to calculate is given by @var{k}
 ## and defaults to 6.
 ## 
 ## The argument @var{sigma} specifies which singular values to find.  When 
 ## @var{sigma} is the string 'L', the default, the largest singular values of 
-## @var{a} are found.  Otherwise, @var{sigma} must be a real scalar and the 
+## @var{A} are found.  Otherwise, @var{sigma} must be a real scalar and the 
 ## singular values closest to @var{sigma} are found.  As a corollary, 
 ## @code{@var{sigma} = 0} finds the smallest singular values.  Note that for 
 ## relatively small values of @var{sigma}, there is a chance that the requested
@@ -66,31 +66,31 @@
 ## @end table
 ##
 ## If more than one output is requested then @code{svds} will return an
-## approximation of the singular value decomposition of @var{a}
+## approximation of the singular value decomposition of @var{A}
 ##
 ## @example
-## @var{a}_approx = @var{u}*@var{s}*@var{v}'
+## @var{A}_approx = @var{u}*@var{s}*@var{v}'
 ## @end example
 ##
 ## @noindent
-## where @var{a}_approx is a matrix of size @var{a} but only rank @var{k}.
+## where @var{A}_approx is a matrix of size @var{A} but only rank @var{k}.
 ## 
 ## @var{flag} returns 0 if the algorithm has succesfully converged, and 1 
 ## otherwise.  The test for convergence is
 ##
 ## @example
 ## @group
-## norm (@var{a}*@var{v} - @var{u}*@var{s}, 1) <= @var{tol} * norm (@var{a}, 1)
+## norm (@var{A}*@var{v} - @var{u}*@var{s}, 1) <= @var{tol} * norm (@var{A}, 1)
 ## @end group
 ## @end example
 ##
 ## @code{svds} is best for finding only a few singular values from a large
-## sparse matrix.  Otherwise, @code{svd (full(@var{a}))} will likely be more
+## sparse matrix.  Otherwise, @code{svd (full(@var{A}))} will likely be more
 ## efficient.
 ## @end deftypefn
 ## @seealso{svd, eigs}
 
-function [u, s, v, flag] = svds (a, k, sigma, opts)
+function [u, s, v, flag] = svds (A, k, sigma, opts)
 
   persistent root2 = sqrt (2);
 
@@ -98,7 +98,7 @@
     print_usage ();
   endif
 
-  if (ndims(a) > 2)
+  if (ndims(A) > 2)
     error ("svds: A must be a 2D matrix")
   endif
 
@@ -116,14 +116,14 @@
       opts.tol = opts.tol / root2;
     endif
     if (isfield (opts, "v0"))
-      if (!isvector (opts.v0) || (length (opts.v0) != sum (size (a))))
+      if (!isvector (opts.v0) || (length (opts.v0) != sum (size (A))))
         error ("svds: OPTS.v0 must be a vector with rows(A)+columns(A) entries");
       endif
     endif
   endif
 
   if (nargin < 3 || strcmp (sigma, "L"))
-    if (isreal (a))
+    if (isreal (A))
       sigma = "LA";
     else
       sigma = "LR";
@@ -136,8 +136,8 @@
     error ("svds: SIGMA must be a positive real value or the string 'L'");
   endif
 
-  [m, n] = size (a);
-  max_a = max (abs (a(:)));
+  [m, n] = size (A);
+  max_a = max (abs (A(:)));
   if (max_a == 0)
     s = zeros (k, 1);  # special case of zero matrix
   else
@@ -148,7 +148,7 @@
     endif
 
     ## Scale everything by the 1-norm to make things more stable.
-    b = a / max_a;
+    b = A / max_a;
     b_opts = opts;
     b_opts.tol = opts.tol / max_a;
     b_sigma = sigma;
@@ -181,7 +181,7 @@
     if (ischar (sigma))
       norma = max (s);
     else
-      norma = normest (a);
+      norma = normest (A);
     endif
     ## We wish to exclude all eigenvalues that are less than zero as these
     ## are artifacts of the way the matrix passed to eigs is formed. There 
@@ -233,17 +233,17 @@
     endif
 
     if (nargout > 3)
-      flag = norm (a*v - u*s, 1) > root2 * opts.tol * norm (a, 1);
+      flag = norm (A*v - u*s, 1) > root2 * opts.tol * norm (A, 1);
     endif
   endif
 
 endfunction
 
-%!shared n, k, a, u, s, v, opts
+%!shared n, k, A, u, s, v, opts
 %! n = 100;
 %! k = 7;
-%! a = sparse([3:n,1:n,1:(n-2)],[1:(n-2),1:n,3:n],[ones(1,n-2),0.4*n*ones(1,n),ones(1,n-2)]);
-%! [u,s,v] = svd(full(a));
+%! A = sparse([3:n,1:n,1:(n-2)],[1:(n-2),1:n,3:n],[ones(1,n-2),0.4*n*ones(1,n),ones(1,n-2)]);
+%! [u,s,v] = svd(full(A));
 %! s = diag(s);
 %! [~, idx] = sort(abs(s));
 %! s = s(idx);
@@ -254,12 +254,12 @@
 %! opts.v0 = rand (2*n,1); % Initialize eigs ARPACK starting vector 
 %!                         % to guarantee reproducible results
 %!testif HAVE_ARPACK
-%! [u2,s2,v2,flag] = svds(a,k);
+%! [u2,s2,v2,flag] = svds(A,k);
 %! s2 = diag(s2);
 %! assert(flag,!1);
 %! assert(s2, s(end:-1:end-k+1), 1e-10); 
 %!testif HAVE_ARPACK
-%! [u2,s2,v2,flag] = svds(a,k,0,opts);
+%! [u2,s2,v2,flag] = svds(A,k,0,opts);
 %! s2 = diag(s2);
 %! assert(flag,!1);
 %! assert(s2, s(k:-1:1), 1e-10); 
@@ -267,7 +267,7 @@
 %! idx = floor(n/2);
 %! % Don't put sigma right on a singular value or there are convergence issues 
 %! sigma = 0.99*s(idx) + 0.01*s(idx+1); 
-%! [u2,s2,v2,flag] = svds(a,k,sigma,opts);
+%! [u2,s2,v2,flag] = svds(A,k,sigma,opts);
 %! s2 = diag(s2);
 %! assert(flag,!1);
 %! assert(s2, s((idx+floor(k/2)):-1:(idx-floor(k/2))), 1e-10);