## Copyright (C) 2004 Piotr Krzyzanowski <piotr.krzyzanowski@mimuw.edu.pl>
##
## This file is part of Octave.
##
## Octave is free software; you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2, or (at your option)
## any later version.
##
## Octave is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Octave; see the file COPYING.  If not, write to the Free
## Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
## 02110-1301, USA.

## -*- texinfo -*-
## @deftypefn {Function File} {@var{x} =} pcg (@var{a}, @var{b}, @var{tol}, @var{maxit}, @var{m}, @var{x0}, @dots{})
## @deftypefnx {Function File} {[@var{x}, @var{flag}, @var{relres}, @var{iter}, @var{resvec}, @var{eigest}] =} pcg (@dots{})
##
## Solves the linear system of equations @code{@var{a} * @var{x} =
## @var{b}} by means of the Preconditioned Conjugate Gradient iterative
## method.  The input arguments are
##
## @itemize
## @item
## @var{a} can be either a square (preferably sparse) matrix or a
## function handle, inline function or string containing the name
## of a function which computes @code{@var{a} * @var{x}}.  In principle
## @var{a} should be symmetric and positive definite; if @code{pcg}
## finds @var{a} not to be positive definite, you will get a warning
## message and the @var{flag} output parameter will be set.
##
## @item
## @var{b} is the right hand side vector.
##
## @item
## @var{tol} is the required relative tolerance for the residual error,
## @code{@var{b} - @var{a} * @var{x}}.  The iteration stops if @code{norm
## (@var{b} - @var{a} * @var{x}) <= @var{tol} * norm (@var{b} - @var{a} *
## @var{x0})}.  If @var{tol} is empty or is omitted, the function sets
## @code{@var{tol} = 1e-6} by default.
##
## @item
## @var{maxit} is the maximum allowable number of iterations; if
## @code{[]} is supplied for @var{maxit}, or @code{pcg} is called with
## fewer arguments, a default value equal to
## @code{min (size (@var{b}, 1), 20)} is used.
##
## @item
## @var{m} is the (left) preconditioning matrix, so that the iteration is
## (theoretically) equivalent to solving by @code{pcg} @code{@var{P} *
## @var{x} = @var{m} \ @var{b}}, with @code{@var{P} = @var{m} \ @var{a}}.
## Note that a proper choice of the preconditioner may dramatically
## improve the overall performance of the method.  Instead of matrix
## @var{m}, the user may pass a function which returns the results of
## applying the inverse of @var{m} to a vector (usually this is the
## preferred way of using the preconditioner).  If @code{[]} is supplied
## for @var{m}, or @var{m} is omitted, no preconditioning is applied.
##
## @item
## @var{x0} is the initial guess.  If @var{x0} is empty or omitted, the
## function sets @var{x0} to a zero vector by default.  A call spelling
## out all of these arguments is sketched right after this list.
## @end itemize
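##
## As an illustration of how these arguments fit together, a call with
## every argument given explicitly might look as follows (only a
## sketch: it assumes a matrix @code{A}, a right hand side @code{b} and
## a preconditioning matrix @code{M} have already been set up):
##
## @example
## @group
## tol = 1e-10;  maxit = 200;
## x0  = zeros (size (b));
## [x, flag] = pcg (A, b, tol, maxit, M, x0);
## @end group
## @end example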
##
## The arguments which follow @var{x0} are treated as parameters, and
## passed in a proper way to any of the functions (@var{a} or @var{m})
## which are passed to @code{pcg}.  See the examples below for further
## details.  The output arguments are
##
## @itemize
## @item
## @var{x} is the computed approximation to the solution of
## @code{@var{a} * @var{x} = @var{b}}.
##
## @item
## @var{flag} reports on the convergence.  @code{@var{flag} = 0} means
## the solution converged and the tolerance criterion given by @var{tol}
## is satisfied.  @code{@var{flag} = 1} means that the @var{maxit} limit
## for the iteration count was reached.  @code{@var{flag} = 3} reports that
## the (preconditioned) matrix was found not positive definite.
##
## @item
## @var{relres} is the ratio of the final residual to its initial value,
## measured in the Euclidean norm.
##
## @item
## @var{iter} is the actual number of iterations performed.
##
## @item
## @var{resvec} describes the convergence history of the method.
## @code{@var{resvec} (i,1)} is the Euclidean norm of the residual, and
## @code{@var{resvec} (i,2)} is the preconditioned residual norm,
## after the (@var{i}-1)-th iteration, @code{@var{i} =
## 1,2,...@var{iter}+1}.  The preconditioned residual norm is defined as
## @code{norm (@var{r}) ^ 2 = @var{r}' * (@var{m} \ @var{r})} where
## @code{@var{r} = @var{b} - @var{a} * @var{x}}; see also the
## description of @var{m}.  If @var{eigest} is not required, only
## @code{@var{resvec} (:,1)} is returned.  (A quick check of these
## quantities is sketched right after this list.)
##
## @item
## @var{eigest} returns the estimate for the smallest @code{@var{eigest}
## (1)} and largest @code{@var{eigest} (2)} eigenvalues of the
## preconditioned matrix @code{@var{P} = @var{m} \ @var{a}}.  In
## particular, if no preconditioning is used, the estimates for the
## extreme eigenvalues of @var{a} are returned.  @code{@var{eigest} (1)}
## is an overestimate and @code{@var{eigest} (2)} is an underestimate,
## so that @code{@var{eigest} (2) / @var{eigest} (1)} is a lower bound
## for @code{cond (@var{P}, 2)}, which in the limit should theoretically
## equal the actual value of the condition number.  The method which
## computes @var{eigest} works only for symmetric positive definite
## @var{a} and @var{m}, and the user is responsible for verifying this
## assumption.
## @end itemize
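##
## For instance, after a run with full output, the last entry of
## @var{resvec} is the final residual norm and @var{relres} is its
## ratio to the initial one (only a sketch, reusing the matrix
## @code{A} and right hand side @code{b} built in the examples below):
##
## @example
## @group
## [x, flag, relres, iter, resvec] = pcg (A, b);
## norm (b - A*x)              # approximately resvec(end)
## resvec(end) / resvec(1)     # equals relres
## @end group
## @end example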
##
## Let us consider a trivial problem with a diagonal matrix (we exploit the
## sparsity of A)
##
## @example
## @group
## N = 10;
## A = diag([1:N]); A = sparse(A);
## b = rand(N,1);
## @end group
## @end example
##
## @sc{Example 1:} Simplest use of @code{pcg}
##
## @example
## x = pcg(A,b)
## @end example
##
## @sc{Example 2:} @code{pcg} with a function which computes
## @code{@var{a} * @var{x}}
##
## @example
## @group
## function y = applyA(x)
##   y = [1:N]'.*x;
## endfunction
##
## x = pcg('applyA',b)
## @end group
## @end example
##
## @sc{Example 3:} Preconditioned iteration, with full diagnostics.  The
## preconditioner (quite strange, because even the original matrix
## @var{a} is trivial) is defined as a function
##
## @example
## @group
## function y = applyM(x)
##   K = floor(length(x)-2);
##   y = x;
##   y(1:K) = x(1:K)./[1:K]';
## endfunction
##
## [x, flag, relres, iter, resvec, eigest] = pcg(A,b,[],[],'applyM')
## semilogy([1:iter+1], resvec);
## @end group
## @end example
##
## @sc{Example 4:} Finally, a preconditioner which depends on a
## parameter @var{k}.
##
## @example
## @group
## function y = applyM(x, varargin)
##   K = varargin@{1@};
##   y = x; y(1:K) = x(1:K)./[1:K]';
## endfunction
##
## [x, flag, relres, iter, resvec, eigest] = ...
##          pcg(A,b,[],[],'applyM',[],3)
## @end group
## @end example
##
## @sc{References}
##
## [1] C. T. Kelley, 'Iterative Methods for Linear and Nonlinear
## Equations', SIAM, 1995 (the base PCG algorithm)
##
## [2] Y. Saad, 'Iterative Methods for Sparse Linear Systems', PWS, 1996
## (the condition number estimate from PCG).  A revised version of this
## book is available online at http://www-users.cs.umn.edu/~saad/books.html
##
## @seealso{sparse, pcr}
## @end deftypefn

## Author: Piotr Krzyzanowski <piotr.krzyzanowski@mimuw.edu.pl>

function [x, flag, relres, iter, resvec, eigest] = pcg (A, b, tol, maxit, M, x0, varargin)

  if (nargin < 6 || isempty (x0))
    x = zeros (size (b));
  else
    x = x0;
  endif

  if (nargin < 5)
    M = [];
  endif

  if (nargin < 4 || isempty (maxit))
    maxit = min (size (b, 1), 20);
  endif

  maxit += 2;                  # the internal iteration counter is offset by 2

  if (nargin < 3 || isempty (tol))
    tol = 1e-6;
  endif

  preconditioned_residual_out = false;
  if (nargout > 5)
    T = zeros (maxit, maxit);  # tridiagonal Lanczos matrix, used for eigest
    preconditioned_residual_out = true;
  endif

  matrix_positive_definite = true;      # assume A is positive definite

  p = zeros (size (b));
  oldtau = 1;
  if (isnumeric (A))           # is A a matrix?
    r = b - A*x;
  else                         # then A should be a function!
    r = b - feval (A, x, varargin{:});
  endif

  resvec(1,1) = norm (r);
  alpha = 1;
  iter = 2;

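  ## Main preconditioned CG loop: z holds the preconditioned residual,
  ## p the search direction, w = A*p, and alpha the step length.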
  while (resvec(iter-1,1) > tol*resvec(1,1) && iter < maxit)
    if (isnumeric (M))          # is M a matrix?
      if (isempty (M))          # if M is empty, use no precond
        z = r;
      else                      # otherwise, apply the precond
        z = M \ r;
      endif
    else                        # then M should be a function!
      z = feval (M, r, varargin{:});
    endif
    tau = z' * r;
    resvec(iter-1,2) = sqrt (tau);
    beta = tau / oldtau;
    oldtau = tau;
    p = z + beta*p;
    if (isnumeric (A))          # is A a matrix?
      w = A * p;
    else                        # then A should be a function!
      w = feval (A, p, varargin{:});
    endif
    oldalpha = alpha;           # needed only for eigest
    alpha = tau / (p'*w);
    if (alpha <= 0.0)           # negative matrix?
      matrix_positive_definite = false;
    endif
    x += alpha*p;
    r -= alpha*w;
    if (nargout > 5 && iter > 2)
      T(iter-1:iter, iter-1:iter) = T(iter-1:iter, iter-1:iter) + ...
          [1 sqrt(beta); sqrt(beta) beta]./oldalpha;
      ## EVS = eig(T(2:iter-1,2:iter-1));
      ## fprintf(stderr,"PCG condest: %g (iteration: %d)\n", max(EVS)/min(EVS),iter);
    endif
    resvec(iter,1) = norm (r);
    iter++;
  endwhile

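  ## If eigenvalue estimates were requested, the extreme eigenvalues of
  ## the accumulated tridiagonal (Lanczos) matrix T serve as estimates
  ## for the extreme eigenvalues of the preconditioned matrix.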
  if (nargout > 5)
    if (matrix_positive_definite)
      if (iter > 3)
        T = T(2:iter-2,2:iter-2);
        l = eig(T);
        eigest = [min(l), max(l)];
        ## fprintf (stderr, "PCG condest: %g\n", eigest(2)/eigest(1));
      else
        eigest = [NaN, NaN];
        warning ("PCG: eigenvalue estimate failed: iteration converged too fast.");
      endif
    else
      eigest = [NaN, NaN];
    endif

    ## apply the preconditioner once more and finish with the precond
    ## residual
    if (isnumeric (M))          # is M a matrix?
      if (isempty (M))          # if M is empty, use no precond
        z = r;
      else                      # otherwise, apply the precond
        z = M \ r;
      endif
    else                        # then M should be a function!
      z = feval (M, r, varargin{:});
    endif
    resvec(iter-1,2) = sqrt (r'*z);
  else
    resvec = resvec(:,1);
  endif

  flag = 0;
  relres = resvec(iter-1,1)./resvec(1,1);
  iter -= 2;
  if (iter >= maxit-2)
    flag = 1;
    if (nargout < 2)
      warning ("PCG: maximum number of iterations (%d) reached\n", iter);
      warning ("The initial residual norm was reduced %g times.\n", 1.0/relres);
    endif
  elseif (nargout < 2)
    fprintf (stderr, "PCG: converged in %d iterations. ", iter);
    fprintf (stderr, "The initial residual norm was reduced %g times.\n",...
             1.0/relres);
  endif

  if (! matrix_positive_definite)
    flag = 3;
    if (nargout < 2)
      warning ("PCG: matrix not positive definite?\n");
    endif
  endif
endfunction

%!demo
%!
%! # Simplest usage of pcg (see also 'help pcg')
%!
%! N = 10;
%! A = diag([1:N]); b = rand(N,1); y = A\b; #y is the true solution
%! x = pcg(A,b);
%! printf('The solution relative error is %g\n', norm(x-y)/norm(y));
%!
%! # Don't be alarmed if pcg issues some warning messages in this
%! # example; see in the second demo why it takes N iterations
%! # of pcg to converge to a (very accurate, by the way) solution
%!demo
%!
%! # Full output from pcg, except for the eigenvalue estimates
%! # We use this output to plot the convergence history
%!
%! N = 10;
%! A = diag([1:N]); b = rand(N,1); X = A\b; #X is the true solution
%! [x, flag, relres, iter, resvec] = pcg(A,b);
%! printf('The solution relative error is %g\n', norm(x-X)/norm(X));
%! semilogy([0:iter],resvec/resvec(1),'o-g;relative residual;');
%! title('Convergence history'); xlabel('Iteration'); ylabel('log(||b-Ax||/||b||)');
%!demo
%!
%! # Full output from pcg, including the eigenvalue estimates
%! # Hilbert matrix is extremely ill-conditioned, so pcg WILL have problems
%!
%! N = 10;
%! A = hilb(N); b = rand(N,1); X = A\b; #X is the true solution
%! [x, flag, relres, iter, resvec, eigest] = pcg(A,b,[],200);
%! printf('The solution relative error is %g\n', norm(x-X)/norm(X));
%! printf('Condition number estimate is %g\n', eigest(2)/eigest(1));
%! printf('Actual condition number is %g\n', cond(A));
%! semilogy([0:iter],resvec(:,1),'o-g;absolute residual;');
%! hold on;
%! semilogy([0:iter],resvec(:,2),'+-r;absolute preconditioned residual;');
%! hold off;
%! title('Convergence history'); xlabel('Iteration'); ylabel('log(||b-Ax||)');
%!demo
%!
%! # Full output from pcg, including the eigenvalue estimates
%! # We use the 1-D Laplacian matrix for A, and cond(A) = O(N^2),
%! # which is the reason we need a preconditioner; here we take
%! # a very simple and not very powerful Jacobi preconditioner,
%! # which is the diagonal of A
%!
%! N = 100;
%! A = zeros(N,N);
%! for i=1:N-1 # form 1-D Laplacian matrix
%!   A(i:i+1,i:i+1) = [2 -1; -1 2];
%! endfor
%! b = rand(N,1); X = A\b; #X is the true solution
%! maxit = 80;
%! printf('System condition number is %g\n',cond(A));
%! # No preconditioner: the convergence is very slow!
%!
%! [x, flag, relres, iter, resvec, eigest] = pcg(A,b,[],maxit);
%! printf('System condition number estimate is %g\n',eigest(2)/eigest(1));
%! semilogy([0:iter],resvec(:,1),'o-g;NO preconditioning: absolute residual;');
%! title('Convergence history'); xlabel('Iteration'); ylabel('log(||b-Ax||)');
%!
%! pause(1);
%! # Test Jacobi preconditioner: it will not help much!!!
%!
%! M = diag(diag(A)); # Jacobi preconditioner
%! [x, flag, relres, iter, resvec, eigest] = pcg(A,b,[],maxit,M);
%! printf('JACOBI preconditioned system condition number estimate is %g\n',eigest(2)/eigest(1));
%! hold on;
%! semilogy([0:iter],resvec(:,1),'o-r;JACOBI preconditioner: absolute residual;');
%!
%! pause(1);
%! # Test nonoverlapping block Jacobi preconditioner: it will help much!
%!
%! M = zeros(N,N); k = 4;
%! for i=1:k:N # form nonoverlapping block Jacobi preconditioner
%!   M(i:i+k-1,i:i+k-1) = A(i:i+k-1,i:i+k-1);
%! endfor
%! [x, flag, relres, iter, resvec, eigest] = pcg(A,b,[],maxit,M);
%! printf('BLOCK JACOBI preconditioned system condition number estimate is %g\n',eigest(2)/eigest(1));
%! semilogy([0:iter],resvec(:,1),'o-b;BLOCK JACOBI preconditioner: absolute residual;');
%! hold off;
%!test
%!
%! #solve small diagonal system
%!
%! N = 10;
%! A = diag([1:N]); b = rand(N,1); X = A\b; #X is the true solution
%! [x, flag] = pcg(A,b,[],N+1);
%! assert(norm(x-X)/norm(X),0,1e-10);
%! assert(flag,0);
%!
%!test
%!
%! #solve small diagonal system which is not positive definite
%! #although A is not positive definite, the iteration continues and converges
%! #the lack of positive definiteness of A is detected
%!
%! N = 10;
%! A = diag([1:N].*(-ones(1,N).^2)); b = rand(N,1); X = A\b; #X is the true solution
%! [x, flag] = pcg(A,b,[],N+1);
%! assert(norm(x-X)/norm(X),0,1e-10);
%! assert(flag,3);
%!
%!test
%!
%! #solve tridiagonal system; does not converge in the default 20 iterations
%!
%! N = 100;
%! A = zeros(N,N);
%! for i=1:N-1 # form 1-D Laplacian matrix
%!   A(i:i+1,i:i+1) = [2 -1; -1 2];
%! endfor
%! b = ones(N,1); X = A\b; #X is the true solution
%! [x, flag, relres, iter, resvec, eigest] = pcg(A,b,1e-12);
%! assert(flag);
%! assert(relres>1.0);
%! assert(iter,20); #should perform max allowable default number of iterations
%!
%!test
%!
%! #solve tridiagonal system with a 'perfect' preconditioner
%! #converges in one iteration, so eigest does not work
%! #and issues a warning
%!
%! N = 100;
%! A = zeros(N,N);
%! for i=1:N-1 # form 1-D Laplacian matrix
%!   A(i:i+1,i:i+1) = [2 -1; -1 2];
%! endfor
%! b = ones(N,1); X = A\b; #X is the true solution
%! [x, flag, relres, iter, resvec, eigest] = pcg(A,b,[],[],A,b);
%! assert(norm(x-X)/norm(X),0,1e-6);
%! assert(flag,0);
%! assert(iter,1); #should converge in one iteration
%! assert(isnan(eigest),isnan([NaN NaN]));
%!
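%!test
%!
%! #solve small diagonal system passing A as a function handle
%! #(only a sketch: it assumes an Octave version with anonymous
%! # function handles; the handle plays the role of 'applyA' above)
%!
%! N = 10;
%! b = rand(N,1); X = b./[1:N]'; #X is the true solution
%! [x, flag] = pcg(@(v) [1:N]'.*v, b, [], N+1);
%! assert(norm(x-X)/norm(X),0,1e-10);
%! assert(flag,0);
%!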