## Copyright (C) 2004, 2006, 2007 Piotr Krzyzanowski
##
## This file is part of Octave.
##
## Octave is free software; you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or (at
## your option) any later version.
##
## Octave is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Octave; see the file COPYING.  If not, see
## <http://www.gnu.org/licenses/>.

## -*- texinfo -*-
## @deftypefn {Function File} {@var{x} =} pcg (@var{a}, @var{b}, @var{tol}, @var{maxit}, @var{m1}, @var{m2}, @var{x0}, @dots{})
## @deftypefnx {Function File} {[@var{x}, @var{flag}, @var{relres}, @var{iter}, @var{resvec}, @var{eigest}] =} pcg (@dots{})
##
## Solves the linear system of equations @code{@var{a} * @var{x} =
## @var{b}} by means of the Preconditioned Conjugate Gradient iterative
## method.  The input arguments are
##
## @itemize
## @item
## @var{a} can be either a square (preferably sparse) matrix or a
## function handle, inline function or string containing the name
## of a function which computes @code{@var{a} * @var{x}}.  In principle
## @var{a} should be symmetric and positive definite; if @code{pcg}
## finds @var{a} not to be positive definite, a warning message is
## issued and the @var{flag} output parameter is set.
##
## @item
## @var{b} is the right-hand side vector.
##
## @item
## @var{tol} is the required relative tolerance for the residual error,
## @code{@var{b} - @var{a} * @var{x}}.  The iteration stops if @code{norm
## (@var{b} - @var{a} * @var{x}) <= @var{tol} * norm (@var{b} - @var{a} *
## @var{x0})}.  If @var{tol} is empty or omitted, the function sets
## @code{@var{tol} = 1e-6} by default.
##
## @item
## @var{maxit} is the maximum allowable number of iterations; if
## @code{[]} is supplied for @var{maxit}, or fewer arguments are passed
## to @code{pcg}, the default value @code{min (size (@var{b}, 1), 20)}
## is used.
##
## @item
## @var{m} = @var{m1} * @var{m2} is the (left) preconditioning matrix,
## so that the iteration is (theoretically) equivalent to solving
## @code{@var{P} * @var{x} = @var{m} \ @var{b}} with @code{pcg}, where
## @code{@var{P} = @var{m} \ @var{a}}.
## Note that a proper choice of the preconditioner may dramatically
## improve the overall performance of the method.  Instead of matrices
## @var{m1} and @var{m2}, the user may pass two functions which return
## the results of applying the inverse of @var{m1} and @var{m2} to
## a vector (usually this is the preferred way of using the preconditioner).
## If @code{[]} is supplied for @var{m1}, or @var{m1} is omitted, no
## preconditioning is applied.  If @var{m2} is omitted, @var{m} = @var{m1}
## will be used as the preconditioner.
##
## @item
## @var{x0} is the initial guess.  If @var{x0} is empty or omitted, the
## function sets @var{x0} to a zero vector by default.
## @end itemize
##
## The arguments which follow @var{x0} are treated as parameters, and
## passed as additional arguments to any of the functions (@var{a} or
## @var{m}) that were given to @code{pcg}.  See the examples below for
## further details.  The output arguments are
##
## @itemize
## @item
## @var{x} is the computed approximation to the solution of
## @code{@var{a} * @var{x} = @var{b}}.
##
## @item
## @var{flag} reports on the convergence.  @code{@var{flag} = 0} means
## the solution converged and the tolerance criterion given by @var{tol}
## is satisfied.  @code{@var{flag} = 1} means that the @var{maxit} limit
## for the iteration count was reached.  @code{@var{flag} = 3} reports that
## the (preconditioned) matrix was found not to be positive definite.
##
## @item
## @var{relres} is the ratio of the final residual to its initial value,
## measured in the Euclidean norm (see the sketch after this list).
##
## @item
## @var{iter} is the actual number of iterations performed.
##
## @item
## @var{resvec} describes the convergence history of the method.
## @code{@var{resvec} (@var{i},1)} is the Euclidean norm of the residual,
## and @code{@var{resvec} (@var{i},2)} is the preconditioned residual
## norm, after the (@var{i}-1)-th iteration, @code{@var{i} =
## 1, 2, @dots{}, @var{iter}+1}.  The preconditioned residual norm
## is defined as
## @code{norm (@var{r}) ^ 2 = @var{r}' * (@var{m} \ @var{r})} where
## @code{@var{r} = @var{b} - @var{a} * @var{x}}; see also the
## description of @var{m}.  If @var{eigest} is not required, only
## @code{@var{resvec} (:,1)} is returned.
##
## @item
## @var{eigest} returns the estimate for the smallest @code{@var{eigest}
## (1)} and largest @code{@var{eigest} (2)} eigenvalues of the
## preconditioned matrix @code{@var{P} = @var{m} \ @var{a}}.  In
## particular, if no preconditioning is used, the estimates for the
## extreme eigenvalues of @var{a} are returned.  @code{@var{eigest} (1)}
## is an overestimate and @code{@var{eigest} (2)} is an underestimate,
## so that @code{@var{eigest} (2) / @var{eigest} (1)} is a lower bound
## for @code{cond (@var{P}, 2)}, which in the limit should theoretically
## approach the actual value of the condition number.
## The method which computes @var{eigest} works only for symmetric positive
## definite @var{a} and @var{m}, and the user is responsible for
## verifying this assumption.
## @end itemize
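##
## As a quick sketch of how @var{relres} and @var{resvec} relate (this
## is not one of the numbered examples below; it simply assumes some
## matrix @code{A} and right-hand side @code{b} are already defined):
##
## @example
## @group
## [x, flag, relres, iter, resvec] = pcg (A, b);
## ## relres is the last residual norm divided by the first one,
## ## so the following difference should be (numerically) zero:
## relres - resvec(end,1) / resvec(1,1)
## @end group
## @end example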
##
## Let us consider a trivial problem with a diagonal matrix (we exploit
## the sparsity of A)
##
## @example
## @group
## N = 10;
## A = diag (sparse ([1:N]));
## b = rand (N, 1);
## [L, U, P, Q] = luinc (A, 1.e-3);
## @end group
## @end example
##
## @sc{Example 1:} Simplest use of @code{pcg}
##
## @example
## x = pcg (A, b)
## @end example
##
## @sc{Example 2:} @code{pcg} with a function which computes
## @code{@var{a} * @var{x}}
##
## @example
## @group
## function y = applyA (x)
##   y = [1:N]' .* x;
## endfunction
##
## x = pcg ("applyA", b)
## @end group
## @end example
##
## @sc{Example 3:} @code{pcg} with a preconditioner: @var{l} * @var{u}
##
## @example
## x = pcg (A, b, 1.e-6, 500, L*U);
## @end example
##
## @sc{Example 4:} @code{pcg} with a preconditioner: @var{l} * @var{u}.
## Faster than @sc{Example 3} since lower and upper triangular matrices
## are easier to invert
##
## @example
## x = pcg (A, b, 1.e-6, 500, L, U);
## @end example
##
## @sc{Example 5:} Preconditioned iteration, with full diagnostics.  The
## preconditioner (quite strange, because even the original matrix
## @var{a} is trivial) is defined as a function
##
## @example
## @group
## function y = applyM (x)
##   K = floor (length (x) - 2);
##   y = x;
##   y(1:K) = x(1:K) ./ [1:K]';
## endfunction
##
## [x, flag, relres, iter, resvec, eigest] = ...
##      pcg (A, b, [], [], "applyM");
## semilogy (1:iter+1, resvec);
## @end group
## @end example
##
## @sc{Example 6:} Finally, a preconditioner which depends on a
## parameter @var{k}.
##
## @example
## @group
## function y = applyM (x, varargin)
##   K = varargin@{1@};
##   y = x;
##   y(1:K) = x(1:K) ./ [1:K]';
## endfunction
##
## [x, flag, relres, iter, resvec, eigest] = ...
##      pcg (A, b, [], [], "applyM", [], [], 3)
## @end group
## @end example
##
## @sc{References}
##
## [1] C. T. Kelley, 'Iterative Methods for Linear and Nonlinear
## Equations', SIAM, 1995 (the base PCG algorithm)
##
## [2] Y. Saad, 'Iterative Methods for Sparse Linear Systems', PWS, 1996
## (condition number estimate from PCG).  A revised version of this book
## is available online at http://www-users.cs.umn.edu/~saad/books.html
##
## @seealso{sparse, pcr}
## @end deftypefn

## Author: Piotr Krzyzanowski <piotr.krzyzanowski@mimuw.edu.pl>
## Modified by: Vittoria Rezzonico <vittoria.rezzonico@epfl.ch>
##  - Add the ability to provide the preconditioner as two separate
##    matrices

function [x, flag, relres, iter, resvec, eigest] = pcg (A, b, tol, maxit, M1, M2, x0, varargin)

  ## M = M1 * M2

  if (nargin < 7 || isempty (x0))
    x = zeros (size (b));
  else
    x = x0;
  endif

  if ((nargin < 5) || isempty (M1))
    existM1 = 0;
  else
    existM1 = 1;
  endif

  if ((nargin < 6) || isempty (M2))
    existM2 = 0;
  else
    existM2 = 1;
  endif

  if (nargin < 4 || isempty (maxit))
    maxit = min (size (b, 1), 20);
  endif

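  ## The loop counter below starts at 2 and resvec(1,:) stores the
  ## initial residual, so shift the iteration limit accordingly.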
  maxit += 2;

  if (nargin < 3 || isempty (tol))
    tol = 1e-6;
  endif

  preconditioned_residual_out = false;
  if (nargout > 5)
    T = zeros (maxit, maxit);
    preconditioned_residual_out = true;
  endif

  ## Assume A is positive definite.
  matrix_positive_definite = true;

  p = zeros (size (b));
  oldtau = 1;
  if (isnumeric (A))
    ## A is a matrix.
    r = b - A*x;
  else
    ## A should be a function.
    r = b - feval (A, x, varargin{:});
  endif

  resvec(1,1) = norm (r);
  alpha = 1;
  iter = 2;

  while (resvec (iter-1,1) > tol * resvec (1,1) && iter < maxit)
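    ## Apply the (split) preconditioner: z = (M1*M2) \ r.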
    if (existM1)
      if (isnumeric (M1))
        y = M1 \ r;
      else
        y = feval (M1, r, varargin{:});
      endif
    else
      y = r;
    endif
    if (existM2)
      if (isnumeric (M2))
        z = M2 \ y;
      else
        z = feval (M2, y, varargin{:});
      endif
    else
      z = y;
    endif
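    ## Standard CG coefficients: tau is the preconditioned residual
    ## inner product and beta updates the search direction p.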
    tau = z' * r;
    resvec (iter-1,2) = sqrt (tau);
    beta = tau / oldtau;
    oldtau = tau;
    p = z + beta * p;
    if (isnumeric (A))
      ## A is a matrix.
      w = A * p;
    else
      ## A should be a function.
      w = feval (A, p, varargin{:});
    endif
    ## Needed only for eigest.
    oldalpha = alpha;
    alpha = tau / (p' * w);
    if (alpha <= 0.0)
      ## Negative matrix.
      matrix_positive_definite = false;
    endif
    x += alpha * p;
    r -= alpha * w;
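    ## Accumulate the tridiagonal (Lanczos) matrix whose eigenvalues
    ## are used below to estimate the extreme eigenvalues of M \ A.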
    if (nargout > 5 && iter > 2)
      T(iter-1:iter, iter-1:iter) = T(iter-1:iter, iter-1:iter) + ...
          [1 sqrt(beta); sqrt(beta) beta] ./ oldalpha;
      ## EVS = eig (T(2:iter-1, 2:iter-1));
      ## fprintf (stderr, "PCG condest: %g (iteration: %d)\n", max (EVS) / min (EVS), iter);
    endif
    resvec (iter,1) = norm (r);
    iter++;
  endwhile

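  ## If requested, compute the eigenvalue estimates from the Lanczos
  ## tridiagonal matrix T accumulated above.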
  if (nargout > 5)
    if (matrix_positive_definite)
      if (iter > 3)
        T = T(2:iter-2, 2:iter-2);
        l = eig (T);
        eigest = [min(l), max(l)];
        ## fprintf (stderr, "pcg condest: %g\n", eigest(2) / eigest(1));
      else
        eigest = [NaN, NaN];
        warning ("pcg: eigenvalue estimate failed: iteration converged too fast.");
      endif
    else
      eigest = [NaN, NaN];
    endif

    ## Apply the preconditioner once more and finish with the precond
    ## residual.
    if (existM1)
      if (isnumeric (M1))
        y = M1 \ r;
      else
        y = feval (M1, r, varargin{:});
      endif
    else
      y = r;
    endif
    if (existM2)
      if (isnumeric (M2))
        z = M2 \ y;
      else
        z = feval (M2, y, varargin{:});
      endif
    else
      z = y;
    endif

    resvec (iter-1,2) = sqrt (r' * z);
  else
    resvec = resvec(:,1);
  endif

  flag = 0;
  relres = resvec (iter-1,1) ./ resvec(1,1);
  iter -= 2;
  if (iter >= maxit - 2)
    flag = 1;
    if (nargout < 2)
      warning ("pcg: maximum number of iterations (%d) reached\n", iter);
      warning ("the initial residual norm was reduced %g times.\n", ...
               1.0 / relres);
    endif
  elseif (nargout < 2)
    fprintf (stderr, "pcg: converged in %d iterations. ", iter);
    fprintf (stderr, "the initial residual norm was reduced %g times.\n", ...
             1.0 / relres);
  endif

  if (! matrix_positive_definite)
    flag = 3;
    if (nargout < 2)
      warning ("pcg: matrix not positive definite?\n");
    endif
  endif
endfunction

%!demo
%!
%! # Simplest usage of pcg (see also 'help pcg')
%!
%! N = 10;
%! A = diag ([1:N]); b = rand (N, 1); y = A \ b;  # y is the true solution
%! x = pcg (A, b);
%! printf ('The solution relative error is %g\n', norm (x - y) / norm (y));
%!
%! # Don't be alarmed if pcg issues some warning messages in this
%! # example; in the second example, note why it takes N iterations
%! # of pcg to converge to a (very accurate, by the way) solution.
%!demo
%!
%! # Full output from pcg, except for the eigenvalue estimates.
%! # We use this output to plot the convergence history.
%!
%! N = 10;
%! A = diag ([1:N]); b = rand (N, 1); X = A \ b;  # X is the true solution
%! [x, flag, relres, iter, resvec] = pcg (A, b);
%! printf ('The solution relative error is %g\n', norm (x - X) / norm (X));
%! semilogy ([0:iter], resvec / resvec(1), 'o-g');
%! title ('Convergence history'); xlabel ('Iteration'); ylabel ('log(||b-Ax||/||b||)');
%! legend ('relative residual');
%!demo
%!
%! # Full output from pcg, including the eigenvalue estimates.
%! # The Hilbert matrix is extremely ill-conditioned, so pcg WILL have problems.
%!
%! N = 10;
%! A = hilb (N); b = rand (N, 1); X = A \ b;  # X is the true solution
%! [x, flag, relres, iter, resvec, eigest] = pcg (A, b, [], 200);
%! printf ('The solution relative error is %g\n', norm (x - X) / norm (X));
%! printf ('Condition number estimate is %g\n', eigest(2) / eigest(1));
%! printf ('Actual condition number is %g\n', cond (A));
%! semilogy ([0:iter], resvec, ['o-g'; '+-r']);
%! title ('Convergence history'); xlabel ('Iteration'); ylabel ('log(||b-Ax||)');
%! legend ('absolute residual', 'absolute preconditioned residual');
%!demo
%!
%! # Full output from pcg, including the eigenvalue estimates.
%! # We use the 1-D Laplacian matrix for A, with cond(A) = O(N^2);
%! # that is the reason we need a preconditioner.  Here we take
%! # a very simple and not very powerful Jacobi preconditioner,
%! # which is the diagonal of A.
%!
%! N = 100;
%! A = zeros (N, N);
%! for i = 1 : N - 1  # form 1-D Laplacian matrix
%!   A(i:i+1, i:i+1) = [2 -1; -1 2];
%! endfor
%! b = rand (N, 1); X = A \ b;  # X is the true solution
%! maxit = 80;
%! printf ('System condition number is %g\n', cond (A));
%! # No preconditioner: the convergence is very slow!
%!
%! [x, flag, relres, iter, resvec, eigest] = pcg (A, b, [], maxit);
%! printf ('System condition number estimate is %g\n', eigest(2) / eigest(1));
%! semilogy ([0:iter], resvec(:,1), 'o-g');
%! title ('Convergence history'); xlabel ('Iteration'); ylabel ('log(||b-Ax||)');
%! legend ('NO preconditioning: absolute residual');
%!
%! pause (1);
%! # Test the Jacobi preconditioner: it will not help much!
%!
%! M = diag (diag (A));  # Jacobi preconditioner
%! [x, flag, relres, iter, resvec, eigest] = pcg (A, b, [], maxit, M);
%! printf ('JACOBI preconditioned system condition number estimate is %g\n', eigest(2) / eigest(1));
%! hold on;
%! semilogy ([0:iter], resvec(:,1), 'o-r');
%! legend ('NO preconditioning: absolute residual', ...
%!         'JACOBI preconditioner: absolute residual');
%!
%! pause (1);
%! # Test the nonoverlapping block Jacobi preconditioner: it will help much more!
%!
%! M = zeros (N, N); k = 4;
%! for i = 1 : k : N  # form the nonoverlapping block Jacobi preconditioner
%!   M(i:i+k-1, i:i+k-1) = A(i:i+k-1, i:i+k-1);
%! endfor
%! [x, flag, relres, iter, resvec, eigest] = pcg (A, b, [], maxit, M);
%! printf ('BLOCK JACOBI preconditioned system condition number estimate is %g\n', eigest(2) / eigest(1));
%! semilogy ([0:iter], resvec(:,1), 'o-b');
%! legend ('NO preconditioning: absolute residual', ...
%!         'JACOBI preconditioner: absolute residual', ...
%!         'BLOCK JACOBI preconditioner: absolute residual');
%! hold off;
%!test
%!
%! # Solve a small diagonal system.
%!
%! N = 10;
%! A = diag ([1:N]); b = rand (N, 1); X = A \ b;  # X is the true solution
%! [x, flag] = pcg (A, b, [], N+1);
%! assert (norm (x - X) / norm (X), 0, 1e-10);
%! assert (flag, 0);
%!
%!test
%!
%! # Solve a small negative definite diagonal system.
%! # Although A is not positive definite, the iteration still converges,
%! # and the lack of positive definiteness is detected (flag = 3).
%!
%! N = 10;
%! A = diag ([1:N] .* (-ones(1, N) .^ 2)); b = rand (N, 1); X = A \ b;  # X is the true solution
%! [x, flag] = pcg (A, b, [], N+1);
%! assert (norm (x - X) / norm (X), 0, 1e-10);
%! assert (flag, 3);
%!
%!test
%!
%! # Solve a tridiagonal system; it does not converge in the default 20 iterations.
%!
%! N = 100;
%! A = zeros (N, N);
%! for i = 1 : N - 1  # form 1-D Laplacian matrix
%!   A(i:i+1, i:i+1) = [2 -1; -1 2];
%! endfor
%! b = ones (N, 1); X = A \ b;  # X is the true solution
%! [x, flag, relres, iter, resvec, eigest] = pcg (A, b, 1e-12);
%! assert (flag);
%! assert (relres > 1.0);
%! assert (iter, 20);  # should perform the max allowable default number of iterations
%!
%!test
%!
%! # Solve a tridiagonal system with a 'perfect' preconditioner, which
%! # converges in one iteration, so eigest does not work
%! # and a warning is issued.
%!
%! N = 100;
%! A = zeros (N, N);
%! for i = 1 : N - 1  # form 1-D Laplacian matrix
%!   A(i:i+1, i:i+1) = [2 -1; -1 2];
%! endfor
%! b = ones (N, 1); X = A \ b;  # X is the true solution
%! [x, flag, relres, iter, resvec, eigest] = pcg (A, b, [], [], A, [], b);
%! assert (norm (x - X) / norm (X), 0, 1e-6);
%! assert (flag, 0);
%! assert (iter, 1);  # should converge in one iteration
%! assert (isnan (eigest), isnan ([NaN, NaN]));
%!