annotate scripts/sparse/pcg.m @ 7515:f3c00dc0912b
changeset: Eliminate the rest of the dispatched sparse functions
author:    David Bateman <dbateman@free.fr>
date:      Fri, 22 Feb 2008 15:50:51 +0100
parents:   120f3135952f
children:  bc982528de11
## Copyright (C) 2004, 2006, 2007 Piotr Krzyzanowski
##
## This file is part of Octave.
##
## Octave is free software; you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or (at
## your option) any later version.
##
## Octave is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Octave; see the file COPYING.  If not, see
## <http://www.gnu.org/licenses/>.

## -*- texinfo -*-
## @deftypefn {Function File} {@var{x} =} pcg (@var{a}, @var{b}, @var{tol}, @var{maxit}, @var{m1}, @var{m2}, @var{x0}, @dots{})
## @deftypefnx {Function File} {[@var{x}, @var{flag}, @var{relres}, @var{iter}, @var{resvec}, @var{eigest}] =} pcg (@dots{})
##
## Solves the linear system of equations @code{@var{a} * @var{x} =
## @var{b}} by means of the Preconditioned Conjugate Gradient iterative
## method.  The input arguments are
##
## @itemize
## @item
## @var{a} can be either a square (preferably sparse) matrix or a
## function handle, inline function or string containing the name
## of a function which computes @code{@var{a} * @var{x}}.  In principle
## @var{a} should be symmetric and positive definite; if @code{pcg}
## finds @var{a} not to be positive definite, a warning is issued and
## the @var{flag} output parameter is set.
##
## @item
## @var{b} is the right hand side vector.
##
## @item
## @var{tol} is the required relative tolerance for the residual error,
## @code{@var{b} - @var{a} * @var{x}}.  The iteration stops if @code{norm
## (@var{b} - @var{a} * @var{x}) <= @var{tol} * norm (@var{b} - @var{a} *
## @var{x0})}.  If @var{tol} is empty or omitted, the function sets
## @code{@var{tol} = 1e-6} by default.
##
## @item
## @var{maxit} is the maximum allowable number of iterations; if
## @code{[]} is supplied for @var{maxit}, or @code{pcg} is called with
## fewer arguments, a default value equal to 20 is used.
##
## @item
## @var{m} = @var{m1} * @var{m2} is the (left) preconditioning matrix, so
## that the iteration is (theoretically) equivalent to solving with
## @code{pcg} the system @code{@var{P} * @var{x} = @var{m} \ @var{b}},
## with @code{@var{P} = @var{m} \ @var{a}}.
## Note that a proper choice of the preconditioner may dramatically
## improve the overall performance of the method.  Instead of matrices
## @var{m1} and @var{m2}, the user may pass two functions which return
## the results of applying the inverse of @var{m1} and @var{m2} to
## a vector (usually this is the preferred way of using the preconditioner).
## If @code{[]} is supplied for @var{m1}, or @var{m1} is omitted, no
## preconditioning is applied.  If @var{m2} is omitted, @var{m} = @var{m1}
## will be used as the preconditioner.
##
## @item
## @var{x0} is the initial guess.  If @var{x0} is empty or omitted, the
## function sets @var{x0} to a zero vector by default.
## @end itemize
##
## The arguments which follow @var{x0} are treated as parameters, and
## passed in a proper way to any of the functions (@var{a} or @var{m})
## which are passed to @code{pcg}.  See the examples below for further
## details.  The output arguments are
##
## @itemize
## @item
## @var{x} is the computed approximation to the solution of
## @code{@var{a} * @var{x} = @var{b}}.
##
## @item
## @var{flag} reports on the convergence.  @code{@var{flag} = 0} means
## the solution converged and the tolerance criterion given by @var{tol}
## is satisfied.  @code{@var{flag} = 1} means that the @var{maxit} limit
## for the iteration count was reached.  @code{@var{flag} = 3} reports that
## the (preconditioned) matrix was found not to be positive definite.
##
## @item
## @var{relres} is the ratio of the final residual to its initial value,
## measured in the Euclidean norm.
##
## @item
## @var{iter} is the actual number of iterations performed.
##
## @item
## @var{resvec} describes the convergence history of the method.
## @code{@var{resvec} (i,1)} is the Euclidean norm of the residual, and
## @code{@var{resvec} (i,2)} is the preconditioned residual norm,
## after the (@var{i}-1)-th iteration, @code{@var{i} =
## 1, 2, @dots{}, @var{iter}+1}.  The preconditioned residual norm
## is defined as
## @code{norm (@var{r}) ^ 2 = @var{r}' * (@var{m} \ @var{r})} where
## @code{@var{r} = @var{b} - @var{a} * @var{x}}; see also the
## description of @var{m}.  If @var{eigest} is not required, only
## @code{@var{resvec} (:,1)} is returned.
##
## @item
## @var{eigest} returns the estimate for the smallest @code{@var{eigest}
## (1)} and largest @code{@var{eigest} (2)} eigenvalues of the
## preconditioned matrix @code{@var{P} = @var{m} \ @var{a}}.  In
## particular, if no preconditioning is used, the estimates for the
## extreme eigenvalues of @var{a} are returned.  @code{@var{eigest} (1)}
## is an overestimate and @code{@var{eigest} (2)} is an underestimate,
## so that @code{@var{eigest} (2) / @var{eigest} (1)} is a lower bound
## for @code{cond (@var{P}, 2)}, which nevertheless in the limit should
## theoretically be equal to the actual value of the condition number.
## The method which computes @var{eigest} works only for symmetric positive
## definite @var{a} and @var{m}, and the user is responsible for
## verifying this assumption.
## @end itemize
##
## Let us consider a trivial problem with a diagonal matrix (we exploit the
## sparsity of A)
##
## @example
## @group
## N = 10;
## A = diag (sparse ([1:N]));
## b = rand (N, 1);
## [L, U, P, Q] = luinc (A, 1.e-3);
## @end group
## @end example
##
## @sc{Example 1:} Simplest use of @code{pcg}
##
## @example
## x = pcg (A, b)
## @end example
##
## @sc{Example 2:} @code{pcg} with a function which computes
## @code{@var{a} * @var{x}}
##
## @example
## @group
## function y = applyA (x)
##   y = [1:N]' .* x;
## endfunction
##
## x = pcg ("applyA", b)
## @end group
## @end example
##
## @sc{Example 3:} @code{pcg} with a preconditioner: @var{l} * @var{u}
##
## @example
## x = pcg (A, b, 1.e-6, 500, L*U);
## @end example
##
## @sc{Example 4:} @code{pcg} with a preconditioner: @var{l} * @var{u}.
## Faster than @sc{Example 3} since lower and upper triangular matrices
## are easier to invert
##
## @example
## x = pcg (A, b, 1.e-6, 500, L, U);
## @end example
##
## @sc{Example 5:} Preconditioned iteration, with full diagnostics.  The
## preconditioner (quite strange, because even the original matrix
## @var{a} is trivial) is defined as a function
##
## @example
## @group
## function y = applyM (x)
##   K = floor (length (x) - 2);
##   y = x;
##   y(1:K) = x(1:K) ./ [1:K]';
## endfunction
##
## [x, flag, relres, iter, resvec, eigest] = ...
##    pcg (A, b, [], [], "applyM");
## semilogy (1:iter+1, resvec);
## @end group
## @end example
##
## @sc{Example 6:} Finally, a preconditioner which depends on a
## parameter @var{k}.
##
## @example
## @group
## function y = applyM (x, varargin)
##   K = varargin@{1@};
##   y = x;
##   y(1:K) = x(1:K) ./ [1:K]';
## endfunction
##
## [x, flag, relres, iter, resvec, eigest] = ...
##    pcg (A, b, [], [], "applyM", [], [], 3)
## @end group
## @end example
##
## @sc{References}
##
## [1] C. T. Kelley, 'Iterative methods for linear and nonlinear equations',
## SIAM, 1995 (the base PCG algorithm)
##
## [2] Y. Saad, 'Iterative methods for sparse linear systems', PWS, 1996
## (the condition number estimate from PCG).  A revised version of this book
## is available online at http://www-users.cs.umn.edu/~saad/books.html
##
## @seealso{sparse, pcr}
## @end deftypefn

## Author: Piotr Krzyzanowski <piotr.krzyzanowski@mimuw.edu.pl>
## Modified by: Vittoria Rezzonico <vittoria.rezzonico@epfl.ch>
##  - Add the ability to provide the pre-conditioner as two separate
##    matrices

function [x, flag, relres, iter, resvec, eigest] = pcg (A, b, tol, maxit, M1, M2, x0, varargin)

  ## M = M1*M2
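  ## The loop below implements the standard preconditioned CG recurrences:
  ## at each step the current residual r is preconditioned (z = M2 \ (M1 \ r)),
  ## the search direction p is updated from z, and x and r are advanced
  ## along p.  resvec records the residual norms; when eigest is requested,
  ## the CG coefficients are also accumulated into a tridiagonal matrix T
  ## that is used afterwards for the eigenvalue estimates.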

  if (nargin < 7 || isempty (x0))
    x = zeros (size (b));
  else
    x = x0;
  endif

  if (nargin < 5 || isempty (M1))
    existM1 = 0;
  else
    existM1 = 1;
  endif

  if (nargin < 6 || isempty (M2))
    existM2 = 0;
  else
    existM2 = 1;
  endif

  if (nargin < 4 || isempty (maxit))
    maxit = min (size (b, 1), 20);
  endif

  maxit += 2;
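  ## the loop counter below starts at 2 (resvec(1,:) holds the initial
  ## residual norm), so the limit is padded by 2 to still allow maxit
  ## true iterations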

  if (nargin < 3 || isempty (tol))
    tol = 1e-6;
  endif

  preconditioned_residual_out = false;
  if (nargout > 5)
    T = zeros (maxit, maxit);
    preconditioned_residual_out = true;
  endif

  matrix_positive_definite = true;   # assume A is positive definite

  p = zeros (size (b));
  oldtau = 1;
  if (isnumeric (A))                 # is A a matrix?
    r = b - A*x;
  else                               # then A should be a function!
    r = b - feval (A, x, varargin{:});
  endif

  resvec(1,1) = norm (r);
  alpha = 1;
  iter = 2;

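  ## main loop: stop when the residual norm drops below tol times the
  ## initial residual norm, or when the (padded) iteration limit is reached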
  while (resvec (iter-1,1) > tol * resvec (1,1) && iter < maxit)
    if (existM1)
      if (isnumeric (M1))
        y = M1 \ r;
      else
        y = feval (M1, r, varargin{:});
      endif
    else
      y = r;
    endif
    if (existM2)
      if (isnumeric (M2))
        z = M2 \ y;
      else
        z = feval (M2, y, varargin{:});
      endif
    else
      z = y;
    endif
    tau = z' * r;
    resvec (iter-1,2) = sqrt (tau);
    beta = tau / oldtau;
    oldtau = tau;
    p = z + beta * p;
    if (isnumeric (A))               # is A a matrix?
      w = A * p;
    else                             # then A should be a function!
      w = feval (A, p, varargin{:});
    endif
    oldalpha = alpha;                # needed only for eigest
    alpha = tau / (p'*w);
    if (alpha <= 0.0)                # negative matrix?
      matrix_positive_definite = false;
    endif
    x += alpha * p;
    r -= alpha * w;
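    ## when eigest is requested, accumulate the CG coefficients into the
    ## Lanczos tridiagonal matrix T; its extreme eigenvalues (Ritz values)
    ## approximate the extreme eigenvalues of the preconditioned matrix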
    if (nargout > 5 && iter > 2)
      T(iter-1:iter, iter-1:iter) = T(iter-1:iter, iter-1:iter) + ...
          [1 sqrt(beta); sqrt(beta) beta] ./ oldalpha;
      ## EVS = eig (T(2:iter-1, 2:iter-1));
      ## fprintf (stderr, "PCG condest: %g (iteration: %d)\n", max(EVS)/min(EVS), iter);
    endif
    resvec (iter,1) = norm (r);
    iter++;
  endwhile

  if (nargout > 5)
    if (matrix_positive_definite)
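      ## the estimate is only available after at least two complete
      ## iterations, i.e. when T(2:iter-2, 2:iter-2) is non-empty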
      if (iter > 3)
        T = T(2:iter-2, 2:iter-2);
        l = eig (T);
        eigest = [min(l), max(l)];
        ## fprintf (stderr, "pcg condest: %g\n", eigest(2)/eigest(1));
      else
        eigest = [NaN, NaN];
        warning ("pcg: eigenvalue estimate failed: iteration converged too fast.");
      endif
    else
      eigest = [NaN, NaN];
    endif

    ## apply the preconditioner once more and finish with the
    ## preconditioned residual
    if (existM1)
      if (isnumeric (M1))
        y = M1 \ r;
      else
        y = feval (M1, r, varargin{:});
      endif
    else
      y = r;
    endif
    if (existM2)
      if (isnumeric (M2))
        z = M2 \ y;
      else
        z = feval (M2, y, varargin{:});
      endif
    else
      z = y;
    endif

    resvec (iter-1,2) = sqrt (r' * z);
  else
    resvec = resvec(:,1);
  endif

  flag = 0;
  relres = resvec (iter-1,1) ./ resvec(1,1);
  iter -= 2;
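  ## the loop counter started at 2, so subtract 2 to report the number of
  ## iterations actually performed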
  if (iter >= maxit - 2)
    flag = 1;
    if (nargout < 2)
      warning ("pcg: maximum number of iterations (%d) reached\n", iter);
      warning ("the initial residual norm was reduced %g times.\n", ...
               1.0 / relres);
    endif
  elseif (nargout < 2)
    fprintf (stderr, "pcg: converged in %d iterations. ", iter);
    fprintf (stderr, "the initial residual norm was reduced %g times.\n", ...
             1.0 / relres);
  endif

  if (! matrix_positive_definite)
    flag = 3;
    if (nargout < 2)
      warning ("pcg: matrix not positive definite?\n");
    endif
  endif
endfunction

%!demo
%!
%! # Simplest usage of pcg (see also 'help pcg')
%!
%! N = 10;
%! A = diag ([1:N]); b = rand (N, 1); y = A \ b;  # y is the true solution
%! x = pcg (A, b);
%! printf ('The solution relative error is %g\n', norm (x - y) / norm (y));
%!
%! # Don't be alarmed if pcg issues some warning messages in this example;
%! # note in the second demo why it takes N iterations of pcg to converge
%! # to a (very accurate, by the way) solution
%!demo
%!
%! # Full output from pcg, except for the eigenvalue estimates
%! # We use this output to plot the convergence history
%!
%! N = 10;
%! A = diag ([1:N]); b = rand (N, 1); X = A \ b;  # X is the true solution
%! [x, flag, relres, iter, resvec] = pcg (A, b);
%! printf ('The solution relative error is %g\n', norm (x - X) / norm (X));
%! semilogy ([0:iter], resvec / resvec(1), 'o-g');
%! title ('Convergence history'); xlabel ('Iteration'); ylabel ('log(||b-Ax||/||b||)');
%! legend ('relative residual');
%!demo
%!
%! # Full output from pcg, including the eigenvalue estimates
%! # The Hilbert matrix is extremely ill conditioned, so pcg WILL have problems
%!
%! N = 10;
%! A = hilb (N); b = rand (N, 1); X = A \ b;  # X is the true solution
%! [x, flag, relres, iter, resvec, eigest] = pcg (A, b, [], 200);
%! printf ('The solution relative error is %g\n', norm (x - X) / norm (X));
%! printf ('Condition number estimate is %g\n', eigest(2) / eigest(1));
%! printf ('Actual condition number is %g\n', cond (A));
%! semilogy ([0:iter], resvec, ['o-g'; '+-r']);
%! title ('Convergence history'); xlabel ('Iteration'); ylabel ('log(||b-Ax||)');
%! legend ('absolute residual', 'absolute preconditioned residual');
%!demo
%!
%! # Full output from pcg, including the eigenvalue estimates
%! # We use the 1-D Laplacian matrix for A, and cond(A) = O(N^2);
%! # that is the reason we need a preconditioner; here we take
%! # a very simple and not very powerful Jacobi preconditioner,
%! # which is the diagonal of A
%!
%! N = 100;
%! A = zeros (N, N);
%! for i = 1 : N - 1  # form 1-D Laplacian matrix
%!   A(i:i+1, i:i+1) = [2 -1; -1 2];
%! endfor
%! b = rand (N, 1); X = A \ b;  # X is the true solution
%! maxit = 80;
%! printf ('System condition number is %g\n', cond (A));
%! # No preconditioner: the convergence is very slow!
%!
%! [x, flag, relres, iter, resvec, eigest] = pcg (A, b, [], maxit);
%! printf ('System condition number estimate is %g\n', eigest(2) / eigest(1));
%! semilogy ([0:iter], resvec(:,1), 'o-g');
%! title ('Convergence history'); xlabel ('Iteration'); ylabel ('log(||b-Ax||)');
%! legend ('NO preconditioning: absolute residual');
%!
%! pause (1);
%! # Test the Jacobi preconditioner: it will not help much!!!
%!
%! M = diag (diag (A));  # Jacobi preconditioner
%! [x, flag, relres, iter, resvec, eigest] = pcg (A, b, [], maxit, M);
%! printf ('JACOBI preconditioned system condition number estimate is %g\n', eigest(2) / eigest(1));
%! hold on;
%! semilogy ([0:iter], resvec(:,1), 'o-r');
%! legend ('NO preconditioning: absolute residual', ...
%!         'JACOBI preconditioner: absolute residual');
%!
%! pause (1);
%! # Test the nonoverlapping block Jacobi preconditioner: it will help much!
%!
%! M = zeros (N, N); k = 4;
%! for i = 1 : k : N  # form the block Jacobi preconditioner
%!   M(i:i+k-1, i:i+k-1) = A(i:i+k-1, i:i+k-1);
%! endfor
%! [x, flag, relres, iter, resvec, eigest] = pcg (A, b, [], maxit, M);
%! printf ('BLOCK JACOBI preconditioned system condition number estimate is %g\n', eigest(2) / eigest(1));
%! semilogy ([0:iter], resvec(:,1), 'o-b');
%! legend ('NO preconditioning: absolute residual', ...
%!         'JACOBI preconditioner: absolute residual', ...
%!         'BLOCK JACOBI preconditioner: absolute residual');
%! hold off;
%!test
%!
%! # solve a small diagonal system
%!
%! N = 10;
%! A = diag ([1:N]); b = rand (N, 1); X = A \ b;  # X is the true solution
%! [x, flag] = pcg (A, b, [], N+1);
%! assert (norm (x - X) / norm (X), 0, 1e-10);
%! assert (flag, 0);
%!
%!test
%!
%! # solve a small indefinite diagonal system
%! # despite A being indefinite, the iteration continues and converges;
%! # the indefiniteness of A is detected
%!
%! N = 10;
%! A = diag ([1:N] .* (-ones(1, N) .^ 2)); b = rand (N, 1); X = A \ b;  # X is the true solution
%! [x, flag] = pcg (A, b, [], N+1);
%! assert (norm (x - X) / norm (X), 0, 1e-10);
%! assert (flag, 3);
%!
%!test
%!
%! # solve a tridiagonal system; does not converge in the default 20 iterations
%!
%! N = 100;
%! A = zeros (N, N);
%! for i = 1 : N - 1  # form 1-D Laplacian matrix
%!   A(i:i+1, i:i+1) = [2 -1; -1 2];
%! endfor
%! b = ones (N, 1); X = A \ b;  # X is the true solution
%! [x, flag, relres, iter, resvec, eigest] = pcg (A, b, 1e-12);
%! assert (flag);
%! assert (relres > 1.0);
%! assert (iter, 20);  # should perform the max allowable default number of iterations
%!
%!test
%!
%! # solve a tridiagonal system with a 'perfect' preconditioner;
%! # converges in one iteration, so eigest does not work
%! # and issues a warning
%!
%! N = 100;
%! A = zeros (N, N);
%! for i = 1 : N - 1  # form 1-D Laplacian matrix
%!   A(i:i+1, i:i+1) = [2 -1; -1 2];
%! endfor
%! b = ones (N, 1); X = A \ b;  # X is the true solution
%! [x, flag, relres, iter, resvec, eigest] = pcg (A, b, [], [], A, [], b);
%! assert (norm (x - X) / norm (X), 0, 1e-6);
%! assert (flag, 0);
%! assert (iter, 1);  # should converge in one iteration
%! assert (isnan (eigest), isnan ([NaN, NaN]));
%!
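
## An extra sanity check of the documented relations among the output
## arguments (a small addition in the spirit of the tests above): resvec
## should have iter+1 rows and relres should equal the last entry of
## resvec(:,1) divided by the first.
%!test
%! N = 10;
%! A = diag ([1:N]); b = rand (N, 1);
%! [x, flag, relres, iter, resvec] = pcg (A, b, [], N+1);
%! assert (flag, 0);
%! assert (size (resvec, 1), iter + 1);
%! assert (relres, resvec (end, 1) / resvec (1, 1), eps);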