1 ## Copyright (C) 2007-2012 Regents of the University of California
3 ## This file is part of Octave.
5 ## Octave is free software; you can redistribute it and/or modify it
6 ## under the terms of the GNU General Public License as published by
7 ## the Free Software Foundation; either version 3 of the License, or (at
8 ## your option) any later version.
10 ## Octave is distributed in the hope that it will be useful, but
11 ## WITHOUT ANY WARRANTY; without even the implied warranty of
12 ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 ## General Public License for more details.
15 ## You should have received a copy of the GNU General Public License
16 ## along with Octave; see the file COPYING. If not, see
17 ## <http://www.gnu.org/licenses/>.
20 ## @deftypefn {Function File} {[@var{est}, @var{v}, @var{w}, @var{iter}] =} onenormest (@var{A}, @var{t})
21 ## @deftypefnx {Function File} {[@var{est}, @var{v}, @var{w}, @var{iter}] =} onenormest (@var{apply}, @var{apply_t}, @var{n}, @var{t})
23 ## Apply Higham and Tisseur's randomized block 1-norm estimator to
24 ## matrix @var{A} using @var{t} test vectors. If @var{t} exceeds 5, then
25 ## only 5 test vectors are used.
27 ## If the matrix is not explicit, e.g., when estimating the norm of
28 ## @code{inv (@var{A})} given an LU@tie{}factorization, @code{onenormest}
29 ## applies @var{A} and its conjugate transpose through a pair of functions
30 ## @var{apply} and @var{apply_t}, respectively, to a dense matrix of size
31 ## @var{n} by @var{t}.  The implicit version requires an explicit dimension @var{n}.
34 ## Returns the norm estimate @var{est}, two vectors @var{v} and
35 ## @var{w} related by
36 ## @code{norm (@var{w}, 1) = @var{est} * norm (@var{v}, 1)},
37 ## and the number of iterations @var{iter}. The number of
38 ## iterations is limited to 10 and is at least 2.
43 ## N.J. Higham and F. Tisseur, @cite{A Block Algorithm
44 ## for Matrix 1-Norm Estimation, with an Application to 1-Norm
45 ## Pseudospectra}. SIMAX vol 21, no 4, pp 1185-1201.
46 ## @url{http://dx.doi.org/10.1137/S0895479899356080}
49 ## N.J. Higham and F. Tisseur, @cite{A Block Algorithm
50 ## for Matrix 1-Norm Estimation, with an Application to 1-Norm
51 ## Pseudospectra}. @url{http://citeseer.ist.psu.edu/223007.html}
54 ## @seealso{condest, norm, cond}
57 ## Code originally licensed under
59 ## Copyright (c) 2007, Regents of the University of California
60 ## All rights reserved.
62 ## Redistribution and use in source and binary forms, with or without
63 ## modification, are permitted provided that the following conditions are met:
66 ## * Redistributions of source code must retain the above copyright
67 ## notice, this list of conditions and the following disclaimer.
69 ## * Redistributions in binary form must reproduce the above
70 ## copyright notice, this list of conditions and the following
71 ## disclaimer in the documentation and/or other materials provided
72 ## with the distribution.
74 ## * Neither the name of the University of California, Berkeley nor
75 ## the names of its contributors may be used to endorse or promote
76 ## products derived from this software without specific prior
77 ## written permission.
79 ## THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS''
80 ## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
81 ## TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
82 ## PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND
83 ## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
84 ## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
85 ## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
86 ## USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
87 ## ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
88 ## OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
89 ## OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
92 ## Author: Jason Riedy <ejr@cs.berkeley.edu>
93 ## Keywords: linear-algebra norm estimation
96 function [est, v, w, iter] = onenormest (varargin)
## NOTE(review): this is a sampled excerpt of the original file -- many
## intervening lines (endif/else/endfor closers, the initialization of X,
## S, h, est_old, itmax, default_t, etc.) are not shown here.
## --- Argument handling ----------------------------------------------
## Two call forms: onenormest (A, t) with an explicit square matrix, or
## onenormest (apply, apply_t, n, t) with function handles for A*x and A'*x.
98 if (size (varargin, 2) < 1 || size (varargin, 2) > 4)
105 if (ismatrix (varargin{1}))
106 n = size (varargin{1}, 1);
## The 1-norm estimator only makes sense for square matrices.
107 if n != size (varargin{1}, 2),
108 error ("onenormest: matrix must be square");
## Wrap the explicit matrix in the handle interface used by the loop below,
## so both call forms share one code path.
110 apply = @(x) varargin{1} * x;
111 apply_t = @(x) varargin{1}' * x;
112 if (size (varargin) > 1)
## Never use more test vectors than the matrix has columns.
115 t = min (n, default_t);
## Remember single precision so the tolerance (myeps) matches below.
117 issing = isa (varargin {1}, "single");
## --- Implicit form: operators supplied as function handles -----------
119 if (size (varargin, 2) < 3)
124 apply_t = varargin{2};
125 if (size (varargin) > 3)
## In the implicit form, precision is inferred from the dimension argument.
## NOTE(review): varargin{3} is n here -- presumably callers pass n as a
## single to request single-precision work; confirm against documentation.
130 issing = isa (varargin {3}, "single");
133 ## Initial test vectors X.
## Scale each column of X to unit 1-norm so column sums of A*X estimate
## norm (A, 1) directly.
135 X = X ./ (ones (n,1) * sum (abs (X), 1));
137 ## Track if a vertex has been visited.
138 been_there = zeros (n, 1);
140 ## To check if the estimate has increased.
143 ## Normalized vector of signs.
147 myeps = eps ("single");
## --- Main iteration (block 1-norm estimator of Higham & Tisseur) -----
153 for iter = 1 : itmax + 1
154 Y = feval (apply, X);
156 ## Find the initial estimate as the largest A*x.
157 [est, ind_best] = max (sum (abs (Y), 1));
158 if (est > est_old || iter == 2)
161 if (iter >= 2 && est < est_old)
162 ## No improvement, so stop.
170 ## Gone too far.  Stop.
176 ## Test if any of S are approximately parallel to previous S
177 ## vectors or current S vectors.  If everything is parallel,
178 ## stop.  Otherwise, replace any parallel vectors with
## Columns of S are +/-1 sign vectors, so an inner product of n (up to
## roundoff, hence the 4*eps*n tolerance) means two columns are parallel.
180 partest = any (abs (S_old' * S - n) < 4*eps*n);
182 ## All the current vectors are parallel to old vectors.
183 ## We've hit a cycle, so stop.
187 ## Some vectors are parallel to old ones and are cycling,
188 ## but not all of them.  Replace the parallel vectors with
## Draw fresh random +/-1 columns for the parallel ones.
190 numpar = sum (partest);
191 replacements = 2*(rand (n,numpar) < 0.5) - 1;
192 S(:,partest) = replacements;
194 ## Now test for parallel vectors within S.
## Subtracting eye (t) ignores each column's product with itself.
195 partest = any ((S' * S - eye (t)) == n);
197 numpar = sum (partest);
198 replacements = 2*(rand (n,numpar) < 0.5) - 1;
199 S(:,partest) = replacements;
## Apply the transposed operator (A' * S) to the sign vectors.
202 Z = feval (apply_t, S);
204 ## Now find the largest non-previously-visited index per
208 if (iter >= 2 && mhi == ind_best)
209 ## Hit a cycle, stop.
## Rank candidate indices by decreasing h and keep unvisited ones.
212 [h, ind] = sort (h, 'descend');
215 if (all (been_there(firstind)))
216 ## Visited all these before, so stop.
219 ind = ind (!been_there (ind));
220 if (length (ind) < t)
221 ## There aren't enough new vectors, so we're practically
227 ## Visit the new indices.
232 been_there (ind (1 : t)) = 1;
235 ## The estimate est and vector w are set in the loop above.  The
236 ## vector v selects the ind_best column of A.
243 %! A = randn(N) + eye(N);
245 %! nm1inv = onenormest(@(x) U\(L\(P*x)), @(x) P'*(L'\(U'\x)), N, 30)
251 %! [nm1, v1, w1] = onenormest (A);
252 %! [nminf, vinf, winf] = onenormest (A', 6);
253 %! assert (nm1, N, -2*eps);
254 %! assert (nminf, N, -2*eps);
255 %! assert (norm (w1, 1), nm1 * norm (v1, 1), -2*eps)
256 %! assert (norm (winf, 1), nminf * norm (vinf, 1), -2*eps)
261 %! [nm1, v1, w1] = onenormest (@(x) A*x, @(x) A'*x, N, 3);
262 %! [nminf, vinf, winf] = onenormest (@(x) A'*x, @(x) A*x, N, 3);
263 %! assert (nm1, N, -2*eps);
264 %! assert (nminf, N, -2*eps);
265 %! assert (norm (w1, 1), nm1 * norm (v1, 1), -2*eps)
266 %! assert (norm (winf, 1), nminf * norm (vinf, 1), -2*eps)
271 %! [nm1, v1, w1] = onenormest (A);
272 %! [nminf, vinf, winf] = onenormest (A', 6);
273 %! assert (nm1, norm (A, 1), -2*eps);
274 %! assert (nminf, norm (A, inf), -2*eps);
275 %! assert (norm (w1, 1), nm1 * norm (v1, 1), -2*eps)
276 %! assert (norm (winf, 1), nminf * norm (vinf, 1), -2*eps)
278 ## Only likely to be within a factor of 10.
280 %! old_state = rand ("state");
281 %! restore_state = onCleanup (@() rand ("state", old_state));
282 %! rand ('state', 42); % Initialize to guarantee reproducible results
285 %! [nm1, v1, w1] = onenormest (A);
286 %! [nminf, vinf, winf] = onenormest (A', 6);
287 %! assert (nm1, norm (A, 1), -.1);
288 %! assert (nminf, norm (A, inf), -.1);
289 %! assert (norm (w1, 1), nm1 * norm (v1, 1), -2*eps)
290 %! assert (norm (winf, 1), nminf * norm (vinf, 1), -2*eps)