1 ## Copyright (C) 1996-2012 John W. Eaton
3 ## This file is part of Octave.
5 ## Octave is free software; you can redistribute it and/or modify it
6 ## under the terms of the GNU General Public License as published by
7 ## the Free Software Foundation; either version 3 of the License, or (at
8 ## your option) any later version.
10 ## Octave is distributed in the hope that it will be useful, but
11 ## WITHOUT ANY WARRANTY; without even the implied warranty of
12 ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 ## General Public License for more details.
15 ## You should have received a copy of the GNU General Public License
16 ## along with Octave; see the file COPYING. If not, see
17 ## <http://www.gnu.org/licenses/>.
20 ## @deftypefn {Function File} {@var{p} =} polyfit (@var{x}, @var{y}, @var{n})
21 ## @deftypefnx {Function File} {[@var{p}, @var{s}] =} polyfit (@var{x}, @var{y}, @var{n})
22 ## @deftypefnx {Function File} {[@var{p}, @var{s}, @var{mu}] =} polyfit (@var{x}, @var{y}, @var{n})
23 ## Return the coefficients of a polynomial @var{p}(@var{x}) of degree
24 ## @var{n} that minimizes the least-squares-error of the fit to the points
25 ## @code{[@var{x}, @var{y}]}.
27 ## The polynomial coefficients are returned in a row vector.
29 ## The optional output @var{s} is a structure containing the following fields:
33 ## Triangular factor R from the QR@tie{}decomposition.
36 ## The Vandermonde matrix used to compute the polynomial coefficients.
39 ## The degrees of freedom.
42 ## The norm of the residuals.
45 ## The values of the polynomial for each value of @var{x}.
48 ## The second output may be used by @code{polyval} to calculate the
49 ## statistical error limits of the predicted values.
51 ## When the third output, @var{mu}, is present the
52 ## coefficients, @var{p}, are associated with a polynomial in
53 ## @var{xhat} = (@var{x}-@var{mu}(1))/@var{mu}(2),
54 ## where @var{mu}(1) = mean (@var{x}), and @var{mu}(2) = std (@var{x}).
55 ## This linear transformation of @var{x} improves the numerical
56 ## stability of the fit.
57 ## @seealso{polyval, polyaffine, roots, vander, zscore}
60 ## Author: KH <Kurt.Hornik@wu-wien.ac.at>
61 ## Created: 13 December 1994
64 function [p, s, mu] = polyfit (x, y, n)
66   if (nargin < 3 || nargin > 4)
  ## NOTE(review): when the third output mu is requested, x is centered and
  ## scaled before fitting: mu(1) = mean (x), mu(2) = std (x).  Fitting in
  ## xhat = (x - mu(1)) / mu(2) improves the conditioning of the problem.
71     ## Normalize the x values.
72     mu = [mean(x), std(x)];
73     x = (x - mu(1)) / mu(2);
  ## Validate inputs: x and y must agree in size ...
76   if (! size_equal (x, y))
77     error ("polyfit: X and Y must be vectors of the same size");
  ## ... and the polynomial degree must be a scalar, finite,
  ## non-negative integer.
80   if (! (isscalar (n) && n >= 0 && ! isinf (n) && n == fix (n)))
81     error ("polyfit: N must be a non-negative integer");
  ## Remember the orientation of y so the coefficients p can be
  ## returned as a row vector at the end.
84   y_is_row_vector = (rows (y) == 1);
86   ## Reshape x & y into column vectors.
91   ## Construct the Vandermonde matrix.
94   ## Solve by QR decomposition.
  ## Economy-sized QR of the Vandermonde matrix v; with three outputs,
  ## k is the column permutation chosen by the factorization.
95   [q, r, k] = qr (v, 0);
  ## s.normr: Euclidean norm of the residuals, where yf holds the fitted
  ## polynomial values at x (computed in lines elided from this excerpt).
111   s.normr = norm (yf - y);
114   ## Return a row vector.
120 %! x = [-2, -1, 0, 1, 2];
121 %! assert(all (all (abs (polyfit (x, x.^2+x+1, 2) - [1, 1, 1]) < sqrt (eps))));
123 %!error(polyfit ([1, 2; 3, 4], [1, 2, 3, 4], 2))
126 %! x = [-2, -1, 0, 1, 2];
127 %! assert(all (all (abs (polyfit (x, x.^2+x+1, 3) - [0, 1, 1, 1]) < sqrt (eps))));
130 %! x = [-2, -1, 0, 1, 2];
131 %! fail("polyfit (x, x.^2+x+1)");
134 %! x = [-2, -1, 0, 1, 2];
135 %! fail("polyfit (x, x.^2+x+1, [])");
137 ## Test difficult case where scaling is really needed. This example
138 ## demonstrates the rather poor result which occurs when the independent
139 ## variable is not normalized properly.
140 ## Also check the usage of 2nd & 3rd output arguments.
142 %! x = [ -1196.4, -1195.2, -1194, -1192.8, -1191.6, -1190.4, -1189.2, -1188, \
143 %! -1186.8, -1185.6, -1184.4, -1183.2, -1182];
144 %! y = [ 315571.7086, 315575.9618, 315579.4195, 315582.6206, 315585.4966, \
145 %! 315588.3172, 315590.9326, 315593.5934, 315596.0455, 315598.4201, \
146 %! 315600.7143, 315602.9508, 315605.1765 ];
147 %! [p1, s1] = polyfit (x, y, 10);
148 %! [p2, s2, mu] = polyfit (x, y, 10);
149 %! assert (s2.normr < s1.normr)
153 %! p0 = [1i, 0, 2i, 4];
154 %! y0 = polyval (p0, x);
155 %! p = polyfit (x, y0, numel(p0)-1);
156 %! assert (p, p0, 1000*eps)
159 %! x = 1000 + (-5:5);
160 %! xn = (x - mean (x)) / std (x);
162 %! y = polyval (pn, xn);
163 %! [p, s, mu] = polyfit (x, y, numel(pn)-1);
164 %! [p2, s2] = polyfit (x, y, numel(pn)-1);
165 %! assert (p, pn, s.normr)
166 %! assert (s.yf, y, s.normr)
167 %! assert (mu, [mean(x), std(x)])
168 %! assert (s.normr/s2.normr < sqrt(eps))
171 %! x = [1, 2, 3; 4, 5, 6];
172 %! y = [0, 0, 1; 1, 0, 0];
173 %! p = polyfit (x, y, 5);
174 %! expected = [0, 1, -14, 65, -112, 60]/12;
175 %! assert (p, expected, sqrt(eps))