%% Copyright (C) 2006 Peter V. Lanspeary <pvl@mecheng.adelaide.edu.au>
%%
%% This program is free software; you can redistribute it and/or modify it under
%% the terms of the GNU General Public License as published by the Free Software
%% Foundation; either version 3 of the License, or (at your option) any later
%% version.
%%
%% This program is distributed in the hope that it will be useful, but WITHOUT
%% ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
%% FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
%% details.
%%
%% You should have received a copy of the GNU General Public License along with
%% this program; if not, see <http://www.gnu.org/licenses/>.
%%
%% [a,v,k] = arburg(x,poles,criterion)
%%
%% Calculate the coefficients of an autoregressive (AR) model of complex
%% data "x" using the whitening lattice-filter method of Burg (1968). The
%% inverse of the model is a moving-average filter which reduces "x" to
%% white noise. The power spectrum of the AR model is an estimate of the
%% maximum-entropy power spectrum of the data. The function "ar_psd"
%% calculates the power spectrum of the AR model.
%%
%% ARGUMENTS:
%%   x         %% [vector] sampled data
%%
%%   poles     %% [integer scalar] number of poles in the AR model, or
%%             %%   an upper limit on the number of poles if a valid
%%             %%   "criterion" is provided
%%
%%   criterion %% [optional string arg] model-selection criterion. Limits
%%             %%   the number of poles so that spurious poles are not
%%             %%   added when the whitened data has no more information
%%             %%   in it (see Kay & Marple, 1981). Recognised values are:
%%             %%     'AKICc' -- approximate corrected Kullback information
%%             %%                criterion (recommended),
%%             %%     'KIC'   -- Kullback information criterion,
%%             %%     'AICc'  -- corrected Akaike information criterion,
%%             %%     'AIC'   -- Akaike information criterion, and
%%             %%     'FPE'   -- final prediction error criterion.
%%             %%   The default is to NOT use a model-selection criterion.
%%
%% RETURNED VALUES:
%%   a         %% [polynomial/vector] list of (P+1) autoregression
%%             %%   coefficients; for data input x(n) and white noise e(n),
%%             %%   the model is
%%             %%     x(n) = sqrt(v).e(n) + SUM a(k).x(n-k)
%%             %%                          k=1:P
%%
%%   v         %% [real scalar] mean square of the residual noise from the
%%             %%   whitening operation of the Burg lattice filter
%%
%%   k         %% [column vector] reflection coefficients defining the
%%             %%   lattice-filter embodiment of the model
%%
%% NOTES:
%% (1) arburg does not remove the mean from the data. You should remove
%%     the mean from the data if you want a power spectrum; a non-zero mean
%%     can produce large errors in a power-spectrum estimate. See
%%     "help detrend".
%% (2) If you don't know what the value of "poles" should be, choose the
%%     largest (reasonable) value you could want, and use the recommended
%%     criterion 'AKICc' so that arburg can find the model order itself,
%%     e.g. arburg(x,64,'AKICc'); a fuller sketch follows these notes.
%%     AKICc has the least bias and the best resolution of the available
%%     model-selection criteria.
%% (3) arburg runs in both Octave and Matlab, and does not depend on
%%     Octave-Forge or Signal Processing Toolbox functions.
%% (4) Autoregressive and moving-average filters are stored as polynomials,
%%     which, in Matlab/Octave, are row vectors.
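%%
%% EXAMPLE:
%% A sketch of typical use. The AR(2) coefficients, record length and
%% model-order limit below are arbitrary illustrative choices, and the
%% commented-out last line assumes the companion "ar_psd" function
%% mentioned above is available:
%%
%%   N = 1024;
%%   e = randn(N,1);                      %% white driving noise
%%   x = filter(1, [1 -0.75 0.5], e);     %% synthesise AR(2) test data
%%   x = x - mean(x);                     %% remove the mean, as in note (1)
%%   [a, v, k] = arburg(x, 16, 'AKICc');  %% AKICc chooses the model order
%%   %% [psd, freq] = ar_psd(a, v);       %% power-spectrum estimate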
%%
%% NOTE ON SELECTION CRITERIA:
%% AIC, AICc, KIC and AKICc are based on information theory. They attempt
%% to balance the complexity (or length) of the model against how well the
%% model fits the data. AIC and KIC are biased estimates of the asymmetric
%% and the symmetric Kullback-Leibler divergence, respectively. AICc and
%% AKICc attempt to correct the bias. See reference [4].
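%%
%% For a p-pole model of N data samples with residual variance v, the
%% criteria evaluate to (these formulas are repeated in the code
%% comments below):
%%   AIC   = log(v) + 2*(p+1)/N
%%   AICc  = log(v) + 2*(p+1)/(N-p-2)
%%   KIC   = log(v) + 3*(p+1)/N
%%   AKICc = log(v) + p/(N*(N-p)) + (3-(p+2)/N)*(p+1)/(N-p-2)
%%   FPE   = v * (N+p+1)/(N-p-1)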
%%
%% REFERENCES:
%% [1] John Parker Burg (1968),
%%     "A new analysis technique for time series data",
%%     NATO Advanced Study Institute on Signal Processing with Emphasis on
%%     Underwater Acoustics, Enschede, Netherlands, Aug. 12-23, 1968.
%%
%% [2] Steven M. Kay and Stanley Lawrence Marple Jr.,
%%     "Spectrum analysis -- a modern perspective",
%%     Proceedings of the IEEE, Vol. 69, pp. 1380-1419, Nov. 1981.
%%
%% [3] William H. Press, Saul A. Teukolsky, William T. Vetterling and
%%     Brian P. Flannery,
%%     "Numerical Recipes in C: The Art of Scientific Computing",
%%     2nd edition, Cambridge University Press, 2002, Section 13.7.
%%
%% [4] Abd-Krim Seghouane and Maiza Bekara,
%%     "A small sample model selection criterion based on Kullback's
%%     symmetric divergence", IEEE Transactions on Signal Processing,
%%     Vol. 52(12), pp. 3314-3323, Dec. 2004.

function [varargout] = arburg( x, poles, criterion )
  %%
  %% Check arguments.
  if ( nargin < 2 )
    error( 'arburg(x,poles): Need at least 2 args.' );
  elseif ( ~isvector(x) || length(x) < 3 )
    error( 'arburg: arg 1 (x) must be vector of length >2.' );
  elseif ( ~isscalar(poles) || ~isreal(poles) || fix(poles)~=poles || poles<=0.5 )
    error( 'arburg: arg 2 (poles) must be positive integer.' );
  elseif ( poles >= length(x)-2 )
    %% the lattice-filter algorithm requires "poles < length(x)";
    %% AKICc and AICc require "length(x)-poles-2 > 0"
    error( 'arburg: arg 2 (poles) must be less than length(x)-2.' );
  elseif ( nargin > 2 && ~isempty(criterion) && ...
           ( ~ischar(criterion) || size(criterion,1) ~= 1 ) )
    error( 'arburg: arg 3 (criterion) must be a string.' );
  end
  %%
  %% Set the model-selection-criterion flags.
  %% is_AKICc, isa_KIC and is_corrected are short-circuit flags.
  if ( nargin > 2 && ~isempty(criterion) )
    is_AKICc     = strcmp(criterion,'AKICc');             %% AKICc
    isa_KIC      = is_AKICc || strcmp(criterion,'KIC');   %% KIC or AKICc
    is_corrected = is_AKICc || strcmp(criterion,'AICc');  %% AKICc or AICc
    use_inf_crit = is_corrected || isa_KIC || strcmp(criterion,'AIC');
    use_FPE      = strcmp(criterion,'FPE');
    if ( ~use_inf_crit && ~use_FPE )
      error( 'arburg: value of arg 3 (criterion) not recognised' );
    end
  else
    use_inf_crit = 0;
    use_FPE      = 0;
  end
  %%
  %% f(n) = forward prediction error
  %% b(n) = backward prediction error
  %% Storage of f(n) and b(n) is a little tricky. Because f(n) is always
  %% combined with b(n-1), f(1) and b(N) are never used, and therefore are
  %% not stored. Not storing unused data makes the calculation of the
  %% reflection coefficient look much cleaner :)
  %% N.B. {initial v} = {error for zero-order model}
  %%                  = {zero-lag autocorrelation} = E(x*conj(x)) = x*x'/N,
  %%      where E is the expectation operator.
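  %% For example (illustration only): with N = 4, f holds [f(2) f(3) f(4)]
  %% and b holds [b(1) b(2) b(3)], so the k-th elements of f and b are
  %% exactly the pair {f(n), b(n-1)} combined below.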
  N = length(x);
  if ( size(x,1) > 1 )  % if x is a column vector
    f = x(2:N);
    b = x(1:N-1);
    v = real(x'*x) / N;
  else                  % if x is a row vector
    f = x(2:N).';
    b = x(1:N-1).';
    v = real(x*x') / N;
  end
  %%
  %% new_crit/old_crit is the model-selection criterion
  new_crit = abs(v);
  old_crit = 2 * new_crit;
  for p = 1:poles
    %%
    %% new reflection coeff = -2*E(f.conj(b)) / ( E(f^2) + E(b^2) )
    last_k = -2 * (b' * f) / ( f' * f + b' * b );
    %% Levinson-Durbin recursion for the residual
    new_v = v * ( 1.0 - real(last_k * conj(last_k)) );
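    %% (|last_k| <= 1 because |2*b'*f| <= f'*f + b'*b by the Cauchy-Schwarz
    %% and AM-GM inequalities, so new_v <= v: adding a pole never increases
    %% the residual.)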
    %%
    %% Apply the model-selection criterion and break out of the loop if it
    %% increases (rather than decreases).
    %% Do this before updating the old model "a" and "v".
    %%
    %% * Information criteria (AKICc, KIC, AICc, AIC)
    if ( use_inf_crit && p > 1 )
      old_crit = new_crit;
      %% AKICc = log(new_v) + p/N/(N-p) + (3-(p+2)/N)*(p+1)/(N-p-2);
      %% KIC   = log(new_v) + 3*(p+1)/N;
      %% AICc  = log(new_v) + 2*(p+1)/(N-p-2);
      %% AIC   = log(new_v) + 2*(p+1)/N;
      %% -- Calculate KIC, AICc & AIC by using is_AKICc, isa_KIC and
      %%    is_corrected to "short circuit" the AKICc calculation.
      %%    The extra 4--12 scalar arithmetic ops should be quicker than
      %%    doing if...elseif...elseif...elseif...elseif.
      new_crit = log(new_v) + is_AKICc*p/N/(N-p) + ...
                 (2+isa_KIC-is_AKICc*(p+2)/N) * (p+1) / (N-is_corrected*(p+2));
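      %% Flag values (is_AKICc, isa_KIC, is_corrected) per criterion:
      %%   'AIC'   -> 0,0,0: log(new_v) + 2*(p+1)/N
      %%   'AICc'  -> 0,0,1: log(new_v) + 2*(p+1)/(N-p-2)
      %%   'KIC'   -> 0,1,0: log(new_v) + 3*(p+1)/N
      %%   'AKICc' -> 1,1,1: the full expression above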
      if ( new_crit > old_crit )
        break;
      end
    %%
    %% * (FPE) Final prediction error
    elseif ( use_FPE && p > 1 )
      old_crit = new_crit;
      new_crit = new_v * (N+p+1)/(N-p-1);
      if ( new_crit > old_crit )
        break;
      end
    end
    %%
    %% Update the model "a" and "v",
    %% using the Levinson-Durbin recursion formula (for complex data).
    if ( p == 1 )
      a = last_k;
    else
      a = [ prev_a + last_k .* conj(prev_a(p-1:-1:1)) last_k ];
    end
    v = new_v;
    if ( p < poles )
      prev_a = a;
      %% Calculate the new prediction errors (by recursion):
      %%   f(p,n) = f(p-1,n)   + k * b(p-1,n-1)       n = 2,3,...,N
      %%   b(p,n) = b(p-1,n-1) + conj(k) * f(p-1,n)   n = 2,3,...,N
      %% Remember that f(p,1) is not stored, so don't calculate it; make
      %% f(p,2) the first element in f. b(p,N) isn't calculated either.
      nn    = N - p;
      new_f = f(2:nn) + last_k * b(2:nn);
      b     = b(1:nn-1) + conj(last_k) * f(1:nn-1);
      f     = new_f;
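      %% Each extra pole consumes one sample of "history": after stage p,
      %% f and b each hold N-p-1 elements.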
    end
  end
  varargout{1} = [1 a];