1 ## Copyright (C) 2005 Michel D. Schmid <michaelschmid@users.sourceforge.net>
4 ## This program is free software; you can redistribute it and/or modify it
5 ## under the terms of the GNU General Public License as published by
6 ## the Free Software Foundation; either version 2, or (at your option)
9 ## This program is distributed in the hope that it will be useful, but
10 ## WITHOUT ANY WARRANTY; without even the implied warranty of
11 ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 ## General Public License for more details.
14 ## You should have received a copy of the GNU General Public License
15 ## along with this program; see the file COPYING. If not, see
16 ## <http://www.gnu.org/licenses/>.
19 ## @deftypefn {Function File} {@var{net}} = newff (@var{Pr},@var{ss},@var{trf},@var{btf},@var{blf},@var{pf})
20 ## @code{newff} creates a feed-forward backpropagation network
23 ## Pr - R x 2 matrix of min and max values for R input elements
24 ## Ss - 1 x Ni row vector with size of ith layer, for N layers
25 ## trf - 1 x Ni list with transfer function of ith layer,
27 ## btf - Batch network training function,
28 ## default = "trainlm"
29 ## blf - Batch weight/bias learning function,
30 ## default = "learngdm"
31 ## pf - Performance function,
37 ## Pr = [0.1 0.8; 0.1 0.75; 0.01 0.8];
38 ## it's a 3 x 2 matrix, this means 3 input neurons
40 ## net = newff(Pr, [4 1], @{"tansig","purelin"@}, "trainlm", "learngdm", "mse");
45 ## @seealso{sim, init, train}
47 ## Author: Michel D. Schmid
49 function net = newff(Pr,ss,transFunc,trainFunc,notUsed,performFunc)
51 ## initial description
52 ## newff(Pr,ss,transfunc,trainFunc,notUsed,performFunc)
53 ## * Pr is a nx2 matrix with min and max values of standardized inputs
55 ## * ss is a row vector, the first element describes the number
56 ## of hidden neurons, the second element describes the number
58 ## * transFunc is a cell array of transfer function, standard is "tansig"
59 ## * trainFunc is the training algorithm
60 ## * notUsed exist only because we have only one train algorithm which doesn't
61 ## need a weight learning function
62 ## * performFunc is written for the performance function, standard is "mse"
64 ## check range of input arguments
65 error(nargchk(2,6,nargin))
67 ## get number of layers (without input layer)
72 # the number of transfer functions depends on the number of
73 # hidden layers, so we have to create a loop here 30.09.09 (dd.mm.yy)
## default transfer functions: "purelin" for the output layer,
## "tansig" for every hidden layer
76 transFunc{i,1} = "purelin";
78 transFunc{i,1}= "tansig";
## "trainlm" is the default training algorithm when the caller supplies none
83 trainFunc = "trainlm";
89 ## it doesn't matter what nargin 5 is ...!
90 ## it won't be used ... it's only for matlab compatibility
## validate the input arguments; checkInputArgs raises error() on bad input
98 checkInputArgs(Pr,ss);
100 ## Standard architecture of neural network
101 net = __newnetwork(1,nLayers,1,"newff");
103 ## first argument: number of inputs, nothing else allowed till now
104 ## it's not the same as the number of neurons in this input
105 ## second argument: number of layers, including output layer
106 ## third argument: number of outputs, nothing else allowed till now
107 ## it's not the same as the number of neurons in this output
109 ## set inputs with limit of only ONE input
## one input neuron per row of Pr (nRows inputs)
110 net.inputs{1}.range = Pr;
111 [nRows, nColumns] = size(Pr);
112 net.inputs{1}.size = nRows;
## first-layer input weights start as a 1 x nRows zero matrix
115 net.IW{1,1} = zeros(1,nRows);
116 ## set more needed empty cells
117 for iLayers = 2:nLayers
118 net.IW{iLayers,1} = [];
119 # net.IW{2:nLayers,1} = []; # old code
121 ## set number of bias, one per layer
122 for iBiases = 1:nLayers
123 net.b{iBiases,1} = 0;
126 ## set rest of layers
128 ## set size of LayerWeights LW
129 ## the numbers of rows and columns depends on the
130 ## number of hidden neurons and output neurons...
131 ## 2 hidden neurons match 2 columns ...
132 ## 2 output neurons match 2 rows ...
## weights between consecutive layers, sized by the two layers' neuron counts
134 net.LW{i,i-1} = zeros(ss(i),ss(i-1));
136 for iLayers = 1:nLayers
137 net.layers{iLayers}.size = ss(iLayers);
138 net.layers{iLayers}.transferFcn = transFunc{iLayers};
141 ## define everything with "targets"
## number of targets is the size of the output layer, i.e. the last element of ss
142 net.numTargets = ss(end);
143 net.targets = cell(1,nLayers);
146 net.targets{i}.size = ss(end);
147 ## next row of code is only for MATLAB(TM) compatibility
148 ## I never used this the last 4 years ...
149 net.targets{i}.userdata = "Put your custom informations here!";
156 net.performFcn = performFunc;
160 # net.biases{i}.learnFcn = blf;
161 # net.layerWeights{i,:}.learnFcn = blf;
162 net.biases{i}.size = ss(i);
166 net.trainFcn = trainFunc; # actually, only trainlm will exist
## fill net.trainParam with the defaults for the chosen training algorithm
167 net = setTrainParam(net);
171 # ======================================================
173 # additional check functions...
175 # ======================================================
176 function checkInputArgs(Pr,ss)
## validate the input-range matrix Pr and the layer-size vector ss;
## raises error() on the first violation found, returns nothing on success
178 ## check if Pr has correct format
179 if !isreal(Pr) || (size(Pr,2)!=2)
180 error("Input ranges must be a two column matrix!")
182 if any(Pr(:,1) > Pr(:,2)) # check if numbers in the second column are larger than in the first one
183 error("Input ranges has values in the second column larger as in the same row of the first column.")
186 ## check if ss has correct format, must be 1xR row vector
188 error("Layer sizes is not a row vector.")
191 error("There must be at least one hidden layer and one output layer!")
## sk presumably holds the layer sizes taken from ss -- TODO verify against full source
195 if !isreal(sk) || any(sk<1) || any(round(sk)!=sk)
196 error("Layer sizes is not a row vector of positive integers.")
201 # ======================================================
203 # additional set functions...
205 # ======================================================
206 function net = setTrainParam(net)
## fill net.trainParam with default training parameters for net.trainFcn;
## only the Levenberg-Marquardt algorithm ("trainlm") is supported here
208 trainFunc = net.trainFcn;
## defaults below are the "trainlm" settings (mu, mu_dec, mu_inc, mu_max
## are Levenberg-Marquardt damping parameters)
212 net.trainParam.epochs = 100;
213 net.trainParam.goal = 0;
214 net.trainParam.max_fail = 5;
215 net.trainParam.mem_reduc = 1;
216 net.trainParam.min_grad = 1.0000e-010;
217 net.trainParam.mu = 0.0010;
218 net.trainParam.mu_dec = 0.1;
219 net.trainParam.mu_inc = 10;
220 net.trainParam.mu_max = 1.0000e+010;
221 net.trainParam.show = 50;
222 net.trainParam.time = Inf;
## reached for any unsupported training algorithm
224 error("newff:setTrainParam: this train algorithm isn't available till now!")
228 # ========================================================
234 %! disp("testing newff")
236 # if input range Pr has only one column
239 %! fail("newff(Pr,[1 1],{'tansig','purelin'},'trainlm','unused','mse')","Input ranges must be a two column matrix!")
241 # if input range Pr has two columns
244 %! assert(__checknetstruct(newff(Pr,[1 1],{'tansig','purelin'},'trainlm','unused','mse')))
245 ## __checknetstruct returns TRUE if input arg is a network structure ...
247 # if input range Pr has three columns
249 %! Pr = [1 2 3; 4 5 6];
250 %! fail("newff(Pr,[1 1],{'tansig','purelin'},'trainlm','unused','mse')","Input ranges must be a two column matrix!")
252 # if input range has greater values in the second col than in the first col ...
255 %! fail("newff(Pr,[1 1],{'tansig','purelin'},'trainlm','unused','mse')",\
256 %! "Input ranges has values in the second column larger as in the same row of the first column.")
258 # check if ss has correct format
261 %! fail("newff(Pr,[1 1; 2 3],{'tansig','purelin'},'trainlm','unused','mse')",\
262 %! "Layer sizes is not a row vector.")
264 # check if ss has correct format
267 %! assert(__checknetstruct(newff(Pr,[ 2 3],{'tansig','purelin'},'trainlm','unused','mse')))
269 # check if ss has correct format
272 %! fail("newff(Pr,[1],{'tansig','purelin'},'trainlm','unused','mse')",\
273 %! "There must be at least one hidden layer and one output layer!")
275 # check if ss has correct format
278 %! fail("newff(Pr,[-1 1],{'tansig','purelin'},'trainlm','unused','mse')",\
279 %! "Layer sizes is not a row vector of positive integers.")