1 ## Copyright (C) 2006 Michel D. Schmid <email: michaelschmid@users.sourceforge.net>
4 ## This program is free software; you can redistribute it and/or modify it
5 ## under the terms of the GNU General Public License as published by
6 ## the Free Software Foundation; either version 2, or (at your option)
9 ## This program is distributed in the hope that it will be useful, but
10 ## WITHOUT ANY WARRANTY; without even the implied warranty of
11 ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 ## General Public License for more details.
14 ## You should have received a copy of the GNU General Public License
15 ## along with this program; see the file COPYING. If not, see
16 ## <http://www.gnu.org/licenses/>.
19 ## @deftypefn {Function File} {}[@var{perf}, @var{Ee}, @var{Aa}, @var{Nn}] = __calcperf (@var{net},@var{xx},@var{Im},@var{Tt})
20 ## @code{__calcperf} calculates the performance of a multi-layer neural network.
21 ## PLEASE DON'T USE IT ELSEWHERE, it probably won't work.
24 ## Author: Michel D. Schmid
27 function [perf,Ee,Aa,Nn] = __calcperf(net,xx,Im,Tt)
30 ## perf, net performance.. from input to output through the hidden layers
31 ## Aa, output values of the hidden and last layer (output layer)
32 ## is used for NEWFF network types
34 ## calculate bias terms
35 ## must have the same number of columns like the input matrix Im
36 [nRows, nColumns] = size(Im);
37 Btemp = cell(net.numLayers,1); # Btemp: bias matrix
38 ones1xQ = ones(1,nColumns);
39 for i= 1:net.numLayers
40 Btemp{i} = net.b{i}(:,ones1xQ);
44 IWtemp = cell(net.numLayers,net.numInputs,1);# IW: input weights ...
45 LWtemp = cell(net.numLayers,net.numLayers,1);# LW: layer weights ...
46 Aa = cell(net.numLayers,1);# Outputs hidden and output layer
47 Nn = cell(net.numLayers,1);# outputs before the transfer function
48 IW = net.IW; # input weights
49 LW = net.LW; # layer weights
51 ## calculate the whole network till outputs are reached...
52 for iLayers = 1:net.numLayers
54 ## calculate first input weights to weighted inputs..
55 ## this can be done with matrix calculation...
57 ## to do this, there must be a special matrix ...
58 ## e.g. IW = [1 2 3 4 5; 6 7 8 9 10] * [ 1 2 3; 4 5 6; 7 8 9; 10 11 12; 1 2 3];
60 IWtemp{iLayers,1} = IW{iLayers,1} * Im;
61 onlyTempVar = [IWtemp(iLayers,1) Btemp(iLayers)];
63 IWtemp{iLayers,1} = [];
66 ## now calculate layer weights to weighted layer outputs
69 LWtemp{iLayers,1} = LW{iLayers,iLayers-1} * Ad;
70 onlyTempVar = [LWtemp(iLayers,1) Btemp(iLayers)];
72 LWtemp{iLayers,1} = [];
75 Nn{iLayers,1} = onlyTempVar{1};
76 for k=2:length(onlyTempVar)
77 Nn{iLayers,1} = Nn{iLayers,1} + onlyTempVar{k};
80 ## now calculate with the transfer functions the layer output
81 switch net.layers{iLayers}.transferFcn
83 Aa{iLayers,1} = purelin(Nn{iLayers,1});
85 Aa{iLayers,1} = tansig(Nn{iLayers,1});
87 Aa{iLayers,1} = logsig(Nn{iLayers,1});
89 error(["Transfer function: " net.layers{iLayers}.transferFcn " doesn't exist!"])
92 endfor # iLayers = 1:net.numLayers
94 ## now calc network error
95 Ee = cell(net.numLayers,1);
98 Ee{i,1} = Tt{i,1} - Aa{i,1};# Tt: target
99 # Ee will be the error vector cell array
102 ## now calc network performance
103 switch(net.performFcn)
107 error("for performance functions, only mse is currently valid!")