// Near-final version of the code (it does not yet fully work as it should):
// -------------------------------------------------------------------
// Time-series prediction over the data rows below using a
// feedforward neural network (Scilab ANN toolbox, no-bias variant).
// -------------------------------------------------------------------
// remove all variables so the run starts from a clean workspace
clear
// clear the console window
clc
// fix the RNG seed so weight initialization (and thus the whole run)
// is reproducible
rand('seed',0);
// start a stopwatch; elapsed time is reported at the end via toc()
tic();
// Training data: 27 historical records, one per row, 7 values per row.
// NOTE(review): columns look like 5 main numbers (~1-50) followed by
// 2 extra numbers (~1-10), i.e. a lottery draw history — confirm with
// the data source.
P = [15 19 20 45 49 7 8
8 12 13 39 44 4 9
6 9 31 43 44 6 9
2 30 34 35 45 1 2
3 17 31 34 40 1 2
14 20 23 39 49 4 10
10 19 24 30 39 2 4
3 12 24 37 38 3 7
2 3 30 31 45 6 8
8 14 23 30 45 1 9
25 31 38 49 50 5 10
12 20 21 22 35 4 10
7 12 28 34 45 3 6
6 27 30 35 41 4 5
4 14 25 34 49 4 9
1 23 32 45 49 5 10
5 12 20 29 48 7 9
1 7 12 23 39 3 4
7 16 22 36 44 3 4
2 6 30 32 49 1 4
2 13 39 45 47 4 6
12 22 24 29 38 5 6
15 19 35 36 41 5 10
1 17 29 39 42 7 8
9 14 28 30 37 3 10
13 19 23 34 41 3 8
3 21 26 40 41 8 10];
// sliding-window sizes for pattern extraction
k=6; // number of consecutive past rows forming one input pattern
c=3; // number of future rows to predict per pattern
// learning parameters; original suggestion was lp=[0.1 0.05 0.5 0.1]
// presumably [learning rate, error threshold, momentum, flat-spot
// elimination] as used by ann_FF_Mom_online_nb — verify against the
// ANN toolbox documentation
// valid ranges: 0.1-1 0-0.1 0-0.9999 0-0.25
lp=[0.2 0.05 0.5 0];
// T = number of training epochs; 12000 for a full run, 200/500 for testing
T=200;
// data dimensions
[rowsdata,colsdata]=size(P);
// normalize P to [mn1,mx1] using the GLOBAL min/max (Scilab's max(P)
// is the scalar maximum of the whole matrix) — range chosen to match
// the weight-init range r below
mx=max(P);
mn=min(P);
mx1=1;
mn1=-1;
// NOTE(review): with mn1=-1 the normalized data crosses zero, so the
// relative-change step further down divides by values that can be 0
// (-> Inf/NaN) or negative (sign-flipped relative change). This is a
// likely cause of the "does not fully work" remark; consider a strictly
// positive normalization range.
P=(P-mn)/(mx-mn)*(mx1-mn1)+mn1;
// Convert absolute values to row-over-row relative changes:
//   S(w-1,:) = (P(w,:) - P(w-1,:)) ./ P(w-1,:)
// Vectorized over all rows, and terminated with ';' — the original
// per-row loop lacked the semicolon, so the growing matrix S was
// echoed to the console on every iteration (pure overhead).
// NOTE(review): P is normalized to [-1,1], so the denominator can be
// zero (Inf/NaN) or negative (sign-flipped change). Behavior is kept
// unchanged here; revisit together with the normalization range above.
S=(P(2:rowsdata,:)-P(1:rowsdata-1,:))./P(1:rowsdata-1,:);
// network input series (one row shorter than P)
X=S; // (1:rowsdata-1,:);
// shift into a range better suited to the network (original "fix")
X=X+0.5;
// input row count and number of series (columns)
[rows,cols]=size(X);
// Build the training set as sliding windows over every series (column).
// Each input pattern x is k consecutive values; its target t is the c
// values that immediately follow. Patterns are stored one per column,
// column-major over the series — exactly the order the trainer expects.
x=[];
t=[];
for a=1:cols
for i=1:(rows-k-c)
window=X(i:i+k+c-1,a); // k pattern points followed by c target points
x=[x window(1:k)];
t=[t window(k+1:k+c)];
end
end
// layer sizes are derived from the pattern/target dimensions
[in_count, pattern_count]=size(x);
[out_count, pattern_count]=size(t);
// network topology: input layer sized to the pattern, one hidden layer
// of 11 neurons, output layer sized to the prediction
N=[in_count 11 out_count];
// random weight-initialization range, default [-1,1]
r=[-1,1];
// Initialize the weight hypermatrix (feedforward net without bias).
//   N - row vector of neurons per layer; N(1) is the input pattern
//       size, the last entry the output/target size
//   r - [lower, upper] bounds for the random initial weights
//       (optional; default [-1,1])
//   W - weight hypermatrix in the format used by ann_FF_run_nb and the
//       other no-bias feedforward routines of the ANN toolbox
W=ann_FF_init_nb(N, r);
// Train with online backpropagation with momentum.
//   x  - input patterns, one per column
//   t  - targets, one per column, aligned with x
//   N  - layer sizes (as above)
//   W  - initial weights (from ann_FF_init_nb)
//   lp - learning parameters (see definition above)
//   T  - number of epochs (full passes through the pattern set)
[W,Delta_W_old]=ann_FF_Mom_online_nb(x,t,N,W,lp,T);
// Run the trained net once per series: feed the newest in_count values
// of each column and collect the c predicted values.
// ann_FF_run_nb(x,N,W) returns one output column per input column.
result=[];
for s=1:cols
latest=X(rows-in_count+1:rows,s); // most recent complete pattern of series s
result=[result ann_FF_run_nb(latest,N,W)];
end
// stack the predictions below the known data
result=[X; result];
// undo the +0.5 shift applied before training
result=result-0.5;
// Reconstruct absolute (normalized) values: prepend the first data row,
// then integrate the relative changes: P(w) = P(w-1) + rel(w)*P(w-1)
resultP = [P(1,:); result];
for w=2:(rowsdata+out_count)
// ';' added: the original statement echoed the whole resultP matrix to
// the console on every iteration, which dominated the run time on some
// systems (see the Linux/Windows timing note below)
resultP(w,:)=resultP(w,:).*resultP(w-1,:)+resultP(w-1,:);
end
// undo the [mn1,mx1] normalization back to the original value range
resultP=(resultP-mn1)/(mx1-mn1)*(mx-mn)+mn;
// plot every series: known history followed by the predicted tail
pY=1:(rowsdata+out_count);
plot(pY,resultP);
// legend('seria 1','seria 2','seria 3','seria 4','seria 5','seria 6','seria 7','seria 8','seria 9',[3])
disp(resultP);
disp(toc()); // elapsed time; originally Linux=3.97s Windows=31.84s — the gap was likely the console echo fixed above