Neural network programs written in MATLAB
1.
% bp.m - Implementation of backpropagation algorithm
% (C) copyright 2001 by Yu Hen Hu
% created: 3/17/2001
% call bpconfig.m, cvgtest.m, bpdisplay.m, bptest.m
% rsample.m, randomize.m, actfun.m, actfunp.m
% partunef.m
% modified: 12/10/2003 handle case when testing file has no label
clear all,
bpconfig; % configure the MLP network and learning parameters.
% BP iterations begins
while not_converged==1,
% start a new epoch
% Randomly select K training samples from the training set.
[train,ptr,train0]=rsample(train0,K,Kr,ptr); % train is K by M+N
z{1}=(train(:,1:M))'; % input sample matrix M by K
d=train(:,M+1:MN)'; % corresponding target value N by K
% Feed-forward phase, compute sum of square errors
for l=2:L, % the l-th layer
u{l}=w{l}*[ones(1,K);z{l-1}]; % u{l} is n(l) by K
z{l}=actfun(u{l},atype(l));
end
error=d-z{L}; % error is N by K
E(t)=sum(sum(error.*error));
% Error back-propagation phase, compute delta error
delta{L}=actfunp(u{L},atype(L)).*error; % N (=n(L)) by K
if L>2,
for l=L-1:-1:2,
delta{l}=(w{l+1}(:,2:n(l)+1))'*delta{l+1}.*actfunp(u{l},atype(l));
end
end
% update the weight matrix using gradient, momentum and random perturbation
for l=2:L,
dw{l}=alpha*delta{l}*[ones(1,K);z{l-1}]'+...
mom*dw{l}+randn(size(w{l}))*0.005;
w{l}=w{l}+dw{l};
end
% display the training error
bpdisplay;
% Test convergence to see if the convergence condition is satisfied,
cvgtest;
t = t + 1; % increment epoch count
end % while loop
disp('Final training results:')
if classreg==0,
[Cmat,crate]=bptest(wbest,tune,atype),
elseif classreg==1,
SS=bptestap(wbest,tune,atype),
end
if testys==1,
disp('Apply trained MLP network to the testing data. The results are: ');
if classreg==0,
[Cmat,crate]=bptest(wbest,test0,atype),
elseif classreg==1,
[SS,out]=bptestap(wbest,test0,atype);
figure(2),clf,plot(test0,out),title('output of testing results')
end
end
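The script above relies on several helper m-files (bpconfig.m, rsample.m, randomize.m, actfun.m, actfunp.m, cvgtest.m, bpdisplay.m, bptest.m) that are not reproduced here. To help trace the feed-forward and back-propagation steps, here is a minimal sketch of what actfun.m and actfunp.m might look like, assuming atype uses 1 for a logistic sigmoid, 2 for tanh, and anything else for a linear unit (the actual coding in the original package may differ); each function would live in its own m-file:
% actfun.m - elementwise activation z = f(u)  (hypothetical sketch)
function z = actfun(u, atype)
switch atype
    case 1, z = 1./(1+exp(-u));   % logistic sigmoid
    case 2, z = tanh(u);          % hyperbolic tangent
    otherwise, z = u;             % linear unit
end
% actfunp.m - derivative f'(u) used in the delta computation (hypothetical sketch)
function zp = actfunp(u, atype)
switch atype
    case 1, z = 1./(1+exp(-u)); zp = z.*(1-z);
    case 2, zp = 1 - tanh(u).^2;
    otherwise, zp = ones(size(u));
end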
2.
echo on
% LearnER.m - Example of Error Correcting Learning
% copyright (c) 1996-2000 by Yu Hen Hu
% Created: 9/2/96
% Modified: 1/28/2000
% Modified: 9/3/2001 add additional runs of LMS and display weight converge curve
%
clear all
x=[ 1 1 1 1
0.5 -0.4 1.1 0.7
0.8 0.4 -0.3 1.2];
d=[1 0 0 1];
w=zeros(3,1);
weight=[];
eta=.01;
echo off; pause
for n=1:4,
y(n) = w'*x(:,n);
e(n) = d(n) - y(n);
w=w+eta*e(n)*x(:,n);
weight=[weight w];
['iteration #' int2str(n) ':']
weight
pause
end
for m=1:499,
x0=randomize(x')'; % change the order of presentation of x
for n=1:4,
y(n) = w'*x0(:,n);
e(n) = d(n) - y(n);
w=w+eta*e(n)*x0(:,n);
weight=[weight w];
end
end
figure(1),
subplot(311),plot([1:size(weight,2)],weight(1,:)),ylabel('w0')
title('convergence curve of the weights')
subplot(312),plot([1:size(weight,2)],weight(2,:)),ylabel('w1')
subplot(313),plot([1:size(weight,2)],weight(3,:)),ylabel('w2')
echo on
% Batched mode LS solution
R = x*x'
rho=sum((x*diag(d))')'
w_ls = inv(R)*rho
error = d - w_ls'*x;
ernorm = error*error'
echo off
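Both this script and the perceptron example below call a helper randomize.m that is not listed. Judging from its usage (randomize(x')' to shuffle the presentation order of the columns of x, and randomize(orig) to shuffle training rows), it appears to return its argument with the rows in random order; a one-line sketch under that assumption:
% randomize.m - return the rows of A in a random order (hypothetical sketch)
function B = randomize(A)
B = A(randperm(size(A,1)),:);
After the extra 499 randomized passes, the LMS weight vector w should settle close to the batch least-squares solution w_ls computed at the end, which is what the convergence curves in figure 1 illustrate.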
3.
%perceptron.m - perceptron learning algorithm
% Input: train(:,1:M) - pattern, train(:,M+1) - target
% Output: weight vector w=[w0 w1 ... wM], w0: bias
% actual output vector y
% Need to call m-file routine: datasepf.m, sline.m
% copyright (C) 1996-2001 by Yu Hen Hu
% Modified: 2/9/2000, 2/3/2001
% K2: # of training samples
% M: feature space dimension
clear all, clf
gdata=input('Enter 0 to load a data file, Return to generate separable data: ');
if isempty(gdata) || gdata~=0,
% generate random training data
K2=input('number of training samples = ');
[orig,slope]=datasepf(K2); % slope is the slope of separating plane
% that has the formula: y = slope*x + 0.5*(1-slope)
else
disp('enter the data matrix, row by row, [x1 .. xN t]');
orig=input(' start from class 1, followed by class 0: ');
end
[Km,Kn]=size(orig);
M=Kn-1; % number of inputs
K0=sum([orig(:,Kn)==0]); K1=Km-K0; K2=K0+K1;% # of targets = 0 and 1
mdisplay=10; % # of displaying hyperplane before checking for stopping
% Initial hyperplane
% w=[rand(1,M) 0]; % initial random weights
% The initial hyperplane can be estimated as a hyperplane separating
% a pair of data sample with different labels
% in orig, this is the first and the last data sample since there are
% only two classes and sorted according to class labels
% the separating hyperplane of two points a and b
% has the normal vector [-0.5(|b|^2-|a|^2) b-a]
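% (To see why: a point x lies on the bisecting hyperplane of a and b when
%  |x-a|^2 = |x-b|^2, i.e. 2*(b-a)'*x = |b|^2-|a|^2.  With the augmented
%  input [1; x] this gives the weights w = [-0.5*(|b|^2-|a|^2), (b-a)'],
%  which is exactly what the next three lines compute.)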
wa=orig(1,1:M); wb=orig(K2,1:M);
wmag=0.5*(wb*wb'-wa*wa');
wunit=wb-wa;
w=[-wmag wunit(1) wunit(2)];
figure(1)
subplot(1,2,2),plot(orig(1:K1,1),orig(1:K1,2),'*',orig(K1+1:K2,1),orig(K1+1:K2,2),'o')
axis('square');
v=axis;
[lx,ly]=sline(w,v);
subplot(1,2,1),plot(orig(1:K1,1),orig(1:K1,2),'*',...
orig(K1+1:K2,1),orig(K1+1:K2,2),'o',lx,ly)
axis('square');
title('Initial hyperplane')
converged=0;
% 0 < eta < 1/x(k)_max.
etamax=sqrt(max(orig(:,1).*orig(:,1)+orig(:,2).*orig(:,2)));
eta=input(['0 < eta < ' num2str(etamax) ', Enter eta = '])
epoch=0;
while converged==0, % not converged yet
train=randomize(orig);
for i=1:K2,
y(i)=0.5*(1+sign(w*[1;train(i,1:M)']));
w=w+eta*(train(i,M+1)-y(i))*[1 train(i,1:M)];
[lx,ly]=sline(w,v);
subplot(1,2,2),plot(orig(1:K1,1),orig(1:K1,2),'*g',...
orig(K1+1:K2,1),orig(K1+1:K2,2),'og',lx,ly,'-',...
train(i,1),train(i,2),'sr');
axis('square');
pause(0.1)
drawnow
end % for loop
epoch=epoch+1;
if sum(abs(train(:,M+1)-y'))==0, % check if converged
converged=1;
end
if rem(epoch,mdisplay)==0,
converged=input('type 1 to terminate, Return to continue : ')
if isempty(converged),
converged=0;
end
end
if converged==1,
[lx,ly]=sline(w,v);
subplot(1,2,2),plot(orig(1:K1,1),orig(1:K1,2),'*',...
orig(K1+1:K2,1),orig(K1+1:K2,2),'o',lx,ly)
axis('square');
title('final hyperplane location')
end
end % while loop
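The plotting helper sline.m is not included either. From its usage, sline(w,v) appears to return the end points of the decision line w(1)+w(2)*x+w(3)*y = 0 inside the axis box v = [xmin xmax ymin ymax]; a simple sketch under that assumption (the original routine may clip the segment more carefully):
% sline.m - end points of the line w(1)+w(2)*x+w(3)*y=0 in the box v (hypothetical sketch)
function [lx,ly] = sline(w,v)
if abs(w(3)) > eps              % non-vertical line: solve for y over the x-range
    lx = [v(1) v(2)];
    ly = -(w(1)+w(2)*lx)/w(3);
else                            % vertical line x = -w(1)/w(2)
    lx = [-w(1)/w(2), -w(1)/w(2)];
    ly = [v(3) v(4)];
end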
Below is another BP program, this one using the Neural Network Toolbox:
Here input P and targets T define a simple function which
we can plot:
p = [0 1 2 3 4 5 6 7 8];
t = [0 0.84 0.91 0.14 -0.77 -0.96 -0.28 0.66 0.99];
plot(p,t,'o')
Here NEWFF is used to create a two-layer feed-forward network.
The network will have one input (ranging from 0 to 8), followed
by a hidden layer of 10 TANSIG neurons, followed by an output
layer with 1 PURELIN neuron. TRAINLM backpropagation is used.
The untrained network is then simulated.
net = newff([0 8],[10 1],{'tansig' 'purelin'},'trainlm');
y1 = sim(net,p)
plot(p,t,'o',p,y1,'x')
Here the network is trained for up to 50 epochs to an error goal
of 0.01, and then resimulated.
net.trainParam.epochs = 50;
net.trainParam.goal = 0.01;
net = train(net,p,t);
y2 = sim(net,p)
plot(p,t,'o',p,y1,'x',p,y2,'*')
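The newff call with an explicit input-range argument is the older toolbox syntax. In recent releases of the toolbox the same curve fit can be set up with fitnet; the sketch below mirrors the settings of the example above and is only a suggestion:
net = fitnet(10,'trainlm');      % 10 tansig hidden neurons, linear output, Levenberg-Marquardt
net.trainParam.epochs = 50;
net.trainParam.goal = 0.01;
net = train(net,p,t);
y2 = net(p);                     % simulate the trained network
plot(p,t,'o',p,y2,'*')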