% neuralnetwork_bp_classification.m
% BP neural network for pattern classification
% Platform: Matlab 6.5
% Author: Lu Zhenbo, Naval University of Engineering
% Colleagues are welcome to write for exchange and collaboration;
% more articles and programs are available on my homepage
% E-mail: luzhenbo@yahoo.com.cn
% Homepage: http://luzhenbo.88uu.com.cn
clc
clear
close all
%---------------------------------------------------
% Generate training and test samples; each column is one sample:
% three classes of 3-D points (offset by 0, 1, 2), 5 samples per class,
% with one-hot columns as targets
P1 = [rand(3,5),rand(3,5)+1,rand(3,5)+2];
T1 = [repmat([1;0;0],1,5),repmat([0;1;0],1,5),repmat([0;0;1],1,5)];
%
% repmat - Replicate and tile an array
%
% Syntax:
%   B = repmat(A,m,n)
%   B = repmat(A,[m n])
%   B = repmat(A,[m n p...])
%
% Description: B = repmat(A,m,n) creates a large matrix B consisting of
% an m-by-n tiling of copies of A. The statement repmat(A,n) creates an
% n-by-n tiling. B = repmat(A,[m n]) accomplishes the same result as
% repmat(A,m,n). B = repmat(A,[m n p...]) produces a multidimensional
% (m-by-n-by-p-by-...) array composed of copies of A. A may be
% multidimensional. When A is a scalar, repmat(A,m,n) produces an
% m-by-n matrix filled with A's value; this can be much faster than
% a*ones(m,n) when m or n is large.
%
% Example: repmat replicates 12 copies of the second-order identity
% matrix, resulting in a "checkerboard" pattern.
%   B = repmat(eye(2),3,4)
%   B =
%        1 0 1 0 1 0 1 0
%        0 1 0 1 0 1 0 1
%        1 0 1 0 1 0 1 0
%        0 1 0 1 0 1 0 1
%        1 0 1 0 1 0 1 0
%        0 1 0 1 0 1 0 1
%
% The statement N = repmat(NaN,[2 3]) creates a 2-by-3 matrix of NaNs.
P2 = [rand(3,5),rand(3,5)+1,rand(3,5)+2];
T2 = [repmat([1;0;0],1,5),repmat([0;1;0],1,5),repmat([0;0;1],1,5)];
%---------------------------------------------------
% Normalization (row-wise linear scaling to [-1,1])
[PN1,minp,maxp] = premnmx(P1); % normalize training inputs, keeping the min/max
PN2 = tramnmx(P2,minp,maxp); % apply the same scaling to the test inputs
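% Note: premnmx/tramnmx were superseded by mapminmax in later Neural
% Network Toolbox releases. A minimal sketch of the equivalent calls,
% assuming a toolbox version that ships mapminmax (commented out so the
% script stays runnable on Matlab 6.5):
% [PN1, ps] = mapminmax(P1);           % fit the scaling on P1 and apply it
% PN2 = mapminmax('apply', P2, ps);    % reuse the same settings on P2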
%---------------------------------------------------
% Set network parameters
NodeNum = 10; % number of hidden-layer nodes
TypeNum = 3; % output dimension (number of classes)
TF1 = 'tansig';TF2 = 'purelin'; % transfer functions (default choice)
%TF1 = 'tansig';TF2 = 'logsig';
%TF1 = 'logsig';TF2 = 'purelin';
%TF1 = 'tansig';TF2 = 'tansig';
%TF1 = 'logsig';TF2 = 'logsig';
%TF1 = 'purelin';TF2 = 'purelin';
net = newff(minmax(PN1),[NodeNum TypeNum],{TF1 TF2});
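% Note: this is the old-style newff signature (explicit input ranges via
% minmax). Later toolbox versions also accept the data directly; a sketch,
% assuming such a version (commented out here):
% net = newff(PN1, T1, NodeNum, {TF1 TF2});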
%---------------------------------------------------
% Specify training parameters
% net.trainFcn = 'traingd'; % gradient descent
% net.trainFcn = 'traingdm'; % gradient descent with momentum
%
% net.trainFcn = 'traingda'; % gradient descent with adaptive learning rate
% net.trainFcn = 'traingdx'; % gradient descent with momentum and adaptive learning rate
%
% (preferred algorithm for large networks - pattern recognition)
% net.trainFcn = 'trainrp'; % RPROP (resilient BP), smallest memory requirement
%
% Conjugate gradient algorithms
% net.trainFcn = 'traincgf'; % Fletcher-Reeves update
% net.trainFcn = 'traincgp'; % Polak-Ribiere update, slightly more memory than Fletcher-Reeves
% net.trainFcn = 'traincgb'; % Powell-Beale restarts, slightly more memory than Polak-Ribiere
% (preferred algorithms for large networks - function fitting, pattern recognition)
% net.trainFcn = 'trainscg'; % scaled conjugate gradient, same memory as Fletcher-Reeves but far less computation than the three above
%
% net.trainFcn = 'trainbfg'; % quasi-Newton BFGS algorithm, more computation and memory than conjugate gradient, but faster convergence
% net.trainFcn = 'trainoss'; % one-step secant algorithm, less computation and memory than BFGS, slightly more than conjugate gradient
%
% (preferred algorithm for small and medium networks - function fitting, pattern recognition)
net.trainFcn = 'trainlm'; % Levenberg-Marquardt, largest memory requirement, fastest convergence
%
% net.trainFcn = 'trainbr'; % Bayesian regularization
%
% Five representative algorithms: 'traingdx','trainrp','trainscg','trainoss','trainlm',
% compared in the sketch below
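% A minimal sketch for comparing those five algorithms on this problem
% (commented out so the main script is unaffected; fcns and net_i are
% illustrative names):
% fcns = {'traingdx','trainrp','trainscg','trainoss','trainlm'};
% for i = 1:length(fcns)
%     net_i = newff(minmax(PN1),[NodeNum TypeNum],{TF1 TF2});
%     net_i.trainFcn = fcns{i};
%     net_i = train(net_i,PN1,T1);
% end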
%---------------------%
net.trainParam.show = 1; % display interval during training
net.trainParam.lr = 0.3; % learning rate - traingd,traingdm
net.trainParam.mc = 0.95; % momentum coefficient - traingdm,traingdx
net.trainParam.mem_reduc = 10; % compute the Hessian in blocks (Levenberg-Marquardt only)
net.trainParam.epochs = 1000; % maximum number of training epochs
net.trainParam.goal = 1e-8; % target mean squared error
net.trainParam.min_grad = 1e-20; % minimum gradient
net.trainParam.time = inf; % maximum training time
%---------------------------------------------------
% Training and testing
net = train(net,PN1,T1); % train the network
%---------------------------------------------------
% Testing
Y1 = sim(net,PN1); % actual network output on the training samples
Y2 = sim(net,PN2); % actual network output on the test samples
Y1 = full(compet(Y1)); % winner-take-all (competitive) output
Y2 = full(compet(Y2));
% compet Competitive transfer function.
%
% Syntax
%
% A = compet(N)
% info = compet(code)
%
% Description
%
% compet is a transfer function. Transfer functions
% calculate a layer's output from its net input.
%
% compet(N) takes one input argument,
% N - SxQ matrix of net input (column) vectors.
% and returns output vectors with 1 where each net input
% vector has its maximum value, and 0 elsewhere.
%
% compet(code) returns information about this function.
% These codes are defined:
% 'deriv' - Name of derivative function.
% 'name' - Full name.
% 'output' - Output range.
% 'active' - Active input range.
%
% compet does not have a derivative function.
%
% Examples
%
% Here we define a net input vector N, calculate the output,
% and plot both with bar graphs.
%
% n = [0; 1; -0.5; 0.5];
% a = compet(n);
% subplot(2,1,1), bar(n), ylabel('n')
% subplot(2,1,2), bar(a), ylabel('a')
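% compet is equivalent to a per-column argmax followed by one-hot
% encoding. A minimal sketch of the same winner-take-all step without
% compet (ind2vec is from the same toolbox; commented out since the
% script already uses compet):
% [mx, k] = max(Y2,[],1);       % index of the winning class in each column
% Y2alt = full(ind2vec(k));     % back to one-hot columns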
%---------------------------------------------------
% Result statistics
Result = ~sum(abs(T1-Y1)) % 1 marks a correctly classified sample
Percent1 = sum(Result)/length(Result) % classification accuracy on the training samples
Result = ~sum(abs(T2-Y2)) % 1 marks a correctly classified sample
Percent2 = sum(Result)/length(Result) % classification accuracy on the test samples
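%---------------------------------------------------
% Beyond overall accuracy, a per-class breakdown can be read off a
% confusion matrix. A minimal sketch using vec2ind from the same toolbox
% (Conf, c_true and c_pred are illustrative names):
c_true = vec2ind(T2);           % true class index of each test sample
c_pred = vec2ind(Y2);           % predicted class index
Conf = zeros(TypeNum);          % rows = true class, columns = predicted class
for k = 1:length(c_true)
    Conf(c_true(k),c_pred(k)) = Conf(c_true(k),c_pred(k)) + 1;
end
Conf                            % display the confusion matrix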