% rpcl.m
% MATLAB implementation of Rival-Penalized Competitive Learning (RPCL)
% Source:
% L. Xu, A. Krzyzak and E. Oja (1993), "Rival penalized competitive
% learning for clustering analysis, RBF net, and curve detection,"
% IEEE Trans. Neural Networks, vol. 4, no. 4, pp. 636-648.
%
% Note: According to the RPCL algorithm, once training is completed,
%       prototypes located far away from the data clusters should be
%       eliminated (see the optional pruning sketch at the end of this
%       script).
%
% Code author: Guilherme A. Barreto
% Date: October 18th, 2005
clear; clc; close all;
% Load data
load dataset1.dat;
Dw=dataset1; clear dataset1
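% If dataset1.dat is not available, a synthetic two-cluster 2-D data set
% can be generated instead (a minimal sketch for testing only; the cluster
% centres and sizes are assumptions, not the original data). Uncomment to use:
% C1 = 0.5*randn(200,2) + repmat([ 2  2],200,1); % cluster around ( 2, 2)
% C2 = 0.5*randn(200,2) + repmat([-2 -2],200,1); % cluster around (-2,-2)
% Dw = [C1; C2];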
% Get size of data matrix (1 input vector per row)
[LEN_DATA DIM_INPUT]=size(Dw);
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Create the CL network structure (SOM Toolbox map struct) %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Mx = 16;           % Number of neurons (prototypes)
MAP_SIZE = [Mx 1]; % Size of the map (always use a 1-D map)
sMap = som_map_struct(DIM_INPUT,'msize',MAP_SIZE,'rect','sheet');
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Different weights initialization methods %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% sMap = som_randinit(Dw, sMap); % Random weight initialization
% sMap = som_lininit(Dw, sMap);  % Linear weight initialization
I=randperm(LEN_DATA); sMap.codebook=Dw(I(1:Mx),:); % Initialize with Mx data vectors chosen at random
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Train the RPCL algorithm %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Ep=200;              % Number of training epochs
alpha=0.05;          % Learning rate of the 1st winner
eta=0.001;           % De-learning rate of the 2nd winner (rival); must be much smaller than alpha
counter=zeros(Mx,1); % Counter of the number of victories of each neuron
freq=zeros(Mx,1);    % Relative frequency of victories of each neuron
Qerr=zeros(1,Ep);    % Quantization error per epoch (preallocated)
for t=1:Ep, % loop over training epochs
fprintf('Epoch %d of %d\n', t, Ep); % Display training progress
% Shuffle input data vectors at each training epoch
I=randperm(LEN_DATA); % shuffle the row indices
Dw=Dw(I,:);
for tt=1:LEN_DATA, % loop for iteration within an epoch
% Euclidean distances from the current input to all prototypes
Di=sqrt(som_eucdist2(sMap,Dw(tt,:)));
% Distances weighted by the relative winning frequencies, so that
% frequent winners are handicapped in the competition
WDi=freq.*Di;
% Find the 1st winner using WDi
[WDi1 win1]=min(WDi);
% Find the 2nd winner (rival) using WDi
WDi(win1)=Inf; % Exclude the 1st winner so it cannot be chosen again
[WDi2 win2]=min(WDi);
% Reward the 1st winner
sMap.codebook(win1,:)=sMap.codebook(win1,:) + alpha*(Dw(tt,:)-sMap.codebook(win1,:));
% Penalize the 2nd winner (rival)
sMap.codebook(win2,:)=sMap.codebook(win2,:) - eta*(Dw(tt,:)-sMap.codebook(win2,:));
% Update counter of victories of the 1st winner
counter(win1)=counter(win1)+1;
% Update the relative frequencies of victories (recomputed for all
% neurons, since the total number of victories has changed)
freq = counter/sum(counter);
end
% Quantization error per training epoch
Qerr(t) = som_quality(sMap, Dw);
end
% Plot prototype vectors and data together (plot assumes 2-D input data)
figure, plot(Dw(:,1),Dw(:,2),'+r'), hold on
plot(sMap.codebook(:,1),sMap.codebook(:,2),'b*')
title('Prototype vectors in input space'), hold off
% Plot quantization error evolution per training epoch
figure, plot(Qerr)
title('Quantization Error per Training Epoch')
% Bar plot of the total number of victories per neuron over all training epochs
figure, bar(1:Mx,counter)
title('Victories per neuron')
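% Optional post-processing (a minimal sketch, not part of the original
% script): as mentioned in the header note, prototypes that end up far
% from the data clusters should be eliminated after training. Here a low
% share of victories is used as a simple proxy for "far from the data";
% the 1% threshold is an assumed value, not one taken from the paper.
min_share = 0.01;                           % Assumed pruning threshold (fraction of victories)
keep = (counter/sum(counter)) >= min_share; % Neurons that won often enough are kept
fprintf('Keeping %d of %d prototypes\n', sum(keep), Mx);
figure, plot(Dw(:,1),Dw(:,2),'+r'), hold on
plot(sMap.codebook(keep,1),sMap.codebook(keep,2),'b*')
title('Prototypes kept after pruning'), hold off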
?? 快捷鍵說(shuō)明
復(fù)制代碼
Ctrl + C
搜索代碼
Ctrl + F
全屏模式
F11
切換主題
Ctrl + Shift + D
顯示快捷鍵
?
增大字號(hào)
Ctrl + =
減小字號(hào)
Ctrl + -