function [MI,SIGMA,Pk,I,solution,t]=unsuni(X,K,tmax,randinit,t,MI,SIGMA,Pk)
% UNSUNI EM algorithm, mixture of Gaussians, diag. cov. matrix.
% [MI,SIGMA,Pk,I,solution,t]=unsuni(X,K,tmax,randinit,t,MI,SIGMA,Pk)
%
% UNSUNI is an implementation of an unsupervised learning algorithm
% (the EM algorithm) which estimates the parameters of a mixture of
% normal distributions from unlabelled data.
%
% The statistical model assumes normally distributed data, i.e. a
% p.d.f. with independent features (diagonal covariance matrices);
% the number of normal distributions is given by the argument K.
%
% UNSUNI(X,K,tmax,randinit)
% X [NxM] is a matrix containing M unlabelled points in the N-dimensional
% feature space.
% K [1x1] is the number of classes - the number of normal distributions -
% for which the algorithm estimates the parameters.
% tmax [1x1] is the upper limit on the number of steps the algorithm
% will perform.
% randinit [1x1] if equal to 1, the algorithm begins from a randomly
% generated initial model; otherwise it computes the initial model
% from the first K training points, which it takes as the mean
% vectors, estimating the covariance matrices from Euclidean
% (unit covariance) class memberships.
%
% UNSUNI(X,K,tmax,randinit,t,MI,SIGMA,Pk) begins from the state
% determined by
% t [1x1] is the initial step number.
% MI [NxK], SIGMA [Nx(NxK)], Pk [1xK] is the solution at step t.
%
% Output
% MI [NxK] contains K vectors of mean values, MI=[mi_1,mi_2,...,mi_K].
% SIGMA [Nx(NxK)] contains K covariance matrices stored side by side,
% SIGMA=[sigma_1,sigma_2,...,sigma_K].
% Pk [1xK] contains K a priori probabilities, one for each distribution.
% I [1xM] labels of the training points determined according to the
% Bayes classification rule.
% solution [1x1] is equal to 1 if the algorithm ends in a stationary
% point (i.e., if the models in two subsequent steps are identical).
% t [1x1] is the step number at which the algorithm ends.
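%
% Example
%  A minimal sketch (the synthetic data here are illustrative, not
%  part of the toolbox): estimate a two-component mixture in 2D.
%  X=[randn(2,100)-2, randn(2,100)+2];
%  [MI,SIGMA,Pk,I,solution,t]=unsuni(X,2,100,1);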
%
% See also UNSUND.
%
% Statistical Pattern Recognition Toolbox, Vojtech Franc, Vaclav Hlavac
% (c) Czech Technical University Prague, http://cmp.felk.cvut.cz
% Written by Vojtech Franc (diploma thesis) 10.11.1999
% Modifications:
% 23.12.99 V. Franc
% 22. 6.00 V. Hlavac - comments polished.
DIM=size(X,1); % dimension of the feature space (N in the help text)
N=size(X,2);   % number of training points (M in the help text)
% default arguments
if nargin < 5,
t=0;
end
if nargin < 4,
randinit=1;
end
if nargin < 3,
tmax=inf;
end
if (nargin > 4 & nargin < 8) | nargin < 2,
error('Not enough input arguments.');
end
if K > N,
error('The number of training points must be at least K.');
end
% preallocates memory
alpha=zeros(N,K);
if t==0,
% STEP (1)
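% initialization: uniform priors, mean vectors taken from the data
% (first K points, or K distinct random points), and diagonal
% covariances estimated from the distance-based weights below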
% prior prob.
Pk=ones(1,K)/K;
SIGMA=repmat(zeros(DIM,DIM),1,K);
MI=zeros(DIM,K);
% mean value
% non-random init
if randinit==0,
MI(:,1:K)=X(:,1:K);
else
% random init
k=[0];
i=1;
while length(k)<=K,
% draw a random index in 1..N that has not been used yet
ri=fix(N*rand(1))+1;
if isempty(find(k==ri)),
k=[k,ri];
MI(:,i)=X(:,ri);
i=i+1;
end
end
end
% recognition with unit covariance matrix
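% alpha(i,k) is made proportional to the Euclidean distance of point i
% from mean k (normalized over k); these weights serve only to form
% the first estimate of the diagonal covariances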
for k=1:K,
alpha(:,k)=sqrt(sum((X-repmat(MI(:,k),1,N)).*(X-repmat(MI(:,k),1,N))))';
end
for i=1:N,
alpha(i,:)=alpha(i,:)/sum(alpha(i,:));
end
for k=1:K,
sumAlpha=sum(alpha(:,k));
mi=MI(:,k);
sigma=zeros(DIM,DIM);
for l=1:DIM,
sigma(l,l)=sum( alpha(:,k)'.* (X(l,:)-repmat(mi(l),1,N)).^2 );
end
sigma=sigma/sumAlpha;
SIGMA(:,(k-1)*DIM+1:DIM*k)=sigma;
end
t=1;
tmax=tmax-1;
end
% learning:
solution=0;
while solution==0 & tmax > 0,
tmax=tmax-1;
t=t+1;
% Recognition
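% (E-step) alpha(i,k) = Pk(k)*normald(x_i,MI(:,k),sigma_k), normalized
% over k so that alpha(i,:) holds the posterior probabilities of the
% components given point x_i; if the mixture density underflows to
% zero, the weights fall back to the uniform value 1/K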
for i=1:N,
x=X(:,i);
for k=1:K,
mi=MI(:,k);
sigma=SIGMA(:,(k-1)*DIM+1:DIM*k);
alpha(i,k)=Pk(k)*normald(x,mi,sigma);
end
Pxi=sum(alpha(i,:));
if Pxi>0,
alpha(i,:)=alpha(i,:)/Pxi;
else
alpha(i,:)=1/K;
end
end %for i=1:N,
% Learning
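% (M-step) re-estimate each component from the posteriors:
% Pk(k) = sum_i alpha(i,k)/N, mi_k = alpha-weighted mean of the
% points, sigma_k = alpha-weighted variance per feature (diagonal only)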
OLDSIGMA=SIGMA;
OLDMI=MI;
for k=1:K,
sumAlpha=sum(alpha(:,k));
Pk(k)=sumAlpha/N;
mi=sum( repmat(alpha(:,k),1,DIM).*X')'/sumAlpha;
MI(:,k)=mi;
sigma=zeros(DIM,DIM);
for l=1:DIM,
sigma(l,l)=sum( alpha(:,k)'.* (X(l,:)-repmat(mi(l),1,N)).^2 );
end
sigma=sigma/sumAlpha;
SIGMA(:,(k-1)*DIM+1:DIM*k)=sigma;
end
% MI and SIGMA not changed --> stationary point
if ~any(any(OLDSIGMA-SIGMA)) & ~any(any(OLDMI-MI)),
solution=1;
end
end
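% classification: label each point by the maximum posterior probability
% (the Bayes rule); with a single component all points get label 1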
if K==1,
I=ones(1,N);
else
[maxPkx,I]=max(alpha');
end