% svmtrain.m
function [AlphaY, SVs, Bias, Para] = SVMTrain(Samples, Labels, Parameters, Verbose)
% USAGE:
%   [AlphaY, SVs, Bias, Para] = SVMTrain(Samples, Labels, Parameters)
%   [AlphaY, SVs, Bias, Para] = SVMTrain(Samples, Labels, Parameters, Verbose)
%
% DESCRIPTION:
%   Construct a 2-class SVM classifier. This function performs the input
%   parameter checking only; the actual training algorithm is implemented
%   by the MEX files mexSVMTrain_0/1/2.
%
% INPUTS:
%   Samples:    all the training patterns (a row of column vectors).
%   Labels:     the corresponding class labels for the training patterns in
%               Samples, where Labels(i) in {1, -1} (a row vector).
%   Parameters: the parameters required by the training algorithm
%               (a 10-element row vector):
%     +-----------------------------------------------------------------+
%     | Kernel Type | Degree | Gamma | Coefficient | C | Cache | epsilon|
%     +-----------------------------------------------------------------+
%     | SVM Type | nu (nu-svm) | loss tolerance |
%     +----------------------------------------+
%     where Kernel Type:
%       0 --- Linear
%       1 --- Polynomial: (Gamma*<X(:,i),X(:,j)>+Coefficient)^Degree
%       2 --- RBF: (exp(-Gamma*|X(:,i)-X(:,j)|^2))
%       3 --- Sigmoid: tanh(Gamma*<X(:,i),X(:,j)>+Coefficient)
%     Gamma: if the input value is zero, Gamma is set by default to
%       1/(max_pattern_dimension) inside the MEX function; a non-zero
%       input value is used unchanged.
%     C: cost of the constraint violation (for C-SVC & C-SVR).
%     Cache Size: buffer to hold the <X(:,i),X(:,j)> products (in MB).
%     epsilon: tolerance of the termination criterion.
%     SVM Type:
%       0 --- C-SVM classifier
%       1 --- nu-SVM classifier
%       2 --- 1-SVM
%       3 --- C-SVM regressor
%     nu: the nu used in nu-SVM (for 1-SVM and nu-SVM).
%     loss tolerance: the epsilon in the epsilon-insensitive loss function.
%   Verbose (optional, default 0):
%     0 --- very quiet, providing little feedback
%     1 --- moderate feedback
%     2 --- very verbose, providing lots of feedback
%
% OUTPUTS:
%   AlphaY: Alpha .* Y, where Alpha are the non-zero Lagrange coefficients
%           and Y the corresponding labels.
%   SVs:    support vectors, i.e. the patterns corresponding to the
%           non-zero Alphas.
%   Bias:   the bias in the decision function AlphaY*Kernel(SVs',x)-Bias.
%   Para:   output parameters; the same as the input Parameters, except
%           that an input Gamma of 0 is replaced by 1/N, where N is the
%           dimension of the input patterns.
%
if (nargin < 3) || (nargin > 4)
    disp(' Incorrect number of input variables.');
    help SVMTrain;
    return
end
% Verbose is optional; default to the quiet mode so a 3-argument call
% does not hit an undefined variable below.
if nargin < 4
    Verbose = 0;
end
[spM spN] = size(Samples);
[lbM lbN] = size(Labels);
if lbM ~= 1
    disp(' Error: ''Labels'' should be a row vector.');
    return
end
if spN ~= lbN
    disp(' Error: different number of training patterns and their labels.');
    return
end
[prM prN] = size(Parameters);
if prM ~= 1
    disp(' Error: ''Parameters'' should be a row vector.');
    return
end
if prN ~= 10
    disp(' Error: ''Parameters'' should have exactly 10 elements.');
    return
end
% Reject out-of-range codes. Note: these must be OR conditions; the
% original AND version could never be true, so bad values slipped through.
if (Parameters(1) > 3) || (Parameters(1) < 0)
    disp(' Error: this program only supports 4 types of kernel functions.');
    return
end
if (Parameters(8) > 3) || (Parameters(8) < 0)
    disp(' Error: this program only supports 4 types of SVMs.');
    return
end
% Dispatch to the MEX trainer matching the requested verbosity level;
% any unrecognized Verbose value falls through to the most verbose one,
% matching the original behavior.
switch Verbose
    case 0
        [AlphaY, SVs, Bias, Para] = mexSVMTrain_0(Samples, Labels, Parameters);
    case 1
        [AlphaY, SVs, Bias, Para] = mexSVMTrain_1(Samples, Labels, Parameters);
    otherwise
        [AlphaY, SVs, Bias, Para] = mexSVMTrain_2(Samples, Labels, Parameters);
end
% (removed: code-hosting web page footer — keyboard-shortcut help for the
%  viewer UI, e.g. copy/search/fullscreen/font-size; not part of this source)