svmtrain.m
字號:
function svm = svmTrain(svmType, X, Y, ker, p1, p2)
% SVMTRAIN  Train a support vector machine by solving the dual QP with quadprog.
%
% SVM Classification:
%   svm = svmTrain('svc_c',x,y,ker,C);
%   svm = svmTrain('svc_nu',x,y,ker,nu);
% One-Class SVM:
%   svm = svmTrain('svm_one_class',x,[],ker,nu);
% SVM Regression:
%   svm = svmTrain('svr_epsilon',x,y,ker,C,e);
%   svm = svmTrain('svr_nu',x,y,ker,C,nu);
%
% Inputs:
%   svmType - one of {'svc_c','svc_nu','svm_one_class','svr_epsilon','svr_nu'}
%   X       - training samples, d-by-n matrix (d = dimension, n = #samples)
%   Y       - training targets, 1-by-n matrix; +1/-1 for classification,
%             real values for regression; ignored for one-class
%   ker     - kernel parameters (struct) with fields:
%               type   - 'linear' : k(x,y) = x'*y
%                        'poly'   : k(x,y) = (x'*y+c)^d
%                        'gauss'  : k(x,y) = exp(-0.5*(norm(x-y)/s)^2)
%                        'tanh'   : k(x,y) = tanh(g*x'*y+c)
%               degree - degree d of the polynomial kernel (positive scalar)
%               offset - offset c of poly/tanh kernels (scalar; negative for tanh)
%               width  - width s of the Gauss kernel (positive scalar)
%               gamma  - slope g of the tanh kernel (positive scalar)
%   p1, p2  - type-dependent hyperparameters (C, nu, epsilon; see usage above)
%
% Output:
%   svm     - trained machine (struct) with fields:
%               type - svmType
%               ker  - kernel parameters
%               x    - training samples, d-by-n matrix
%               y    - training targets, 1-by-n matrix
%               a    - Lagrange multipliers, 1-by-n matrix
%
% NOTE(review): the original source mixed calls to `kernell` and `kernel`
% (the 'svr_epsilon' branch alone used `kernel`), so at least one branch
% could not run. All call sites are unified to `kernel` here — confirm the
% kernel helper file is actually named kernel.m in this toolbox.

options = optimset;
options.LargeScale = 'off';
options.Display = 'off';

switch svmType
    case 'svc_c'                        % C-SVC: soft-margin classification
        C = p1;
        n = length(Y);
        H = (Y'*Y).*kernel(ker, X, X);  % quadratic term of the dual objective
        f = -ones(n, 1);                % linear term
        A = [];  b = [];                % no inequality constraints
        Aeq = Y; beq = 0;               % equality: sum(a_i * y_i) = 0
        lb = zeros(n, 1);               % box: 0 <= a_i <= C
        ub = C*ones(n, 1);
        a0 = zeros(n, 1);               % starting point for the QP solver
        [a, fval, exitflag, output, lambda] = ...
            quadprog(H, f, A, b, Aeq, beq, lb, ub, a0, options);

    case 'svc_nu'                       % nu-SVC classification
        nu = p1;
        n = length(Y);
        H = (Y'*Y).*kernel(ker, X, X);
        f = zeros(n, 1);
        A = -ones(1, n); b = -nu;       % inequality: sum(a_i) >= nu
        Aeq = Y; beq = 0;               % equality: sum(a_i * y_i) = 0
        lb = zeros(n, 1);               % box: 0 <= a_i <= 1/n
        ub = ones(n, 1)/n;
        a0 = zeros(n, 1);
        [a, fval, exitflag, output, lambda] = ...
            quadprog(H, f, A, b, Aeq, beq, lb, ub, a0, options);

    case 'svm_one_class'                % one-class SVM (Y is ignored)
        nu = p1;
        n = size(X, 2);
        H = kernel(ker, X, X);
        f = -diag(H);                   % f(i) = -k(x_i, x_i); reuses the Gram
                                        % matrix instead of n scalar kernel calls
        A = [];  b = [];
        Aeq = ones(1, n); beq = 1;      % equality: sum(a_i) = 1
        lb = zeros(n, 1);               % box: 0 <= a_i <= 1/(nu*n)
        ub = ones(n, 1)/(nu*n);
        a0 = zeros(n, 1);
        [a, fval, exitflag, output, lambda] = ...
            quadprog(H, f, A, b, Aeq, beq, lb, ub, a0, options);

    case 'svr_epsilon'                  % epsilon-SVR regression
        C = p1;
        e = p2;
        n = length(Y);
        Q = kernel(ker, X, X);
        H = [Q, -Q; -Q, Q];             % dual over the stacked vector [a; a*]
        f = [e*ones(n,1) - Y'; e*ones(n,1) + Y'];
        % Flipping the signs below only flips the sign convention of the
        % decision function; the two formulations are equivalent:
        % f = [e*ones(n,1)+Y'; e*ones(n,1)-Y'];
        A = [];  b = [];
        Aeq = [ones(1,n), -ones(1,n)];  % equality: sum(a_i - a*_i) = 0
        beq = 0;
        lb = zeros(2*n, 1);             % box: 0 <= a_i, a*_i <= C
        ub = C*ones(2*n, 1);
        a0 = zeros(2*n, 1);
        [a, fval, exitflag, output, lambda] = ...
            quadprog(H, f, A, b, Aeq, beq, lb, ub, a0, options);
        a = a(1:n) - a(n+1:end);        % net multipliers: a_i - a*_i

    case 'svr_nu'                       % nu-SVR regression
        C = p1;
        nu = p2;
        n = length(Y);
        Q = kernel(ker, X, X);
        H = [Q, -Q; -Q, Q];
        f = [-Y'; +Y'];
        % Equivalent alternate sign convention: f = [+Y'; -Y'];
        A = [];  b = [];
        Aeq = [ones(1,n), -ones(1,n); ones(1,2*n)];
        beq = [0; C*n*nu];              % sum(a-a*) = 0 and sum(a+a*) = C*n*nu
        lb = zeros(2*n, 1);
        ub = C*ones(2*n, 1);
        a0 = zeros(2*n, 1);
        [a, fval, exitflag, output, lambda] = ...
            quadprog(H, f, A, b, Aeq, beq, lb, ub, a0, options);
        a = a(1:n) - a(n+1:end);        % net multipliers: a_i - a*_i

    otherwise
        % Original fell through silently, leaving `a` undefined and causing a
        % confusing failure below; fail fast with a clear message instead.
        error('svmTrain:unknownType', 'Unknown svmType "%s".', svmType);
end
% ------------------------------------------------------------%
% Assemble the output struct.
svm.type = svmType;
svm.ker = ker;
svm.x = X;
svm.y = Y;
svm.a = a';
快捷鍵說明
復制代碼
Ctrl + C
搜索代碼
Ctrl + F
全屏模式
F11
切換主題
Ctrl + Shift + D
顯示快捷鍵
?
增大字號
Ctrl + =
減小字號
Ctrl + -