plvq.cpp
// This function uses PLVQ to train the prototype part of the network.
/* Input parameters:
prototype -- pointer to the prototype part
N         -- number of nodes in the prototype part
TrainNum  -- number of training samples
L         -- dimensionality of each training sample
Train     -- the set of training samples
*/
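/* Outline of the training procedure implemented below: the prototype fuzzy
weights are initialised with small random values; then, for at most T epochs
with a linearly decaying learning rate, every training sample is presented,
a normalised Gaussian membership is computed for each prototype, and every
prototype is moved toward the sample using fuzzy-number arithmetic. Training
stops early once the prototypes change by less than the preset precision
within an epoch. */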
#include <iostream>
#include <cstdio>
#include <cmath>
#include "FuzzyNN.h"
#include "feedfoward.h"
#include "tools.h"
using namespace std;
void plvq(ProtoNode *prototype,int N,int TrainNum,int L,FuzzyNum **Train)
{
double alpha0;           // initial learning rate
int T;                   // maximum number of training epochs
int i,j,t,k;
double alpha;            // current (decayed) learning rate
double D;                // normalisation term of the Gaussian memberships
double *s;               // squared distance from the current sample to each prototype (reused for the convergence test)
double *w;               // membership (weighting) value of each prototype
ProtoNode *prototypebak; // copy of the prototypes, used for the convergence test
double error;            // total change of the prototypes over one epoch
double theta;            // width of the Gaussian membership kernel
double precision;        // convergence threshold on the total change
prototypebak=new ProtoNode[N+1];
mcheck(prototypebak);
for(j=1;j<=N;j++)
{
prototypebak[j].w=new FuzzyNum[L+1];
mcheck(prototypebak[j].w);
prototypebak[j].g=new int[N+1];
mcheck(prototypebak[j].g);
}
s=new double[N+1];
mcheck(s);
w=new double[N+1];
mcheck(w);
cout<<"==================================="<<endl;
cout<<"現在開始對神經網絡進行第一趟的訓練."<<endl;
cout<<"請輸入初始學習速率的大小:"<<endl;
cin>>alpha0;
T=500;
theta=0.25*sqrt(L);
precision=1e-10;
// Initialize the prototype part: random fuzzy weights and a one-hot g vector per node
for(j=1;j<=N;j++)
{
for(i=1;i<=L;i++)
{
(prototype[j].w)[i].w1=frand(0.45,0.55);
(prototype[j].w)[i].w2=frand(0.45,0.55);
(prototype[j].w)[i].a=frand(0.45,0.55);
(prototype[j].w)[i].b=frand(0.45,0.55);
}
for(i=1;i<=N;i++)
if(i==j)(prototype[j].g)[i]=1;
else (prototype[j].g)[i]=0;
}
// Adjust the node weights for up to T epochs
for(t=0;t<T;t++)
{
alpha=alpha0*(1-(double)t/T); // learning rate decays linearly with the epoch number
// keep a copy of the current prototypes for the end-of-epoch convergence test
for(j=1;j<=N;j++)
{
for(i=1;i<=L;i++)
{
(prototypebak[j].w)[i].w1=(prototype[j].w)[i].w1;
(prototypebak[j].w)[i].w2=(prototype[j].w)[i].w2;
(prototypebak[j].w)[i].a=(prototype[j].w)[i].a;
(prototypebak[j].w)[i].b=(prototype[j].w)[i].b;
}
for(i=1;i<=N;i++)
if(i==j)(prototypebak[j].g)[i]=1;
else (prototypebak[j].g)[i]=0;
}
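// present every training sample once and update all prototypes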
for(k=1;k<=TrainNum;k++)
{
for(j=1;j<=N;j++)
{
s[j]=0;
for(i=1;i<=L;i++)
s[j]+=(coa(Train[k][i])-coa((prototype[j].w)[i]))*(coa(Train[k][i])-coa((prototype[j].w)[i]));
}
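// turn the squared distances into normalised Gaussian memberships:
// w[j] = exp(-s[j]/(2*theta^2)) / sum_i exp(-s[i]/(2*theta^2))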
D=0;
for(j=1;j<=N;j++)
D+=exp(-s[j]/(2*theta*theta));
for(j=1;j<=N;j++)
w[j]=exp(-s[j]/(2*theta*theta))/D;
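// move each prototype toward the sample: P_j <- P_j + alpha*w[j]*(X - P_j),
// where '-' is fuzzy-number subtraction, so the core bounds w1/w2 subtract
// crosswise and the spreads a/b add crosswise (hence the mixed terms below)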
for(j=1;j<=N;j++)
{
for(i=1;i<=L;i++)
{
(prototype[j].w)[i].w1+=alpha*w[j]*(Train[k][i].w1-(prototype[j].w)[i].w2);
(prototype[j].w)[i].w2+=alpha*w[j]*(Train[k][i].w2-(prototype[j].w)[i].w1);
(prototype[j].w)[i].a+=alpha*w[j]*(Train[k][i].a+(prototype[j].w)[i].b);
(prototype[j].w)[i].b+=alpha*w[j]*(Train[k][i].b+(prototype[j].w)[i].a);
}
}
}
// Compute how much the prototypes changed this epoch and decide whether training can stop
for(j=1;j<=N;j++)
{
s[j]=0;
for(i=1;i<=L;i++)
s[j]+=fabs(coa((prototypebak[j].w)[i])-coa((prototype[j].w)[i]));
}
error=0;
for(j=1;j<=N;j++)
error+=s[j];
if(error<precision)break;
}
if(t==T)cout<<"The network failed to converge."<<endl;
cout<<"First training pass of the neural network finished."<<endl;
cout<<"==================================="<<endl;
// free the backup prototypes and the temporary arrays
for(j=1;j<=N;j++)
{
delete [](prototypebak[j].w);
delete [](prototypebak[j].g);
}
delete []prototypebak;
delete []s;
delete []w;
}
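A minimal calling sketch follows, assuming the FuzzyNum and ProtoNode layouts used above (FuzzyNum with fields w1, w2, a, b; ProtoNode with members w and g), that plvq is declared in the project headers, and that frand from tools.h is visible to the caller; all arrays are 1-based as in the function itself, and the sizes and sample values are purely illustrative.

#include "FuzzyNN.h"
#include "feedfoward.h"
#include "tools.h"

int main()
{
    const int N=3;          // number of prototype nodes (illustrative)
    const int L=4;          // dimensionality of each sample (illustrative)
    const int TrainNum=20;  // number of training samples (illustrative)

    // allocate the prototype part (1-based, hence the +1)
    ProtoNode *prototype=new ProtoNode[N+1];
    for(int j=1;j<=N;j++)
    {
        prototype[j].w=new FuzzyNum[L+1];
        prototype[j].g=new int[N+1];
    }

    // build a toy training set of fuzzy numbers
    FuzzyNum **Train=new FuzzyNum*[TrainNum+1];
    for(int k=1;k<=TrainNum;k++)
    {
        Train[k]=new FuzzyNum[L+1];
        for(int i=1;i<=L;i++)
        {
            Train[k][i].w1=frand(0.0,1.0);
            Train[k][i].w2=Train[k][i].w1+frand(0.0,0.1);
            Train[k][i].a=frand(0.0,0.1);
            Train[k][i].b=frand(0.0,0.1);
        }
    }

    plvq(prototype,N,TrainNum,L,Train);

    // release everything
    for(int k=1;k<=TrainNum;k++)delete []Train[k];
    delete []Train;
    for(int j=1;j<=N;j++){delete []prototype[j].w;delete []prototype[j].g;}
    delete []prototype;
    return 0;
}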