// neuronet_bp.cpp
#include "StdAfx.h"
#include "neuronet_bp.h"
#include "math.h"
CNeuroNet_BP::CNeuroNet_BP()
{
}
CNeuroNet_BP::CNeuroNet_BP(INT NumLayer, INT* pNumNodeInEachLayer, CString MyFileName)
{
Alpha = 0.9; // momentum coefficient
Eta = 0.45; // learning rate
Gain = 5; // sigmoid steepness
Error = 0; // error of the most recent sample
nLayer = NumLayer;
pLayer = new NeuroLayer[nLayer];
this->TrainTimes =0;
fileName = MyFileName;
// Build the network structure
for(INT i=0; i<nLayer; i++)
{
pLayer[i].nNode = pNumNodeInEachLayer[i];
}
this->nInput = pLayer[0].nNode;
// Allocate storage for the network variables
this->GenerateNet();
// Initialize the weights randomly
this->RandomWeight();
}
CNeuroNet_BP::~CNeuroNet_BP(void)
{
}
// Assign random initial weights
void CNeuroNet_BP::RandomWeight(void)
{
srand(4567);
for(INT i=0; i<nLayer; i++)
{
for(INT j=0; j<pLayer[i].nNode; j++)
{
for(INT k=0; k<pLayer[i].nInput; k++)
{
pLayer[i].ppWeight[j][k] = (rand()%100-50)/100.0; // random weight in [-0.5, 0.5)
pLayer[i].ppDWeight[j][k] = 0.0; // weight increment starts at zero
}
}
}
}
// Read the network weights from file
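// File layout (matches SaveNetToFile): 10-byte ID "NeuroBP", TrainTimes, Error,
// Alpha, Eta, Gain, nInput, nLayer, the node count of each layer, then for every
// node one row of weights followed by one row of weight increments.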
BOOL CNeuroNet_BP::ReadNetFromFile(void)
{
CFile file;
if(file.Open(fileName, CFile::modeRead)==FALSE)
{
return FALSE;
}
char ID[10] = {0};
file.Read((void*)ID, 10); // file type marker
if(strncmp(ID, "NeuroBP", 7) != 0)
{
return FALSE;
}
file.Read((void*)&this->TrainTimes, sizeof(DWORD));
file.Read((void*)&this->Error, sizeof(DOUBLE));
file.Read((void*)&this->Alpha, sizeof(DOUBLE));
file.Read((void*)&this->Eta, sizeof(DOUBLE));
file.Read((void*)&this->Gain, sizeof(DOUBLE));
file.Read((void*)&this->nInput, sizeof(INT));
file.Read((void*)&this->nLayer, sizeof(INT));
// Read the node count of each layer
INT* pNumNode = new INT[nLayer];
for(INT i=0; i<nLayer; i++)
{
file.Read((void*)&pNumNode[i], sizeof(INT));
}
// Rebuild the layer array with the stored sizes
pLayer = new NeuroLayer[nLayer];
for(INT i=0; i<nLayer; i++)
{
pLayer[i].nNode = pNumNode[i];
}
delete[] pNumNode;
this->GenerateNet();
// Assign the stored weights to every neuron
for(INT i=0; i<nLayer; i++)
{
for(INT j=0; j<pLayer[i].nNode; j++)
{
file.Read(pLayer[i].ppWeight[j], pLayer[i].nInput*sizeof(DOUBLE)); // read this node's weights
file.Read(pLayer[i].ppDWeight[j], pLayer[i].nInput*sizeof(DOUBLE)); // read this node's weight increments
//TRACE("pw[%d][%d][0]= %f", i, j, pLayer[i].ppWeight[j]);
}
}
return TRUE;
}
// Save the network weights to file
BOOL CNeuroNet_BP::SaveNetToFile(void)
{
INT nTemp = 0;
char ID[10] = "NeuroBP";
//char Memo[100] = "Unspecified";
HANDLE hFile = CreateFile(fileName,
GENERIC_WRITE, FILE_SHARE_READ,
NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
if (hFile == INVALID_HANDLE_VALUE)
{
AfxMessageBox(_T("Couldn't create the file!"));
return FALSE;
}
// Attach a CFile object to the handle we have.
CFile file(hFile);
file.Write((void*)ID, 10); // write the file type marker
//file.Write((void*)Memo, 100); // write the description field
file.Write((void*)&TrainTimes, sizeof(DWORD)); // write the number of training iterations so far
file.Write((void*)&this->Error, sizeof(DOUBLE)); // write the most recent system error
file.Write((void*)&this->Alpha, sizeof(DOUBLE));
file.Write((void*)&this->Eta, sizeof(DOUBLE));
file.Write((void*)&this->Gain, sizeof(DOUBLE));
file.Write(&(this->nInput), sizeof(INT)); // write the input vector width
file.Write(&(this->nLayer), sizeof(INT));
// Write the node count of each layer
for(INT i=0; i<nLayer; i++)
{
file.Write(&(pLayer[i].nNode), sizeof(INT));
}
// Write the weights of every node in each layer
for(INT i=0; i<nLayer; i++)
{
for(INT j=0; j<pLayer[i].nNode; j++)
{
file.Write(pLayer[i].ppWeight[j], pLayer[i].nInput*sizeof(DOUBLE));
file.Write(pLayer[i].ppDWeight[j], pLayer[i].nInput*sizeof(DOUBLE));
//TRACE("pw[%d][%d][0]= %f", i, j, pLayer[i].ppWeight[j]);
}
}
file.Close();
return TRUE;
}
// Forward pass: process the input data and produce the output
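// The first layer reads the raw input vector; each later layer takes the
// previous layer's output as its input.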
BOOL CNeuroNet_BP::Propagate( DOUBLE* pInput)
{
this->pInput = pInput;
// compute the output layer by layer
for(INT i=0; i<this->nLayer; i++)
{
if(i==0)
{
pLayer[i].pInput = this->pInput;
}
else
{
pLayer[i].pInput = pLayer[i-1].pOutput;
}
for(INT j=0; j<pLayer[i].nNode; j++)
{
pLayer[i].pOutput[j] = func(i, j);
TRACE("pOut[%d][%d]=\t%f\t", i, j, pLayer[i].pOutput[j]);
}
}
return TRUE;
}
// Node transfer (activation) function
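// Weighted sum of the node's inputs passed through a sigmoid:
//   out = 1 / (1 + exp(-Gain * sum_k(w[j][k] * x[k])))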
DOUBLE CNeuroNet_BP::func(INT i, INT j)
{
DOUBLE sigma = 0.0;
for(INT k=0; k<pLayer[i].nInput; k++)
{
sigma = sigma + pLayer[i].pInput[k] * pLayer[i].ppWeight[j][k];
}
sigma = 1.0/(1.0 + exp(-sigma*this->Gain));
return sigma;
}
// Train the network on one sample
void CNeuroNet_BP::Train(DOUBLE* pInput, DOUBLE* pTeach)
{
Propagate(pInput); // forward pass
ComputeOutputError(pTeach); // compute the output error
BackPropagate(); // back-propagate and adjust the weights
this->TrainTimes++;
}
//******************************************************************************
// Back-propagation: adjust the weights
// *****************************************************************************
// Compute the output-layer error
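// For each output node i: delta_i = Out_i * (1 - Out_i) * (Target_i - Out_i),
// and the sample error is E = 0.5 * sum_i (Target_i - Out_i)^2.
// (The variant of the delta that includes the Gain factor is left commented out below.)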
void CNeuroNet_BP::ComputeOutputError(DOUBLE* pTarget)
{
DOUBLE Out, Err;
this->Error=0;
for(INT i=0; i<pLayer[nLayer-1].nNode; i++)
{
Out = pLayer[nLayer-1].pOutput[i];
Err = pTarget[i] - Out;
//this->pLayer[nLayer-1].pError[i] = Gain*Out*(1-Out)*Err;
this->pLayer[nLayer-1].pError[i] = Out*(1-Out)*Err;
this->Error += 0.5*pow(Err, 2);
}
}
// Back-propagate the error
void CNeuroNet_BP::BackPropagate(void)
{
DOUBLE Out, Err;
// propagate the error backwards through the hidden layers
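// For each hidden node j: delta_j = Out_j * (1 - Out_j) * sum_k(delta_k * w[k][j]),
// where k runs over the nodes of the next layer.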
for(INT i=nLayer-2; i>=0; i--)
{
for(INT j=0; j<pLayer[i].nNode; j++)
{
Out = pLayer[i].pOutput[j];
Err = 0;
for(INT k=0; k<pLayer[i+1].nNode; k++)
{
Err += pLayer[i+1].pError[k] * pLayer[i+1].ppWeight[k][j];
}
pLayer[i].pError[j] = Out * (1-Out) * Err;
}
}
// adjust the weights
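// Momentum update: dW = Eta * delta * input + Alpha * dW_prev, then W += dW.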
for(INT i=nLayer-1; i>=0; i--) // for each layer
{
for(INT j=0; j<pLayer[i].nNode; j++)
{
for(INT k=0; k<pLayer[i].nInput; k++)
{
//if(i==0)
//{
// Out = pLayer[0].pInput[k];
//}
//else
//{
Out = pLayer[i].pInput[k];
//}
Err = pLayer[i].pError[j];
pLayer[i].ppWeight[j][k] += this->Eta *Out*Err + this->Alpha*pLayer[i].ppDWeight[j][k];
pLayer[i].ppDWeight[j][k] = this->Eta *Err * Out + this->Alpha*pLayer[i].ppDWeight[j][k];
}
}
}
}
// Build the network and allocate storage
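// Each layer gets an output buffer, an error buffer, and an nNode x nInput weight
// matrix plus a matching weight-increment matrix; the network output aliases the
// last layer's output buffer.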
void CNeuroNet_BP::GenerateNet(void)
{
for(INT i=0; i<nLayer; i++)
{
if(i==0)
{
pLayer[i].nInput = this->nInput;
}
else
{
pLayer[i].nInput = pLayer[i-1].nNode;
}
pLayer[i].pOutput = new DOUBLE[pLayer[i].nNode]; // output buffer
pLayer[i].pError = new DOUBLE[pLayer[i].nNode]; // node error buffer
pLayer[i].ppWeight = new DOUBLE*[pLayer[i].nNode]; // weight row pointers
pLayer[i].ppDWeight = new DOUBLE*[pLayer[i].nNode]; // weight-increment row pointers
for(INT j=0; j<pLayer[i].nNode; j++)
{
pLayer[i].ppWeight[j] = new DOUBLE[pLayer[i].nInput]; // this node's weights
pLayer[i].ppDWeight[j] = new DOUBLE[pLayer[i].nInput]; // this node's weight increments
}
}
this->pOutput = pLayer[nLayer-1].pOutput;
}
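
// Example usage (a minimal sketch, not part of the original source): builds a
// hypothetical 2-3-1 network and trains it on XOR-style samples. The layer sizes,
// file name, epoch count and training data below are illustrative assumptions.
static void ExampleTrain_BP()
{
    INT nodes[3] = {2, 3, 1};                      // input, hidden, output layer sizes
    CNeuroNet_BP net(3, nodes, _T("bp_net.dat"));  // hypothetical weight file name
    DOUBLE in[4][2]    = {{0,0},{0,1},{1,0},{1,1}};
    DOUBLE teach[4][1] = {{0},{1},{1},{0}};
    for(INT epoch=0; epoch<5000; epoch++)          // illustrative epoch count
    {
        for(INT s=0; s<4; s++)
        {
            net.Train(in[s], teach[s]);            // forward pass + back-propagation
        }
    }
    net.SaveNetToFile();                           // persist the trained weights
}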