{ optim.pas — function minimization routines (Marquardt, BFGS) }
end;
X[I] := Temp1;
end;
end;
function ParamConv(OldX, X : TVector;
                   Lbound, Ubound : Integer;
                   Tol : Float) : Boolean;
{ ----------------------------------------------------------------------
  Tests convergence on the parameter vector: returns True when every
  component from Lbound to Ubound has moved by less than a mixed
  absolute/relative tolerance, Max(Tol, Tol * |OldX[i]|), between two
  successive iterations. Scanning stops at the first failing component.
  ---------------------------------------------------------------------- }
var
  J  : Integer;  { component index }
  Ok : Boolean;  { True while every component examined so far converged }
begin
  J := Lbound;
  repeat
    { The loop exits as soon as one test fails, so the running
      conjunction reduces to the result of the current test alone }
    Ok := Abs(X[J] - OldX[J]) < Max(Tol, Tol * Abs(OldX[J]));
    Inc(J);
  until (not Ok) or (J > Ubound);
  ParamConv := Ok;
end;
function Marquardt(Func : TFuncNVar;
                   HessGrad : THessGrad;
                   X : TVector;
                   Lbound, Ubound : Integer;
                   MaxIter : Integer;
                   Tol : Float;
                   var F_min : Float;
                   H_inv : TMatrix) : Integer;
{ ----------------------------------------------------------------------
  Minimizes a function of several variables by Marquardt's method.
  Input  : Func     = objective function
           HessGrad = procedure computing gradient G and hessian H at X
           X        = initial parameter estimates
           Lbound,
           Ubound   = indices of first and last fitted parameter
           MaxIter  = maximum number of iterations
           Tol      = required precision on the parameters
  Output : X        = refined parameters
           F_min    = function value at the minimum
           H_inv    = inverse hessian at the minimum
  Returns: an OPT_xxx status code (OPT_OK on success, OPT_SING for a
           singular hessian, OPT_BIG_LAMBDA or OPT_NON_CONV on failure).
  ---------------------------------------------------------------------- }
const
  LAMBDA0   = 1.0E-2;   { Initial lambda value }
  LAMBDAMAX = 1.0E+3;   { Highest lambda value }
  FTOL      = 1.0E-10;  { Tolerance on function decrease }
var
  Lambda,
  Lambda1   : Float;    { Marquardt's lambda }
  I         : Integer;  { Loop variable }
  OldX      : TVector;  { Old parameters }
  G         : TVector;  { Gradient vector }
  H         : TMatrix;  { Hessian matrix }
  A         : TMatrix;  { Modified Hessian matrix }
  Det       : Float;    { Determinant of A }
  DeltaX    : TVector;  { New search direction }
  F1        : Float;    { New minimum }
  Lambda_Ok : Boolean;  { Successful Lambda decrease }
  Conv      : Boolean;  { Convergence reached }
  Done      : Boolean;  { Iterations done }
  Iter      : Integer;  { Iteration count }
  ErrCode   : Integer;  { Error code }
begin
  if WriteLogFile then
    begin
      CreateLogFile;
      WriteLn(LogFile, 'Marquardt');
      WriteLn(LogFile, 'Iter F Lambda');
    end;

  Lambda := LAMBDA0;

  DimVector(OldX, Ubound);
  DimVector(G, Ubound);
  DimMatrix(H, Ubound, Ubound);
  DimMatrix(A, Ubound, Ubound);
  DimVector(DeltaX, Ubound);

  F_min := Func(X);    { Initial function value }
  LinObjFunc := Func;  { Function for line minimization }

  Iter := 1;
  Conv := False;
  Done := False;

  repeat
    if WriteLogFile then
      WriteLn(LogFile, Iter:4, ' ', F_min:12, ' ', Lambda:12);

    { Save current parameters }
    CopyVector(OldX, X, Lbound, Ubound);

    { Compute Gradient and Hessian }
    HessGrad(Func, X, Lbound, Ubound, G, H);
    CopyMatrix(A, H, Lbound, Lbound, Ubound, Ubound);

    { Change sign of gradient }
    for I := Lbound to Ubound do
      G[I] := - G[I];

    if Conv then  { Newton-Raphson iteration }
      begin
        ErrCode := GaussJordan(A, G, Lbound, Ubound, H_inv, DeltaX, Det);
        if ErrCode = MAT_OK then
          for I := Lbound to Ubound do
            X[I] := OldX[I] + DeltaX[I];
        Done := True;
      end
    else          { Marquardt iteration }
      begin
        { FIX: seed F1 with the current minimum, so that the
          'F_min := F1' after the inner loop is well-defined even
          when GaussJordan (or LinMin) fails before LinMin has ever
          assigned F1 — previously F_min was overwritten with an
          uninitialized value in that case. }
        F1 := F_min;
        repeat
          { Multiply each diagonal term of H by (1 + Lambda) }
          Lambda1 := 1.0 + Lambda;
          for I := Lbound to Ubound do
            A[I,I] := Lambda1 * H[I,I];

          Lambda_Ok := False;

          ErrCode := GaussJordan(A, G, Lbound, Ubound, H_inv, DeltaX, Det);

          if ErrCode = MAT_OK then
            begin
              { Initialize parameters }
              CopyVector(X, OldX, Lbound, Ubound);

              { Minimize in the direction specified by DeltaX }
              ErrCode := LinMin(Func, X, DeltaX,
                                Lbound, Ubound, 100, 0.01, F1);

              { Check that the function has decreased. Otherwise
                increase Lambda, without exceeding LAMBDAMAX }
              Lambda_Ok := (F1 - F_min) < F_min * FTOL;
              if not Lambda_Ok then Lambda := 10.0 * Lambda;
              if Lambda > LAMBDAMAX then ErrCode := OPT_BIG_LAMBDA;
            end;
        until Lambda_Ok or (ErrCode <> MAT_OK);

        { Check for convergence }
        Conv := ParamConv(OldX, X, Lbound, Ubound, Tol);

        { Prepare next iteration }
        Lambda := 0.1 * Lambda;
        F_min := F1;
      end;

    Inc(Iter);
    if Iter > MaxIter then ErrCode := OPT_NON_CONV;
  until Done or (ErrCode <> OPT_OK);

  if WriteLogFile then
    Close(LogFile);

  { Map the linear-algebra error onto the optimizer's error space }
  if ErrCode = MAT_SINGUL then ErrCode := OPT_SING;
  Marquardt := ErrCode;
end;
function BFGS(Func : TFuncNVar;
              Gradient : TGradient;
              X : TVector;
              Lbound, Ubound : Integer;
              MaxIter : Integer;
              Tol : Float;
              var F_min : Float;
              H_inv : TMatrix) : Integer;
{ ----------------------------------------------------------------------
  Minimizes a function of several variables by the quasi-Newton
  Broyden-Fletcher-Goldfarb-Shanno (BFGS) method.
  Input  : Func     = objective function
           Gradient = procedure computing the gradient G at X
           X        = initial parameter estimates
           Lbound,
           Ubound   = indices of first and last fitted parameter
           MaxIter  = maximum number of iterations
           Tol      = required precision on the parameters
  Output : X        = refined parameters
           F_min    = function value at the minimum
           H_inv    = approximate inverse hessian at the minimum
  Returns: OPT_OK on convergence, OPT_NON_CONV otherwise.
  ---------------------------------------------------------------------- }
var
  I, J, Iter : Integer;
  DeltaXmax, Gmax, P1, P2, R1, R2 : Float;
  OldX, DeltaX, dX, G, OldG, dG, HdG, R1dX, R2HdG, U, P2U : TVector;
  Conv : Boolean;

  function AbsMax(V : TVector; Lbound, Ubound : Integer) : Float;
  { Returns the component with maximum absolute value }
  var
    I : Integer;
    AbsV : TVector;
  begin
    DimVector(AbsV, Ubound);
    for I := Lbound to Ubound do
      AbsV[I] := Abs(V[I]);
    { NOTE(review): Max here is presumably the library's vector
      overload returning the largest element — confirm in the unit }
    AbsMax := Max(AbsV, Lbound, Ubound);
  end;

begin
  if WriteLogFile then
    begin
      CreateLogFile;
      WriteLn(LogFile, 'BFGS');
      WriteLn(LogFile, 'Iter F');
    end;

  DimVector(OldX, Ubound);
  DimVector(DeltaX, Ubound);
  DimVector(dX, Ubound);
  DimVector(G, Ubound);
  DimVector(OldG, Ubound);
  DimVector(dG, Ubound);
  DimVector(HdG, Ubound);
  DimVector(R1dX, Ubound);
  DimVector(R2HdG, Ubound);
  DimVector(U, Ubound);
  DimVector(P2U, Ubound);

  Iter := 0;
  Conv := False;

  LinObjFunc := Func;  { Function for line minimization }

  { Initialize function }
  F_min := Func(X);

  { Initialize inverse hessian to unit matrix }
  for I := Lbound to Ubound do
    for J := Lbound to Ubound do
      if I = J then H_inv[I,J] := 1.0 else H_inv[I,J] := 0.0;

  { Initialize gradient }
  Gradient(Func, X, Lbound, Ubound, G);
  Gmax := AbsMax(G, Lbound, Ubound);

  { Initialize search direction }
  if Gmax > MACHEP then
    for I := Lbound to Ubound do
      DeltaX[I] := - G[I]
  else
    Conv := True;  { Quit if gradient is already small }

  while (not Conv) and (Iter < MaxIter) do
    begin
      if WriteLogFile then
        WriteLn(LogFile, Iter:4, ' ', F_min:12);

      { Normalize search direction to avoid excessive displacements }
      DeltaXmax := AbsMax(DeltaX, Lbound, Ubound);
      if DeltaXmax > 1.0 then
        for I := Lbound to Ubound do
          DeltaX[I] := DeltaX[I] / DeltaXmax;

      { Save old parameters and gradient }
      CopyVector(OldX, X, Lbound, Ubound);
      CopyVector(OldG, G, Lbound, Ubound);

      { Minimize along the direction specified by DeltaX }
      LinMin(Func, X, DeltaX, Lbound, Ubound, 100, 0.01, F_min);

      { Compute new gradient }
      Gradient(Func, X, Lbound, Ubound, G);

      { Compute differences between two successive
        estimations of parameter vector and gradient vector }
      for I := Lbound to Ubound do
        begin
          dX[I] := X[I] - OldX[I];
          dG[I] := G[I] - OldG[I];
        end;

      { Multiply by inverse hessian }
      for I := Lbound to Ubound do
        begin
          HdG[I] := 0.0;
          for J := Lbound to Ubound do
            HdG[I] := HdG[I] + H_inv[I,J] * dG[J];
        end;

      { Scalar products in denominator of BFGS formula }
      P1 := 0.0; P2 := 0.0;
      for I := Lbound to Ubound do
        begin
          P1 := P1 + dX[I] * dG[I];
          P2 := P2 + dG[I] * HdG[I];
        end;

      { A vanishing denominator would make the update singular;
        treat it as termination, as the original code did }
      if (P1 = 0.0) or (P2 = 0.0) then
        Conv := True
      else
        begin
          { Inverses of scalar products }
          R1 := 1.0 / P1; R2 := 1.0 / P2;

          { Compute BFGS correction terms }
          for I := Lbound to Ubound do
            begin
              R1dX[I] := R1 * dX[I];
              R2HdG[I] := R2 * HdG[I];
              U[I] := R1dX[I] - R2HdG[I];
              P2U[I] := P2 * U[I];
            end;

          { Update inverse hessian }
          for I := Lbound to Ubound do
            for J := Lbound to Ubound do
              H_inv[I,J] := H_inv[I,J] + R1dX[I] * dX[J]
                            - R2HdG[I] * HdG[J] + P2U[I] * U[J];

          { Update search direction }
          for I := Lbound to Ubound do
            begin
              DeltaX[I] := 0.0;
              for J := Lbound to Ubound do
                DeltaX[I] := DeltaX[I] - H_inv[I,J] * G[J];
            end;

          { Test convergence and update iteration count }
          Conv := ParamConv(OldX, X, Lbound, Ubound, Tol);
          Inc(Iter);
        end;
    end;

  if WriteLogFile then
    Close(LogFile);

  { FIX: the original test (Iter > MaxIter) could never be true —
    the while loop exits with Iter <= MaxIter — so running out of
    iterations without convergence was misreported as OPT_OK.
    Report success only when convergence was actually reached. }
  if Conv then
    BFGS := OPT_OK
  else
    BFGS := OPT_NON_CONV;
end;
begin
  { Unit initialization section. }
  { Sets Eps to the cube root of the machine epsilon — the classical   }
  { step size for forward-difference numerical differentiation.        }
  { NOTE(review): Eps, Power and MACHEP are declared elsewhere in the  }
  { unit/library; their exact use by the differentiation routines is   }
  { not visible in this chunk — confirm against the full source.       }
  Eps := Power(MACHEP, 0.333);
end.
{ End of optim.pas — trailing web-page artifacts (hotkey help text) removed }