% penalty.m
function [x,ev,j] = penalty (x0,mu,tol,m,f,p,q,dm)
%-----------------------------------------------------------------------
% Usage: [x,ev,j] = penalty (x0,mu,tol,m,f,p,q,dm)
%
% Description: Use the self-scaled DFP method with penalty
% functions to solve the following n-dimensional
% constrained optimization problem:
%
% minimize: f(x)
% subject to: p(x) = 0
% q(x) >= 0
%
% Inputs: x0 = n by 1 vector containing initial guess
% mu = penalty parameter (mu >= 0)
% tol = error tolerance for terminating the search
% m = maximum number of iterations (m >= 1)
% f = string containing name of objective function: f(x)
% p = string containing name of equality constraint
% function: p(x) = 0
% q = string containing name of inequality constraint
% function: q(x) >= 0. The forms of f, p, and q
% are:
%
% function y = f(x)
% function u = p(x)
% function v = q(x)
%
% When f is called, it must return the value of the
% objective function f(x). When p is called, it must
% take the n by 1 vector x and compute the r by 1
% equality constraint vector u = p(x). When q is
% called, it must take the n by 1 vector x and
% compute the s by 1 inequality constraint vector
% v = q(x). If p = '', then no equality constraints
% are applied in which case a user-supplied function
% is not required. A similar remark holds for the
% inequality constraints when q = ''. Consequently,
% penalty can be used to solve the following
% special cases:
%
% ------------------------------------
% p = '' => no equality constraints
% q = '' => no inequality constraints
% ------------------------------------
%
% dm = optional display mode. If present,
% intermediate results are displayed.
%
% Outputs: x = n by 1 solution vector
% ev = number of scalar function evaluations
% j = number of iterations. If it is less than the
% user-specified maximum, m, then the following
% convergence criterion was satisfied where em denotes
% the machine epsilon, and h is the step length:
%
% (||dF(x)/dx|| < tol) or (h*||x|| < em)
%
% Here F(x) = f(x) + mu*(P(x) + Q(x)) is the generalized
% objective function. P(x) is the penalty function
% associated with the equality constraint, p(x) = 0, and
% Q(x) is the penalty function associated with the
% inequality constraint, q(x) >= 0.
%-----------------------------------------------------------------------
% Initialize and validate arguments
n = length (x0);
chkvec (x0,1,'penalty');
mu = args (mu,0,mu,5,'penalty');
tol = args (tol,0,tol,6,'penalty');
m = args (m,1,m,7,'penalty');
if getfun(p)
   chkfun(feval(p,x0),6,'penalty');
end
if getfun(q)
   chkfun(feval(q,x0),7,'penalty');
end
show = nargin > 7;       % display intermediate results? (avoid shadowing built-in "display")
gamma = tol + 1;         % infinity norm of gradient (initialized to force one pass)
j = 0;                   % iteration counter
k = 0;                   % restart counter: Q is reset every n iterations
h = 1;                   % line-search step length
err = 0;                 % nonzero when bracket fails to bracket a minimum
ev = 0;                  % running count of scalar function evaluations
e = 2*sqrt(eps);         % golden-section search tolerance
x = x0;
dx = zeros (n,1);        % last step taken, h*d
dg = zeros (n,1);        % change in gradient between iterations
g1 = zeros (n,1);        % current gradient of F
g2 = zeros (n,1);        % Q*dg work vector
d = zeros (n,1);         % search direction
Q = eye (n);             % estimate of inverse Hessian of F
% Find optimal x
g1 = gradfmu (f,p,q,x,mu);
ev = ev + 2*n;           % assumes gradfmu uses central differences (2n evals) -- TODO confirm
hwbar = waitbar(0,'Computing Optimum: penalty');
while (j < m) && (gamma > tol) && (~err)
   % Update progress bar (pass the handle so the correct figure is updated)
   waitbar (max(j/m,tol/gamma),hwbar)
   % Search direction d = -Q*g
   d = -Q*g1;
   delta = norm (d,inf);
   if show
      % Record F(x) before the step; computed here so y is defined even
      % when the bracketing step below fails (err ~= 0).
      y = fmu (f,p,q,x,mu);
   end
   % Perform a line search along direction d
   [a,b,c,err,eval] = bracket (delta,x,d,mu,f,p,q);
   ev = ev + eval;
   if ~err
      [h,eval] = golden (x,d,a,c,e,mu,f,p,q);
      ev = ev + eval;
      x = x + h*d;
      dx = h*d;
   end
   % Compute new gradient and its change
   dg = g1;
   g1 = gradfmu (f,p,q,x,mu);
   ev = ev + 2*n;
   gamma = norm (g1,inf);
   dg = g1 - dg;
   if show
      fprintf ('\n(k h gamma df) = (%5i %10.3g %10.3g %10.3g)',...
               k,h,gamma,fmu(f,p,q,x,mu)-y);
   end
   % Self-scaled DFP update of the inverse-Hessian estimate Q.
   % Both denominators are guarded: b2 can be zero (e.g. after a failed
   % line search, dx = 0), which would otherwise fill Q with Inf/NaN.
   g2 = Q*dg;
   b1 = dot (g2,dg);
   b2 = dot (dx,dg);
   if (abs(b1) > eps) && (abs(b2) > eps)
      beta = b2/b1;
      Q = beta*(Q - g2*g2'/b1) + dx*dx'/b2;
   end
   % Update indices; restart with Q = I every n iterations
   j = j + 1;
   k = mod(k+1,n);
   if k == 0
      Q = eye(n);
   end
end
close (hwbar)