Algorithm 22. Function minimisation by conjugate gradients (cont.)
end;
steplength:=oldstep; {STEP 9}
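{the line search starts from oldstep, the step length carried over (scaled by setstep) from the previous iteration}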
{STEP 10: step along search direction}
accpoint:=false; {don't have a good point yet}
repeat {line search}
count:=0; {to count unchanged parameters}
for i:=1 to n do
begin
Bvec[i]:=X[i]+steplength*t[i];
if (reltest+X[i])=(reltest+Bvec[i]) then count:=count+1;
end; {loop on i}
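{adding reltest to both sides tests whether the step actually changes X[i] at working precision; count=n means no parameter was altered, i.e. the step is negligibly small}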
if count<n then {STEP 11} {main convergence test}
begin {STEP 12} {can proceed with linear search}
f:=fminfn(n, Bvec, Workdata, notcomp); {function calculation}
funcount:=funcount+1;
accpoint:=(not notcomp) and
(f<=Fmin+gradproj*steplength*acctol);
{STEP 13: a point is acceptable only if function is computable
(not notcomp) and it satisfies the acceptable point criterion}
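{equivalently f(Bvec) <= f(X) + acctol*steplength*gradproj, where gradproj is the projection of the gradient on the search direction t (normally negative for descent); an Armijo-style sufficient-decrease test}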
if not accpoint then
begin
steplength:=steplength*stepredn; write('*');
end;
end; {compute and test for linear search}
until (count=n) or accpoint; {end of loop for line search}
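{the search ends either with an acceptable point or with count=n, i.e. the step has been reduced until it no longer changes any parameter}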
if count<n then {STEP 14}
begin {replacements for STEPS 15 onward}
newstep:=2*((f-Fmin)-gradproj*steplength);
{quadratic inverse interpolation}
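{fit a parabola with value Fmin and slope gradproj at step 0 and value f at step steplength; newstep here equals twice the curvature times sqr(steplength), so newstep>0 means the parabola opens upward; its minimum lies at -gradproj*sqr(steplength)/newstep, computed below}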
if newstep>0 then
begin {calc interp}
newstep:=-gradproj*sqr(steplength)/newstep;
for i:=1 to n do
begin
Bvec[i]:=X[i]+newstep*t[i];
end; {no check yet on change in parameters}
Fmin:=f; {save new lowest point}
f:=fminfn(n, Bvec, Workdata, notcomp);
funcount:=funcount+1;
if f<Fmin then
begin
Fmin:=f; write(' i< ');
end
else {reset to best Bvec}
begin
write(' i> ');
for i:=1 to n do Bvec[i]:=X[i]+steplength*t[i];
end;
end; {interpolation}
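{the characters *, i< and i> written above are progress markers: * flags a step-length reduction, i< an interpolated point that lowered the function, i> one that did not}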
end; {if count < n}
end; {if G1>tol}
oldstep:=setstep*steplength; {a heuristic to prepare next iteration}
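{the next line search will begin from setstep times the last successful step, so the trial step can grow or shrink as the search proceeds}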