Algorithm 22. Function minimisation by conjugate gradients (cont.)
f : real; {temporary function value}
funcount : integer; {count of function evaluations}
g : rvector; {to hold gradient}
G1, G2 : real; {temporary working storage}
G3, gradproj : real; {temporary working storage}
gradcount : integer; {count of gradient evaluations}
i, j : integer; {working integers}
method : methodtype;
newstep : real; {interpolation steplength}
notcomp : boolean; {non-computability flag}
oldstep : real; {last stepsize used}
s : real; {inner product accumulator}
setstep : real; {to allow for adjustment of best step before
next iteration}
steplength: real; {linear search steplength}
t : rvector; {to store working vector for line search}
tol : real; {convergence tolerance}
begin
writeln('alg22.pas -- Nash Algorithm 22 version 2 1988-03-24');
writeln('Conjugate gradients function minimiser');
{method:=Fletcher_Reeves;} {set here rather than have an input}
writeln('Steplength saving factor multiplies best steplength found at the');
writeln('end of each iteration as a starting value for next search');
write('Enter a steplength saving factor (sugg. 1.7) -- setstep');
readln(infile, setstep);
if infname<>'con' then writeln(setstep);
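{Values read are echoed whenever input comes from a file rather than the
console ('con'), so that the printed output forms a complete record of
the run.}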
write('Choose method (1=FR, 2=PR, 3=BS) ');
readln(infile, i); if infname<>'con' then writeln(i);
case i of
1: method:=Fletcher_Reeves;
2: method:=Polak_Ribiere;
3: method:=Beale_Sorenson;
else halt;
end;
case method of
Fletcher_Reeves: writeln('Method: Fletcher Reeves');
Polak_Ribiere: writeln('Method: Polak Ribiere');
Beale_Sorenson: writeln('Method: Beale Sorenson');
end;
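{A note on the three choices: writing gn for the current gradient, go for
the previous gradient and t for the current search direction, the standard
update factors which distinguish the methods are, in outline,
   Fletcher-Reeves:  beta = gn.gn / (go.go)
   Polak-Ribiere:    beta = gn.(gn - go) / (go.go)
   Beale-Sorenson:   beta = gn.(gn - go) / (t.(gn - go))
where . denotes the inner product; the accumulation of these products
appears within the main iteration of the algorithm.}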
fail:=false; {method has not yet failed!}
cyclimit:=n; {use n steps of cg before re-setting to steepest descent}
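{Rationale: on a general (non-quadratic) function the search directions
gradually lose conjugacy, so no more than n conjugate gradient steps are
taken before the search is restarted from the steepest descent direction.}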
if intol<0.0 then intol:=Calceps; {allows machine to set a value}
tol:=intol*n*sqrt(intol); {gradient test tolerance}
{Note: this tolerance should properly be scaled to the problem at hand.
However, such a scaling presumes knowledge of the function and gradient
which we do not usually have at this stage of the minimisation.}
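{As a rough worked example: with single-precision machine precision
intol approx. 1.2E-7 and n=2 parameters,
tol = 1.2E-7 * 2 * sqrt(1.2E-7), which is approximately 8.3E-11.}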
writeln('tolerance used in gradient test=', tol);
f:=fminfn(n, Bvec, Workdata, notcomp); {initial fn calculation} {STEP 1}
if notcomp then
begin
writeln('**** Function cannot be evaluated at initial parameters ****');
fail := true; {method has failed}