Algorithm 22. Function minimisation by conjugate gradients (cont.)
end
else {proceed with minimisation}
begin
Fmin:=f;{save the best value so far}
funcount:=1; {function count initialised to 1, since f has already been evaluated}
gradcount:=0; {initialise gradient count}
repeat {STEP 2: iteration of the method -- terminates when no progress
can be made and the search is a steepest descent.}
for i:=1 to n do
begin
t[i]:=0.0; {to zero step vector}
c[i]:=0.0; {to zero 'last' gradient}
end;
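{Note: with t and c both zeroed, the first direction generated at STEP 8
below reduces to the steepest descent direction -g.}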
cycle:=0; {STEP 3: main loop of cg process}
oldstep:=1.0; {initially a full step}
count:=0; {to ensure this is set < n}
repeat {until one cg cycle complete}
cycle:=cycle+1;
writeln(gradcount,' ',funcount,' ',Fmin);
write('parameters ');
for i:=1 to n do
begin
write(Bvec[i]:10:5,' ');
if (7 * (i div 7) = i) and (i<n) then writeln; {new output line after every seventh parameter}
end;
writeln;
gradcount:=gradcount+1; {STEP 4: increment the gradient evaluation count}
fmingr(n, Bvec, Workdata, g);
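{fmingr is assumed to be the user-supplied procedure which places the
gradient of the function at the point Bvec in the vector g}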
G1:=0.0; G2:=0.0; {STEP 5}
for i:=1 to n do
begin
X[i]:=Bvec[i];{save best parameters}
case method of
Fletcher_Reeves: begin
G1:=G1+sqr(g[i]); G2:=G2+sqr(c[i]);
end;
Polak_Ribiere : begin
G1:=G1+g[i]*(g[i]-c[i]); G2:=G2+sqr(c[i]);
end;
Beale_Sorenson : begin
G1:=G1+g[i]*(g[i]-c[i]); G2:=G2+t[i]*(g[i]-c[i]);
end;
end; {case statement for method selection}
c[i]:=g[i];{save gradient}
end; {loop on i}
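{Note: G1 and G2 accumulate the numerator and denominator of the
coefficient beta = G1/G2 (G3 below), which at STEP 8 combines the new
negative gradient with the previous search direction t:
Fletcher-Reeves    beta = g'g / c'c
Polak-Ribiere      beta = g'(g - c) / c'c
Beale-Sorenson     beta = g'(g - c) / t'(g - c)
where g is the current gradient and c the previous gradient.}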
if G1>tol then {STEP 6: descent sufficient to proceed}
begin {STEP 7: generate direction}
if G2>0.0 then G3:=G1/G2 else G3:=1.0; {ensure G3 defined}
gradproj:=0.0; {STEP 8}
for i:=1 to n do
begin
t[i]:=t[i]*G3-g[i]; gradproj:=gradproj+t[i]*g[i];