UNCONSTRAINED OPTIMIZATION

with

$$\beta_n = \frac{[\mathbf{g}_{n+1} - \mathbf{g}_n]^T \mathbf{g}_{n+1}}{\mathbf{g}_n^T \mathbf{g}_n}\ \text{(PR)} \qquad \text{or} \qquad \beta_n = \frac{\mathbf{g}_{n+1}^T \mathbf{g}_{n+1}}{\mathbf{g}_n^T \mathbf{g}_n}\ \text{(FR)} \tag{7.1.19}$$

where the PR (Polak-Ribiere) form corresponds to Eq. (7.1.19a) and the FR (Fletcher-Reeves) form to Eq. (7.1.19b) in the routine below.
Step 3. Update the approximate solution point to x_{k+1} = x^{(N)}, which is the last temporary point.
Step 4. If x_k ≈ x_{k-1} and f(x_k) ≈ f(x_{k-1}), declare x_k to be the minimum and terminate the procedure. Otherwise, increment k by one and go back to Step 1.
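
To make the direction update concrete, the following fragment (not from the book; the gradient values g0, g1 and the previous direction s0 are made-up numbers for illustration) evaluates beta_n by both formulas of Eq. (7.1.19) and forms the new search direction s = -g_{n+1} + beta_n*s_n:

   %one direction update with each beta of Eq. (7.1.19) (illustrative values)
   g0 = [2 -1];                      %gradient g_n at the current point (assumed)
   g1 = [1  3];                      %gradient g_(n+1) at the next point (assumed)
   s0 = -g0;                         %previous search direction (steepest descent)
   beta_PR = (g1 - g0)*g1'/(g0*g0'); %Polak-Ribiere,   Eq. (7.1.19a)
   beta_FR = g1*g1'/(g0*g0');        %Fletcher-Reeves, Eq. (7.1.19b)
   s_PR = -g1 + beta_PR*s0           %new conjugate direction (PR)
   s_FR = -g1 + beta_FR*s0           %new conjugate direction (FR)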



function [xo,fo] = opt_conjg(f,x0,TolX,TolFun,alpha0,MaxIter,KC)
%KC <= 1 (including the default 0): Polak-Ribiere conjugate gradient method
%KC  = 2: Fletcher-Reeves conjugate gradient method
if nargin < 7, KC = 0; end
if nargin < 6, MaxIter = 100; end
if nargin < 5, alpha0 = 10; end
if nargin < 4, TolFun = 1e-8; end
if nargin < 3, TolX = 1e-6; end
N = length(x0); %dimension of the variable
nmax1 = 20; warning = 0; h = 1e-4;
x = x0;  fx = feval(f,x0);  fx0 = fx;
for k = 1:MaxIter
  xk0 = x; fk0 = fx; alpha = alpha0;
  g = grad(f,x,h);  s = -g; %start with the steepest descent direction
  for n = 1:N
    alpha = alpha0;
    fx1 = feval(f,x + alpha*2*s); %trial value at step 2*alpha along s
    for n1 = 1:nmax1 %to find the optimum step size by line search
      fx2 = fx1; fx1 = feval(f,x + alpha*s);
      if fx0 > fx1 + TolFun && fx1 < fx2 - TolFun %fx0 > fx1 < fx2: bracketed
        den = 4*fx1 - 2*fx0 - 2*fx2; num = den - fx0 + fx2; %Eq. (7.1.5)
        alpha = alpha*num/den; %quadratic-interpolation step size
        x = x + alpha*s;  fx = feval(f,x);
        break;
      elseif n1 == nmax1/2 %halfway through, try the opposite direction
        alpha = -alpha0;  fx1 = feval(f,x + alpha*2*s);
      else
        alpha = alpha/2;
      end
    end
    x0 = x;  fx0 = fx;
    if n < N %update the conjugate search direction by Eq. (7.1.19)
      g1 = grad(f,x,h);
      if KC <= 1,  s = -g1 + (g1 - g)*g1'/(g*g' + 1e-5)*s; %(7.1.19a): PR
      else  s = -g1 + g1*g1'/(g*g' + 1e-5)*s; %(7.1.19b): FR
      end
      g = g1;
    end
    if n1 >= nmax1, warning = warning + 1; %can't find the optimum step size
     else  warning = 0;
    end
  end
  if warning >= 2 || (norm(x - xk0) < TolX && abs(fx - fk0) < TolFun), break; end
end
xo = x;  fo = fx;
if k == MaxIter, fprintf('Just best in %d iterations\n',MaxIter), end
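
The routine above calls a gradient function grad(f,x,h), which is listed elsewhere in the book. For completeness, here is a minimal sketch of such a routine, assuming a central-difference approximation; the book's own version may differ in detail.

   function g = grad(f,x,h)
   %minimal numerical gradient sketch (assumed central differences);
   %the book's grad() routine is defined elsewhere and may differ
   if nargin < 3, h = 1e-4; end
   g = zeros(size(x));
   for n = 1:length(x)
     dx = zeros(size(x)); dx(n) = h; %perturb the n-th component only
     g(n) = (feval(f,x + dx) - feval(f,x - dx))/(2*h); %central difference
   end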
%nm716: to minimize f(x) by the conjugate gradient method
f713 = inline('x(1).^2 - 4*x(1) - x(1).*x(2) + x(2).^2 - x(2)','x');
x0 = [0 0], TolX = 1e-4; TolFun = 1e-4; alpha0 = 10; MaxIter = 100;
[xo,fo] = opt_conjg(f713,x0,TolX,TolFun,alpha0,MaxIter,1) %Polak-Ribiere
[xo,fo] = opt_conjg(f713,x0,TolX,TolFun,alpha0,MaxIter,2) %Fletcher-Reeves
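
For this quadratic objective the result can be checked analytically: setting the gradient (2x_1 - 4 - x_2, -x_1 + 2x_2 - 1) to zero gives x = (3, 2) with f(3, 2) = -7, so both calls should return xo ≈ [3 2] and fo ≈ -7.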