C:/programs/UNCMIN/src/include/Uncmin.h

00001 /*! \file Uncmin.h
00002 
00003   \brief
00004   C++ implementation of UNCMIN routines for unconstrained minimization
00005   
00006   This software is available at 
00007   http://www.smallwaters.com/software/cpp/uncmin.html
00008  
00009   Version 080309
00010   This release updates original work by Brad Hanson (http://www.b-a-h.com/) in 2001.
00011  
00012   The UNCMIN routines are based on Appendix A of:
00013  
00014   Dennis, J. E., & Schnabel, R. B. (1996). Numerical methods for
00015   unconstrained optimization and nonlinear equations. Philadelphia:
00016   Society for Industrial and Applied Mathematics.
00017  
00018   The original UNCMIN routines in FORTRAN are available from GAMS
00019   (http://gams.nist.gov/) as OPTIF9 and OPTIF0.
00020  
00021   FORTRAN UNCMIN routines were translated to Java (Uncmin_f77.java) by
00022   Steve Verrill (steve@ws13.fpl.fs.fed.us).
00023   Uncmin_f77.java is available at http://www1.fpl.fs.fed.us/optimization.html
00024 
00025   The routines in Uncmin_f77.java were translated to C++ by
00026   Brad Hanson.
00027   
00028   (c) 2008, Werner Wothke, maintenance programming and documentation.
00029  
00030  
00031   Uncmin is a templated class with template parameters:
00032  
00033   V - A vector class of double precision numbers. Member functions
00034       that should be defined for class V are:
00035  
00036       V& operator=(const V &A); // assignment
00037  
00038       V& operator=(const double& scalar); // assign a value to all elements
00039  
00040       double& operator()(int i); // FORTRAN style one-offset subscripting
00041  
00042       int size(); // returns number of elements in vector
00043  
00044   M - A matrix class of double precision numbers. Member functions that
00045       should be defined for class M are:
00046  
00047       double operator()(int i, int j); // FORTRAN style one-offset subscripting
00048  
00049       int num_rows(); // returns number of rows in matrix
00050       int num_cols(); // returns number of columns in matrix
00051 
00052   FUNC - A class defining the function to minimize, its gradient and hessian.
00053          An example of how this class should be defined is:
00054   
00055     // V is vector class, M is matrix class
00056     template <class V, class M>
00057     class minclass
00058     {
       public:
00059        double f_to_minimize(V &x);
00060        // Returns value of function to minimize
00061  
00062        void gradient(V &x, V &g);
00063        // Returns gradient of function in g. This does not have
00064        // to be defined, in which case the gradient is approximated.
00065        // If the gradient is not defined, HasAnalyticGradient should 
00066        // return 0.
00067  
00068        void hessian(V &x, M &h);
00069        // Returns hessian of function in h. This does not have
00070        // to be defined, in which case the hessian is approximated.
00071        // If the hessian is not defined, HasAnalyticHessian should 
00072        // return 0.
00073  
00074        int HasAnalyticGradient();
00075        // Returns 1 if gradient is defined, otherwise returns 0
00076  
00077        int HasAnalyticHessian();
00078        // Returns 1 if hessian is defined, otherwise returns 0
00079  
00080        int ValidParameters(V &x);
00081        // Returns 1 if f_to_minimize(x) is defined for argument x.
00082  
00083        int dim();
00084        // Returns dimension of problem (number of elements in vector x
00085        // passed to f_to_minimize)
00086     };
00087  
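    For illustration only, a minimal (hypothetical) vector type satisfying the
    requirements for V listed above might look like the following sketch; the
    SCPPNT Vector<double> class used in the example below is assumed to provide
    this interface (together with the constructors and the newsize() member that
    the implementation in this file also calls). Requires #include <vector>.

    class MyVec
    {
    public:
       MyVec(int n = 0) : data_(n, 0.0) {}            // size constructor
       MyVec(int n, double val) : data_(n, val) {}    // size + fill constructor

       MyVec& operator=(const MyVec &A) { data_ = A.data_; return *this; }

       MyVec& operator=(const double &scalar)         // assign a value to all elements
       {
          for (int i = 0; i < (int) data_.size(); i++) data_[i] = scalar;
          return *this;
       }

       double& operator()(int i) { return data_[i - 1]; } // one-offset subscripting

       int size() { return (int) data_.size(); }

       void newsize(int n) { data_.assign(n, 0.0); }  // used by Uncmin::dfault()

    private:
       std::vector<double> data_;
    };

    A matrix type for M is analogous: double operator()(int i, int j) with
    one-offset indices, plus int num_rows() and int num_cols().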
00088     An example of how to use Uncmin to minimize a function defined in
00089     a class minclass with vector and matrix classes from the Simple
00090     C++ Numerical Toolkit (http://www.smallwaters.com/software/cpp/scppnt.html):
00091  
00092     #include "Uncmin.h"
00093     #include "vec.h"  // Simple C++ Numeric Toolkit (SCPPNT) vector class
00094     #include "cmat.h" // Simple C++ Numeric Toolkit (SCPPNT) matrix class
00095     #include <cstdio>
00096  
00097     using namespace SCPPNT;
00098  
00099     // Dimension of problem (number of parameters)
00100     const int dim = 4;
00101  
00102     // Create function object
00103     minclass<Vector<double>, Matrix<double> > test;
00104  
00105     // create Uncmin object
00106     Uncmin<Vector<double>, Matrix<double>, minclass<Vector<double>, Matrix<double> > > min(&test);
00107  
00108     // Starting values
00109     double start_values[4] = {1.0, 1.0, 1.0, 1.0}; // use some appropriate values here
00110     Vector<double> start(start_values, start_values+dim);
00111 
00112     // xpls will contain solution, gpls will contain
00113     // gradient at solution after call to min.Minimize
00114     Vector<double> xpls(dim), gpls(dim);
00115 
00116     // fpls contains the value of the function at
00117     // the solution given by xpls.
00118     double fpls;
00119  
00120     // Minimize function.
00121     // Minimize returns zero if successful (0, 1, 2, 3
00122     // returned by GetMessage), non-zero if not successful.
00123     if (min.Minimize(start, xpls,  fpls,  gpls))
00124     {
00125        int msg = min.GetMessage(); // find out what went wrong
00126        std::printf("\nMessage returned from Uncmin: %d\n", msg);
00127     }
00128 
00129     The GetMessage member function returns the status of the solution found
00130     by the last call to the Minimize member function.
00131     Possible values returned by GetMessage are:
00132  
00133     0 = Optimal solution found, terminated with gradient small
00134     1 = Terminated with gradient small, xpls is probably optimal.
00135     2 = Terminated with stepsize small, xpls is probably optimal.
00136     3 = Lower point cannot be found, xpls is probably optimal.
00137     4 = Iteration limit (default 150) exceeded.
00138     5 = Too many large steps, function may be unbounded.
00139     -1 = Analytic gradient check requested, but no analytic gradient supplied
00140     -2 = Analytic hessian check requested, but no analytic hessian supplied
00141     -3 = Illegal dimension
00142     -4 = Illegal tolerance
00143     -5 = Illegal iteration limit
00144     -6 = Minimization function has no good digits
00145     -7 = Iteration limit exceeded in lnsrch
00146     -20 = Function not defined at starting value
00147     -21 = Check of analytic gradient failed
00148     -22 = Check of analytic hessian failed
00149 
00150 
00151     The symbol BOOST_NO_STDC_NAMESPACE should be defined if the C++ compiler
00152     does not put the standard C library functions in the std namespace
00153     (this is needed for Visual C++ 6).
00154  
00155     The symbol BOOST_NO_LIMITS should be defined if the standard C++
00156     library header file <limits> is not available.
00157     (this is needed for Visual C++ 6).
00158 
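    As an illustration, with a compiler that lacks <limits> and does not place
    the C library in namespace std (such as Visual C++ 6), both symbols would
    be defined before including this header:

    #define BOOST_NO_STDC_NAMESPACE
    #define BOOST_NO_LIMITS
    #include "Uncmin.h"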
00159  
00160     Version History
00161  
00162     Version 080309 (March 9, 2008)
00163     
00164     Edited doxygen-compatible documentation, second pass. Added descriptive comments
00165     wherever possible, incorporated useful information from the existing FORTRAN 
00166     and Java documentation sets, and removed those outdated documents afterwards. 
00167     
00168     Version 071226 (December 26, 2007)
00169  
00170     Refactored num_cols property to num_columns in Uncmin::Minimize().
00171     Tagged all public methods and properties with doxygen-compatible comments.
00172  
00173     Version 0.001206 (February 10, 2001)
00174  
00175     Added BOOST_NO_STDC_NAMESPACE and BOOST_NO_LIMITS symbols which
00176     need to be defined to compile Uncmin.h using Microsoft Visual C++ 6.
00177 
00178  */
00179 
00180 #ifndef UNCMIN_H_
00181 #define UNCMIN_H_
00182 
00183 #include <cstdio>
00184 #include <cmath>
00185 
00186 #ifdef BOOST_NO_STDC_NAMESPACE
00187 // If the standard C library functions are not in the std namespace (like Microsoft Visual C++ 6)
00188 namespace std
00189 { using ::sqrt; using ::pow; using ::fabs; using ::log; using ::FILE; using ::fprintf;}
00190 #endif
00191 
00192 #ifndef BOOST_NO_LIMITS
00193 // include limits for epsilon function used in SetTolerances.
00194 #include <limits>
00195 #else
00196 // If there is no limits header.
00197 #include <float.h>
00198 #endif
00199 
00200 /*! Documentation of member functions is given in their definitions which follow
00201  the Uncmin class definition */
00202 template<class V, class M, class FUNC> class Uncmin
00203 {
00204 
00205 public:
00206 
00207   typedef FUNC function_type;   //!< Type definition of function object to minimize.
00208 
00209   /* Constructor. */
00210   Uncmin(FUNC *f);
00211   
00212   /* Constructor. */
00213   Uncmin(int d = 1);
00214 
00215   /* Destructor */
00216   ~Uncmin();
00217 
00218   /* Calculate function minimum. */
00219   int Minimize(V &start, V &xpls, double &fpls, V &gpls, M &hpls);
00220 
00221   /* Version of Minimize where hessian matrix is allocated locally rather
00222    than passed as an argument to the function. */
00223   int Minimize(V &start, V &xpls, double &fpls, V &gpls);
00224 
00225   /* Get object containing function to minimize, gradient, and hessian. */
00226   FUNC *GetFunction();
00227 
00228   /* Set object containing function to minimize, gradient, and hessian. */
00229   void SetFunction(FUNC *f);
00230 
00231   /* Methods to set options. */
00232   void SetFuncExpensive(int iexp);
00233 
00234   /* Set typical magnitude of each argument to function. */
00235   int SetScaleArg(V typsiz);
00236 
00237   /* Set typical magnitude of function near minimum. */
00238   void SetScaleFunc(double fscale);
00239 
00240   /* Set the number of reliable digits returned by f_to_minimize. */
00241   void SetNumDigits(double ndigit);
00242 
00243   /* Set maximum number of iterations. */
00244   void SetMaxIter(int maxiter);
00245 
00246   /* Set step tolerance, gradient tolerance, and machine epsilon. */
00247   void SetTolerances(double step, double grad, double macheps = -1.0);
00248 
00249   /* Set maximum allowable scaled step length at any iteration. */
00250   void SetMaxStep(double stepmx);
00251 
00252   /* Set initial trust region radius. */
00253   void SetTrustRegionRadius(double dlt);
00254 
00255   /* Set flags to check analytic gradient and hessian. */
00256   void SetChecks(int check_gradient, int check_hessian = 0);
00257 
00258   /* Set flags to indicate whether results are printed. */
00259   void SetPrint(std::FILE *file, int print_results, int print_iterations = 0);
00260 
00261   /* Set method to use to solve minimization problem.
00262    1 = line search, 2 = double dogleg, 3 = More-Hebdon.  */
00263   void SetMethod(int method);
00264 
00265   /* Return stopping criteria computed at the end of the last call to Minimize. */
00266   void GetStoppingCriteria(double &gradtol, double &steptol, int &iterations);
00267 
00268   /*! Returns message generated in the last call to Minimize. */
00269   int GetMessage();
00270 
00271 private:
00272 
00273   FUNC *minclass; //!<  Class that contains function to minimize, gradient, and hessian. 
00274 
00275 
00276   int algorithm; //!<  Indicates algorithm to use to solve minimization problem: 1 = line search, 2 = double dogleg, 3 = More-Hebdon.
00277 
00278   std::FILE *mfile; //!< File to print messages to.
00279 
00280   int n; //!< Dimension of problem, provided in minclass by minclass->dim().
00281 
00282   double epsm; //!< Machine epsilon.
00283 
00284 
00285   double mLastGradCrit;  //!< Stopping criterion for gradient.
00286   
00287 
00288   double mLastStepCrit; //!<  Stopping criterion for argument change.
00289 
00290   
00291   int mLastIteration; //!<  Stopping criterion by number of iterations.
00292 
00293 
00294   int mLastMessage; //!<  Message generated by last call to Minimize.
00295 
00296   /* Options */
00297 
00298   int iexp;  //!< Flag indicating computational complexity. Set iexp = 1 for expensive calculations, iexp = 0 otherwise. If iexp = 1, the Hessian will be evaluated by secant (BFGS) update.
00299   
00300   /* These two flags are set from minclass using minclass->HasAnalyticGradient() and
00301    minclass->HasAnalyticHessian() */
00302   int iagflg; //!< Flag: iagflg = 0 if an analytic gradient is not supplied, provided by minclass.
00303   int iahflg; //!< Flag: iahflg = 0 if an analytic Hessian is not supplied, provided by minclass.
00304 
00305   int fCheckGradient; //!< fCheckGradient = 0 if analytic gradient is not checked.
00306 
00307   int fCheckHessian; //!< fCheckHessian = 0 if analytic hessian is not checked.
00308 
00309   int fPrintResults; //!< fPrintResults = 0 if results are not printed to mfile.
00310 
00311   int fPrintIterationResults; //!< fPrintIterationResults = 0 if results of each iteration are not printed to mfile.
00312 
00313   /* Tolerances */
00314 
00315   V typsiz; //!< Typical size of each argument of function to minimize.
00316 
00317   double fscale; //!< Estimate of scale of objective function.
00318 
00319   int ndigit; //!< Number of good digits in the minimization function. Special case: ndigit = -1 indicates the function provides within one or two of the full number of significant digits.
00320 
00321   int itnlim; //!< Maximum number of allowable iterations.
00322 
00323   double mdlt; //!< Trust region radius. Special case: mdlt = -1 uses the length of the initial scaled gradient instead.
00324 
00325   double gradtl; //!< Tolerance at which the gradient is considered close enough to zero to terminate the algorithm.
00326 
00327   double steptl; //!< Tolerance at which scaled distance between two successive iterates is considered close enough to zero to terminate algorithm. 
00328   
00329   double stepmx; //!< Maximum allowable step size.
00330 
00331   /* Private functions used in minimization. */
00332 
00333   void dfault();
00334 
00335   int optchk(V &x, V &sx, int &msg, int &method);
00336 
00337   void fstofd(V &xpls, double fpls, V &g, V &sx, double rnoise);
00338 
00339   void fstofd(V &xpls, V &fpls, M &a, V &sx, double rnoise, V &fhat);
00340 
00341   int grdchk(V &x, double f, V &g, V &typsiz, V &sx, double fscale, double rnf, double analtl,
00342       V &gest, int &msg);
00343 
00344   void optstp(V &xpls, double fpls, V &gpls, V &x, int &itncnt, int &icscmx, int &itrmcd, V &sx,
00345       double &fscale, int &itnlim, int &iretcd, bool &mxtake, int &msg);
00346 
00347   void hsnint(M &a, V &sx, int method);
00348 
00349   void sndofd(V &xpls, double &fpls, M &a, V &sx, double &rnoise, V &stepsz, V &anbr);
00350 
00351   int heschk(V &x, double &f, V &g, M &a, V &typsiz, V &sx, double rnf, double analtl, int &iagflg,
00352       V &udiag, V &wrk1, V &wrk2, int &msg);
00353 
00354   void result(V &x, double &f, V &g, M &a, V &p, int &itncnt, int iflg);
00355 
00356   void bakslv(M &a, V &x, V &b);
00357 
00358   void chlhsn(M &a, V &sx, V &udiag);
00359 
00360   void choldc(M &a, double diagmx, double tol, double &addmax);
00361 
00362   void dogdrv(V &x, double &f, V &g, M &a, V &p, V &xpls, double &fpls, V &sx, double &dlt,
00363       int &iretcd, bool &mxtake, V &sc, V &wrk1, V &wrk2, V &wrk3);
00364 
00365   void dogstp(V &g, M &a, V &p, V &sx, double rnwtln, double &dlt, bool &nwtake, bool &fstdog,
00366       V &ssd, V &v, double &cln, double &eta, V &sc);
00367 
00368   void forslv(M &a, V &x, V &b);
00369 
00370   void fstocd(V &x, V &sx, double rnoise, V &g);
00371 
00372   void hookdr(V &x, double &f, V &g, M &a, V &udiag, V &p, V &xpls, double &fpls, V &sx,
00373       double &dlt, int &iretcd, bool &mxtake, double &amu, double &dltp, double &phi,
00374       double &phip0, V &sc, V &xplsp, V &wrk0, int &itncnt);
00375 
00376   void hookst(V &g, M &a, V &udiag, V &p, V &sx, double rnwtln, double &dlt, double &amu,
00377       double &dltp, double &phi, double &phip0, bool &fstime, V &sc, bool &nwtake, V &wrk0);
00378 
00379   void lltslv(M &a, V &x, V &b);
00380 
00381   void lnsrch(V &x, double &f, V &g, V &p, V &xpls, double &fpls, bool &mxtake, int &iretcd, V &sx);
00382 
00383   void mvmltl(M &a, V &x, V &y);
00384 
00385   void mvmlts(M &a, V &x, V &y);
00386 
00387   void mvmltu(M &a, V &x, V &y);
00388 
00389   void qraux1(M &r, int i);
00390 
00391   void qraux2(M &r, int i, double a, double b);
00392 
00393   void qrupdt(M &a, V &u, V &v);
00394 
00395   void secfac(V &x, V &g, M &a, V &xpls, V &gpls, int &itncnt, double rnf, int &iagflg,
00396       bool &noupdt, V &s, V &y, V &u, V &w);
00397 
00398   void secunf(V &x, V &g, M &a, V &udiag, V &xpls, V &gpls, int &itncnt, double rnf, int &iagflg,
00399       bool &noupdt, V &s, V &y, V &t);
00400 
00401   void tregup(V &x, double &f, V &g, M &a, V &sc, V &sx, bool &nwtake, double &dlt, int &iretcd,
00402       V &xplsp, double &fplsp, V &xpls, double &fpls, bool &mxtake, int method, V &udiag);
00403 
00404   double ddot(V &x, V &y);
00405 
00406   double dnrm2(V &x);
00407 
00408   double max(double a, double b);
00409 
00410   double min(double a, double b);
00411 
00412 };
00413 
00414 /*!
00415   \brief 
00416   Constructor that sets dimension and function to minimize.
00417   
00418   The minclass function object must define:
00419                     
00420   1. a method, f_to_minimize, to minimize. f_to_minimize must have the form 
00421          double f_to_minimize(V &x)
00422      where x is the vector of arguments to the function and the return value is the 
00423      value of the function evaluated at x.  
00424 
00425   2. a method, gradient, that has the form
00426          void gradient(V &x, V &g)
00427      where g is the gradient of f evaluated at x.  This method will have an empty body if 
00428      the user does not wish to provide an analytic estimate of the gradient.
00429      
00430   3. a method, hessian, that has the form
00431          void hessian(V &x, M &h)
00432      where h is the Hessian of f evaluated at x.  This method will have an empty body if the 
00433      user does not wish to provide an analytic estimate of the Hessian. If the user wants 
00434      Uncmin++ to check the Hessian, then the hessian method should only fill the lower 
00435      triangle (and diagonal) of h.
00436   
00437    Minimization parameters are set at default values.
00438   
00439   \section template_args Template Parameters
00440   
00441   \param M  SCPPNT Matrix type.
00442   \param V  SCPPNT Vector type.
00443   \param FUNC Type of function to minimize.
00444   
00445   \section function_args Function Parameters
00446    
00447   \param[in]  *f  Pointer to function object f.
00448  */
00449 template<class V, class M, class FUNC> Uncmin<V, M, FUNC>::Uncmin(FUNC *f) :
00450   minclass(f)
00451 {
00452   if (minclass)
00453     n = minclass->dim();
00454   else
00455     n = 0;
00456 
00457   mfile = 0;
00458 
00459   mLastGradCrit = 0.0;
00460   mLastStepCrit = 0.0;
00461   mLastIteration = 0;
00462   mLastMessage = 0;
00463 
00464   /* Set default parameter values */
00465   dfault();
00466 }
00467 
00468 /*!
00469   \brief 
00470   Constructor that sets dimension, but not function to minimize.
00471   
00472   Note: Function to minimize must be set by calling SetFunction. 
00473   Minimization parameters are set to default values.
00474   
00475   \section template_args Template Parameters
00476   
00477   \param M  SCPPNT Matrix type.
00478   \param V  SCPPNT Vector type.
00479   \param FUNC Type of function to minimize.
00480   
00481   \section function_args Function Parameters
00482    
00483   \param[in]  d  Dimension d (number of function arguments).
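  For example (a sketch only; minclass is the user-supplied function class and
  Vector/Matrix are the SCPPNT types, both described in the file documentation):

    typedef minclass<Vector<double>, Matrix<double> > MinFunc;
    Uncmin<Vector<double>, Matrix<double>, MinFunc> min(4); // dimension only
    MinFunc func;
    min.SetFunction(&func);  // must be called before Minimize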
00484  */
00485 template<class V, class M, class FUNC> Uncmin<V, M, FUNC>::Uncmin(int d) :
00486   minclass(0), typsiz(d)
00487 {
00488 
00489   n = d;
00490 
00491   mfile = 0;
00492 
00493   mLastGradCrit = 0.0;
00494   mLastStepCrit = 0.0;
00495   mLastIteration = 0;
00496   mLastMessage = 0;
00497 
00498   /* Set default parameter values */
00499   dfault();
00500 
00501 }
00502 
00503 /*!
00504   \brief Destructor of Uncmin.
00505  */
00506 template<class V, class M, class FUNC> Uncmin<V, M, FUNC>::~Uncmin()
00507 {
00508 
00509 }
00510 
00511 /*!
00512   \brief 
00513   Assigns flag indicating whether function is expensive to evaluate.
00514   
00515   Set iexp = 1 if the function is expensive to evaluate, iexp = 0 otherwise.  
00516   If iexp = 1, then the Hessian will be evaluated by secant (BFGS) update 
00517   rather than analytically or by finite differences.
00518 
00519   Default value is 0 if this function is not called.
00520   
00521   \section template_args Template Parameters
00522   
00523   \param M  SCPPNT Matrix type.
00524   \param V  SCPPNT Vector type.
00525   \param FUNC Type of function to minimize.
00526   
00527   \section function_args Function Parameters
00528    
00529   \param[in]  iexp  Set iexp =1 if function is expensive to evaluate, iexp = 0 otherwise.
00530  */
00531 template<class V, class M, class FUNC> void Uncmin<V, M, FUNC>::SetFuncExpensive(int iexp)
00532 {
00533   this->iexp = iexp;
00534 }
00535 
00536 /*!
00537   \brief
00538   Assigns typical magnitude of each argument to function.
00539 
00540   Returns zero if dimension matches, otherwise returns 1.
00541  
00542   The default is to set all elements of typsiz to 1 if this function is not called.
00543   
00544   \section template_args Template Parameters
00545   
00546   \param M  SCPPNT Matrix type.
00547   \param V  SCPPNT Vector type.
00548   \param FUNC Type of function to minimize.
00549   
00550   \section function_args Function Parameters
00551    
00552   \param[in]  typsiz  Vector describing the magnitude of all function arguments.
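  For example (a sketch; the magnitudes below are arbitrary and Vector is the
  SCPPNT vector type from the file-level example):

    double mags[4] = {1.0, 100.0, 0.01, 1.0}; // typical sizes of the 4 arguments
    Vector<double> typ(mags, mags + 4);
    if (min.SetScaleArg(typ))
       std::printf("typsiz dimension does not match problem dimension\n");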
00553  */
00554 template<class V, class M, class FUNC> int Uncmin<V, M, FUNC>::SetScaleArg(V typsiz)
00555 {
00556   if (typsiz.size() != n)
00557     return 1;
00558 
00559   this->typsiz = typsiz;
00560 
00561   return 0;
00562 }
00563 
00564 /*!
00565   \brief
00566   Assigns typical magnitude of function near minimum.
00567   
00568   \section template_args Template Parameters
00569   
00570   \param M  SCPPNT Matrix type.
00571   \param V  SCPPNT Vector type.
00572   \param FUNC Type of function to minimize.
00573   
00574   \section function_args Function Parameters
00575    
00576   \param[in]  fscale  Magnitude of function values near minimum.
00577                       A default value of 1 is used for fscale if this
00578                       function is not called.
00579  */
00580 template<class V, class M, class FUNC> void Uncmin<V, M, FUNC>::SetScaleFunc(double fscale)
00581 {
00582   this->fscale = fscale;
00583 }
00584 
00585 /*!
00586   \brief
00587   Assigns the number of reliable digits returned by f_to_minimize.
00588   
00589   If the argument to the function is -1 this
00590   means that f_to_minimize is expected to
00591   provide within one or two of the full number of
00592   significant digits available.
00593  
00594   The default value of -1 is used for ndigit when
00595   this function is not called.
00596 
00597   \section template_args Template Parameters
00598   
00599   \param M  SCPPNT Matrix type.
00600   \param V  SCPPNT Vector type.
00601   \param FUNC Type of function to minimize.
00602   
00603   \section function_args Function Parameters
00604    
00605   \param[in]  ndigit  Number of reliable (good) digits returned by f_to_minimize.
00606  */
00607 template<class V, class M, class FUNC> void Uncmin<V, M, FUNC>::SetNumDigits(double ndigit)
00608 {
00609   this->ndigit = ndigit;
00610 }
00611 
00612 /*!
00613   \brief
00614   Assigns the maximum number of iterations.
00615   
00616   The default value of 150 is used if this function is not called.
00617 
00618   \section template_args Template Parameters
00619   
00620   \param M  SCPPNT Matrix type.
00621   \param V  SCPPNT Vector type.
00622   \param FUNC Type of function to minimize.
00623   
00624   \section function_args Function Parameters
00625    
00626   \param[in]  maxiter  Maximum number of iterations.
00627  */
00628 template<class V, class M, class FUNC> void Uncmin<V, M, FUNC>::SetMaxIter(int maxiter)
00629 {
00630   this->itnlim = maxiter;
00631 }
00632 
00633 /*!
00634   \brief
00635   Assigns step tolerance, gradient tolerance, and the machine epsilon.
00636   
00637   Note: For any of these values <= zero, default values are used.
00638   
00639   \section template_args Template Parameters
00640   
00641   \param M  SCPPNT Matrix type.
00642   \param V  SCPPNT Vector type.
00643   \param FUNC Type of function to minimize.
00644   
00645   \section function_args Function Parameters
00646    
00647   \param[in]  step  Tolerance at which scaled distance between two successive iterates
00648     is considered close enough to zero to terminate algorithm. If the value of
00649     step is <= zero a default value is used.
00650   \param[in]  grad  Tolerance at which gradient is considered close enough
00651     to zero to terminate algorithm. If the value of
00652     grad is <= zero a default value is used.
00653   \param[in]  macheps Largest relative spacing. Machine epsilon == 2**(-T+1) where
00654     (1 + 2**-T) == 1. This argument has a default value of -1 if not included
00655     in function call.
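  For example (a sketch): passing a non-positive macheps (or omitting it) keeps
  the default machine epsilon; the defaults for the other two arguments are
  sqrt(epsm) for the step tolerance and epsm^(1/3) for the gradient tolerance.

    min.SetTolerances(1.0e-12, 1.0e-8); // step tolerance, gradient tolerance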
00656  */
00657 template<class V, class M, class FUNC> void Uncmin<V, M, FUNC>::SetTolerances(double step,
00658     double grad, double macheps)
00659 {
00660   if (macheps <= 0.0) // use default value
00661   {
00662 
00663 #ifdef BOOST_NO_LIMITS
00664     epsm = DBL_EPSILON; // from float.h
00665 #else
00666     // use epsilon from numeric_limits in standard C++ library
00667     epsm = std::numeric_limits<double>::epsilon();
00668 #endif
00669 
00670   }
00671   else
00672   {
00673     epsm = macheps;
00674   }
00675 
00676   if (grad <= 0.0)
00677   {
00678     gradtl = std::pow(epsm, 1.0/3.0); // default value
00679   }
00680   else
00681   {
00682     gradtl = grad;
00683   }
00684 
00685   if (step <= 0.0)
00686   {
00687     steptl = std::sqrt(epsm); // default value
00688   }
00689   else
00690   {
00691     steptl = step;
00692   }
00693 
00694 }
00695 
00696 /*!
00697   \brief
00698   Assigns maximum allowable scaled step length at any iteration.
00699   
00700   The maximum step length is used to prevent steps that would cause the 
00701   optimization algorithm to overflow or leave the domain of interest,
00702   as well as to detect divergence. It should be chosen small enough
00703   to prevent the first two of these occurrences, but larger
00704   than any anticipated reasonable step size. The algorithm will
00705   halt if it takes steps larger than the maximum allowable step
00706   length on 5 consecutive iterations.
00707  
00708   If the argument to the function is <= 0 then a default value of
00709   stepmx will be computed in optchk.
00710  
00711   A default value of 0 is used for stepmx when this function is
00712   not called.
00713 
00714   \section template_args Template Parameters
00715   
00716   \param M  SCPPNT Matrix type.
00717   \param V  SCPPNT Vector type.
00718   \param FUNC Type of function to minimize.
00719   
00720   \section function_args Function Parameters
00721    
00722   \param[in]  stepmx  Maximum (scaled) step size.
00723  */
00724 template<class V, class M, class FUNC> void Uncmin<V, M, FUNC>::SetMaxStep(double stepmx)
00725 {
00726   this->stepmx = stepmx;
00727 }
00728 
00729 /*!
00730   \brief
00731   Assigns initial trust region radius.
00732   
00733   Used when method = 2 or 3, ignored when method = 1. 
00734   The value should be what the user considers a reasonable 
00735   scaled step length for the first iteration, and should be 
00736   less than the maximum step length.
00737  
00738   If dlt = -1, then the length of the initial scaled gradient is used instead.
00739  
00740   A default value of -1 is used if this function is not called.
00741  
00742   \section template_args Template Parameters
00743   
00744   \param M  SCPPNT Matrix type.
00745   \param V  SCPPNT Vector type.
00746   \param FUNC Type of function to minimize.
00747   
00748   \section function_args Function Parameters
00749    
00750   \param[in]  dlt  Maximum (scaled) step size of first iteration.
00751  */
00752 template<class V, class M, class FUNC> void Uncmin<V, M, FUNC>::SetTrustRegionRadius(double dlt)
00753 {
00754   this->mdlt = dlt;
00755 }
00756 
00757 /*!
00758   \brief
00759   Returns pointer to object containing function to minimize, gradient, and hessian.
00760  
00761   \section template_args Template Parameters
00762   
00763   \param M  SCPPNT Matrix type.
00764   \param V  SCPPNT Vector type.
00765   \param FUNC Type of function to minimize.
00766  */
00767 template<class V, class M, class FUNC> FUNC * Uncmin<V, M, FUNC>::GetFunction()
00768 {
00769   return minclass;
00770 }
00771 
00772 
00773 /*!
00774   \brief
00775   Assigns pointer to object containing function to minimize, gradient, and hessian.
00776 
00777   \section template_args Template Parameters
00778   
00779   \param M  SCPPNT Matrix type.
00780   \param V  SCPPNT Vector type.
00781   \param FUNC Type of function to minimize.
00782   
00783   \section function_args Function Parameters
00784    
00785   \param[in]  *f  Pointer to function object f.
00786  */
00787 template<class V, class M, class FUNC> void Uncmin<V, M, FUNC>::SetFunction(FUNC *f)
00788 {
00789   minclass = f;
00790 
00791   /* If dimension does not match, change n and re-initialize typsiz */
00792   if (n != f->dim())
00793   {
00794     n = f->dim();
00795     V defaultTypSiz(n, 1.0);
00796     typsiz = defaultTypSiz;
00797   }
00798 }
00799 
00800 /*!
00801   \brief
00802   Assigns flags to check analytic gradient and hessian.
00803 
00804   The default, if this function is not called, is not to check the gradient and hessian
00805   (fCheckGradient = 0 and fCheckHessian = 0).
00806  
00807   Note that check_hessian can be left off the function call, in which case the default
00808   value of 0 is used.
00809   
00810   \section template_args Template Parameters
00811   
00812   \param M  SCPPNT Matrix type.
00813   \param V  SCPPNT Vector type.
00814   \param FUNC Type of function to minimize.
00815   
00816   \section function_args Function Parameters
00817    
00818   \param[in]  check_gradient  Flag to check analytic gradient of function: 1 = yes, 0 = no.
00819   \param[in]  check_hessian  Flag to check analytic hessian of function: 1 = yes, 0 = no.
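  For example (a sketch; the check is only meaningful if minclass supplies the
  corresponding analytic derivative, otherwise Minimize reports message -1 or -2):

    min.SetChecks(1, 0); // check the analytic gradient, but not the hessian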
00820  */
00821 template<class V, class M, class FUNC> void Uncmin<V, M, FUNC>::SetChecks(int check_gradient,
00822     int check_hessian)
00823 {
00824   fCheckGradient = check_gradient;
00825   fCheckHessian = check_hessian;
00826 }
00827 
00828 /*!
00829   \brief
00830   Assigns flags to indicate whether results are printed and specifies which file 
00831   receives the print output.
00832 
00833   Printing is done to the open file indicated by the first argument.
00834 
00835   The default action is not to print any output.
00836   
00837   \section template_args Template Parameters
00838   
00839   \param M  SCPPNT Matrix type.
00840   \param V  SCPPNT Vector type.
00841   \param FUNC Type of function to minimize.
00842   \param FILE Type of file for writing print output to.
00843   
00844   \section function_args Function Parameters
00845    
00846   \param[in]  *file  Pointer to the open file that will receive the printed results.
00847   \param[in]  print_results  Flag (if non-zero) to print standard results.
00848   \param[in]  print_iterations  Flag (if non-zero) to print intermediate results at each 
00849       iteration (this argument can be left off function call in which case the
00850       default value of 0 is used).
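  For example (a sketch; "uncmin.log" is an arbitrary file name, and std::fopen
  and std::fclose come from <cstdio>):

    std::FILE *out = std::fopen("uncmin.log", "w");
    if (out)
       min.SetPrint(out, 1, 0); // print final results, but not each iteration
    // ... call min.Minimize(...), then std::fclose(out) when done ...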
00851   */
00852 template<class V, class M, class FUNC> void Uncmin<V, M, FUNC>::SetPrint(std::FILE *file,
00853     int print_results, int print_iterations)
00854 {
00855 
00856   mfile = file;
00857 
00858   if (!file)
00859   {
00860     fPrintResults = 0;
00861     fPrintIterationResults = 0;
00862   }
00863   else
00864   {
00865     fPrintResults = print_results;
00866     fPrintIterationResults = print_iterations;
00867   }
00868 
00869 }
00870 
00871 /*!
00872   \brief
00873   Assigns method to use to solve minimization problem.
00874   
00875   A default value is 1 (line search) if SetMethod is not called.
00876 
00877   \section template_args Template Parameters
00878   
00879   \param M  SCPPNT Matrix type.
00880   \param V  SCPPNT Vector type.
00881   \param FUNC Type of function to minimize.
00882   
00883   \section function_args Function Parameters
00884    
00885   \param[in]  method  Method indicator: 1 = line search, 2 = double dogleg, 3 = More-Hebdon.
00886  */
00887 template<class V, class M, class FUNC> void Uncmin<V, M, FUNC>::SetMethod(int method)
00888 {
00889   if (method == 2 || method == 3)
00890     algorithm = method;
00891   else
00892     algorithm = 1; // use default value of 1 for any values of argument other than 2 or 3
00893 }
00894 
00895 /*!
00896   \brief
00897   Returns stopping criteria computed at the end of the last call to Minimize.
00898 
00899   \section template_args Template Parameters
00900   
00901   \param M  SCPPNT Matrix type.
00902   \param V  SCPPNT Vector type.
00903   \param FUNC Type of function to minimize.
00904   
00905   \section function_args Function Parameters
00906    
00907   \param[out]  &gradtol  Address of gradient tolerance, i.e., how close the gradient is to zero.
00908   \param[out]  &steptol  Address of maximum scaled difference between function parameters in 
00909       consecutive iterations.
00910   \param[out]  &iterations  Address of number of iterations used.
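  For example (a sketch, called after Minimize has returned):

    double gradcrit, stepcrit;
    int iterations;
    min.GetStoppingCriteria(gradcrit, stepcrit, iterations);
    std::printf("gradient %e  step %e  iterations %d\n", gradcrit, stepcrit, iterations);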
00911  */
00912 template<class V, class M, class FUNC> void Uncmin<V, M, FUNC>::GetStoppingCriteria(
00913     double &gradtol, double &steptol, int &iterations)
00914 {
00915   gradtol = mLastGradCrit;
00916   steptol = mLastStepCrit;
00917   iterations = mLastIteration;
00918 }
00919 
00920 /*!
00921   \brief
00922   Returns message index generated in the last call to Minimize.
00923  
00924   Possible values of message are:
00925  
00926     0 = Optimal solution found, terminated with gradient small.
00927     1 = Terminated with gradient small, xpls is probably optimal.
00928     2 = Terminated with stepsize small, xpls is probably optimal.
00929     3 = Lower point cannot be found, xpls is probably optimal.
00930     4 = Iteration limit (default 150) exceeded.
00931     5 = Too many large steps, function may be unbounded.
00932    -1 = Analytic gradient check requested, but no analytic gradient supplied
00933    -2 = Analytic hessian check requested, but no analytic hessian supplied
00934    -3 = Illegal dimension
00935    -4 = Illegal tolerance
00936    -5 = Illegal iteration limit
00937    -6 = Minimization function has no good digits
00938    -7 = Iteration limit exceeded in lnsrch
00939   -20 = Function not defined at starting value
00940   -21 = Check of analytic gradient failed
00941   -22 = Check of analytic hessian failed
00942 
00943   \section template_args Template Parameters
00944   
00945   \param M  SCPPNT Matrix type.
00946   \param V  SCPPNT Vector type.
00947   \param FUNC Type of function to minimize.
00948  */
00949 template<class V, class M, class FUNC> inline int Uncmin<V, M, FUNC>::GetMessage()
00950 {
00951   return mLastMessage;
00952 }
00953 
00954 /*!
00955   \brief
00956   Method to compute values that minimize the function.
00957 
00958   Translated by Brad Hanson from optdrv_f77 by Steve Verrill.
00959   
00960   Returns 0 if mLastMessage is 0, 1, 2, or 3 (indicating optimal solution probably found), 
00961   otherwise returns non-zero.
00962   
00963   On exit data member mLastMessage is set to indicate status of solution.
00964 
00965   Possible values of mLastMessage are:
00966     0 = Optimal solution found, terminated with gradient small.
00967     1 = Terminated with gradient small, xpls is probably optimal.
00968     2 = Terminated with stepsize small, xpls is probably optimal.
00969     3 = Lower point cannot be found, xpls is probably optimal.
00970     4 = Iteration limit (default 150) exceeded.
00971     5 = Too many large steps, function may be unbounded.
00972    -1 = Analytic gradient check requested, but no analytic gradient supplied.
00973    -2 = Analytic hessian check requested, but no analytic hessian supplied.
00974    -3 = Illegal dimension.
00975    -4 = Illegal tolerance.
00976    -5 = Illegal iteration limit.
00977    -6 = Minimization function has no good digits. 
00978    -7 = Iteration limit exceeded in lnsrch.
00979   -20 = Function not defined at starting value.
00980   -21 = Check of analytic gradient failed.
00981   -22 = Check of analytic hessian failed.
00982 
00983   \section template_args Template Parameters
00984   
00985   \param M  SCPPNT Matrix type.
00986   \param V  SCPPNT Vector type.
00987   \param FUNC Type of function to minimize.
00988   
00989   \section function_args Function Parameters
00990    
00991   \param[in]   &start  Address of vector containing initial estimate of location of minimum.
00992   \param[out]  &xpls  Address of vector containing local minimum (size must be n on input).
00993   \param[out]  &fpls  Address of function value at local minimum.
00994   \param[out]  &gpls  Address of vector containing gradient at local minimum (size must be 
00995       n on input). 
00996   \param[out]  &hpls  Address of matrix containing hessian at local minimum (size must be 
00997       n X n on input). Only the lower half should be input; the upper off-diagonal entries 
00998       can be set to zero. On return, the lower half of hpls contains the Cholesky factor 
00999       of the Hessian, evaluated at the solution (i.e., H = LL').
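  For example (a sketch continuing the file-level example, which calls the
  four-argument overload instead):

    Matrix<double> hpls(dim, dim); // only the lower half is referenced on input
    if (min.Minimize(start, xpls, fpls, gpls, hpls))
       std::printf("Uncmin message: %d\n", min.GetMessage());
    // On success the lower half of hpls holds the Cholesky factor L (H = LL').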
01000  */
01001 template<class V, class M, class FUNC> int Uncmin<V, M, FUNC>::Minimize(V &start, V &xpls,
01002     double &fpls, V &gpls, M &hpls)
01003 {
01004 
01005   bool noupdt;
01006   bool mxtake;
01007 
01008   int i, j, num5, remain, ilow, ihigh;
01009   int icscmx;
01010   int iretcd;
01011   int itncnt;
01012 
01013   /*
01014     Return code.
01015       itrmcd =  0:    Optimal solution found
01016       itrmcd =  1:    Terminated with gradient small, X is probably optimal
01017       itrmcd =  2:    Terminated with stepsize small, X is probably optimal
01018       itrmcd =  3:    Lower point cannot be found, X is probably optimal
01019       itrmcd =  4:    Iteration limit (150) exceeded
01020       itrmcd =  5:    Too many large steps, function may be unbounded.
01021    */
01022   int itrmcd;
01023 
01024   double rnf, analtl, dltsav, amusav, dlpsav, phisav, phpsav;
01025 
01026   double f;
01027 
01028   double amu = 0.0;
01029   double dltp = 0.0;
01030   double phi = 0.0;
01031   double phip0 = 0.0;
01032 
01033   int method = algorithm;
01034 
01035   mLastGradCrit = 0.0;
01036   mLastStepCrit = 0.0;
01037   mLastIteration = 0;
01038   mLastMessage = -9999;
01039 
01040   iagflg = minclass->HasAnalyticGradient();
01041   iahflg = minclass->HasAnalyticHessian();
01042 
01043   /*
01044     msg
01045     Flag to set various options.
01046     Bits which indicate flags which control checks of gradient and hessian and what output is printed.
01047     msg should be zero or a sum of the following values which indicate which flags to set or not set.
01048 
01049       1 = Turn off check of analytic gradient.
01050       2 = Turn off check of analytic hessian.
01051       4 = Turn off printing of results.
01052       8 = Turn on printing of results at each iteration.
01053    */
01054   int msg = (fCheckGradient == 0) * 1 + (fCheckHessian == 0) * 2 + (fPrintResults == 0) * 4
01055       + (fPrintIterationResults != 0) * 8;
01056 
01057   /* Check that dimensions match */
01058   if (start.size() != n || xpls.size() != n || gpls.size() != n || hpls.num_rows() != n
01059       || hpls.num_columns() != n) // Refactored function "num_columns", ww, 12-22-2007
01060   {
01061     mLastMessage = -3;
01062     return -1;
01063   }
01064 
01065   /* Check that starting values are valid */
01066   if (!(minclass->ValidParameters(start)))
01067   {
01068     mLastMessage = -20;
01069     return -1;
01070   }
01071 
01072   /* Workspace */
01073   M &a = hpls;
01074   V udiag(n); //workspace (for diagonal of Hessian)
01075   V g(n); // workspace (for gradient at current iterate)
01076   V p(n); // workspace for step
01077   V sx(n); // workspace (for scaling vector)
01078   V wrk0(n), wrk1(n), wrk2(n), wrk3(n);
01079 
01080   V x(start);
01081 
01082   double dlt = mdlt;
01083 
01084   dltsav = amusav = dlpsav = phisav = phpsav = 0.0;
01085 
01086   // INITIALIZATION
01087 
01088   for (i = 1; i <= n; i++)
01089   {
01090     p(i) = 0.0;
01091   }
01092 
01093   itncnt = 0;
01094   iretcd = -1;
01095 
01096   /* Check for valid options */
01097   if (optchk(x, sx, msg, method))
01098   {
01099     mLastMessage = msg;
01100     return -1;
01101   }
01102 
01103   rnf = max(std::pow(10.0, -ndigit), epsm);
01104   analtl = max(.01, std::sqrt(rnf));
01105 
01106   if (!(msg & 4))
01107   {
01108     num5 = n/5;
01109     remain = n%5;
01110 
01111     std::fprintf(mfile, "\n\nOPTDRV          Typical x\n\n");
01112 
01113     ilow = -4;
01114     ihigh = 0;
01115 
01116     for (i = 1; i <= num5; i++)
01117     {
01118       ilow += 5;
01119       ihigh += 5;
01120 
01121       std::fprintf(mfile, "%d--%d     ", ilow, ihigh);
01122 
01123       for (j = 1; j <= 5; j++)
01124       {
01125         std::fprintf(mfile, "%lf  ", (double) typsiz(ilow+j-1));
01126       }
01127       std::fprintf(mfile, "\n");
01128     }
01129 
01130     ilow += 5;
01131     ihigh = ilow + remain - 1;
01132 
01133     std::fprintf(mfile, "%d--%d     ", ilow, ihigh);
01134 
01135     for (j = 1; j <= remain; j++)
01136     {
01137       std::fprintf(mfile, "%lf  ", (double) typsiz(ilow+j-1));
01138     }
01139 
01140     std::fprintf(mfile, "\n");
01141 
01142     std::fprintf(mfile, "\n\nOPTDRV      Scaling vector for x\n\n");
01143 
01144     ilow = -4;
01145     ihigh = 0;
01146 
01147     for (i = 1; i <= num5; i++)
01148     {
01149       ilow += 5;
01150       ihigh += 5;
01151 
01152       std::fprintf(mfile, "%d--%d     ", ilow, ihigh);
01153 
01154       for (j = 1; j <= 5; j++)
01155       {
01156         std::fprintf(mfile, "%lf  ", (double) sx(ilow+j-1));
01157       }
01158       std::fprintf(mfile, "\n");
01159     }
01160 
01161     ilow += 5;
01162     ihigh = ilow + remain - 1;
01163 
01164     std::fprintf(mfile, "%d--%d     ", ilow, ihigh);
01165 
01166     for (j = 1; j <= remain; j++)
01167     {
01168       std::fprintf(mfile, "%lf  ", (double) sx(ilow+j-1));
01169     }
01170 
01171     std::fprintf(mfile, "\n");
01172 
01173     std::fprintf(mfile, "\n\nOPTDRV      Typical f = %f\n", fscale);
01174     std::fprintf(mfile, "OPTDRV      Number of good digits in");
01175     std::fprintf(mfile, " f_to_minimize = %d\n", ndigit);
01176     std::fprintf(mfile, "OPTDRV      Gradient flag");
01177     std::fprintf(mfile, " = %d\n", iagflg);
01178     std::fprintf(mfile, "OPTDRV      Hessian flag");
01179     std::fprintf(mfile, " = %d\n", iahflg);
01180     std::fprintf(mfile, "OPTDRV      Expensive function calculation flag");
01181     std::fprintf(mfile, " = %d\n", iexp);
01182     std::fprintf(mfile, "OPTDRV      Method to use");
01183     std::fprintf(mfile, " = %d\n", method);
01184     std::fprintf(mfile, "OPTDRV      Iteration limit");
01185     std::fprintf(mfile, " = %d\n", itnlim);
01186     std::fprintf(mfile, "OPTDRV      Machine epsilon");
01187     std::fprintf(mfile, " = %e\n", epsm);
01188     std::fprintf(mfile, "OPTDRV      Maximum step size");
01189     std::fprintf(mfile, " = %f\n", stepmx);
01190     std::fprintf(mfile, "OPTDRV      Step tolerance");
01191     std::fprintf(mfile, " = %e\n", steptl);
01192     std::fprintf(mfile, "OPTDRV      Gradient tolerance");
01193     std::fprintf(mfile, " = %e\n", gradtl);
01194     std::fprintf(mfile, "OPTDRV      Trust region radius");
01195     std::fprintf(mfile, " = %f\n", dlt);
01196     std::fprintf(mfile, "OPTDRV      Relative noise in");
01197     std::fprintf(mfile, " f_to_minimize = %e\n", rnf);
01198     std::fprintf(mfile, "OPTDRV      Analytical fd tolerance");
01199     std::fprintf(mfile, " = %e\n", analtl);
01200   }
01201 
01202   // EVALUATE FCN(X)
01203   f = minclass->f_to_minimize(x);
01204 
01205   // EVALUATE ANALYTIC OR FINITE DIFFERENCE GRADIENT AND CHECK ANALYTIC
01206   // GRADIENT, IF REQUESTED.
01207   if (iagflg == 0)
01208   {
01209     fstofd(x, f, g, sx, rnf);
01210   }
01211   else
01212   {
01213     minclass->gradient(x, g);
01214 
01215     if (!(msg & 1))
01216     {
01217       if (grdchk(x, f, g, typsiz, sx, fscale, rnf, analtl, wrk1, msg))
01218       {
01219         mLastMessage = msg;
01220         return -1; // msg = -21
01221       }
01222     }
01223   }
01224 
01225   optstp(x, f, g, wrk1, itncnt, icscmx, itrmcd, sx, fscale, itnlim, iretcd, mxtake, msg);
01226 
01227   if (itrmcd != 0)
01228   {
01229     fpls = f;
01230 
01231     for (i = 1; i <= n; i++)
01232     {
01233       xpls(i) = x(i);
01234       gpls(i) = g(i);
01235     }
01236   }
01237   else
01238   {
01239     if (iexp == 1)
01240     {
01241       // IF OPTIMIZATION FUNCTION EXPENSIVE TO EVALUATE (IEXP=1), THEN
01242       // HESSIAN WILL BE OBTAINED BY SECANT (BFGS) UPDATES.  GET INITIAL HESSIAN.
01243 
01244       hsnint(a, sx, method);
01245     }
01246     else
01247     {
01248       // EVALUATE ANALYTIC OR FINITE DIFFERENCE HESSIAN AND CHECK ANALYTIC
01249       // HESSIAN IF REQUESTED (ONLY IF USER-SUPPLIED ANALYTIC HESSIAN
01250       // ROUTINE minclass->hessian FILLS ONLY LOWER TRIANGULAR PART AND DIAGONAL OF A).
01251 
01252       if (iahflg == 0)
01253       {
01254         if (iagflg == 1)
01255         {
01256           fstofd(x, g, a, sx, rnf, wrk1);
01257         }
01258         else
01259         {
01260           sndofd(x, f, a, sx, rnf, wrk1, wrk2);
01261         }
01262       }
01263       else
01264       {
01265         if (msg & 2)
01266         {
01267           minclass->hessian(x, a);
01268         }
01269         else
01270         {
01271           if (heschk(x, f, g, a, typsiz, sx, rnf, analtl, iagflg, udiag, wrk1, wrk2, msg))
01272           {
01273             mLastMessage = msg;
01274             return -1; // msg = -22
01275           }
01276           // HESCHK EVALUATES minclass->hessian AND CHECKS IT AGAINST THE FINITE
01277           // DIFFERENCE HESSIAN WHICH IT CALCULATES BY CALLING FSTOFD
01278           // (IF IAGFLG .EQ. 1) OR SNDOFD (OTHERWISE).
01279         }
01280       }
01281     }
01282 
01283     if (!(msg & 4))
01284       result(x, f, g, a, p, itncnt, 1);
01285 
01286     // ITERATION
01287     while (itrmcd == 0)
01288     {
01289       itncnt++;
01290       mLastIteration = itncnt; // BAH
01291 
01292       // FIND PERTURBED LOCAL MODEL HESSIAN AND ITS LL+ DECOMPOSITION
01293       // (SKIP THIS STEP IF LINE SEARCH OR DOGSTEP TECHNIQUES BEING USED WITH
01294       // SECANT UPDATES.  CHOLESKY DECOMPOSITION L ALREADY OBTAINED FROM
01295       // SECFAC.)
01296       if (iexp != 1 || method == 3)
01297       {
01298         chlhsn(a, sx, udiag);
01299       }
01300 
01301       // SOLVE FOR NEWTON STEP:  AP=-G
01302       for (i = 1; i <= n; i++)
01303       {
01304         wrk1(i) = -g(i);
01305       }
01306 
01307       lltslv(a, p, wrk1);
01308 
01309       // DECIDE WHETHER TO ACCEPT NEWTON STEP  XPLS=X + P
01310       // OR TO CHOOSE XPLS BY A GLOBAL STRATEGY.
01311       if (iagflg == 0 && method != 1)
01312       {
01313         dltsav = dlt;
01314 
01315         if (method != 2)
01316         {
01317           amusav = amu;
01318           dlpsav = dltp;
01319           phisav = phi;
01320           phpsav = phip0;
01321         }
01322       }
01323 
01324       if (method == 1)
01325       {
01326         lnsrch(x, f, g, p, xpls, fpls, mxtake, iretcd, sx);
01327         if (iretcd == -7) // BAH
01328         {
01329           mLastMessage = iretcd;
01330           return -1;
01331         }
01332       }
01333       else if (method == 2)
01334       {
01335         dogdrv(x, f, g, a, p, xpls, fpls, sx, dlt, iretcd, mxtake, wrk0, wrk1, wrk2, wrk3);
01336       }
01337       else
01338       {
01339         hookdr(x, f, g, a, udiag, p, xpls, fpls, sx, dlt, iretcd, mxtake, amu, dltp, phi, phip0,
01340             wrk0, wrk1, wrk2, itncnt);
01341       }
01342 
01343       // IF COULD NOT FIND SATISFACTORY STEP AND FORWARD DIFFERENCE
01344       // GRADIENT WAS USED, RETRY USING CENTRAL DIFFERENCE GRADIENT.
01345       if (iretcd == 1 && iagflg == 0)
01346       {
01347         // SET IAGFLG FOR CENTRAL DIFFERENCES
01348         iagflg = -1;
01349 
01350         if (!(msg & 4))
01351         {
01352           std::fprintf(mfile, "\nOPTDRV      Shift from forward to central");
01353           std::fprintf(mfile, " differences");
01354           std::fprintf(mfile, "\nOPTDRV      in iteration %d", itncnt);
01355           std::fprintf(mfile, "\n");
01356         }
01357 
01358         fstocd(x, sx, rnf, g);
01359 
01360         if (method == 1)
01361         {
01362           // SOLVE FOR NEWTON STEP:  AP=-G
01363           for (i = 1; i <= n; i++)
01364           {
01365             wrk1(i) = -g(i);
01366           }
01367 
01368           lltslv(a, p, wrk1);
01369 
01370           lnsrch(x, f, g, p, xpls, fpls, mxtake, iretcd, sx);
01371           if (iretcd == -7) // BAH
01372           {
01373             mLastMessage = iretcd;
01374             return -1;
01375           }
01376         }
01377         else
01378         {
01379           dlt = dltsav;
01380           if (method == 2)
01381           {
01382             // SOLVE FOR NEWTON STEP:  AP=-G
01383             for (i = 1; i <= n; i++)
01384             {
01385               wrk1(i) = -g(i);
01386             }
01387 
01388             lltslv(a, p, wrk1);
01389 
01390             dogdrv(x, f, g, a, p, xpls, fpls, sx, dlt, iretcd, mxtake, wrk0, wrk1, wrk2, wrk3);
01391           }
01392           else
01393           {
01394             amu = amusav;
01395             dltp = dlpsav;
01396             phi = phisav;
01397             phip0 = phpsav;
01398 
01399             chlhsn(a, sx, udiag);
01400 
01401             // SOLVE FOR NEWTON STEP:  AP=-G
01402             for (i = 1; i <= n; i++)
01403             {
01404               wrk1(i) = -g(i);
01405             }
01406 
01407             lltslv(a, p, wrk1);
01408 
01409             hookdr(x, f, g, a, udiag, p, xpls, fpls, sx, dlt, iretcd, mxtake, amu, dltp, phi,
01410                 phip0, wrk0, wrk1, wrk2, itncnt);
01411           }
01412         }
01413       }
01414       // CALCULATE STEP FOR OUTPUT
01415       for (i = 1; i <= n; i++)
01416       {
01417         p(i) = xpls(i) - x(i);
01418       }
01419 
01420       // CALCULATE GRADIENT AT XPLS
01421       if (iagflg == -1)
01422       {
01423         // CENTRAL DIFFERENCE GRADIENT
01424         fstocd(xpls, sx, rnf, gpls);
01425       }
01426       else if (iagflg == 0)
01427       {
01428         // FORWARD DIFFERENCE GRADIENT
01429         fstofd(xpls, fpls, gpls, sx, rnf);
01430       }
01431       else
01432       {
01433         // ANALYTIC GRADIENT
01434         minclass->gradient(xpls, gpls);
01435       }
01436 
01437       // CHECK WHETHER STOPPING CRITERIA SATISFIED
01438       optstp(xpls, fpls, gpls, x, itncnt, icscmx, itrmcd, sx, fscale, itnlim, iretcd, mxtake, msg);
01439 
01440       if (itrmcd == 0)
01441       {
01442         // EVALUATE HESSIAN AT XPLS
01443         if (iexp != 0)
01444         {
01445           if (method == 3)
01446           {
01447             secunf(x, g, a, udiag, xpls, gpls, itncnt, rnf, iagflg, noupdt, wrk1, wrk2, wrk3);
01448           }
01449           else
01450           {
01451             secfac(x, g, a, xpls, gpls, itncnt, rnf, iagflg, noupdt, wrk0, wrk1, wrk2, wrk3);
01452           }
01453         }
01454         else
01455         {
01456           if (iahflg == 1)
01457           {
01458             minclass->hessian(xpls, a);
01459           }
01460           else
01461           {
01462             if (iagflg == 1)
01463             {
01464               fstofd(xpls, gpls, a, sx, rnf, wrk1);
01465             }
01466             else
01467             {
01468               sndofd(xpls, fpls, a, sx, rnf, wrk1, wrk2);
01469             }
01470           }
01471         }
01472 
01473         if (msg & 8)
01474         {
01475           result(xpls, fpls, gpls, a, p, itncnt, 1);
01476         }
01477 
01478         // X <-- XPLS  AND  G <-- GPLS  AND  F <-- FPLS
01479         f = fpls;
01480 
01481         for (i = 1; i <= n; i++)
01482         {
01483           x(i) = xpls(i);
01484           g(i) = gpls(i);
01485         }
01486       }
01487     }
01488 
01489     // TERMINATION
01490     // -----------
01491     // RESET XPLS,FPLS,GPLS,  IF PREVIOUS ITERATE SOLUTION
01492     //
01493     if (itrmcd == 3)
01494     {
01495       fpls = f;
01496 
01497       for (i = 1; i <= n; i++)
01498       {
01499         xpls(i) = x(i);
01500         gpls(i) = g(i);
01501       }
01502     }
01503   }
01504 
01505   // PRINT RESULTS
01506   if (!(msg & 4))
01507   {
01508     result(xpls, fpls, gpls, a, p, itncnt, 0);
01509   }
01510   mLastMessage = itrmcd;
01511   return (itrmcd >= 0 && itrmcd <= 3) ? 0 : 1;
01512 }
01513 
01514 /*!
01515   \brief
01516   Version of Minimize in which the hessian is allocated locally rather than passed
01517   as an argument to the function.
01518 
01519   \section template_args Template Parameters
01520   
01521   \param M  SCPPNT Matrix type.
01522   \param V  SCPPNT Vector type.
01523   \param FUNC Type of function to minimize.
01524   
01525   \section function_args Function Parameters
01526    
01527   \param[in]  &start  Address of vector containing initial estimate of location of minimum.
01528   \param[out]  &xpls  Address of vector containing local minimum (size must be n on input).
01529   \param[out]  &fpls  Address of function value at local minimum.
01530   \param[out]  &gpls  Address of vector containing gradient at local minimum (size must be 
01531       n on input). 
01532  */
01533 template<class V, class M, class FUNC> int Uncmin<V, M, FUNC>::Minimize(V &start, V &xpls,
01534     double &fpls, V &gpls)
01535 {
01536 
01537   M *a = new M(n,n);
01538 
01539   int result = Minimize(start, xpls, fpls, gpls, *a);
01540 
01541   delete a;
01542 
01543   return result;
01544 }
01545 
01546 /*!
01547   \brief 
01548   Method to set default values for each input variable to the minimization algorithm.
01549   
01550   Translated by Steve Verrill, August 4, 1998.
01551 
01552   \section template_args Template Parameters
01553   
01554   \param M  SCPPNT Matrix type.
01555   \param V  SCPPNT Vector type.
01556   \param FUNC Type of function to minimize.
01557  */
01558 template<class V, class M, class FUNC> void Uncmin<V, M, FUNC>::dfault()
01559 {
01560   // Set default algorithm used for minimization (line search)
01561   algorithm = 1;
01562 
01563   // SET TYPICAL SIZE OF X AND MINIMIZATION FUNCTION
01564   typsiz.newsize(n);
01565   typsiz = 1.0;
01566 
01567   fscale = 1.0;
01568 
01569   // SET TOLERANCES
01570   mdlt = -1.0;
01571 
01572   SetTolerances(-1.0, -1.0, -1.0);
01573 
01574   stepmx = 0.0;
01575 
01576   // SET FLAGS and options
01577   iexp = 0;
01578   ndigit = -1;
01579   itnlim = 150;
01580   iagflg = 0;
01581   iahflg = 0;
01582 
01583   fCheckGradient = 0;
01584   fCheckHessian = 0;
01585   fPrintResults = 0;
01586   fPrintIterationResults = 0;
01587 }
01588 
01589 /*!
01590   \brief 
01591   Method to check the input for reasonableness.
01592 
01593   Checks for reasonableness of options requested.
01594   Returns 0 if no serious errors found, otherwise returns 1.
01595 
01596   Translated by Steve Verrill, May 12, 1998.
01597   
01598   \section template_args Template Parameters
01599   
01600   \param M  SCPPNT Matrix type.
01601   \param V  SCPPNT Vector type.
01602   \param FUNC Type of function to minimize.
01603   
01604   \section function_args Function Parameters
01605    
01606   \param[in]      &x      Address of vector containing initial estimate of location of minimum.
01607   \param[out]     &sx     Address of vector containing scaling factors.
00608   \param[in,out]  &msg    Address of option/message code: On input, msg carries the option bit flags (see Minimize); on exit, if the function return value is 1, msg contains the (negative) error code.
00609   \param[in,out]  &method Address of the minimization method indicator (1 = line search, 2 = double dogleg, 3 = More-Hebdon).
00610       Values outside this range are reset to 1 on exit.
01611  */
01612 template<class V, class M, class FUNC> int Uncmin<V, M, FUNC>::optchk(V &x, V &sx, int &msg,
01613     int &method)
01614 {
01615   int i;
01616   double stpsiz;
01617 
01618   // CHECK THAT PARAMETERS ONLY TAKE ON ACCEPTABLE VALUES.
01619   // IF NOT, SET THEM TO DEFAULT VALUES.
01620   bool printMessages = !(msg & 4);
01621 
01622   if (method < 1 || method > 3) method = 1;
01623   if (iagflg != 1 && iagflg != 0) iagflg = 1;
01624   if (iahflg != 1 && iahflg != 0) iahflg = 1;
01625   if (iexp != 0 && iexp != 1) iexp = 1;
01626 
01627   if (!(msg & 1) && iagflg == 0)
01628   {
01629     if (printMessages)
01630     {
01631       std::fprintf(mfile, "\n\nOPTCHK   User requests that analytic gradient");
01632       std::fprintf(mfile, " be accepted as properly coded,\n");
01633       std::fprintf(mfile, "OPTCHK   but an analytic gradient is not");
01634       std::fprintf(mfile, " supplied,\n");
01635       std::fprintf(mfile, "OPTCHK   msg = %d,\n", msg);
01636       std::fprintf(mfile, "OPTCHK   iagflg = %d.\n\n", iagflg);
01637     }
01638     msg = -1;
01639     return 1;
01640   }
01641 
01642   if (!(msg & 2) && iahflg == 0)
01643   {
01644     if (printMessages)
01645     {
01646       std::fprintf(mfile, "\n\nOPTCHK   User requests that analytic Hessian");
01647       std::fprintf(mfile, " be accepted as properly coded,\n");
01648       std::fprintf(mfile, "OPTCHK   but an analytic Hessian is not");
01649       std::fprintf(mfile, " supplied,\n");
01650       std::fprintf(mfile, "OPTCHK   msg = %d,\n", msg);
01651       std::fprintf(mfile, "OPTCHK   iahflg = %d.\n\n", iahflg);
01652     }
01653     msg = -2;
01654     return 1;
01655   }
01656 
01657   // CHECK DIMENSION OF PROBLEM
01658 
01659   if (n <= 0)
01660   {
01661     if (printMessages) std::fprintf(mfile, "\n\nOPTCHK   Illegal dimension, n = %d\n\n", n);
01662 
01663     msg = -3;
01664     return 1;
01665   }
01666 
01667   if (n == 1 && printMessages)
01668   {
01669     std::fprintf(mfile, "\n\nOPTCHK   !!!WARNING!!!  This class is ");
01670     std::fprintf(mfile, "inefficient for problems of size 1.\n");
01671     std::fprintf(mfile, "OPTCHK   You might want to use a more appropriate routine.\n\n");
01672   }
01673 
01674   // COMPUTE SCALE MATRIX
01675   for (i = 1; i <= n; i++)
01676   {
01677     if (typsiz(i) == 0) typsiz(i) = 1.0;
01678     if (typsiz(i) < 0.0) typsiz(i) = -typsiz(i);
01679     sx(i) = 1.0/typsiz(i);
01680   }
01681 
01682   // CHECK MAXIMUM STEP SIZE
01683   if (stepmx <= 0.0)
01684   {
01685     stpsiz = 0.0;
01686 
01687     for (i = 1; i <= n; i++)
01688     {
01689       stpsiz += x(i)*x(i)*sx(i)*sx(i);
01690     }
01691 
01692     stpsiz = std::sqrt(stpsiz);
01693 
01694     stepmx = max(1000.0*stpsiz,1000.0);
01695   }
01696 
01697   // CHECK FUNCTION SCALE
01698   if (fscale == 0) fscale = 1.0;
01699   if (fscale < 0.0) fscale = -fscale;
01700 
01701   // CHECK GRADIENT TOLERANCE
01702   if (gradtl < 0.0)
01703   {
01704     if (printMessages) std::fprintf(mfile, "\n\nOPTCHK   Illegal tolerance, gradtl = %lf\n\n", gradtl);
01705 
01706     msg = -4;
01707     return 1;
01708   }
01709 
01710   // CHECK ITERATION LIMIT
01711   if (itnlim < 0)
01712   {
01713     if (printMessages)
01714     {
01715       std::fprintf(mfile, "\n\nOPTCHK   Illegal iteration limit,");
01716       std::fprintf(mfile, " itnlim = %d\n\n", itnlim);
01717     }
01718     msg = -5;
01719     return 1;
01720   }
01721 
01722   // CHECK NUMBER OF DIGITS OF ACCURACY IN FUNCTION FCN
01723   if (ndigit == 0)
01724   {
01725     if (printMessages)
01726     {
01727       std::fprintf(mfile, "\n\nOPTCHK   Minimization function has no good");
01728       std::fprintf(mfile, " digits, ndigit = %d\n\n", ndigit);
01729     }
01730 
01731     msg = -6;
01732     return 1;
01733   }
01734 
01735   if (ndigit < 0) ndigit = (int)(-std::log(epsm)/std::log(10.0));
01736 
01737   // CHECK TRUST REGION RADIUS
01738   if (mdlt <= 0.0) mdlt = -1.0;
01739   if (mdlt > stepmx) mdlt = stepmx;
01740 
01741   return 0;
01742 }
01743 
01744 /*!
01745   \brief
01746   Calculate the first order finite difference approximations for gradients.
01747   
01748   Translated by Steve Verrill, April 22, 1998.
01749 
01750   \section template_args Template Parameters
01751   
01752   \param M  SCPPNT Matrix type.
01753   \param V  SCPPNT Vector type.
01754   \param FUNC Type of function to minimize.
01755   
01756   \section function_args Function Parameters
01757    
01758   \param[in]      &xpls   Address of new iterate (X[k]).
01759   \param[in]      fpls    Value of the function to minimize at the new iterate.
01760   \param[out]     &g      Address of vector containing the finite difference approximation to the gradient.
01761   \param[in]      &sx     Address of scaling vector for x.
01762   \param[in]      rnoise  Relative noise in the function to be minimized.
01763   */                                                  
01764 template<class V, class M, class FUNC> void Uncmin<V, M, FUNC>::fstofd(V &xpls, double fpls, V &g,
01765     V &sx, double rnoise)                            
01766 {                                                    
01767   double xmult, stepsz, xtmpj, fhat;                 
01768   int j;                                             
01769                                                      
01770   xmult = std::sqrt(rnoise);
01771 
01772   // gradient
01773 
01774   for (j = 1; j <= n; j++)
01775   {
01776     stepsz = xmult*max(std::fabs(xpls(j)), 1.0/sx(j));
01777     xtmpj = xpls(j);
01778     xpls(j) = xtmpj + stepsz;
01779 
01780     fhat = minclass->f_to_minimize(xpls);
01781 
01782     xpls(j) = xtmpj;
01783 
01784     g(j) = (fhat - fpls)/stepsz;
01785   }
01786 }
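  Each component of the gradient above is estimated from a single forward difference. As a
  plain restatement of the loop (not additional code), with h_j the stepsize it computes,

      g_j \approx \frac{f(x + h_j e_j) - f(x)}{h_j},
      \qquad h_j = \sqrt{\mathrm{rnoise}}\,\max\bigl(|x_j|,\,1/sx_j\bigr).

  The square-root stepsize is the usual compromise for forward differences: truncation error
  grows with h_j while the contribution of function noise shrinks with it.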
01787 
01788 /*!
01789   \brief
01790   Implementation of the fstofd method which finds a finite difference approximation to the Hessian.
01791   
01792   Translated by Steve Verrill, April 22, 1998.
01793 
01794   \section template_args Template Parameters
01795   
01796   \param M  SCPPNT Matrix type.
01797   \param V  SCPPNT Vector type.
01798   \param FUNC Type of function to minimize.
01799   
01800   \section function_args Function Parameters
01801    
01802   \param[in,out]  &xpls   Address of new iterate.
01803   \param[in]      &fpls   Address of the gradient of the function to minimize, evaluated at xpls.
01804   \param[in,out]  &a      Address of Matrix containing finite difference 
01805       approximation to the hessian. Only the elements in the diagonal and 
01806       lower-half are returned.
01807   \param[in]      &sx     Address of scaling vector for x.
01808   \param[in]      rnoise  Relative noise in the function to be minimized.
01809   \param[in]      &fhat   Address of workspace vector.
01810  */                                                  
01811 template<class V, class M, class FUNC> void Uncmin<V, M, FUNC>::fstofd(V &xpls, V &fpls, M &a,
01812     V &sx, double rnoise, V &fhat)
01813 {
01814   double xmult, stepsz, xtmpj;
01815   int i, j, nm1;
01816 
01817   xmult = std::sqrt(rnoise);
01818 
01819   for (j = 1; j <= n; j++)
01820   {
01821     stepsz = xmult * max(std::fabs(xpls(j)), 1.0/sx(j));
01822     xtmpj = xpls(j);
01823     xpls(j) = xtmpj + stepsz;
01824 
01825     minclass->gradient(xpls, fhat);
01826 
01827     xpls(j) = xtmpj;
01828 
01829     for (i = 1; i <= n; i++)
01830     {
01831       a(i, j) = (fhat(i) - fpls(i))/stepsz;
01832     }
01833   }
01834 
01835   nm1 = n - 1;
01836 
01837   for (j = 1; j <= nm1; j++)
01838   {
01839     for (i = j+1; i <= n; i++)
01840     {
01841       a(i, j) = (a(i,j) + a(j,i))/2.0;
01842     }
01843   }
01844   return;
01845 }
01846 
01847 /*!
01848   \brief 
01849   Method to check the analytic gradient supplied by the user.
01850   
01851   This function compares the analytic gradient to a first-order finite difference gradient.
01852   The function returns zero if the gradient checks out, or 1 if the analytic gradient
01853   appears to be incorrect.
01854   
01855   Translated by Steve Verrill, April 22, 1998.
01856   
01857   \section template_args Template Parameters
01858 
01859   \param M  SCPPNT Matrix type.
01860   \param V  SCPPNT Vector type.
01861   \param FUNC Type of function to minimize.
01862 
01863   \section function_args Function Parameters
01864  
01865   \param[in]  &x      Address of location vector at which the gradient is to be checked.
01866   \param[in]  f       Function value.
01867   \param[in]  &g      Address of vector containing analytic gradient.
01868   \param[in]  &typsiz Address of vector of typical sizes for the components of x.
01869   \param[in]  &sx     Address of scaling vector for x:  sx[i] = 1.0/typsiz[i].
01870   \param[in]  fscale  Estimate of scale of f_to_minimize. 
01871   \param[in]  rnf     Relative noise in f_to_minimize.
01872   \param[in]  analtl  Tolerance for comparison of estimated and analytic gradients.
01873   \param[in]  &gest   Address of finite difference gradient.
01874   \param[in,out] &msg Address of message or error code: On input, if the msg code includes the value 4, output is suppressed.
01875       On output, if the function return value is 1, msg contains the key of the error message.
01876  */                                                  
01877 template<class V, class M, class FUNC> int Uncmin<V, M, FUNC>::grdchk(V &x, double f, V &g,
01878     V &typsiz, V &sx, double fscale, double rnf, double analtl, V &gest, int &msg)
01879 {
01880   double gs;
01881   int ker, i;
01882 
01883   // COMPUTE FIRST ORDER FINITE DIFFERENCE GRADIENT AND COMPARE TO
01884   // ANALYTIC GRADIENT.
01885   fstofd(x, f, gest, sx, rnf);
01886 
01887   ker = 0;
01888 
01889   for (i = 1; i <= n; i++)
01890   {
01891     gs = max(std::fabs(f), fscale)/max(std::fabs(x(i)), typsiz(i));
01892 
01893     if (std::fabs(g(i) - gest(i)) > max(std::fabs(g(i)), gs)*analtl)
01894       ker = 1;
01895   }
01896 
01897   if (ker == 0)
01898     return 0;
01899 
01900   if (!(msg & 4))
01901   {
01902     std::fprintf(mfile, "\nThere appears to be an error in the coding");
01903     std::fprintf(mfile, " of the gradient method.\n\n\n");
01904     std::fprintf(mfile, "Component   Analytic   Finite Difference\n\n");
01905 
01906     for (i = 1; i <= n; i++)
01907     {
01908       std::fprintf(mfile, "%d  %lf  %lf\n", i, (double) g(i), (double) gest(i));
01909     }
01910   }
01911   msg = -21;
01912   return 1;
01913 }
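  Restating the acceptance test in the loop above: component i of the analytic gradient is
  flagged only when its discrepancy from the finite difference estimate \hat g_i exceeds a
  relative tolerance,

      |g_i - \hat g_i| > \max\bigl(|g_i|,\,gs_i\bigr)\,\mathrm{analtl},
      \qquad gs_i = \frac{\max(|f|,\,\mathrm{fscale})}{\max(|x_i|,\,\mathrm{typsiz}_i)},

  where gs_i keeps the comparison meaningful when g_i itself is close to zero.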
01914 
01915 /*!
01916   \brief
01917   Test for convergence of iterations.
01918   
01919   Determines whether the algorithm should terminate due to any of the following:
01920    1. Problem solved within user tolerance; 
01921    2. Convergence within user tolerance; 
01922    3. Iteration limit reached; 
01923    4. Divergence or too restrictive maximum step (stepmx) suspected. 
01924    
01925    Translated by Steve Verrill, May 12, 1998.
01926 
01927   \section template_args Template Parameters
01928 
01929   \param M  SCPPNT Matrix type.
01930   \param V  SCPPNT Vector type.
01931   \param FUNC Type of function to minimize.
01932 
01933   \section function_args Function Parameters
01934  
01935   \param[in]  &xpls   Address of location vector at current iteration (X[k]).
01936   \param[in]  fpls    Function value at xpls.
01937   \param[in]  &gpls   Address of gradient or approximation.
01938   \param[in]  &x      Address of location vector at previous iteration (X[k-1]).
01939   \param[in]  &itncnt Address of current iteration number.
01940   \param[out] &icscmx Address of number of consecutive steps >= stepmx (retain 
01941       between successive calls).
01942   \param[out] &itrmcd Address of termination code.
01943   \param[in]  &sx     Address of scaling vector for x.
01944   \param[in]  &fscale Address of estimate of scale of f_to_minimize. 
01945   \param[in]  &itnlim Address of maximum number of allowable iterations.
01946   \param[in]  &iretcd Address of return code.
01947   \param[in]  &mxtake Address of boolean flag indicating step of maximum length was 
01948       used.
01949   \param[in]  &msg    Address of message or error code: If the msg code includes the value 4, printed output is suppressed.
01950  */
01951 template<class V, class M, class FUNC> void Uncmin<V, M, FUNC>::optstp(V &xpls, double fpls,
01952     V &gpls, V &x, int &itncnt, int &icscmx, int &itrmcd, V &sx, double &fscale, int &itnlim,
01953     int &iretcd, bool &mxtake, int &msg)
01954 {
01955   int i;
01956   double d, rgx, relgrd, rsx = 0.0, relstp; // Initialized rsx = 0.0. ww, 12-20-2007
01957 
01958   itrmcd = 0;
01959 
01960   // LAST GLOBAL STEP FAILED TO LOCATE A POINT LOWER THAN X
01961   if (iretcd == 1)
01962   {
01963     itrmcd = 3;
01964 
01965     if (!(msg & 4))
01966     {
01967       std::fprintf(mfile, "\n\nOPTSTP    The last global step failed");
01968       std::fprintf(mfile, " to locate a point lower than x.\n");
01969       std::fprintf(mfile, "OPTSTP    Either x is an approximate local");
01970       std::fprintf(mfile, " minimum of the function,\n");
01971       std::fprintf(mfile, "OPTSTP    the function is too nonlinear for");
01972       std::fprintf(mfile, " this algorithm, or\n");
01973       std::fprintf(mfile, "OPTSTP    steptl is too large.\n");
01974     }
01975     return;
01976   }
01977   else
01978   {
01979     // FIND DIRECTION IN WHICH RELATIVE GRADIENT MAXIMUM.
01980     d = max(std::fabs(fpls), fscale);
01981     rgx = 0.0;
01982 
01983     for (i = 1; i <= n; i++)
01984     {
01985       relgrd = std::fabs(gpls(i))*max(std::fabs(xpls(i)), 1.0/sx(i))/d;
01986       rgx = max(rgx, relgrd);
01987     }
01988     mLastGradCrit = rgx; // BAH
01989 
01990     // FIND DIRECTION IN WHICH RELATIVE STEPSIZE MAXIMUM
01991     // Moved before check of gradient criterion so it is always computed - BAH
01992     if (itncnt > 0)
01993     {
01994       rsx = 0.0;
01995 
01996       for (i = 1; i <= n; i++)
01997       {
01998         relstp = std::fabs(xpls(i) - x(i))/ max(std::fabs(xpls(i)), 1.0/sx(i));
01999 
02000         rsx = max(rsx, relstp);
02001       }
02002       mLastStepCrit = rsx;
02003     }
02004     // Check whether gradient criterion is met
02005     if (rgx <= gradtl)
02006     {
02007       itrmcd = 1;
02008 
02009       if (!(msg & 4))
02010       {
02011         std::fprintf(mfile, "\n\nOPTSTP    The relative gradient is close");
02012         std::fprintf(mfile, " to zero.\n");
02013         std::fprintf(mfile, "OPTSTP    The current iterate is probably");
02014         std::fprintf(mfile, " a solution.\n");
02015       }
02016       return;
02017     }
02018     if (itncnt == 0)
02019       return;
02020 
02021     // Check whether step tolerance (steptl) criterion is met
02022     if (rsx <= steptl)
02023     {
02024       itrmcd = 2;
02025 
02026       if (!(msg & 4))
02027       {
02028         std::fprintf(mfile, "\n\nOPTSTP    Successive iterates are within");
02029         std::fprintf(mfile, " steptl.\n");
02030         std::fprintf(mfile, "OPTSTP    The current iterate is probably");
02031         std::fprintf(mfile, " a solution.\n");
02032       }
02033       return;
02034     }
02035 
02036     // CHECK ITERATION LIMIT
02037     if (itncnt >= itnlim)
02038     {
02039       itrmcd = 4;
02040 
02041       if (!(msg & 4))
02042       {
02043         std::fprintf(mfile, "\n\nOPTSTP    The iteration limit was reached.\n");
02044         std::fprintf(mfile, "OPTSTP    The algorithm failed.\n");
02045       }
02046 
02047       return;
02048     }
02049 
02050     // CHECK NUMBER OF CONSECUTIVE STEPS >= STEPMX
02051     if (!mxtake)
02052     {
02053       icscmx = 0;
02054 
02055       return;
02056     }
02057 
02058     if (!(msg & 4))
02059     {
02060       std::fprintf(mfile, "\n\nOPTSTP    Step of maximum length (stepmx)");
02061       std::fprintf(mfile, " taken.\n");
02062     }
02063 
02064     icscmx++;
02065 
02066     if (icscmx < 5)
02067       return;
02068 
02069     itrmcd = 5;
02070 
02071     if (!(msg & 4))
02072     {
02073       std::fprintf(mfile, "\n\nOPTSTP    Maximum step size exceeded");
02074       std::fprintf(mfile, " five consecutive times.\n");
02075       std::fprintf(mfile, "OPTSTP    Either the function is unbounded");
02076       std::fprintf(mfile, " below,\n");
02077       std::fprintf(mfile, "OPTSTP    becomes asymptotic to a finite value");
02078       std::fprintf(mfile, " from above in some direction, or\n");
02079       std::fprintf(mfile, "OPTSTP    stepmx is too small.\n");
02080     }
02081   }
02082 }
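  The two convergence tests computed above can be written compactly. With x^+ the new iterate,
  the routine reports convergence when either

      \max_i \frac{|g_i(x^+)|\,\max(|x^+_i|,\,1/sx_i)}{\max(|f(x^+)|,\,\mathrm{fscale})} \le \mathrm{gradtl}
      \qquad\text{or}\qquad
      \max_i \frac{|x^+_i - x_i|}{\max(|x^+_i|,\,1/sx_i)} \le \mathrm{steptl}.

  These are exactly the quantities stored in mLastGradCrit and mLastStepCrit above.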
02083 
02084 /*!
02085   \brief
02086   Provides the initial Hessian when secant updates are being used.
02087   
02088   Translated by Steve Verrill, April 27, 1998.
02089 
02090   \section template_args Template Parameters
02091 
02092   \param M  SCPPNT Matrix type.
02093   \param V  SCPPNT Vector type.
02094   \param FUNC Type of function to minimize.
02095 
02096   \section function_args Function Parameters
02097  
02098   \param[out] &a      Address of initial hessian (lower triangular matrix)
02099   \param[in]  &sx     Address of scaling vector for x.
02100   \param[in]  method  Indicator for the algorithm used to solve the minimization problem.
02101                Method = 1,2: Use factored secant method;
02102                method = 3:  Use unfactored secant method.
02103  */
02104 
02105 template<class V, class M, class FUNC> void Uncmin<V, M, FUNC>::hsnint(M &a, V &sx, int method)
02106 {
02107   int i, j;
02108 
02109   for (j = 1; j <= n; j++)
02110   {
02111     if (method == 3)
02112     {
02113       a(j, j) = sx(j)*sx(j);
02114     }
02115     else
02116     {
02117       a(j, j) = sx(j);
02118     }
02119 
02120     for (i = j + 1; i <= n; i++)
02121     {
02122       a(i, j) = 0.0;
02123     }
02124   }
02125   return;
02126 }
02127 
02128 /*!
02129   \brief
02130   Finds the second order forward finite difference approximations to the Hessian.  
02131   
02132   For optimization use this method to estimate the Hessian of the optimization function
02133   if no analytical user function has been supplied for either the gradient or the Hessian, 
02134   and the optimization function is inexpensive to evaluate.
02135   
02136   Translated by Steve Verrill, May 8, 1998.
02137 
02138   \section template_args Template Parameters
02139 
02140   \param M  SCPPNT Matrix type.
02141   \param V  SCPPNT Vector type.
02142   \param FUNC Type of function to minimize.
02143 
02144   \section function_args Function Parameters
02145  
02146   \param[in,out]   &xpls      Address of new iterate (X[k]).
02147   \param[in]    &fpls       Address of function value at the new iterate.
02148   \param[out]   &a          Address of finite difference approximation to the hessian. Only the entries in
02149       the lower triangular matrix and diagonal are returned.
02150   \param[in]    &sx         Address of scaling vector for x.
02151   \param[in]    &rnoise     Address of relative noise in the function to be minimized.
02152   \param[out]   &stepsz     Address of workspace (stepsize in i-th component direction).
02153   \param[out]   &anbr       Address of workspace (neighbor in i-th direction).
02154  */
02155 template<class V, class M, class FUNC> void Uncmin<V, M, FUNC>::sndofd(V &xpls, double &fpls, M &a,
02156     V &sx, double &rnoise, V &stepsz, V &anbr)
02157 {
02158   double xmult, xtmpi, xtmpj, fhat;
02159   int i, j;
02160 
02161   // FIND I-TH STEPSIZE AND EVALUATE NEIGHBOR IN DIRECTION
02162   // OF I-TH UNIT VECTOR
02163   xmult = std::pow(rnoise, 1.0/3.0);
02164 
02165   for (i = 1; i <= n; i++)
02166   {
02167     stepsz(i) = xmult*max(std::fabs(xpls(i)),1.0/sx(i));
02168     xtmpi = xpls(i);
02169     xpls(i) = xtmpi + stepsz(i);
02170 
02171     anbr(i) = minclass->f_to_minimize(xpls);
02172     xpls(i) = xtmpi;
02173   }
02174 
02175   // CALCULATE COLUMN I OF A
02176   for (i = 1; i <= n; i++)
02177   {
02178     xtmpi = xpls(i);
02179     xpls(i) = xtmpi + 2.0*stepsz(i);
02180     fhat = minclass->f_to_minimize(xpls);
02181     a(i, i) = ((fpls - anbr(i)) + (fhat - anbr(i)))/
02182     (stepsz(i)*stepsz(i));
02183 
02184     // CALCULATE SUB-DIAGONAL ELEMENTS OF COLUMN
02185     if (i != n)
02186     {
02187       xpls(i) = xtmpi + stepsz(i);
02188 
02189       for (j = i+1; j <= n; j++)
02190       {
02191         xtmpj = xpls(j);
02192         xpls(j) = xtmpj + stepsz(j);
02193         fhat = minclass->f_to_minimize(xpls);
02194         a(j, i) = ((fpls - anbr(i)) + (fhat - anbr(j)))/
02195         (stepsz(i)*stepsz(j));
02196         xpls(j) = xtmpj;
02197       }
02198     }
02199     xpls(i) = xtmpi;
02200   }
02201 }
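  The differences formed above are the standard second-order forward formulas. With h_i the
  stepsize for component i and the values f(x + h_i e_i) already held in anbr, the entries are

      a_{ii} \approx \frac{f(x + 2h_i e_i) - 2 f(x + h_i e_i) + f(x)}{h_i^{2}},
      \qquad
      a_{ji} \approx \frac{f(x + h_i e_i + h_j e_j) - f(x + h_i e_i) - f(x + h_j e_j) + f(x)}{h_i h_j},

  with h_i = rnoise^{1/3} max(|x_i|, 1/sx_i) as set at the top of the routine.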
02202 
02203 /*!
02204   \brief
02205   Checks the analytic hessian supplied by the user.
02206   
02207   Returns 0 if the Hessian checks out, otherwise returns 1.
02208   
02209   Translated by Steve Verrill, April 23, 1998.
02210 
02211   \section template_args Template Parameters
02212 
02213   \param M  SCPPNT Matrix type.
02214   \param V  SCPPNT Vector type.
02215   \param FUNC Type of function to minimize.
02216 
02217   \section function_args Function Parameters
02218 
02219   \param[in]  &x          Address of iterate (X[k]) at which the Hessian is to be checked.
02220   \param[in]  &f          Address of function value f(x).
02221   \param[out]  &g         Address of gradient g(x).
02222   \param[out]  &a         Address of hessian h(x). On exit: Hessian in lower triangle.
02223   \param[in]  &typsiz     Address of typical size for each component of x.
02224   \param[in]  &sx         Address of scaling vector for x:  sx[i] = 1.0/typsiz[i].
02225   \param[in]  rnf         Relative noise in f_to_minimize.
02226   \param[in]  analtl      Tolerance for comparison of estimated and analytic gradients.
02227   \param[in]  &iagflg     Address of flag: If iagflg = 1, then an analytic gradient is being supplied.
02228   \param[in]  &udiag      Address of workspace vector.
02229   \param[in]  &wrk1       Address of workspace vector.
02230   \param[in]  &wrk2       Address of workspace vector.
02231   \param[in,out]  &msg    Address of message or error code. On input, if the msg code includes the value 4, the 
02232       discrepancy report for the analytic Hessian is suppressed. On output, msg = -22 indicates a probable coding error
02233       in the Hessian.
02234  */
02235 template<class V, class M, class FUNC> int Uncmin<V, M, FUNC>::heschk(V &x, double &f, V &g, M &a,
02236     V &typsiz, V &sx, double rnf, double analtl, int &iagflg, V &udiag, V &wrk1, V &wrk2, int &msg)
02237 {
02238   int i, j, ker;
02239   double hs;
02240 
02241   // COMPUTE FINITE DIFFERENCE APPROXIMATION H TO THE HESSIAN.
02242   if (iagflg == 1)
02243     fstofd(x, g, a, sx, rnf, wrk1);
02244   else
02245     sndofd(x, f, a, sx, rnf, wrk1, wrk2);
02246 
02247   ker = 0;
02248 
02249   // COPY LOWER TRIANGULAR PART OF H TO UPPER TRIANGULAR PART
02250   // AND DIAGONAL OF H TO UDIAG
02251   for (j = 1; j <= n; j++)
02252   {
02253     udiag(j) = a(j,j);
02254 
02255     for (i = j + 1; i <= n; i++)
02256     {
02257       a(j, i) = a(i,j);
02258     }
02259   }
02260 
02261   // COMPUTE ANALYTIC HESSIAN AND COMPARE TO FINITE DIFFERENCE
02262   // APPROXIMATION.
02263   minclass->hessian(x, a);
02264 
02265   for (j = 1; j <= n; j++)
02266   {
02267     hs = max(std::fabs(g(j)), 1.0)/max(std::fabs(x(j)), typsiz(j));
02268 
02269     if (std::fabs(a(j, j) - udiag(j)) > max(std::fabs(udiag(j)), hs)*analtl)
02270       ker = 1;
02271 
02272     for (i = j + 1; i <= n; i++)
02273     {
02274       if (std::fabs(a(i, j) - a(j, i)) > max(std::fabs(a(i, j)), hs)*analtl)
02275         ker = 1;
02276     }
02277   }
02278   if (ker == 0)
02279     return 0;
02280 
02281   if (!(msg & 4))
02282   {
02283     std::fprintf(mfile, "\nThere appears to be an error in the coding");
02284     std::fprintf(mfile, " of the Hessian method.\n\n\n");
02285     std::fprintf(mfile, "Row   Column   Analytic   Finite Difference\n\n");
02286 
02287     for (i = 1; i <= n; i++)
02288     {
02289       for (j = 1; j < i; j++)
02290       {
02291         std::fprintf(mfile, "%d  %d  %lf  %lf\n", i, j, (double) a(i, j), (double) a(j, i));
02292       }
02293       std::fprintf(mfile, "%d  %d  %lf  %lf\n", i, i, (double) a(i, i), (double) udiag(i));
02294     }
02295   }
02296   msg = -22;
02297   return 1;
02298 }
02299 
02300 /*!
02301   \brief
02302   Prints iteration results: the current iterate, the function value, the gradient, and optionally the step and the Hessian.
02303   
02304   Translated by Steve Verrill, May 11, 1998.
02305 
02306   \section template_args Template Parameters
02307 
02308   \param M  SCPPNT Matrix type.
02309   \param V  SCPPNT Vector type.
02310   \param FUNC Type of function to minimize.
02311 
02312   \section function_args Function Parameters
02313 
02314   \param[in]  &x       Address of current iterate (X[k]).
02315   \param[in]  &f       Address of function value f(x).
02316   \param[in]  &g       Address of gradient g(x).
02317   \param[in]  &a       Address of hessian h(x) (lower triangle and diagonal).
02318   \param[in]  &p       Address of step taken.
02319   \param[in]  &itncnt  Address of iteration number (k).
02320   \param[in]  iflg     Flag controlling the information to print
02321  */
02322 template<class V, class M, class FUNC> void Uncmin<V, M, FUNC>::result(V &x, double &f, V &g, M &a,
02323     V &p, int &itncnt, int iflg)
02324 {
02325   int i, j, iii, num5, remain, iii5, iiir;
02326   int ilow, ihigh;
02327 
02328   num5 = n/5;
02329   remain = n%5;
02330 
02331   // PRINT ITERATION NUMBER
02332   std::fprintf(mfile, "\n\nRESULT      Iterate k = %d\n", itncnt);
02333 
02334   if (iflg != 0)
02335   {
02336     // PRINT STEP
02337     std::fprintf(mfile, "\n\nRESULT      Step\n\n");
02338 
02339     ilow = -4;
02340     ihigh = 0;
02341 
02342     for (i = 1; i <= num5; i++)
02343     {
02344       ilow += 5;
02345       ihigh += 5;
02346 
02347       std::fprintf(mfile, "%d--%d     ", ilow, ihigh);
02348 
02349       for (j = 1; j <= 5; j++)
02350       {
02351         std::fprintf(mfile, "%lf  ", (double) p(ilow+j-1));
02352       }
02353 
02354       std::fprintf(mfile, "\n");
02355     }
02356 
02357     ilow += 5;
02358     ihigh = ilow + remain - 1;
02359 
02360     std::fprintf(mfile, "%d--%d     ", ilow, ihigh);
02361 
02362     for (j = 1; j <= remain; j++)
02363     {
02364       std::fprintf(mfile, "%lf  ", (double) p(ilow+j-1));
02365     }
02366 
02367     std::fprintf(mfile, "\n");
02368   }
02369 
02370   // PRINT CURRENT ITERATE
02371   std::fprintf(mfile, "\n\nRESULT      Current x\n\n");
02372 
02373   ilow = -4;
02374   ihigh = 0;
02375 
02376   for (i = 1; i <= num5; i++)
02377   {
02378     ilow += 5;
02379     ihigh += 5;
02380 
02381     std::fprintf(mfile, "%d--%d     ", ilow, ihigh);
02382 
02383     for (j = 1; j <= 5; j++)
02384     {
02385       std::fprintf(mfile, "%lf  ", (double) x(ilow+j-1));
02386     }
02387     std::fprintf(mfile, "\n");
02388   }
02389 
02390   ilow += 5;
02391   ihigh = ilow + remain - 1;
02392 
02393   std::fprintf(mfile, "%d--%d     ", ilow, ihigh);
02394 
02395   for (j = 1; j <= remain; j++)
02396   {
02397     std::fprintf(mfile, "%lf  ", (double) x(ilow+j-1));
02398   }
02399 
02400   std::fprintf(mfile, "\n");
02401 
02402   // PRINT FUNCTION VALUE
02403   std::fprintf(mfile, "\n\nRESULT      f_to_minimize at x = %lf\n", f);
02404 
02405   // PRINT GRADIENT
02406   std::fprintf(mfile, "\n\nRESULT      Gradient at x\n\n");
02407 
02408   ilow = -4;
02409   ihigh = 0;
02410 
02411   for (i = 1; i <= num5; i++)
02412   {
02413     ilow += 5;
02414     ihigh += 5;
02415 
02416     std::fprintf(mfile, "%d--%d     ", ilow, ihigh);
02417 
02418     for (j = 1; j <= 5; j++)
02419     {
02420       std::fprintf(mfile, "%lf  ", (double) g(ilow+j-1));
02421     }
02422     std::fprintf(mfile, "\n");
02423   }
02424 
02425   ilow += 5;
02426   ihigh = ilow + remain - 1;
02427 
02428   std::fprintf(mfile, "%d--%d     ", ilow, ihigh);
02429 
02430   for (j = 1; j <= remain; j++)
02431   {
02432     std::fprintf(mfile, "%lf  ", (double) g(ilow+j-1));
02433   }
02434 
02435   std::fprintf(mfile, "\n");
02436 
02437   // PRINT HESSIAN FROM ITERATION K
02438   if (iflg != 0)
02439   {
02440     std::fprintf(mfile, "\n\nRESULT      Hessian at x\n\n");
02441 
02442     for (iii = 1; iii <= n; iii++)
02443     {
02444       iii5 = iii/5;
02445       iiir = iii%5;
02446 
02447       ilow = -4;
02448       ihigh = 0;
02449 
02450       for (i = 1; i <= iii5; i++)
02451       {
02452         ilow += 5;
02453         ihigh += 5;
02454 
02455         std::fprintf(mfile, "i = %d, j = ", iii);
02456         std::fprintf(mfile, "%d--%d     ", ilow, ihigh);
02457 
02458         for (j = 1; j <= 5; j++)
02459         {
02460           std::fprintf(mfile, "%lf  ", (double) a(iii, ilow+j-1));
02461         }
02462         std::fprintf(mfile, "\n");
02463       }
02464       ilow += 5;
02465       ihigh = ilow + iiir - 1;
02466 
02467       std::fprintf(mfile, "i = %d, j = ", iii);
02468       std::fprintf(mfile, "%d--%d     ", ilow, ihigh);
02469 
02470       for (j = 1; j <= iiir; j++)
02471       {
02472         std::fprintf(mfile, "%lf  ", (double) a(iii, ilow+j-1));
02473       }
02474 
02475       std::fprintf(mfile, "\n");
02476     }
02477   }
02478 }
02479 
02480 /*!
02481   \brief
02482   Solves Ax = b where A is an upper triangular matrix.
02483     
02484   Note that A is input as a lower triangular matrix and this method takes its transpose 
02485   implicitly.
02486 
02487   Translated by Steve Verrill, April 14, 1998.
02488 
02489   \section template_args Template Parameters
02490 
02491   \param M  SCPPNT Matrix type.
02492   \param V  SCPPNT Vector type.
02493   \param FUNC Type of function to minimize.
02494 
02495   \section function_args Function Parameters
02496 
02497   \param[in]  &a         Address of n by n lower triangular matrix A (preserved).
02498   \param[out] &x         Address of solution vector x.
02499   \param[in]  &b         Address of right-hand side vector b.
02500  */
02501 template<class V, class M, class FUNC> void Uncmin<V, M, FUNC>::bakslv(M &a, V &x, V &b)
02502 {
02503   int i, ip1, j;
02504   double sum;
02505 
02506   // SOLVE (L-TRANSPOSE)X=B. (BACK SOLVE)
02507   i = n;
02508   x(i) = b(i)/a(i,i);
02509 
02510   while (i > 1)
02511   {
02512     ip1 = i;
02513     i--;
02514 
02515     sum = 0.0;
02516 
02517     for (j = ip1; j <= n; j++)
02518     {
02519       sum += a(j, i)*x(j);
02520     }
02521     x(i) = (b(i) - sum)/a(i,i);
02522   }
02523 }
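  Because only L is stored, the loop above indexes a(j, i) rather than a(i, j); written out,
  the back substitution it performs on L'x = b is

      x_n = \frac{b_n}{L_{nn}},
      \qquad
      x_i = \frac{b_i - \sum_{j=i+1}^{n} L_{ji}\,x_j}{L_{ii}}, \quad i = n-1,\dots,1.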
02524 
02525 /*!
02526   \brief
02527   Finds the LL' decomposition of the perturbed model hessian matrix
02528   A+mu*I (where mu > 0 and I is the identity matrix) which is safely
02529   positive definite. If A is strictly positive definite upon entry,
02530   then mu = 0.
02531   
02532   Translated by Steve Verrill, April 14, 1998.
02533 
02534   Description: 
02535   
02536   1. If A has any negative diagonal elements, then choose mu > 0 such
02537   that the diagonal of A1 = A + mu*I is all positive, with the ratio
02538   of its smallest to largest elements in the order of sqrt(epsm).
02539   
02540   2. A1 undergoes a perturbed Cholesky decomposition which results in
02541   an LL' decomposition of A1 + D, where D is a non-negative diagonal
02542   matrix which is implicitly added to A1 during the decomposition, if
02543   A1 is not positive definite. A1 is retained and not changed during
02544   this process by copying L into the upper triangular part of A1 and
02545   the diagonal into udiag. Then the Cholesky decomposition routine is
02546   called. On return, addmax contains the maximum element of D.
02547   
02548   3. If addmax = 0, A1 was positive definite going into step 2 and
02549   the process returns to the calling program. Otherwise, the minimum
02550   number sdd which must be added to the diagonal of A to make it safely
02551   strictly diagonally dominant is calculated. Since A + addmax*I and
02552   A + sdd*I are both safely positive definite, choose 
02553   mu = min( addmax, sdd) and decompose A + mu*I to obtain L.
02554 
02555   \section template_args Template Parameters
02556 
02557   \param M  SCPPNT Matrix type.
02558   \param V  SCPPNT Vector type.
02559   \param FUNC Type of function to minimize.
02560 
02561   \section function_args Function Parameters
02562 
02563   \param[in,out]  &a      Address of nxn matrix A: On entry, A is the model hessian 
02564       (only the lower triangle and diagonal stored). 
02565       On exit, A contains the L factor of the LL' decomposition of the perturbed model hessian 
02566       in the lower triangle and diagonal, and contains the off-diagonal elements of the 
02567       hessian in the upper triangle (Note: diagonal elements of the hessian are returned 
02568       in udiag).
02569   \param[in]      &sx     Address of scaling vector for x.
02570   \param[out]     &udiag  Address of diagonal of the hessian (returned on exit).              
02571  */
02572 template<class V, class M, class FUNC> void Uncmin<V, M, FUNC>::chlhsn(M &a, V &sx, V &udiag)
02573 {
02574   int i, j, im1, jm1;
02575   double tol, diagmx, diagmn, posmax, amu, offmax, evmin, evmax, offrow, sdd;
02576 
02577   double addmax;
02578 
02579   // SCALE HESSIAN
02580   // PRE- AND POST- MULTIPLY "A" BY INV(SX)
02581   for (j = 1; j <= n; j++)
02582   {
02583     for (i = j; i <= n; i++)
02584     {
02585       a(i, j) /= (sx(i)*sx(j));
02586     }
02587   }
02588 
02589   // STEP1
02590   // -----
02591   // NOTE:  IF A DIFFERENT TOLERANCE IS DESIRED THROUGHOUT THIS
02592   // ALGORITHM, CHANGE TOLERANCE HERE:
02593   tol = std::sqrt(epsm);
02594 
02595   diagmx = a(1, 1);
02596   diagmn = a(1, 1);
02597 
02598   for (i = 2; i <= n; i++)
02599   {
02600     if (a(i, i) < diagmn)
02601       diagmn = a(i, i);
02602     if (a(i, i) > diagmx)
02603       diagmx = a(i, i);
02604   }
02605   posmax = max(diagmx, 0.0);
02606 
02607   // DIAGMN .LE. 0
02608   if (diagmn <= posmax*tol)
02609   {
02610     amu = tol*(posmax - diagmn) - diagmn;
02611 
02612     if (amu == 0.0)
02613     {
02614       // FIND LARGEST OFF-DIAGONAL ELEMENT OF A
02615       offmax = 0.0;
02616 
02617       for (i = 2; i <= n; i++)
02618       {
02619         im1 = i - 1;
02620 
02621         for (j = 1; j <= im1; j++)
02622         {
02623           if (std::fabs(a(i, j)) > offmax)
02624             offmax = std::fabs(a(i, j));
02625         }
02626       }
02627       amu = offmax;
02628 
02629       if (amu == 0.0)
02630       {
02631         amu = 1.0;
02632       }
02633       else
02634       {
02635         amu *= 1.0 + tol;
02636       }
02637     }
02638 
02639     // A = A + MU*I
02640     for (i = 1; i <= n; i++)
02641     {
02642       a(i, i) += amu;
02643     }
02644 
02645     diagmx += amu;
02646   }
02647 
02648   // STEP2
02649   // -----
02650   // COPY LOWER TRIANGULAR PART OF "A" TO UPPER TRIANGULAR PART
02651   // AND DIAGONAL OF "A" TO UDIAG
02652   for (j = 1; j <= n; j++)
02653   {
02654     udiag(j) = a(j,j);
02655     for (i = j + 1; i <= n; i++)
02656     {
02657       a(j, i) = a(i,j);
02658     }
02659   }
02660   choldc(a, diagmx, tol, addmax);
02661 
02662   // STEP3
02663   // -----
02664   // IF ADDMAX = 0, "A" WAS POSITIVE DEFINITE GOING INTO STEP 2,
02665   // THE LL' DECOMPOSITION HAS BEEN DONE, AND WE RETURN.
02666   // OTHERWISE, ADDMAX > 0.  PERTURB "A" SO THAT IT IS SAFELY
02667   // DIAGONALLY DOMINANT AND FIND LL' DECOMPOSITION
02668   if (addmax > 0.0)
02669   {
02670     // RESTORE ORIGINAL "A" (LOWER TRIANGULAR PART AND DIAGONAL)
02671     for (j = 1; j <= n; j++)
02672     {
02673       a(j, j) = udiag(j);
02674 
02675       for (i = j + 1; i <= n; i++)
02676       {
02677         a(i, j) = a(j,i);
02678       }
02679     }
02680 
02681     // FIND SDD SUCH THAT A + SDD*I IS SAFELY POSITIVE DEFINITE
02682     // NOTE:  EVMIN < 0 SINCE A IS NOT POSITIVE DEFINITE;
02683     evmin = 0.0;
02684     evmax = a(1, 1);
02685     for (i = 1; i <= n; i++)
02686     {
02687       offrow = 0.0;
02688       im1 = i - 1;
02689 
02690       for (j = 1; j <= im1; j++)
02691       {
02692         offrow += std::fabs(a(i, j));
02693       }
02694 
02695       for (j = i + 1; j <= n; j++)
02696       {
02697         offrow += std::fabs(a(j, i));
02698       }
02699 
02700       evmin = min(evmin, a(i, i) - offrow);
02701       evmax = max(evmax, a(i, i) + offrow);
02702     }
02703     sdd = tol*(evmax - evmin) - evmin;
02704 
02705     // PERTURB "A" AND DECOMPOSE AGAIN
02706     amu = min(sdd, addmax);
02707 
02708     for (i = 1; i <= n; i++)
02709     {
02710       a(i, i) += amu;
02711       udiag(i) = a(i,i);
02712     }
02713 
02714     // "A" NOW GUARANTEED SAFELY POSITIVE DEFINITE
02715     choldc(a, 0.0, tol, addmax);
02716   }
02717 
02718   // UNSCALE HESSIAN AND CHOLESKY DECOMPOSITION MATRIX
02719   for (j = 1; j <= n; j++)
02720   {
02721     for (i = j; i <= n; i++)
02722     {
02723       a(i, j) *= sx(i);
02724     }
02725 
02726     jm1 = j - 1;
02727 
02728     for (i = 1; i <= jm1; i++)
02729     {
02730       a(i, j) *= sx(i)*sx(j);
02731     }
02732 
02733     udiag(j) *= sx(j)*sx(j);
02734   }
02735   return;
02736 }
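  For Step 1 above, the shift amu = tol*(posmax - diagmn) - diagmn is chosen so that adding
  amu to the diagonal leaves every diagonal entry nonnegative; as a quick check (a restatement,
  not additional code),

      \min_i\,(a_{ii} + \mathrm{amu}) = \mathrm{diagmn} + \mathrm{amu}
        = \mathrm{tol}\,(\mathrm{posmax} - \mathrm{diagmn}) \ge 0,

  so when the original minimum is negative the new smallest diagonal entry is roughly a factor
  tol = sqrt(epsm) of the largest, which is the ratio mentioned in the description.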
02737 
02738 /*!
02739   \brief
02740   Finds the perturbed LL' decomposition of A + D, where D is a non-
02741   negative diagonal matrix added to A if necessary to allow the
02742   Cholesky decomposition to continue.
02743   
02744   The normal Cholesky decomposition is performed. However, if at any
02745   point the algorithm would attempt to set L(i,i) = SQRT(Temp) with
02746   Temp < Tol*Diagmx, then L(i,i) is set to SQRT(Tol*Diagmx) instead.
02747   This is equivalent to adding Tol*Diagmx - Temp to A(i,i).
02748     
02749   Translated by Steve Verrill, April 15, 1998.
02750 
02751   \section template_args Template Parameters
02752 
02753   \param M  SCPPNT Matrix type.
02754   \param V  SCPPNT Vector type.
02755   \param FUNC Type of function to minimize.
02756 
02757   \section function_args Function Parameters
02758 
02759   \param[in,out]  &a      Address of nxn matrix A: On entry, A is the matrix for which 
02760       to find the perturbed Cholesky decomposition. On exit, A contains L of the LL' 
02761       decomposition in lower triangle including the diagonal.
02762   \param[in]      diagmx  Maximum diagonal element of matrix A.
02763   \param[in]      tol     Tolerance.
02764   \param[out]     &addmax Address of maximum amount implicitly added to diagonal 
02765       of A in forming the Cholesky decomposition of A + D.            
02766  */ 
02767 template<class V, class M, class FUNC> void Uncmin<V, M, FUNC>::choldc(M &a, double diagmx,
02768     double tol, double &addmax)
02769 {
02770   int i, j, jm1, jp1, k;
02771   double aminl, amnlsq, offmax, sum, temp;
02772 
02773   addmax = 0.0;
02774   aminl = std::sqrt(diagmx*tol);
02775   amnlsq = aminl*aminl;
02776 
02777   // FORM COLUMN J OF L
02778   for (j = 1; j <= n; j++)
02779   {
02780     // FIND DIAGONAL ELEMENTS OF L
02781     sum = 0.0;
02782     jm1 = j - 1;
02783     jp1 = j + 1;
02784 
02785     for (k = 1; k <= jm1; k++)
02786     {
02787       sum += a(j, k)*a(j, k);
02788     }
02789     temp = a(j, j) - sum;
02790 
02791     if (temp >= amnlsq)
02792     {
02793       a(j, j) = std::sqrt(temp);
02794     }
02795     else
02796     {
02797       // FIND MAXIMUM OFF-DIAGONAL ELEMENT IN COLUMN
02798       offmax = 0.0;
02799 
02800       for (i = jp1; i <= n; i++)
02801       {
02802         if (std::fabs(a(i, j)) > offmax)
02803           offmax = std::fabs(a(i, j));
02804       }
02805       if (offmax <= amnlsq)
02806         offmax = amnlsq;
02807 
02808       // ADD TO DIAGONAL ELEMENT  TO ALLOW CHOLESKY DECOMPOSITION TO CONTINUE
02809       a(j, j) = std::sqrt(offmax);
02810       addmax = max(addmax, offmax-temp);
02811     }
02812 
02813     // FIND I,J ELEMENT OF LOWER TRIANGULAR MATRIX
02814     for (i = jp1; i <= n; i++)
02815     {
02816       sum = 0.0;
02817 
02818       for (k = 1; k <= jm1; k++)
02819       {
02820         sum += a(i, k)*a(j, k);
02821       }
02822       a(i, j) = (a(i,j) - sum)/a(j,j);
02823     }
02824   }
02825 }
02826 
02827 /*!
02828   \brief
02829   Finds the next Newton iterate by the double-dogleg method.
02830    
02831   The new iterate is returned in the xpls vector. This method drives the
02832   dogstp function.
02833 
02834   Translated by Steve Verrill, April 15, 1998.
02835 
02836   \section template_args Template Parameters
02837 
02838   \param M  SCPPNT Matrix type.
02839   \param V  SCPPNT Vector type.
02840   \param FUNC Type of function to minimize.
02841 
02842   \section function_args Function Parameters
02843 
02844   \param[in]  &x         Address of old iterate (X[k-1]).
02845   \param[in]  &f         Address of function value at the old iterate.
02846   \param[in]  &g         Address of gradient or approximation at the old iterate.
02847   \param[in]  &a         Address of cholesky decomposition of hessian
02848       in lower triangular part and diagonal.
02849   \param[in]  &p         Address of newton step.
02850   \param[out] &xpls      Address of new iterate (X[k]).
02851   \param[out] &fpls      Address of function value at the new iterate.
02852   \param[in]  &sx        Address of scaling vector for x.
02853   \param[in,out] &dlt    Address of trust region radius (value needs to be retained
02854       between successive calls).
02855   \param[out] &iretcd    Address of return code: 0 -- satisfactory xpls found;
02856        1 -- failed to find satisfactory xpls sufficiently distinct from x.
02857   \param[out] &mxtake    Address of boolean flag indicating that a step of maximum
02858       length was used.
02859   \param[in]  &sc        Address of workspace (current step).
02860   \param[in]  &wrk1      Address of workspace (and place holding argument to tregup).
02861   \param[in]  &wrk2      Address of workspace.
02862   \param[in]  &wrk3      Address of workspace.
02863  */
02864 template<class V, class M, class FUNC> void Uncmin<V, M, FUNC>::dogdrv(V &x, double &f, V &g, M &a,
02865     V &p, V &xpls, double &fpls, V &sx, double &dlt, int &iretcd, bool &mxtake, V &sc, V &wrk1,
02866     V &wrk2, V &wrk3)
02867 {
02868   int i;
02869   double tmp, rnwtln;
02870   double fplsp;
02871   double cln;
02872   double eta;
02873   bool fstdog;
02874   bool nwtake;
02875 
02876   iretcd = 4;
02877   fstdog = true;
02878   tmp = 0.0;
02879 
02880   for (i = 1; i <= n; i++)
02881   {
02882     tmp += sx(i)*sx(i)*p(i)*p(i);
02883   }
02884   rnwtln = std::sqrt(tmp);
02885 
02886   while (iretcd > 1)
02887   {
02888     // FIND NEW STEP BY DOUBLE DOGLEG ALGORITHM
02889     dogstp(g, a, p, sx, rnwtln, dlt, nwtake, fstdog, wrk1, wrk2, cln, eta, sc);
02890 
02891     // CHECK NEW POINT AND UPDATE TRUST REGION
02892     tregup(x, f, g, a, sc, sx, nwtake, dlt, iretcd, wrk3, fplsp, xpls, fpls, mxtake, 2, wrk1);
02893   }
02894 }
02895 
02896 /*!
02897   \brief
02898   Finds the new step by the double-dogleg approach.
02899 
02900   Translated by Steve Verrill, April 21, 1998.
02901 
02902   \section template_args Template Parameters
02903 
02904   \param M  SCPPNT Matrix type.
02905   \param V  SCPPNT Vector type.
02906   \param FUNC Type of function to minimize.
02907 
02908   \section function_args Function Parameters
02909 
02910   \param[in]      &g      Address of gradient at current iterate, g(X).
02911   \param[in]      &a      Address of cholesky decomposition of hessian in lower triangle
02912       and diagonal.
02913   \param[in]      &p      Address of newton step.
02914   \param[in]      &sx     Address of scaling vector for x.
02915   \param[in]      rnwtln  Newton step length.
02916   \param[in,out] &dlt     Address of trust region radius.
02917   \param[in,out] &nwtake  Address of boolean, nwtake = TRUE if Newton step taken.
02918   \param[in,out] &fstdog  Address of boolean, fstdog = TRUE, if on first leg of dogleg.
02919   \param[in,out] &ssd     Address of workspace (Cauchy step to the minimum of the quadratic
02920       model in the scaled steepest descent direction; value retained between 
02921       successive calls).
02922   \param[in,out] &v       Address of workspace (value retained between successive calls).
02923   \param[in,out] &cln     Address of cauchy length (value retained between successive calls).
02924   \param[in,out] &eta     Address of double-dogleg parameter (value retained between successive calls).
02925   \param[out]    &sc      Address of current step.
02926  */
02927 template<class V, class M, class FUNC> void Uncmin<V, M, FUNC>::dogstp(V &g, M &a, V &p, V &sx,
02928     double rnwtln, double &dlt, bool &nwtake, bool &fstdog, V &ssd, V &v, double &cln, double &eta,
02929     V &sc)
02930 {
02931   double alpha, beta, tmp, dot1, dot2, alam;
02932   int i, j;
02933 
02934   // CAN WE TAKE NEWTON STEP
02935   if (rnwtln <= dlt)
02936   {
02937     nwtake = true;
02938 
02939     for (i = 1; i <= n; i++)
02940     {
02941       sc(i) = p(i);
02942     }
02943     dlt = rnwtln;
02944   }
02945   else
02946   {
02947     // NEWTON STEP TOO LONG
02948     // CAUCHY STEP IS ON DOUBLE DOGLEG CURVE
02949     nwtake = false;
02950 
02951     if (fstdog)
02952     {
02953       // CALCULATE DOUBLE DOGLEG CURVE (SSD)
02954       fstdog = false;
02955       alpha = 0.0;
02956 
02957       for (i = 1; i <= n; i++)
02958       {
02959         alpha += (g(i)*g(i))/(sx(i)*sx(i));
02960       }
02961 
02962       beta = 0.0;
02963 
02964       for (i = 1; i <= n; i++)
02965       {
02966         tmp = 0.0;
02967 
02968         for (j = i; j <= n; j++)
02969         {
02970           tmp += (a(j, i)*g(j))/(sx(j)*sx(j));
02971         }
02972         beta += tmp*tmp;
02973       }
02974 
02975       for (i = 1; i <= n; i++)
02976       {
02977         ssd(i) = -(alpha/beta)*g(i)/sx(i);
02978       }
02979 
02980       cln = alpha*std::sqrt(alpha)/beta;
02981 
02982       eta = .2 + (.8*alpha*alpha)/(-beta*ddot(g, p));
02983 
02984       for (i = 1; i <= n; i++)
02985       {
02986         v(i) = eta*sx(i)*p(i) - ssd(i);
02987       }
02988 
02989       if (dlt == -1.0)
02990         dlt = min(cln, stepmx);
02991     }
02992     if (eta*rnwtln <= dlt)
02993     {
02994       // TAKE PARTIAL STEP IN NEWTON DIRECTION
02995       for (i = 1; i <= n; i++)
02996       {
02997         sc(i) = (dlt/rnwtln)*p(i);
02998       }
02999     }
03000     else
03001     {
03002       if (cln >= dlt)
03003       {
03004         // TAKE STEP IN STEEPEST DESCENT DIRECTION
03005         for (i = 1; i <= n; i++)
03006         {
03007           sc(i) = (dlt/cln)*ssd(i)/sx(i);
03008         }
03009       }
03010       else
03011       {
03012         // CALCULATE CONVEX COMBINATION OF SSD AND ETA*P
03013         // WHICH HAS SCALED LENGTH DLT
03014         dot1 = ddot(v, ssd); // Blas_f77.ddot_f77(n,v,1,ssd,1);
03015         dot2 = ddot(v, v); // Blas_f77.ddot_f77(n,v,1,v,1);
03016 
03017         alam = (-dot1 + std::sqrt((dot1*dot1) - dot2*(cln*cln - dlt*dlt)))/dot2;
03018 
03019         for (i = 1; i <= n; i++)
03020         {
03021           sc(i) = (ssd(i) + alam*v(i))/sx(i);
03022         }
03023       }
03024     }
03025   }
03026 }
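  The value alam computed in the final branch above is the root of a scalar quadratic: the
  step ssd + lambda*v must have scaled length dlt. Writing dot1 = v.ssd, dot2 = v.v and using
  cln = ||ssd|| as computed earlier,

      \|\mathrm{ssd} + \lambda v\|^{2}
        = \mathrm{cln}^{2} + 2\lambda\,\mathrm{dot1} + \lambda^{2}\,\mathrm{dot2}
        = \mathrm{dlt}^{2},
      \qquad
      \lambda = \frac{-\mathrm{dot1} + \sqrt{\mathrm{dot1}^{2} - \mathrm{dot2}\,(\mathrm{cln}^{2} - \mathrm{dlt}^{2})}}{\mathrm{dot2}},

  which is the positive root used for alam.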
03027 
03028 /*!
03029   \brief
03030   Solves Ax = b where A is a lower triangular matrix.
03031 
03032   If b is no longer required by the calling routine, then the
03033   vectors b and x may share the same storage.
03034 
03035   Translated by Steve Verrill, April 21, 1998.
03036 
03037   \section template_args Template Parameters
03038 
03039   \param M  SCPPNT Matrix type.
03040   \param V  SCPPNT Vector type.
03041   \param FUNC Type of function to minimize.
03042 
03043   \section function_args Function Parameters
03044 
03045   \param[in]     &a     Address of the lower triangular matrix (preserved).
03046   \param[out]    &x     Address of solution vector.
03047   \param[in]     &b     Address of the right-hand side vector.
03048  */
03049 template<class V, class M, class FUNC> void Uncmin<V, M, FUNC>::forslv(M &a, V &x, V &b)
03050 {
03051   int i, im1, j;
03052   double sum;
03053 
03054   // SOLVE LX=B. (FORWARD SOLVE)
03055   x(1) = b(1)/a(1, 1);
03056 
03057   for (i = 2; i <= n; i++)
03058   {
03059     sum = 0.0;
03060     im1 = i - 1;
03061 
03062     for (j = 1; j <= im1; j++)
03063     {
03064       sum += a(i, j)*x(j);
03065     }
03066     x(i) = (b(i) - sum)/a(i,i);
03067   }
03068 }
03069 
03070 /*!
03071   \brief
03072   Finds a central difference approximation to the gradient g(x) of the function at point x.
03073  
03074   Translated by Steve Verrill, April 21, 1998.
03075 
03076   \section template_args Template Parameters
03077 
03078   \param M  SCPPNT Matrix type.
03079   \param V  SCPPNT Vector type.
03080   \param FUNC Type of function to minimize.
03081 
03082   \section function_args Function Parameters
03083 
03084   \param[in]     &x          Address of the point vector at which the gradient is to be approximated.
03085   \param[in]     &sx         Address of the scaling vector for x.
03086   \param[in]     rnoise      Relative noise in the function to be minimized.
03087   \param[out]    &g          Address of the central difference approximation to the gradient.
03088  */
03089 template<class V, class M, class FUNC> void Uncmin<V, M, FUNC>::fstocd(V &x, V &sx, double rnoise,
03090     V &g)
03091 {
03092   double stepi, xtempi, fplus, fminus, xmult;
03093   int i;
03094 
03095   // FIND I-TH  STEPSIZE, EVALUATE TWO NEIGHBORS IN DIRECTION OF I-TH
03096   // UNIT VECTOR, AND EVALUATE I-TH  COMPONENT OF GRADIENT.
03097   xmult = std::pow(rnoise, 1.0/3.0);
03098 
03099   for (i = 1; i <= n; i++)
03100   {
03101     stepi = xmult*max(std::fabs(x(i)), 1.0/sx(i));
03102     xtempi = x(i);
03103 
03104     x(i) = xtempi + stepi;
03105     fplus = minclass->f_to_minimize(x);
03106 
03107     x(i) = xtempi - stepi;
03108     fminus = minclass->f_to_minimize(x);
03109 
03110     x(i) = xtempi;
03111 
03112     g(i) = (fplus - fminus)/(2.0*stepi);
03113   }
03114 }
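  Each gradient component above is a central difference; restated, with h_i the stepsize used
  in the loop,

      g_i \approx \frac{f(x + h_i e_i) - f(x - h_i e_i)}{2 h_i},
      \qquad h_i = \mathrm{rnoise}^{1/3}\,\max\bigl(|x_i|,\,1/sx_i\bigr).

  The cube-root stepsize reflects that central differences trade an O(h_i^2) truncation error
  against an O(rnoise/h_i) noise error, so the best compromise scales like rnoise^{1/3}.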
03115 
03116 /*!
03117   \brief
03118   Finds the next Newton iterate (xpls) by the More-Hebdon technique.
03119   
03120   This function drives hookst.
03121  
03122   Translated by Steve Verrill, April 23, 1998.
03123 
03124   \section template_args Template Parameters
03125 
03126   \param M  SCPPNT Matrix type.
03127   \param V  SCPPNT Vector type.
03128   \param FUNC Type of function to minimize.
03129 
03130   \section function_args Function Parameters
03131 
03132   \param[in]      &x          Address of old iterate (X[k-1]).
03133   \param[in]      &f          Address of function value at the old iterate.
03134   \param[in]      &g          Address of gradient or approximation at old iterate.
03135   \param[in]      &a          Address of cholesky decomposition of hessian in lower 
03136       triangle and diagonal. Upper triangle contains the off-diagonal elements of 
03137       the hessian (diagonal of hessian expected in udiag).
03138   \param[in]      &udiag      Address of vector containing diagonal of hessian.
03139   \param[in]      &p          Address of newton step.
03140   \param[out]     &xpls       Address of new iterate (X[k]).
03141   \param[out]     &fpls       Address of function value at the new iterate.
03142   \param[in]      &sx         Address of scaling vector for x.
03143   \param[in,out]  &dlt        Address of trust region radius [data member].
03144   \param[out]     &iretcd     Address of return code: iretcd = 0, satisfactory xpls found;
03145       iretcd = 1, failed to find satisfactory xpls sufficiently distinct from x.
03146   \param[out]     &mxtake     Address of boolean flag indicating step of maximum length used.
03147   \param[in,out]  &amu        Address of hookstep parameter mu (value retained between successive calls).
03148   \param[in,out]  &dltp       Address of trust region radius from the previous call (value retained between successive calls).
03149   \param[in,out]  &phi        Address of difference between the scaled step length and the trust region radius (value retained between successive calls).
03150   \param[in,out]  &phip0      Address of initial derivative of phi with respect to mu (value retained between successive calls).
03151   \param[in]      &sc         Address of workspace.
03152   \param[in]      &xplsp      Address of workspace.
03153   \param[in]      &wrk0       Address of workspace.
03154   \param[in]      &itncnt     Address of iteration count.
03155  */
03156 template<class V, class M, class FUNC> void Uncmin<V, M, FUNC>::hookdr(V &x, double &f, V &g, M &a,
03157     V &udiag, V &p, V &xpls, double &fpls, V &sx, double &dlt, int &iretcd, bool &mxtake,
03158     double &amu, double &dltp, double &phi, double &phip0, V &sc, V &xplsp, V &wrk0, int &itncnt)
03159 {
03160   int i, j;
03161   bool fstime;
03162   bool nwtake;
03163   double tmp, rnwtln, alpha, beta;
03164 
03165   double fplsp;
03166 
03167   iretcd = 4;
03168   fstime = true;
03169 
03170   tmp = 0.0;
03171 
03172   for (i = 1; i <= n; i++)
03173   {
03174     tmp += sx(i)*sx(i)*p(i)*p(i);
03175   }
03176   rnwtln = std::sqrt(tmp);
03177 
03178   if (itncnt == 1)
03179   {
03180     amu = 0.0;
03181 
03182     // IF FIRST ITERATION AND TRUST REGION NOT PROVIDED BY USER,
03183     // COMPUTE INITIAL TRUST REGION.
03184     if (dlt == -1.0)
03185     {
03186       alpha = 0.0;
03187 
03188       for (i = 1; i <= n; i++)
03189       {
03190         alpha += (g(i)*g(i))/(sx(i)*sx(i));
03191       }
03192       beta = 0.0;
03193 
03194       for (i = 1; i <= n; i++)
03195       {
03196         tmp = 0.0;
03197 
03198         for (j = i; j <= n; j++)
03199         {
03200           tmp += (a(j, i)*g(j))/(sx(j)*sx(j));
03201         }
03202         beta += tmp*tmp;
03203       }
03204       dlt = alpha*std::sqrt(alpha)/beta;
03205       dlt = min(dlt, stepmx);
03206     }
03207   }
03208   while (iretcd > 1)
03209   {
03210     // FIND NEW STEP BY MORE-HEBDON ALGORITHM
03211     hookst(g, a, udiag, p, sx, rnwtln, dlt, amu, dltp, phi, phip0, fstime, sc, nwtake, wrk0);
03212 
03213     dltp = dlt;
03214 
03215     // CHECK NEW POINT AND UPDATE TRUST REGION
03216     tregup(x, f, g, a, sc, sx, nwtake, dlt, iretcd, xplsp, fplsp, xpls, fpls, mxtake, 3, udiag);
03217   }
03218 }
03219 
03220 /*!
03221   \brief
03222   Finds a new step (xpls) by the More-Hebdon algorithm.
03223   
03224   This function is driven by hookdr.
03225   
03226   Translated by Steve Verrill, April 24, 1998.
03227 
03228   \section template_args Template Parameters
03229 
03230   \param M  SCPPNT Matrix type.
03231   \param V  SCPPNT Vector type.
03232   \param FUNC Type of function to minimize.
03233 
03234   \section function_args Function Parameters
03235 
03236   \param[in]      &g          Address of gradient at the current iterate, g(X).
03237   \param[in]      &a          Address of cholesky decomposition of hessian in lower 
03238       triangle and diagonal. Upper triangle contains the off-diagonal elements of 
03239       the hessian (diagonal of hessian expected in udiag).
03240   \param[in]      &udiag      Address of vector containing diagonal of hessian.
03241   \param[in]      &p          Address of newton step.
03242   \param[in]      &sx         Address of scaling vector for x.
03243   \param[in]      rnwtln      Newton step length.
03244   \param[in,out]  &dlt        Address of trust region radius.
03245   \param[in,out]  &amu        Address of hookstep parameter mu (value retained between successive calls).
03246   \param[in]      &dltp       Address of trust region radius at last exit from this routine.
03247   \param[in,out]  &phi        Address of difference between the scaled step length and the trust region radius (value retained between successive calls).
03248   \param[in,out]  &phip0      Address of initial derivative of phi with respect to mu (value retained between successive calls).
03249   \param[in,out]  &fstime     Address of boolean flag: fstime = TRUE, if first entry to this 
03250       routine during the k-th iteration.
03251   \param[out]     &sc         Address of current step.
03252   \param[out]     &nwtake     Address of boolean flag: nwtake = TRUE, if newton step taken.
03253   \param[in]      &wrk0       Address of workspace.
03254  */
03255 template<class V, class M, class FUNC> void Uncmin<V, M, FUNC>::hookst(V &g, M &a, V &udiag, V &p,
03256     V &sx, double rnwtln, double &dlt, double &amu, double &dltp, double &phi, double &phip0,
03257     bool &fstime, V &sc, bool &nwtake, V &wrk0)
03258 {
03259   double hi, alo;
03260   double phip, amulo, amuup, stepln;
03261   int i, j;
03262   bool done;
03263 
03264   double addmax;
03265 
03266   // HI AND ALO ARE CONSTANTS USED IN THIS ROUTINE.
03267   // CHANGE HERE IF OTHER VALUES ARE TO BE SUBSTITUTED.
03268   phip = 0.0;
03269   hi = 1.5;
03270   alo = .75;
03271 
03272   if (rnwtln <= hi*dlt)
03273   {
03274     //       TAKE NEWTON STEP
03275     nwtake = true;
03276 
03277     for (i = 1; i <= n; i++)
03278     {
03279       sc(i) = p(i);
03280     }
03281 
03282     dlt = min(dlt, rnwtln);
03283     amu = 0.0;
03284 
03285     return;
03286   }
03287   else
03288   {
03289     // NEWTON STEP NOT TAKEN
03290     nwtake = false;
03291 
03292     if (amu > 0.0)
03293     {
03294       amu -= (phi + dltp)*((dltp - dlt) + phi)/(dlt*phip);
03295     }
03296 
03297     phi = rnwtln - dlt;
03298 
03299     if (fstime)
03300     {
03301       for (i = 1; i <= n; i++)
03302       {
03303         wrk0(i) = sx(i)*sx(i)*p(i);
03304       }
03305       // SOLVE L*Y = (SX**2)*P
03306       forslv(a, wrk0, wrk0);
03307       phip0 = -std::pow(dnrm2(wrk0), 2)/rnwtln;
03308       fstime = false;
03309     }
03310     phip = phip0;
03311     amulo = -phi/phip;
03312 
03313     amuup = 0.0;
03314 
03315     for (i = 1; i <= n; i++)
03316     {
03317       amuup += (g(i)*g(i))/(sx(i)*sx(i));
03318     }
03319     amuup = std::sqrt(amuup)/dlt;
03320 
03321     done = false;
03322 
03323     // TEST VALUE OF AMU; GENERATE NEXT AMU IF NECESSARY
03324     while (!done)
03325     {
03326       if (amu < amulo || amu > amuup)
03327       {
03328         amu = max(std::sqrt(amulo*amuup),amuup*.001);
03329       }
03330 
03331       // COPY (H,UDIAG) TO L
03332       // WHERE H <-- H+AMU*(SX**2) [DO NOT ACTUALLY CHANGE (H,UDIAG)]
03333       for (j = 1; j <= n; j++)
03334       {
03335         a(j,j) = udiag(j) + amu*sx(j)*sx(j);
03336 
03337         for (i = j + 1; i <= n; i++)
03338         {
03339           a(i,j) = a(j,i);
03340         }
03341       }
03342 
03343       // FACTOR H=L(L+)
03344       choldc(a,0.0,std::sqrt(epsm),addmax);
03345 
03346       // SOLVE H*P = L(L+)*SC = -G
03347       for (i = 1; i <= n; i++)
03348       {
03349         wrk0(i) = -g(i);
03350       }
03351 
03352       lltslv(a,sc,wrk0);
03353 
03354       // RESET H.  NOTE SINCE UDIAG HAS NOT BEEN DESTROYED WE NEED DO
03355       // NOTHING HERE.  H IS IN THE UPPER PART AND IN UDIAG, STILL INTACT
03356       stepln = 0.0;
03357 
03358       for (i = 1; i <= n; i++)
03359       {
03360         stepln += sx(i)*sx(i)*sc(i)*sc(i);
03361       }
03362 
03363       stepln = std::sqrt(stepln);
03364       phi = stepln - dlt;
03365 
03366       for (i = 1; i <= n; i++)
03367       {
03368         wrk0(i) = sx(i)*sx(i)*sc(i);
03369       }
03370 
03371       forslv(a,wrk0,wrk0);
03372 
03373       phip = -std::pow(dnrm2(wrk0),2)/stepln;
03374 
03375       if ((alo*dlt <= stepln && stepln <= hi*dlt) ||
03376           (amuup - amulo <= 0.0))
03377       {
03378         // SC IS ACCEPTABLE HOOKSTEP
03379         done = true;
03380       }
03381       else
03382       {
03383         // SC NOT ACCEPTABLE HOOKSTEP.  SELECT NEW AMU
03384         amulo = max(amulo,amu-(phi/phip));
03385         if (phi < 0.0) amuup = min(amuup,amu);
03386         amu -= (stepln*phi)/(dlt*phip);
03387       }
03388     }
03389   }
03390 }
03391 
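/*!
  \par Example (illustrative sketch, not part of the original UNCMIN sources)

  hookst repeatedly solves the shifted system (H + amu*SX**2)*sc = -g and adjusts
  amu until the scaled step length lands in [alo*dlt, hi*dlt].  The sketch below
  performs one such shifted solve for an invented 2 x 2 model Hessian with unit
  scaling (sx = 1), using an explicit Cholesky factorization in place of
  choldc/lltslv; all names and values are assumptions made for this example only.

  \code
  #include <cmath>
  #include <cstdio>

  int main()
  {
    const double h[2][2] = { {4.0, 1.0}, {1.0, 3.0} };   // model Hessian H
    const double g[2]    = { 1.0, -2.0 };                // gradient
    const double mu      = 0.5;                          // trial shift amu

    // Shifted matrix A = H + mu*I (unit scaling)
    double a00 = h[0][0] + mu, a01 = h[0][1], a11 = h[1][1] + mu;

    // Cholesky factorization A = L*L' for the 2 x 2 case
    double l00 = std::sqrt(a00);
    double l10 = a01/l00;
    double l11 = std::sqrt(a11 - l10*l10);

    // Forward solve L*y = -g, then back solve L'*sc = y
    double y0 = -g[0]/l00;
    double y1 = (-g[1] - l10*y0)/l11;
    double sc1 = y1/l11;
    double sc0 = (y0 - l10*sc1)/l00;

    // The resulting step length is what hookst compares against dlt
    std::printf("sc = (%f, %f), length = %f\n",
                sc0, sc1, std::sqrt(sc0*sc0 + sc1*sc1));
    return 0;
  }
  \endcode
 */
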
03392 /*!
03393   \brief
03394   Solves Ax = b where A has the form LL', and only the lower triangular part, L, is stored.
03395 
03396   Note: If b is not required by the calling program, then b and x may share the same storage.
03397  
03398   Translated by Steve Verrill, April 27, 1998.
03399 
03400   \section template_args Template Parameters
03401 
03402   \param M  SCPPNT Matrix type.
03403   \param V  SCPPNT Vector type.
03404   \param FUNC Type of function to minimize.
03405 
03406   \section function_args Function Parameters
03407 
03408   \param[in]      &a     Address of matrix of form L (A= LL'). On return, this matrix is unchanged.
03409   \param[out]     &x     Address of solution vector.
03410   \param[in]      &b     Address of the right-hand side vector.
03411  */
03412 template<class V, class M, class FUNC> void Uncmin<V, M, FUNC>::lltslv(M &a, V &x, V &b)
03413 {
03414   // FORWARD SOLVE, RESULT IN X
03415   forslv(a, x, b);
03416   // BACK SOLVE, RESULT IN X
03417   bakslv(a, x, x);
03418 }
03419 
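/*!
  \par Example (illustrative sketch, not part of the original UNCMIN sources)

  lltslv solves (L*L')x = b with a forward substitution (L*y = b) followed by a
  back substitution (L'*x = y), which is exactly what forslv and bakslv do above.
  The sketch below repeats that pattern with zero-based C arrays instead of the
  one-offset templated types; the matrix and right-hand side are invented.

  \code
  #include <cstdio>

  int main()
  {
    const int n = 3;
    const double l[3][3] = { {2.0, 0.0, 0.0},
                             {1.0, 1.5, 0.0},
                             {0.5, 0.2, 1.0} };          // lower triangular L
    const double b[3] = { 4.0, 5.5, 2.9 };
    double y[3], x[3];

    // Forward solve L*y = b
    for (int i = 0; i < n; i++)
    {
      double sum = b[i];
      for (int j = 0; j < i; j++) sum -= l[i][j]*y[j];
      y[i] = sum/l[i][i];
    }

    // Back solve L'*x = y; the (i,j) entry of L' is l[j][i]
    for (int i = n - 1; i >= 0; i--)
    {
      double sum = y[i];
      for (int j = i + 1; j < n; j++) sum -= l[j][i]*x[j];
      x[i] = sum/l[i][i];
    }

    std::printf("x = (%f, %f, %f)\n", x[0], x[1], x[2]);
    return 0;
  }
  \endcode
 */
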
03420 /*!
03421   \brief
03422   Finds the next Newton iterate by line search.
03423   
03424   Translated by Steve Verrill, May 15, 1998.
03425 
03426   \section template_args Template Parameters
03427 
03428   \param M  SCPPNT Matrix type.
03429   \param V  SCPPNT Vector type.
03430   \param FUNC Type of function to minimize.
03431 
03432   \section function_args Function Parameters
03433 
03434   \param[in]  &x      Address of old iterate, (X[k-1]).
03435   \param[in]  &f      Address of function value at old iterate.
03436   \param[in]  &g      Address of gradient or approximation at old iterate.
03437   \param[in]  &p      Address of non-zero Newton step.
03438   \param[out] &xpls   Address of new iterate. (X[k]).
03439   \param[out] &fpls   Address of function value at new iterate.
03440   \param[out] &mxtake Address of boolean flag indicating whether the step of
03441       maximum length was used.
03442   \param[out] &iretcd Address of return code (0 = satisfactory xpls found; 1 = no satisfactory xpls found sufficiently distinct from x; -7 = iteration limit exceeded).
03443   \param[in]  &sx     Address of scaling vector for x.
03444  */
03445 template<class V, class M, class FUNC> void Uncmin<V, M, FUNC>::lnsrch(V &x, double &f, V &g, V &p,
03446     V &xpls, double &fpls, bool &mxtake, int &iretcd, V &sx)
03447 {
03448   int i;
03449   double tmp, sln, scl, slp, rln, rmnlmb, almbda, tlmbda;
03450   double t1, t2, t3, a, b, disc, pfpls, plmbda;
03451 
03452   pfpls = 0.0;
03453   plmbda = 0.0;
03454 
03455   mxtake = false;
03456   iretcd = 2;
03457 
03458   tmp = 0.0;
03459 
03460   for (i = 1; i <= n; i++)
03461   {
03462     tmp += sx(i)*sx(i)*p(i)*p(i);
03463   }
03464   sln = std::sqrt(tmp);
03465 
03466   if (sln > stepmx)
03467   {
03468     // NEWTON STEP LONGER THAN MAXIMUM ALLOWED
03469     scl = stepmx/sln;
03470     p *= scl; // Uncmin_f77.sclmul_f77(n,scl,p,p);
03471     sln = stepmx;
03472   }
03473   slp = ddot(g, p);
03474   rln = 0.0;
03475 
03476   for (i = 1; i <= n; i++)
03477   {
03478     rln = max(rln, std::fabs(p(i))/max(std::fabs(x(i)), 1.0/sx(i)));
03479   }
03480 
03481   rmnlmb = steptl/rln;
03482   almbda = 1.0;
03483 
03484   // LOOP
03485   // CHECK IF NEW ITERATE SATISFACTORY.  GENERATE NEW LAMBDA IF NECESSARY.
03486   int iteration = 0; // BAH
03487   while (iretcd >= 2)
03488   {
03489     for (i = 1; i <= n; i++)
03490     {
03491       xpls(i) = x(i) + almbda*p(i);
03492     }
03493     /* Find valid function parameters - BAH */
03494     if (!(minclass->ValidParameters(xpls)))
03495     {
03496       almbda *= 0.1;
03497       if (almbda < rmnlmb)
03498       {
03499         iretcd = 1; // no satisfactory xpls found sufficiently distinct from x
03500         return;
03501       }
03502       continue;
03503     }
03504 
03505     fpls = minclass->f_to_minimize(xpls);
03506 
03507     if (fpls <= (f + slp*.0001*almbda))
03508     {
03509       // SOLUTION FOUND
03510       iretcd = 0;
03511       if (almbda == 1.0 && sln > .99*stepmx)
03512         mxtake = true;
03513     }
03514     else
03515     {
03516       // SOLUTION NOT (YET) FOUND
03517       // test for iteration limit to prevent infinite loop when
03518       // fpls of new iterate is NaN - BAH
03519       if (++iteration > 100)
03520       {
03521         iretcd = -7;
03522         if (fPrintResults)
03523         {
03524           std::fprintf(mfile, "\n\n\nLNSRCH      Number of iterations exceeded.\n");
03525           std::fprintf(mfile, "LNSRCH      fpls = %e\n", fpls);
03526         }
03527         return;
03528       } // BAH
03529 
03530       if (almbda < rmnlmb)
03531       {
03532         // NO SATISFACTORY XPLS FOUND SUFFICIENTLY DISTINCT FROM X
03533         iretcd = 1;
03534       }
03535       else
03536       {
03537         // CALCULATE NEW LAMBDA
03538         if (iteration == 1)
03539         {
03540           // FIRST BACKTRACK: QUADRATIC FIT
03541           tlmbda = -slp/(2.0*(fpls - f - slp));
03542         }
03543         else
03544         {
03545           // ALL SUBSEQUENT BACKTRACKS: CUBIC FIT
03546           t1 = fpls - f - almbda*slp;
03547           t2 = pfpls - f - plmbda*slp;
03548           t3 = 1.0/(almbda - plmbda);
03549           a = t3*(t1/(almbda*almbda) - t2/(plmbda*plmbda));
03550           b = t3*(t2*almbda/(plmbda*plmbda) - t1*plmbda/(almbda*almbda));
03551           disc = b*b - 3.0*a*slp;
03552 
03553           if (disc > b*b)
03554           {
03555             // ONLY ONE POSITIVE CRITICAL POINT, MUST BE MINIMUM
03556             double signone = (a < 0.0) ? -1.0 : 1.0;
03557             tlmbda = (-b + signone*std::sqrt(disc))/(3.0*a);
03558           }
03559           else
03560           {
03561             // BOTH CRITICAL POINTS POSITIVE, FIRST IS MINIMUM
03562             double signone = (a < 0.0) ? -1.0 : 1.0;
03563             tlmbda = (-b - signone*std::sqrt(disc))/(3.0*a);
03564           }
03565 
03566           if (tlmbda > .5*almbda)
03567             tlmbda = .5*almbda;
03568         }
03569         plmbda = almbda;
03570         pfpls = fpls;
03571 
03572         if (tlmbda < almbda/10.0)
03573         {
03574           almbda *= .1;
03575         }
03576         else
03577         {
03578           almbda = tlmbda;
03579         }
03580       }
03581     }
03582   }
03583 }
03584 
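/*!
  \par Example (illustrative sketch, not part of the original UNCMIN sources)

  The acceptance test in lnsrch is the Armijo condition
  f(x + lambda*p) <= f(x) + 1.0e-4*lambda*g'p.  The sketch below applies it to an
  invented one-dimensional quadratic, backtracking by a fixed factor of 0.5
  instead of the quadratic/cubic fits used by the full routine.

  \code
  #include <cstdio>

  static double f(double x) { return (x - 3.0)*(x - 3.0); }

  int main()
  {
    double x = 0.0;
    double g = 2.0*(x - 3.0);     // gradient of f at x
    double p = -g;                // descent direction
    double slp = g*p;             // directional derivative g'p (negative)

    double lambda = 1.0;
    while (f(x + lambda*p) > f(x) + 1.0e-4*lambda*slp && lambda > 1.0e-8)
    {
      lambda *= 0.5;              // backtrack
    }
    std::printf("accepted lambda = %g, new x = %g\n", lambda, x + lambda*p);
    return 0;
  }
  \endcode
 */
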
03585 /*!
03586   \brief
03587   Computes y = Lx where L is a lower triangular matrix stored in A.
03588  
03589   Translated by Steve Verrill, April 27, 1998.
03590 
03591   \section template_args Template Parameters
03592 
03593   \param M  SCPPNT Matrix type.
03594   \param V  SCPPNT Vector type.
03595   \param FUNC Type of function to minimize.
03596 
03597   \section function_args Function Parameters
03598 
03599   \param[in]      &a     Address of lower triangular matrix A (containing L).
03600   \param[in]      &x     Address of operand vector.
03601   \param[out]     &y     Address of result vector.
03602  */
03603 template<class V, class M, class FUNC> void Uncmin<V, M, FUNC>::mvmltl(M &a, V &x, V &y)
03604 {
03605   double sum;
03606   int i, j;
03607 
03608   for (i = 1; i <= n; i++)
03609   {
03610     sum = 0.0;
03611     for (j = 1; j <= i; j++)
03612     {
03613       sum += a(i, j)*x(j);
03614     }
03615     y(i) = sum;
03616   }
03617 }
03618 
03619 /*!
03620   \brief
03621   Computes y = Ax where A is a symmetric matrix stored in its lower triangular part; x and y are vectors.
03622  
03623   Translated by Steve Verrill, April 27, 1998.
03624 
03625   \section template_args Template Parameters
03626 
03627   \param M  SCPPNT Matrix type.
03628   \param V  SCPPNT Vector type.
03629   \param FUNC Type of function to minimize.
03630 
03631   \section function_args Function Parameters
03632 
03633   \param[in]      &a     Address of the symmetric matrix A.
03634   \param[in]      &x     Address of operand vector x.
03635   \param[out]     &y     Address of result vector y.
03636  */
03637 template<class V, class M, class FUNC> void Uncmin<V, M, FUNC>::mvmlts(M &a, V &x, V &y)
03638 {
03639   double sum;
03640   int i, j;
03641 
03642   for (i = 1; i <= n; i++)
03643   {
03644     sum = 0.0;
03645 
03646     for (j = 1; j <= i; j++)
03647     {
03648       sum += a(i, j)*x(j);
03649     }
03650     for (j = i + 1; j <= n; j++)
03651     {
03652       sum += a(j, i)*x(j);
03653     }
03654     y(i) = sum;
03655   }
03656 }
03657 
03658 /*!
03659   \brief
03660   Computes y = L'x where L is a lower triangular matrix stored in A; y and x are vectors.
03661  
03662   Note:  The L transpose is taken implicitly.
03663   
03664   Translated by Steve Verrill, April 27, 1998.
03665 
03666   \section template_args Template Parameters
03667 
03668   \param M  SCPPNT Matrix type.
03669   \param V  SCPPNT Vector type.
03670   \param FUNC Type of function to minimize.
03671 
03672   \section function_args Function Parameters
03673 
03674   \param[in]      &a     Address of the lower triangular matrix A.
03675   \param[in]      &x     Address of operand vector x.
03676   \param[out]     &y     Address of result vector y.
03677  */
03678 template<class V, class M, class FUNC> void Uncmin<V, M, FUNC>::mvmltu(M &a, V &x, V &y)
03679 {
03680   double sum;
03681   int i, j;
03682 
03683   for (i = 1; i <= n; i++)
03684   {
03685     sum = 0.0;
03686     for (j = i; j <= n; j++)
03687     {
03688       sum += a(j, i)*x(j);
03689     }
03690     y(i) = sum;
03691   }
03692 }
03693 
03694 /*!
03695   \brief
03696   Interchanges rows i and i+1 of the upper Hessenberg matrix r, in columns i to n.
03697 
03698   Translated by Steve Verrill, April 29, 1998.
03699 
03700   \section template_args Template Parameters
03701 
03702   \param M  SCPPNT Matrix type.
03703   \param V  SCPPNT Vector type.
03704   \param FUNC Type of function to minimize.
03705 
03706   \section function_args Function Parameters
03707 
03708   \param[in,out]  &r     Address of upper Hessenberg matrix r.
03709   \param[in]      i      Index of row to interchange (i < n)
03710  */
03711 template<class V, class M, class FUNC> void Uncmin<V, M, FUNC>::qraux1(M &r, int i)
03712 {
03713   double tmp;
03714   int j, ip1;
03715 
03716   ip1 = i + 1;
03717 
03718   for (j = i; j <= n; j++)
03719   {
03720     tmp = r(i, j);
03721     r(i, j) = r(ip1,j);
03722     r(ip1, j) = tmp;
03723   }
03724 }
03725 
03726 /*!
03727   \brief
03728   Pre-multiplies the upper Hessenberg matrix r by the Jacobi rotation j(i,i+1,a,b).
03729   
03730   Translated by Steve Verrill, April 29, 1998.
03731 
03732   \section template_args Template Parameters
03733 
03734   \param M  SCPPNT Matrix type.
03735   \param V  SCPPNT Vector type.
03736   \param FUNC Type of function to minimize.
03737 
03738   \section function_args Function Parameters
03739 
03740   \param[in,out]  &r    Address of upper Hessenberg matrix r.
03741   \param[in]      i     Index of row.
03742   \param[in]      a     scalar.
03743   \param[in]      b     scalar.
03744  */
03745 template<class V, class M, class FUNC> void Uncmin<V, M, FUNC>::qraux2(M &r, int i, double a,
03746     double b)
03747 {
03748   double den, c, s, y, z;
03749   int j, ip1;
03750 
03751   ip1 = i + 1;
03752 
03753   den = std::sqrt(a*a + b*b);
03754   c = a/den;
03755   s = b/den;
03756 
03757   for (j = i; j <= n; j++)
03758   {
03759     y = r(i, j);
03760     z = r(ip1, j);
03761     r(i, j) = c*y - s*z;
03762     r(ip1, j) = s*y + c*z;
03763   }
03764 }
03765 
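/*!
  \par Example (illustrative sketch, not part of the original UNCMIN sources)

  qrupdt calls qraux2 with a = r(i,i) and b = -r(i+1,i), which makes the rotated
  subdiagonal entry exactly zero.  The sketch below applies the same rotation to
  a single invented column pair (y, z) to show that effect.

  \code
  #include <cmath>
  #include <cstdio>

  int main()
  {
    double y = 3.0, z = 4.0;            // entries r(i,i) and r(i+1,i)
    double a = y, b = -z;               // arguments as passed by qrupdt
    double den = std::sqrt(a*a + b*b);
    double c = a/den, s = b/den;

    double ynew = c*y - s*z;            // becomes sqrt(y*y + z*z) = 5
    double znew = s*y + c*z;            // becomes 0
    std::printf("rotated column: (%f, %f)\n", ynew, znew);
    return 0;
  }
  \endcode
 */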
03766 
03767 /*!
03768   \brief
03769   Finds an orthogonal n by n matrix, Q*, and an upper triangular n by n matrix, R*, such that
03770   (Q*)(R*) = R+u*v'.
03771   
03772   Translated by Steve Verrill, May 11, 1998.
03773 
03774   \section template_args Template Parameters
03775 
03776   \param M  SCPPNT Matrix type.
03777   \param V  SCPPNT Vector type.
03778   \param FUNC Type of function to minimize.
03779 
03780   \section function_args Function Parameters
03781 
03782   \param[in,out]  &a    Address of matrix A. Contains R on input; contains R* at output.
03783   \param[in]      &u    Address of input vector u.
03784   \param[in]      &v    Address of input vector v.
03785  */
03786 template<class V, class M, class FUNC> void Uncmin<V, M, FUNC>::qrupdt(M &a, V &u, V &v)
03787 {
03788   int k, km1, ii, i, j;
03789   double t1, t2;
03790 
03791   // DETERMINE LAST NON-ZERO IN U(.)
03792   k = n;
03793 
03794   while (u(k) == 0 && k > 1)
03795   {
03796     k--;
03797   }
03798 
03799   // (K-1) JACOBI ROTATIONS TRANSFORM
03800   // R + U(V+) --> (R*) + (U(1)*E1)(V+)
03801   // WHICH IS UPPER HESSENBERG
03802   km1 = k - 1;
03803 
03804   for (ii = 1; ii <= km1; ii++)
03805   {
03806     i = km1 - ii + 1;
03807 
03808     if (u(i) == 0.0)
03809     {
03810       qraux1(a, i);
03811       u(i) = u(i+1);
03812     }
03813     else
03814     {
03815       qraux2(a, i, u(i), -u(i+1));
03816       u(i) = std::sqrt(u(i)*u(i) + u(i+1)*u(i+1));
03817     }
03818   }
03819 
03820   // R <-- R + (U(1)*E1)(V+)
03821   for (j = 1; j <= n; j++)
03822   {
03823     a(1, j) += u(1)*v(j);
03824   }
03825 
03826   // (K-1) JACOBI ROTATIONS TRANSFORM UPPER HESSENBERG R
03827   // TO UPPER TRIANGULAR (R*)
03828   km1 = k - 1;
03829 
03830   for (i = 1; i <= km1; i++)
03831   {
03832     if (a(i, i) == 0.0)
03833     {
03834       qraux1(a, i);
03835     }
03836     else
03837     {
03838       t1 = a(i, i);
03839       t2 = -a(i+1, i);
03840 
03841       qraux2(a, i, t1, t2);
03842     }
03843   }
03844 }
03845 
03846 /*!
03847   \brief
03848   Updates the Hessian by the BFGS factored technique.
03849   
03850   Translated by Steve Verrill, May 14, 1998.
03851 
03852   \section template_args Template Parameters
03853 
03854   \param M  SCPPNT Matrix type.
03855   \param V  SCPPNT Vector type.
03856   \param FUNC Type of function to minimize.
03857 
03858   \section function_args Function Parameters
03859 
03860   \param[in]      &x       Address of old iterate (X[k-1]).
03861   \param[in]      &g       Address of gradient or approximation at the old iterate.
03862   \param[in,out]  &a       Address of matrix A. A contains on entry the cholesky decomposition 
03863       of the hessian in its lower triangle and diagonal; at exit, A contains the updated 
03864       cholesky decomposition of the hessian in its lower triangle and diagonal.
03865   \param[in]      &xpls    Address of the new iterate (X[k]).
03866   \param[in]      &gpls    Address of gradient or approximation at the new iterate.
03867   \param[in]      &itncnt  Address of variable containing iteration count.
03868   \param[in]      rnf      Relative noise in optimization function f_to_minimize.
03869   \param[in]      &iagflg  Address of integer flag: iagflg = 1 if an analytic gradient is 
03870       supplied, 0 otherwise.
03871   \param[in,out]  &noupdt  Address of boolean flag: noupdt = TRUE, when no update yet (retain 
03872       value between successive calls).
03873   \param[in]      &s       Address of Workspace vector s.
03874   \param[in]      &y       Address of Workspace vector y.
03875   \param[in]      &u       Address of Workspace vector u.
03876   \param[in]      &w       Address of Workspace vector w.
03877  */
03878 template<class V, class M, class FUNC> void Uncmin<V, M, FUNC>::secfac(V &x, V &g, M &a, V &xpls,
03879     V &gpls, int &itncnt, double rnf, int &iagflg, bool &noupdt, V &s, V &y, V &u, V &w)
03880 {
03881   bool skpupd;
03882   int i, j, im1;
03883   double den1, snorm2, ynrm2, den2, alp, reltol;
03884 
03885   if (itncnt == 1)
03886     noupdt = true;
03887 
03888   for (i = 1; i <= n; i++)
03889   {
03890     s(i) = xpls(i) - x(i);
03891     y(i) = gpls(i) - g(i);
03892   }
03893 
03894   den1 = ddot(s, y); // Blas_f77.ddot_f77(n,s,1,y,1);
03895   snorm2 = dnrm2(s); // Blas_f77.dnrm2_f77(n,s,1);
03896   ynrm2 = dnrm2(y); // Blas_f77.dnrm2_f77(n,y,1);
03897 
03898   if (den1 >= std::sqrt(epsm)*snorm2*ynrm2)
03899   {
03900     mvmltu(a, s, u);
03901     den2 = ddot(u, u); // Blas_f77.ddot_f77(n,u,1,u,1);
03902 
03903     // L <-- SQRT(DEN1/DEN2)*L
03904     alp = std::sqrt(den1/den2);
03905 
03906     if (noupdt)
03907     {
03908       for (j = 1; j <= n; j++)
03909       {
03910         u(j) *= alp;
03911 
03912         for (i = j; i <= n; i++)
03913         {
03914           a(i, j) *= alp;
03915         }
03916       }
03917       noupdt = false;
03918       // den2 = den1;  This value of den2 is never used - BAH
03919       alp = 1.0;
03920     }
03921     skpupd = true;
03922 
03923     // W = L(L+)S = HS
03924     mvmltl(a, u, w);
03925 
03926     i = 1;
03927 
03928     if (iagflg == 0)
03929     {
03930       reltol = std::sqrt(rnf);
03931     }
03932     else
03933     {
03934       reltol = rnf;
03935     }
03936 
03937     while (i <= n && skpupd)
03938     {
03939       if (std::fabs(y(i) - w(i)) >= reltol*max(std::fabs(g(i)), std::fabs(gpls(i))))
03940       {
03941         skpupd = false;
03942       }
03943       else
03944       {
03945         i++;
03946       }
03947     }
03948     if (!skpupd)
03949     {
03950       // W=Y-ALP*L(L+)S
03951       for (i = 1; i <= n; i++)
03952       {
03953         w(i) = y(i) - alp*w(i);
03954       }
03955       // ALP=1/SQRT(DEN1*DEN2)
03956       alp /= den1;
03957 
03958       // U=(L+)S/SQRT(DEN1*DEN2) = (L+)S/SQRT((Y+)S * (S+)L(L+)S)
03959       for (i = 1; i <= n; i++)
03960       {
03961         u(i) *= alp;
03962       }
03963 
03964       // COPY L INTO UPPER TRIANGULAR PART.  ZERO L.
03965       for (i = 2; i <= n; i++)
03966       {
03967         im1 = i - 1;
03968 
03969         for (j = 1; j <= im1; j++)
03970         {
03971           a(j, i) = a(i,j);
03972           a(i, j) = 0.0;
03973         }
03974       }
03975 
03976       // FIND Q, (L+) SUCH THAT  Q(L+) = (L+) + U(W+)
03977       qrupdt(a, u, w);
03978 
03979       // UPPER TRIANGULAR PART AND DIAGONAL OF A NOW CONTAIN UPDATED
03980       // CHOLESKY DECOMPOSITION OF HESSIAN.  COPY BACK TO LOWER
03981       // TRIANGULAR PART.
03982       for (i = 2; i <= n; i++)
03983       {
03984         im1 = i - 1;
03985 
03986         for (j = 1; j <= im1; j++)
03987         {
03988           a(i, j) = a(j,i);
03989         }
03990       }
03991     }
03992   }
03993 }
03994 
03995 
03996 /*!
03997   \brief
03998   Updates the Hessian by the BFGS unfactored approach.
03999   
04000   Translated by Steve Verrill, May 8, 1998.
04001 
04002   \section template_args Template Parameters
04003 
04004   \param M  SCPPNT Matrix type.
04005   \param V  SCPPNT Vector type.
04006   \param FUNC Type of function to minimize.
04007 
04008   \section function_args Function Parameters
04009 
04010   \param[in]      &x       Address of old iterate (X[k-1]).
04011   \param[in]      &g       Address of gradient or approximation at the old iterate.
04012   \param[in,out]  &a       Address of matrix A. On entry, A contains the off-diagonal entries
04013       of the approximate Hessian at the old iterate in its upper triangle (Note: The diagonal 
04014       of the hessian is passed in the udiag vector). At exit, A contains the updated approximate
04015       hessian at the new iterate in its lower triangle and diagonal.
04016   \param[in]      &udiag   Address of vector containing diagonal of Hessian at old iterate.    
04017   \param[in]      &xpls    Address of the new iterate (X[k]).
04018   \param[in]      &gpls    Address of gradient or approximation at the new iterate.
04019   \param[in]      &itncnt  Address of variable containing iteration count.
04020   \param[in]      rnf      Relative noise in optimization function f_to_minimize.
04021   \param[in]      &iagflg  Address of integer flag: iagflg = 1 if an analytic gradient is 
04022       supplied, 0 otherwise.
04023   \param[in,out]  &noupdt  Address of boolean flag: noupdt = TRUE, when no update yet (retain 
04024       value between successive calls).
04025   \param[in]      &s       Address of Workspace vector s.
04026   \param[in]      &y       Address of Workspace vector y.
04027   \param[in]      &t       Address of Workspace vector t.
04028  */
04029 template<class V, class M, class FUNC> void Uncmin<V, M, FUNC>::secunf(V &x, V &g, M &a, V &udiag,
04030     V &xpls, V &gpls, int &itncnt, double rnf, int &iagflg, bool &noupdt, V &s, V &y, V &t)
04031 {
04032   double den1, snorm2, ynrm2, den2, gam, tol;
04033   int i, j;
04034   bool skpupd;
04035 
04036   // COPY HESSIAN IN UPPER TRIANGULAR PART AND UDIAG TO
04037   // LOWER TRIANGULAR PART AND DIAGONAL
04038   for (j = 1; j <= n; j++)
04039   {
04040     a(j, j) = udiag(j);
04041 
04042     for (i = j+1; i <= n; i++)
04043     {
04044       a(i, j) = a(j,i);
04045     }
04046   }
04047   if (itncnt == 1)
04048     noupdt = true;
04049 
04050   for (i = 1; i <= n; i++)
04051   {
04052     s(i) = xpls(i) - x(i);
04053     y(i) = gpls(i) - g(i);
04054   }
04055 
04056   den1 = ddot(s, y); // Blas_f77.ddot_f77(n,s,1,y,1);
04057 
04058   snorm2 = dnrm2(s); // Blas_f77.dnrm2_f77(n,s,1);
04059 
04060   ynrm2 = dnrm2(y); // Blas_f77.dnrm2_f77(n,y,1);
04061 
04062   if (den1 >= std::sqrt(epsm)*snorm2*ynrm2)
04063   {
04064     mvmlts(a, s, t);
04065 
04066     den2 = ddot(s, t); // Blas_f77.ddot_f77(n,s,1,t,1);
04067 
04068     if (noupdt)
04069     {
04070       // H <-- [(S+)Y/(S+)HS]H
04071       gam = den1/den2;
04072 
04073       den2 = gam*den2;
04074 
04075       for (j = 1; j <= n; j++)
04076       {
04077         t(j) *= gam;
04078 
04079         for (i = j; i <= n; i++)
04080         {
04081           a(i, j) *= gam;
04082         }
04083       }
04084       noupdt = false;
04085     }
04086 
04087     skpupd = true;
04088 
04089     // CHECK UPDATE CONDITION ON ROW I
04090     for (i = 1; i <= n; i++)
04091     {
04092       tol = rnf*max(std::fabs(g(i)), std::fabs(gpls(i)));
04093       if (iagflg == 0)
04094         tol /= std::sqrt(rnf);
04095 
04096       if (std::fabs(y(i) - t(i)) >= tol)
04097       {
04098         skpupd = false;
04099         break;
04100       }
04101     }
04102 
04103     if (!skpupd)
04104     {
04105       // BFGS UPDATE
04106       for (j = 1; j <= n; j++)
04107       {
04108         for (i = j; i <= n; i++)
04109         {
04110           a(i, j) += y(i)*y(j)/den1 - t(i)*t(j)/den2;
04111         }
04112       }
04113     }
04114   }
04115 }
04116 
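/*!
  \par Example (illustrative sketch, not part of the original UNCMIN sources)

  The update applied row-wise by secunf is the standard unfactored BFGS formula
  H+ = H + y*y'/(s'y) - (H*s)*(H*s)'/(s'*H*s).  The sketch below applies it to an
  invented 2 x 2 approximation, with a plain positivity check on s'y in place of
  the noise-based tests used above; the secant condition H+*s = y then holds.

  \code
  #include <cstdio>

  int main()
  {
    double h[2][2] = { {2.0, 0.0}, {0.0, 1.0} };   // current approximation H
    const double s[2] = { 1.0, 0.5 };              // step xpls - x
    const double y[2] = { 2.5, 1.0 };              // gradient change gpls - g

    double t[2];                                   // t = H*s
    for (int i = 0; i < 2; i++)
      t[i] = h[i][0]*s[0] + h[i][1]*s[1];

    double den1 = s[0]*y[0] + s[1]*y[1];           // s'y
    double den2 = s[0]*t[0] + s[1]*t[1];           // s'*H*s

    if (den1 > 0.0)                                // curvature condition
    {
      for (int i = 0; i < 2; i++)
        for (int j = 0; j < 2; j++)
          h[i][j] += y[i]*y[j]/den1 - t[i]*t[j]/den2;
    }

    std::printf("H+ = [%f %f; %f %f]\n", h[0][0], h[0][1], h[1][0], h[1][1]);
    return 0;
  }
  \endcode
 */
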
04117 /*!
04118   \brief
04119   Determines whether to accept xpls = x + sc as the next iterate and updates the trust region radius dlt.
04120 
04121   Translated by Steve Verrill, May 11, 1998.
04122 
04123   \section template_args Template Parameters
04124 
04125   \param M  SCPPNT Matrix type.
04126   \param V  SCPPNT Vector type.
04127   \param FUNC Type of function to minimize.
04128 
04129   \section function_args Function Parameters
04130 
04131   \param[in]      &x        Address of old iterate (X[k-1]).
04132   \param[in]      &f        Address of Function value at old iterate, f(X).
04133   \param[in]      &g        Address of Gradient or approximation at old iterate, G(X).
04134   \param[in]      &a        Address of matrix A, containing cholesky decomposition of 
04135       hessian in lower triangular part and diagonal. The upper triangular section of A
04136       contains the off-diagonal entries of the hessian itself.
04137   \param[in]      &sc       Address of the current step.
04138   \param[in]      &sx       Address of the scaling vector for x.
04139   \param[in]      &nwtake   Address of boolean flag: nwtake = TRUE, if Newton step taken.
04140   \param[in,out]  &dlt      Address of trust region radius. 
04141   \param[in,out]  &iretcd   Address of return code. Explanation of iretcd values are:
04142       If iretcd = 0, then xpls is accepted as next iterate, dlt is the trust region 
04143       radius for the next iteration;
04144       If iretcd = 1, then xpls is unsatisfactory but accepted as next iterate because 
04145       xpls - x is less than the smallest allowable step length;
04146       If iretcd = 2, then f(xpls) is too large -- the current iteration is continued with new 
04147       reduced dlt;
04148       If iretcd = 3, then f(xpls) is sufficiently small, and the quadratic model predicts f(xpls) 
04149       well enough to continue the current iteration with a new, doubled dlt.
04150   \param[in,out]  &xplsp    Address of workspace holding the previous trial iterate (value needs 
04151       to be retained between successive calls of the k-th global step).
04152   \param[in,out]  &fplsp    Address of function value at xplsp [Retaining value between successive calls].
04153   \param[out]     &xpls     Address of vector containing new iterate (X[k]).
04154   \param[out]     &fpls     Address of Function value at new iterate.
04155   \param[out]     &mxtake   Address of Boolean flag indicating step of maximum length used
04156   \param[in]      method    Algorithm to use to solve minimization problem. Values are:
04157       method = 1, line search;
04158       method = 2, double dogleg;
04159       method = 3, More-Hebdon;
04160   \param[in]      &udiag    Address of vector containing the diagonal of the hessian at the old iterate.
04161  */
04162 template<class V, class M, class FUNC> void Uncmin<V, M, FUNC>::tregup(V &x, double &f, V &g, M &a,
04163     V &sc, V &sx, bool &nwtake, double &dlt, int &iretcd, V &xplsp, double &fplsp, V &xpls,
04164     double &fpls, bool &mxtake, int method, V &udiag)
04165 {
04166   int i, j;
04167   double rln, temp, dltf, slp, dltmp, dltfp;
04168 
04169   mxtake = false;
04170 
04171   for (i = 1; i <= n; i++)
04172   {
04173     xpls(i) = x(i) + sc(i);
04174   }
04175   fpls = minclass->f_to_minimize(xpls);
04176 
04177   dltf = fpls - f;
04178 
04179   slp = ddot(g, sc); // Blas_f77.ddot_f77(n,g,1,sc,1);
04180 
04181   if (iretcd == 4)
04182     fplsp = 0.0;
04183 
04184   if ((iretcd == 3) && ((fpls >= fplsp) || (dltf > .0001*slp)))
04185   {
04186     // RESET XPLS TO XPLSP AND TERMINATE GLOBAL STEP
04187     iretcd = 0;
04188 
04189     for (i = 1; i <= n; i++)
04190     {
04191       xpls(i) = xplsp(i);
04192     }
04193     fpls = fplsp;
04194     dlt *= .5;
04195   }
04196   else
04197   {
04198     // FPLS TOO LARGE
04199     if (dltf > .0001*slp)
04200     {
04201       rln = 0.0;
04202 
04203       for (i = 1; i <= n; i++)
04204       {
04205         rln = max(rln, std::fabs(sc(i))/ max(std::fabs(xpls(i)), 1.0/sx(i)));
04206       }
04207 
04208       if (rln < steptl)
04209       {
04210         // CANNOT FIND SATISFACTORY XPLS SUFFICIENTLY DISTINCT FROM X
04211         iretcd = 1;
04212       }
04213       else
04214       {
04215         // REDUCE TRUST REGION AND CONTINUE GLOBAL STEP
04216         iretcd = 2;
04217 
04218         dltmp = -slp*dlt/(2.0*(dltf - slp));
04219 
04220         if (dltmp < .1*dlt)
04221         {
04222           dlt *= .1;
04223         }
04224         else
04225         {
04226           dlt = dltmp;
04227         }
04228       }
04229     }
04230     else
04231     {
04232       // FPLS SUFFICIENTLY SMALL
04233       dltfp = 0.0;
04234 
04235       if (method == 2)
04236       {
04237         for (i = 1; i <= n; i++)
04238         {
04239           temp = 0.0;
04240 
04241           for (j = i; j <= n; j++)
04242           {
04243             temp += a(j, i)*sc(j);
04244           }
04245           dltfp += temp*temp;
04246         }
04247       }
04248       else
04249       {
04250         for (i = 1; i <= n; i++)
04251         {
04252           dltfp += udiag(i)*sc(i)*sc(i);
04253           temp = 0.0;
04254 
04255           for (j = i+1; j <= n; j++)
04256           {
04257             temp += a(i, j)*sc(i)*sc(j);
04258           }
04259           dltfp += 2.0*temp;
04260         }
04261       }
04262       dltfp = slp + dltfp/2.0;
04263 
04264       if ((iretcd != 2) && (std::fabs(dltfp - dltf) <= .1*std::fabs(dltf)) && (!nwtake) && (dlt
04265           <= .99*stepmx))
04266       {
04267         // DOUBLE TRUST REGION AND CONTINUE GLOBAL STEP
04268         iretcd = 3;
04269 
04270         for (i = 1; i <= n; i++)
04271         {
04272           xplsp(i) = xpls(i);
04273         }
04274         fplsp = fpls;
04275         dlt = min(2.0*dlt, stepmx);
04276       }
04277       else
04278       {
04279         // ACCEPT XPLS AS NEXT ITERATE.  CHOOSE NEW TRUST REGION.
04280         iretcd = 0;
04281 
04282         if (dlt > .99*stepmx)
04283           mxtake = true;
04284 
04285         if (dltf >= .1*dltfp)
04286         {
04287           // DECREASE TRUST REGION FOR NEXT ITERATION
04288           dlt *= .5;
04289         }
04290         else
04291         {
04292           // CHECK WHETHER TO INCREASE TRUST REGION FOR NEXT ITERATION
04293           if (dltf <= .75*dltfp)
04294             dlt = min(2.0*dlt, stepmx);
04295         }
04296       }
04297     }
04298   }
04299 }
04300 
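/*!
  \par Example (illustrative sketch, not part of the original UNCMIN sources)

  Once a step is accepted, tregup compares the actual change dltf = f(xpls) - f
  with the quadratic-model prediction dltfp = g'sc + sc'H*sc/2 (both negative for
  a good step): poor agreement halves the radius, close agreement doubles it up
  to stepmx.  The sketch below reproduces just that final radius decision with
  invented values.

  \code
  #include <algorithm>
  #include <cstdio>

  int main()
  {
    const double stepmx = 10.0;        // maximum allowed step length
    double dlt = 1.0;                  // current trust region radius

    double dltf  = -0.80;              // actual reduction in f
    double dltfp = -1.00;              // reduction predicted by the model

    if (dltf >= 0.1*dltfp)             // poor agreement: shrink
      dlt *= 0.5;
    else if (dltf <= 0.75*dltfp)       // good agreement: grow, capped at stepmx
      dlt = std::min(2.0*dlt, stepmx);

    std::printf("new trust region radius = %f\n", dlt);
    return 0;
  }
  \endcode
 */
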
04301 /*! 
04302   \brief
04303   Calculate the dot product of two vectors. 
04304   
04305   \section template_args Template Parameters
04306   
04307   \param M  SCPPNT Matrix type.
04308   \param V  SCPPNT Vector type.
04309   \param FUNC Type of function to minimize.
04310 
04311   \section function_args Function Parameters
04312    
04313   \param[in]  &x  Address of first vector to multiply.
04314   \param[in]  &y  Address of second vector to multiply.
04315   */
04316 template<class V, class M, class FUNC> double Uncmin<V, M, FUNC>::ddot(V &x, V &y)
04317 {
04318   typename V::iterator ix = x.begin();
04319   typename V::iterator iy = y.begin();
04320   double prod = 0.0;
04321 
04322   // assumes x and y are the same size
04323   for (int n = x.size(); n--; ++ix, ++iy)
04324   {
04325     prod += *ix * *iy;
04326   }
04327 
04328   return prod;
04329 }
04330 
04331 /*! 
04332   \brief
04333   Calculate the Euclidean norm of a vector. 
04334   
04335   It derives, via Steve Verrill's FORTRAN-to-Java translation, from the 
04336   LINPACK function DNRM2.
04337 
04338   In the LINPACK listing DNRM2 is attributed to C.L. Lawson
04339   with a date of January 8, 1978.  The routine below is based
04340   on a more recent DNRM2 version that is attributed in LAPACK
04341   documentation to Sven Hammarling.
04342 
04343   Translated by Steve Verrill, February 25, 1997.
04344 
04345   \section template_args Template Parameters
04346   
04347   \param M  SCPPNT Matrix type.
04348   \param V  SCPPNT Vector type.
04349   \param FUNC Type of function to minimize.
04350   
04351   \section function_args Function Parameters
04352    
04353   \param[in]  &x  Address of the vector whose Euclidean norm is computed.
04354  */
04355 template<class V, class M, class FUNC> double Uncmin<V, M, FUNC>::dnrm2(V &x)
04356 {
04357 
04358   double absxi, norm, scale, ssq, fac;
04359   int ix;
04360   int nelem = x.size();
04361 
04362   if (nelem < 1)
04363   {
04364     norm = 0.0;
04365   }
04366   else if (nelem == 1)
04367   {
04368     norm = std::fabs(x(1));
04369   }
04370   else
04371   {
04372     scale = 0.0;
04373     ssq = 1.0;
04374 
04375     for (ix = 1; ix <= nelem; ++ix)
04376     {
04377       if (x(ix) != 0.0)
04378       {
04379         absxi = std::fabs(x(ix));
04380 
04381         if (scale < absxi)
04382         {
04383           fac = scale/absxi;
04384           ssq = 1.0 + ssq*fac*fac;
04385           scale = absxi;
04386         }
04387         else
04388         {
04389           fac = absxi/scale;
04390           ssq += fac*fac;
04391         }
04392       }
04393     }
04394     norm = scale*std::sqrt(ssq);
04395   }
04396   return norm;
04397 }
04398 
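/*!
  \par Example (illustrative sketch, not part of the original UNCMIN sources)

  The running rescaling in dnrm2 exists to avoid overflow and underflow of the
  intermediate sum of squares.  The sketch below contrasts the naive formula with
  the scaled accumulation for invented entries near 1e200: the naive result
  overflows to infinity while the scaled result is 5e200.

  \code
  #include <cmath>
  #include <cstdio>

  int main()
  {
    const double x[3] = { 3.0e200, 4.0e200, 0.0 };

    // Naive: the squares overflow before the square root is taken
    double naive = std::sqrt(x[0]*x[0] + x[1]*x[1] + x[2]*x[2]);

    // Scaled: track the largest magnitude seen so far, as dnrm2 does
    double scale = 0.0, ssq = 1.0;
    for (int i = 0; i < 3; i++)
    {
      if (x[i] != 0.0)
      {
        double absxi = std::fabs(x[i]);
        if (scale < absxi)
        {
          double fac = scale/absxi;
          ssq = 1.0 + ssq*fac*fac;
          scale = absxi;
        }
        else
        {
          double fac = absxi/scale;
          ssq += fac*fac;
        }
      }
    }
    double scaled = scale*std::sqrt(ssq);

    std::printf("naive = %g, scaled = %g\n", naive, scaled);
    return 0;
  }
  \endcode
 */
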
04399 /*! 
04400   \brief
04401   Maximum of two double precision scalars.
04402   
04403   \section template_args Template Parameters
04404   
04405   \param M  SCPPNT Matrix type.
04406   \param V  SCPPNT Vector type.
04407   \param FUNC Type of function to minimize.
04408   
04409   \section function_args Function Parameters
04410    
04411   \param[in]  a First scalar to compare.
04412   \param[in]  b Second scalar to compare.
04413  */
04414 template<class V, class M, class FUNC> double Uncmin<V, M, FUNC>::max(double a, double b)
04415 {
04416   return (a > b) ? a : b;
04417 }
04418 
04419 /*! 
04420   \brief
04421   Minimum of two double precision scalars.
04422   
04423   \section template_args Template Parameters
04424   
04425   \param M  SCPPNT Matrix type.
04426   \param V  SCPPNT Vector type.
04427   \param FUNC Type of function to minimize.
04428   
04429   \section function_args Function Parameters
04430    
04431   \param[in]  a First scalar to compare.
04432   \param[in]  b Second scalar to compare.
04433  */
04434 template<class V, class M, class FUNC> double Uncmin<V, M, FUNC>::min(double a, double b)
04435 {
04436   return (a > b) ? b : a;
04437 }
04438 
04439 #endif // UNCMIN_H_
