newmatnl.cpp
// newmatnl.cpp : non-linear optimisation (FindMaximum2, NonLinearLeastSquares, MLE_D_FI)

// Copyright (C) 1993,4,5,6: R B Davies


#define WANT_MATH
#define WANT_STREAM

#include "newmatap.h"
#include "newmatnl.h"

#ifdef use_namespace
namespace NEWMAT {
#endif


void FindMaximum2::Fit(ColumnVector& Theta, int n_it)
{
   Tracer tr("FindMaximum2::Fit");
   enum State {Start, Restart, Continue, Interpolate, Extrapolate,
      Fail, Convergence};
   State TheState = Start;
   Real z,w,x,x2,g,l1,l2,l3,d1,d2=0,d3;
   ColumnVector Theta1, Theta2, Theta3;
   int np = Theta.Nrows();
   ColumnVector H1(np), H3, HP(np), K, K1(np);
   bool oorg, conv;
   int counter = 0;
   Theta1 = Theta; HP = 0.0; g = 0.0;

   // This is really a set of gotos and labels, but they do not work
   // correctly in AT&T C++ and Sun 4.01 C++.

   for(;;)
   {
      switch (TheState)
      {
      case Start:
         tr.ReName("FindMaximum2::Fit/Start");
         Value(Theta1, true, l1, oorg);
         if (oorg) Throw(ProgramException("invalid starting value\n"));
         // fall through to Restart

      case Restart:
         tr.ReName("FindMaximum2::Fit/ReStart");
         conv = NextPoint(H1, d1);
         if (conv) { TheState = Convergence; break; }
         if (counter++ > n_it) { TheState = Fail; break; }

         z = 1.0 / sqrt(d1);
         H3 = H1 * z; K = (H3 - HP) * g; HP = H3;
         g = 0.0;                     // de-activate to use curved projection
         if (g == 0.0) K1 = 0.0; else K1 = K * 0.2 + K1 * 0.6;
         // (K - K1) * alpha + K1 * (1 - alpha)
         //     = K * alpha + K1 * (1 - 2 * alpha)
         K = K1 * d1; g = z;
         // fall through to Continue

      case Continue:
         tr.ReName("FindMaximum2::Fit/Continue");
         Theta2 = Theta1 + H1 + K;
         Value(Theta2, false, l2, oorg);
         if (counter++ > n_it) { TheState = Fail; break; }
         if (oorg)
         {
            // new point is out of range: halve the step and try again
            H1 *= 0.5; K *= 0.25; d1 *= 0.5; g *= 2.0;
            TheState = Continue; break;
         }
         d2 = LastDerivative(H1 + K * 2.0);
         // fall through to Interpolate

      case Interpolate:
         tr.ReName("FindMaximum2::Fit/Interpolate");
         z = d1 + d2 - 3.0 * (l2 - l1);
         w = z * z - d1 * d2;
         if (w < 0.0) { TheState = Extrapolate; break; }
         w = z + sqrt(w);
         if (1.5 * w + d1 < 0.0)
            { TheState = Extrapolate; break; }
         if (d2 > 0.0 && l2 > l1 && w > 0.0)
            { TheState = Extrapolate; break; }
         x = d1 / (w + d1); x2 = x * x; g /= x;
         Theta3 = Theta1 + H1 * x + K * x2;
         Value(Theta3, true, l3, oorg);
         if (counter++ > n_it) { TheState = Fail; break; }
         if (oorg)
         {
            if (x <= 1.0)
               { x *= 0.5; x2 = x*x; g *= 2.0; d1 *= x; H1 *= x; K *= x2; }
            else
            {
               x = 0.5 * (x-1.0); x2 = x*x; Theta1 = Theta2;
               H1 = (H1 + K * 2.0) * x;
               K *= x2; g = 0.0; d1 = x * d2; l1 = l2;
            }
            TheState = Continue; break;
         }

         if (l3 >= l1 && l3 >= l2)
            { Theta1 = Theta3; l1 = l3; TheState = Restart; break; }

         d3 = LastDerivative(H1 + K * 2.0);
         if (l1 > l2)
            { H1 *= x; K *= x2; Theta2 = Theta3; d1 *= x; d2 = d3*x; }
         else
         {
            Theta1 = Theta2; Theta2 = Theta3;
            x -= 1.0; x2 = x*x; g = 0.0; H1 = (H1 + K * 2.0) * x;
            K *= x2; l1 = l2; l2 = l3; d1 = x*d2; d2 = x*d3;
            if (d1 <= 0.0) { TheState = Start; break; }
         }
         TheState = Interpolate; break;

      case Extrapolate:
         tr.ReName("FindMaximum2::Fit/Extrapolate");
         Theta1 = Theta2; g = 0.0; K *= 4.0; H1 = (H1 * 2.0 + K);
         d1 = 2.0 * d2; l1 = l2;
         TheState = Continue; break;

      case Fail:
         Throw(ConvergenceException(Theta));

      case Convergence:
         Theta = Theta1; return;
      }
   }
}
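
// A minimal sketch, not part of the library and not compiled (hence the
// "#if 0" guard), of how a concrete class might drive the line search above.
// It relies only on the virtual interface used in this file -- Value(),
// NextPoint() and LastDerivative() -- which is assumed to be declared for
// FindMaximum2 in newmatnl.h.  The objective is the concave quadratic
// f(theta) = -0.5 * ||theta - target||^2, so the Newton step is simply the
// gradient target - theta.
#if 0
class QuadraticPeak : public FindMaximum2
{
   ColumnVector target;            // hypothetical data defining the maximum
   ColumnVector grad;              // gradient at the last point given to Value
public:
   QuadraticPeak(const ColumnVector& t) : target(t) {}
   void Value(const ColumnVector& theta, bool, Real& v, bool& oorg)
   {
      grad = target - theta;                    // df/dtheta
      v = -0.5 * SumSquare(grad);               // f(theta)
      oorg = false;                             // never out of range here
   }
   bool NextPoint(ColumnVector& H, Real& d)
   {
      H = grad;                                 // Newton step (Hessian = -I)
      d = SumSquare(grad);                      // convergence / step measure
      return d < 1e-10;                         // stop when the step is tiny
   }
   Real LastDerivative(const ColumnVector& H)
      { return DotProduct(grad, H); }           // directional derivative
};

// usage:  ColumnVector target(2); target << 1.0 << 2.0;
//         ColumnVector theta(2);  theta = 0.0;
//         QuadraticPeak qp(target); qp.Fit(theta, 100);
#endif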


void NonLinearLeastSquares::Value
   (const ColumnVector& Parameters, bool, Real& v, bool& oorg)
{
   Tracer tr("NonLinearLeastSquares::Value");
   Y.resize(n_obs); X.resize(n_obs,n_param);
   // put the fitted values in Y, the derivatives in X.
   Pred.Set(Parameters);
   if (!Pred.IsValid()) { oorg=true; return; }
   for (int i=1; i<=n_obs; i++)
   {
      Y(i) = Pred(i);
      X.Row(i) = Pred.Derivatives();
   }
   if (!Pred.IsValid()) { oorg=true; return; }  // check afterwards as well
   Y = *DataPointer - Y; Real ssq = Y.SumSquare();
   errorvar = ssq / (n_obs - n_param);
   cout << endl;
   cout << setw(15) << setprecision(10) << " " << errorvar;
   Derivs = Y.t() * X;          // get the derivative and stash it
   oorg = false; v = -0.5 * ssq;
}

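// NextPoint computes the Gauss-Newton step from the current residuals and
// derivative matrix.  Assuming the usual newmat QRZ semantics (QRZ(X, U)
// overwrites X with an orthonormal basis Q of its column space, with
// X = Q * U; QRZ(X, Y, M) then forms M = Q.t() * Y and leaves the residual
// in Y), the step is Adj = U.i() * M, i.e. the least-squares solution of
// X * Adj ~ Y, and test = M.SumSquare() measures the reduction in the
// residual sum of squares still available from this step.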
bool NonLinearLeastSquares::NextPoint(ColumnVector& Adj, Real& test)
{
   Tracer tr("NonLinearLeastSquares::NextPoint");
   QRZ(X, U); QRZ(X, Y, M);     // do the QR decomposition
   test = M.SumSquare();
   cout << " " << setw(15) << setprecision(10)
      << test << " " << Y.SumSquare() / (n_obs - n_param);
   Adj = U.i() * M;
   if (test < errorvar * criterion) return true;
   else return false;
}

Real NonLinearLeastSquares::LastDerivative(const ColumnVector& H)
{ return (Derivs * H).AsScalar(); }

void NonLinearLeastSquares::Fit(const ColumnVector& Data,
   ColumnVector& Parameters)
{
   Tracer tr("NonLinearLeastSquares::Fit");
   n_param = Parameters.Nrows(); n_obs = Data.Nrows();
   DataPointer = &Data;
   FindMaximum2::Fit(Parameters, Lim);
   cout << "\nConverged" << endl;
}

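// The covariance matrix of the fitted parameters is estimated as
// errorvar * (X.t() * X).i().  Since U is the triangular factor from the
// final QR step (X = Q * U with Q orthonormal), X.t() * X = U.t() * U and
// hence (X.t() * X).i() = U.i() * U.t().i() = UI * UI.t() below.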
void NonLinearLeastSquares::MakeCovariance()
{
   if (Covariance.Nrows()==0)
   {
      UpperTriangularMatrix UI = U.i();
      Covariance << UI * UI.t() * errorvar;
      SE << Covariance;                 // get diagonals
      for (int i = 1; i<=n_param; i++) SE(i) = sqrt(SE(i));
   }
}

void NonLinearLeastSquares::GetStandardErrors(ColumnVector& SEX)
   { MakeCovariance(); SEX = SE.AsColumn(); }

void NonLinearLeastSquares::GetCorrelations(SymmetricMatrix& Corr)
   { MakeCovariance(); Corr << SE.i() * Covariance * SE.i(); }

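// Assuming, as above, that X holds the orthonormal basis Q after the QR step,
// the sum of squares of row i of X is the i-th diagonal element of the hat
// matrix Q * Q.t() = X * (X.t() * X).i() * X.t(), i.e. the leverage of
// observation i.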
void NonLinearLeastSquares::GetHatDiagonal(DiagonalMatrix& Hat) const
{
   Hat.resize(n_obs);
   for (int i = 1; i<=n_obs; i++) Hat(i) = X.Row(i).SumSquare();
}
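
// Usage sketch (not compiled): driving a non-linear least-squares fit.  The
// predictor object must supply the calls made in Value() above -- Set(),
// IsValid(), operator()(int) and Derivatives(); its base class (assumed here
// to be R1_Col_I_D) and the NonLinearLeastSquares constructor signature are
// assumptions to be checked against newmatnl.h.
#if 0
   // "pred" is an object derived from the predictor base class in newmatnl.h
   ColumnVector data(4);  data << 1.2 << 1.9 << 3.1 << 4.0;  // observations
   ColumnVector theta(2); theta << 1.0 << 0.1;               // starting values
   NonLinearLeastSquares nlls(pred);      // assumed constructor: predictor
                                          // (limit / criterion defaults from
                                          // newmatnl.h)
   nlls.Fit(data, theta);                 // theta now holds the estimates
   ColumnVector se;       nlls.GetStandardErrors(se);
   SymmetricMatrix corr;  nlls.GetCorrelations(corr);
#endif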


// the MLE_D_FI routines

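// These methods maximise a log-likelihood by Fisher scoring.  The member LL
// is the log-likelihood object; as used below it must provide
// IsValid(parameters, want_derivatives), LogLikelihood(), Derivatives()
// (the score vector) and FI() (the Fisher information matrix); see newmatnl.h
// for its declaration.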
void MLE_D_FI::Value
   (const ColumnVector& Parameters, bool wg, Real& v, bool& oorg)
{
   Tracer tr("MLE_D_FI::Value");
   if (!LL.IsValid(Parameters,wg)) { oorg=true; return; }
   v = LL.LogLikelihood();
   if (!LL.IsValid()) { oorg=true; return; }     // check validity again
   cout << endl;
   cout << setw(20) << setprecision(10) << v;
   oorg = false;
   Derivs = LL.Derivatives();                    // Get derivatives
}

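// The scoring step solves FI * Adj = Derivs via the Cholesky factorisation
// FI = LT * LT.t(): first Adj1 = LT.i() * Derivs, then Adj = LT.t().i() * Adj1.
// test = SumSquare(Adj1) equals Derivs.t() * FI.i() * Derivs, the score test
// statistic, which is compared with Criterion to decide convergence.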
bool MLE_D_FI::NextPoint(ColumnVector& Adj, Real& test)
{
   Tracer tr("MLE_D_FI::NextPoint");
   SymmetricMatrix FI = LL.FI();
   LT = Cholesky(FI);
   ColumnVector Adj1 = LT.i() * Derivs;
   Adj = LT.t().i() * Adj1;
   test = SumSquare(Adj1);
   cout << "   " << setw(20) << setprecision(10) << test;
   return (test < Criterion);
}

Real MLE_D_FI::LastDerivative(const ColumnVector& H)
{ return (Derivs.t() * H).AsScalar(); }

void MLE_D_FI::Fit(ColumnVector& Parameters)
{
   Tracer tr("MLE_D_FI::Fit");
   FindMaximum2::Fit(Parameters,Lim);
   cout << "\nConverged" << endl;
}

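// The asymptotic covariance matrix of the estimates is the inverse Fisher
// information: with FI = LT * LT.t() and LTI = LT.i(),
// LTI.t() * LTI = (LT * LT.t()).i() = FI.i().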
void MLE_D_FI::MakeCovariance()
{
   if (Covariance.Nrows()==0)
   {
      LowerTriangularMatrix LTI = LT.i();
      Covariance << LTI.t() * LTI;
      SE << Covariance;                // get diagonal
      int n = Covariance.Nrows();
      for (int i=1; i <= n; i++) SE(i) = sqrt(SE(i));
   }
}

void MLE_D_FI::GetStandardErrors(ColumnVector& SEX)
{ MakeCovariance(); SEX = SE.AsColumn(); }

void MLE_D_FI::GetCorrelations(SymmetricMatrix& Corr)
{ MakeCovariance(); Corr << SE.i() * Covariance * SE.i(); }



#ifdef use_namespace
}
#endif