garch.cpp
// Example of fitting a GARCH(1,1) regression model by maximum likelihood,
// using the nonlinear fitting classes (newmatnl) from the newmat library.

#define WANT_STREAM
#define WANT_MATH
#define WANT_FSTREAM

#include "newmatap.h"
#include "newmatio.h"
#include "newmatnl.h"

#ifdef use_namespace
using namespace RBD_LIBRARIES;
#endif

inline Real square(Real x) { return x*x; }
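
// The model fitted here is a GARCH(1,1) regression:
//    Y(i) = beta * X(i) + epsilon(i),   epsilon(i) ~ N(0, H(i))
//    H(i) = alpha0 + alpha1 * square(epsilon(i-1)) + beta1 * H(i-1)
// with parameter vector (beta, alpha0, alpha1, beta1).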
// the class that defines the GARCH log-likelihood

class GARCH11_LL : public LL_D_FI
{
   ColumnVector Y;                 // Y values
   ColumnVector X;                 // X values
   ColumnVector D;                 // derivatives of loglikelihood
   SymmetricMatrix D2;             // minus the approximate second derivatives
   int n;                          // number of observations
   Real beta, alpha0, alpha1, beta1;
                                   // the parameters

public:

   GARCH11_LL(const ColumnVector& y, const ColumnVector& x)
      : Y(y), X(x), n(y.nrows()) {}
                                   // constructor - load Y and X values

   void Set(const ColumnVector& p) // set parameter values
   {
      para = p;
      beta = para(1); alpha0 = para(2);
      alpha1 = para(3); beta1 = para(4);
   }

   bool IsValid();                 // are parameters valid
   Real LogLikelihood();           // return the loglikelihood
   ReturnMatrix Derivatives();     // derivatives of log-likelihood
   ReturnMatrix FI();              // Fisher Information matrix
};
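
// LL_D_FI (from newmatnl.h) is the newmat base class for log-likelihood
// functions that supply first derivatives and a Fisher information matrix;
// the MLE_D_FI object in my_main() below uses them in what amounts to a
// Fisher-scoring maximisation.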

bool GARCH11_LL::IsValid()
{ return alpha0>0 && alpha1>0 && beta1>0 && (alpha1+beta1)<1.0; }
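// These constraints keep every H(i) positive and, via alpha1 + beta1 < 1,
// keep the process covariance-stationary, so that the unconditional variance
// alpha0/(1-alpha1-beta1) used for H(1) is finite and positive.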

Real GARCH11_LL::LogLikelihood()
{
//   cout << endl << "                           ";
//   cout << setw(10) << setprecision(5) << beta;
//   cout << setw(10) << setprecision(5) << alpha0;
//   cout << setw(10) << setprecision(5) << alpha1;
//   cout << setw(10) << setprecision(5) << beta1;
//   cout << endl;
   ColumnVector H(n);              // residual variances
   ColumnVector U = Y - X * beta;  // the residuals
   ColumnVector LH(n);     // derivative of log-likelihood wrt H
                           // each row corresponds to one observation
   LH(1)=0;
   Matrix Hderiv(n,4);     // rectangular matrix of derivatives
                           // of H wrt parameters
                           // each row corresponds to one observation
                           // each column to one of the parameters

   // Regard Y(1) as fixed and don't include it in the likelihood;
   // then use the expected value of H(1) in place of the actual value,
   // which we don't know. Use
   // E{H(i)} = alpha0 + alpha1 * E{square(epsilon(i-1))} + beta1 * E{H(i-1)}
   // and  E{square(epsilon(i-1))} = E{H(i-1)} = E{H(i)}
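   // Solving E{H} = alpha0 + (alpha1 + beta1) * E{H} gives
   // E{H} = alpha0 / (1 - alpha1 - beta1), which is used for H(1) below.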
   Real denom = (1-alpha1-beta1);
   H(1) = alpha0/denom;    // the expected value of H
   Hderiv(1,1) = 0;
   Hderiv(1,2) = 1.0 / denom;
   Hderiv(1,3) = alpha0 / square(denom);
   Hderiv(1,4) = Hderiv(1,3);
   Real LL = 0.0;          // accumulates -2 * log-likelihood
   Real sum1 = 0;          // for forming derivative wrt beta
   Real sum2 = 0;          // for forming second derivative wrt beta
   for (int i=2; i<=n; i++)
   {
      Real u1 = U(i-1); Real h1 = H(i-1);
      Real h = alpha0 + alpha1*square(u1) + beta1*h1; // variance of this obsv.
      H(i) = h; Real u = U(i);
      LL += log(h) + square(u) / h;        // -2 * log likelihood
      // Hderiv are derivatives of h with respect to the parameters
      // need to allow for h1 depending on parameters
      Hderiv(i,1) = -2*u1*alpha1*X(i-1) + beta1*Hderiv(i-1,1);  // beta
      Hderiv(i,2) = 1 + beta1*Hderiv(i-1,2);                    // alpha0
      Hderiv(i,3) = square(u1) + beta1*Hderiv(i-1,3);           // alpha1
      Hderiv(i,4) = h1 + beta1*Hderiv(i-1,4);                   // beta1
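      // the log-likelihood term for this observation is -0.5*(log h + square(u)/h),
      // so its derivative with respect to h is -0.5*(1/h - square(u)/square(h)):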
      LH(i) = -0.5 * (1/h - square(u/h));
      sum1 += u * X(i)/ h;
      sum2 += square(X(i)) / h;
   }
   D = Hderiv.t()*LH;         // derivatives of likelihood wrt parameters
   D(1) += sum1;              // add on deriv wrt beta from square(u) term
//   cout << setw(10) << setprecision(5) << D << endl;

   // do minus expected value of second derivatives
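   // for each observation E{ square(d logL_i/dh) } = 1/(2*square(h)), so the
   // expected information from the variance part is
   // 0.5 * Hderiv' * diag(1/square(H)) * Hderiv, formed below; sum2 adds the
   // exact beta-beta information from the regression part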
   if (wg)                    // do only if second derivatives wanted
   {
      Hderiv.row(1) = 0.0;
      Hderiv = H.as_diagonal().i() * Hderiv;
      D2 << Hderiv.t() * Hderiv;  D2 = D2 / 2.0;
      D2(1,1) += sum2;
//      cout << setw(10) << setprecision(5) << D2 << endl;
//      DiagonalMatrix DX; EigenValues(D2,DX);
//      cout << setw(10) << setprecision(5) << DX << endl;

   }
   return -0.5 * LL;
}
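
// A minimal check routine, not part of the original example: it repeats the
// variance recursion without the derivative bookkeeping, so it can be used to
// verify GARCH11_LL::LogLikelihood() independently. The name and signature
// are illustrative only.
Real GARCH11_CheckLL(const ColumnVector& Y, const ColumnVector& X,
   Real beta, Real alpha0, Real alpha1, Real beta1)
{
   int n = Y.nrows();
   ColumnVector U = Y - X * beta;            // residuals
   Real h = alpha0 / (1 - alpha1 - beta1);   // E{H(1)}, as in LogLikelihood()
   Real LL = 0.0;
   for (int i = 2; i <= n; i++)
   {
      h = alpha0 + alpha1 * square(U(i-1)) + beta1 * h;
      LL += log(h) + square(U(i)) / h;       // -2 * log-likelihood contribution
   }
   return -0.5 * LL;                         // matches LogLikelihood()'s return value
}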

ReturnMatrix GARCH11_LL::Derivatives()
{ return D; }

ReturnMatrix GARCH11_LL::FI()
{
   if (!wg) cout << endl << "unexpected call of FI" << endl;
   return D2;
}

int my_main()
{
   // get data
   ifstream fin("garch.dat");
   if (!fin) { cout << "cannot find garch.dat\n"; exit(1); }
   int n; fin >> n;            // series length
   // Y contains the dependent variable, X the predictor variable
   ColumnVector Y(n), X(n);
   int i;
   for (i=1; i<=n; i++) fin >> Y(i) >> X(i);
   cout << "Read " << n << " data points - begin fit\n\n";
   // now do the fit
   ColumnVector H(n);
   GARCH11_LL garch11(Y,X);                  // log-likelihood "object"
   MLE_D_FI mle_d_fi(garch11,100,0.0001);    // mle "object"
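   // (the 100 and 0.0001 above are the iteration limit and the convergence
   // criterion passed to the fitter)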
   ColumnVector Para(4);                     // to hold the parameters
   Para << 0.0 << 0.1 << 0.1 << 0.1;         // starting values
      // (Should change starting values to a more intelligent formula)
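      // One possible improvement (an illustrative sketch, not from the
      // original example): least-squares for beta and the residual variance
      // for the unconditional variance alpha0/(1-alpha1-beta1), e.g.
      //    Real b0 = DotProduct(X, Y) / DotProduct(X, X);  // least-squares slope
      //    ColumnVector R = Y - X * b0;
      //    Real v0 = R.SumSquare() / n;                    // residual variance
      //    Para << b0 << v0 * 0.8 << 0.1 << 0.1;           // 0.8 = 1 - alpha1 - beta1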
   mle_d_fi.Fit(Para);                       // do the fit
   ColumnVector SE;
   mle_d_fi.GetStandardErrors(SE);
   cout << "\n\n";
   cout << "estimates and standard errors\n";
   cout << setw(15) << setprecision(5) << (Para | SE) << endl << endl;
   SymmetricMatrix Corr;
   mle_d_fi.GetCorrelations(Corr);
   cout << "correlation matrix\n";
   cout << setw(10) << setprecision(2) << Corr << endl << endl;
   cout << "inverse of correlation matrix\n";
   cout << setw(10) << setprecision(2) << Corr.i() << endl << endl;
   return 0;
}


// call my_main() - use this to catch exceptions
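// (Try, Catch and CatchAll are newmat macros from myexcept.h; they map onto
// ordinary C++ try/catch when real exceptions are enabled, and onto the
// library's simulated-exception mechanism otherwise)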
int main()
{
   Try
   {
      return my_main();
   }
   Catch(BaseException)
   {
      cout << BaseException::what() << "\n";
   }
   CatchAll
   {
      cout << "\nProgram fails - exception generated\n\n";
   }
   return 0;
}

