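/*
 *    Implementation of the NormalConjugateGradientMethod class of the
 *    ACADO Toolkit sparse_solver module: a conjugate gradient solver that
 *    operates on the normal equations A^T A x = A^T b of a sparse linear
 *    system (cf. multiply(), which applies the operator A^T A).
 */
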
#include <acado/sparse_solver/sparse_solver.hpp>


BEGIN_NAMESPACE_ACADO

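// Default constructor: the sparsity pattern and the work vector are not
// allocated here; they are set up later in setIndices().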
NormalConjugateGradientMethod::NormalConjugateGradientMethod( )
                              :ConjugateGradientMethod( ){

    index   = 0;
    nIndex  = 0;
    iResult = 0;
}

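// Copy constructor: performs a deep copy of the per-row entry counts (nIndex),
// the index table (index) and the intermediate result vector (iResult); the
// remaining data is copied by the ConjugateGradientMethod base class.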
NormalConjugateGradientMethod::NormalConjugateGradientMethod( const NormalConjugateGradientMethod &arg )
                              :ConjugateGradientMethod(arg){

    int run1, run2;

    if( arg.nIndex != 0 ){
        nIndex = new int[dim];
        for( run1 = 0; run1 < dim; run1++ ){
            nIndex[run1] = arg.nIndex[run1];
        }
    }
    else nIndex = 0;

    if( arg.index != 0 ){
        index = new int*[dim];
        for( run1 = 0; run1 < dim; run1++ ){
            index[run1] = new int[nIndex[run1]];
            for( run2 = 0; run2 < nIndex[run1]; run2++ )
                index[run1][run2] = arg.index[run1][run2];
        }
    }
    else index = 0;

    if( arg.iResult != 0 ){
        iResult = new double[dim];
        for( run1 = 0; run1 < dim; run1++ ){
            iResult[run1] = arg.iResult[run1];
        }
    }
    else iResult = 0;
}

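// Destructor: frees the sparsity pattern (nIndex, index) and the
// intermediate work vector iResult.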
NormalConjugateGradientMethod::~NormalConjugateGradientMethod( ){

    int run1;

    if( nIndex != 0 ) delete[] nIndex;

    if( iResult != 0 ) delete[] iResult;

    if( index != 0 ){
        for( run1 = 0; run1 < dim; run1++ )
            delete[] index[run1];
        delete[] index;
    }
}

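// Returns a deep copy of this solver, so it can be duplicated through the
// SparseSolver base-class interface.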
SparseSolver* NormalConjugateGradientMethod::clone() const{

    return new NormalConjugateGradientMethod(*this);
}

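// The (row index, column index) variant of setIndices() is not implemented
// for this solver; only the linearised single-array variant below is supported.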
returnValue NormalConjugateGradientMethod::setIndices( const int *rowIdx_, const int *colIdx_ ){

    return ACADOERROR( RET_NOT_IMPLEMENTED_YET );
}

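// Sets the sparsity pattern from a list of linearised entry positions: a
// nonzero at (row, col) of the dim-by-dim matrix is encoded as row*dim + col,
// and the nDense entries must be ordered row by row. For example, with
// dim = 3 the entries (0,0), (1,2) and (2,1) are passed as { 0, 5, 7 }.
// The per-row counts are stored in nIndex and the positions in index.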
returnValue NormalConjugateGradientMethod::setIndices( const int *indices ){

    if( dim    <= 0 ) return ACADOERROR(RET_MEMBER_NOT_INITIALISED);
    if( nDense <= 0 ) return ACADOERROR(RET_MEMBER_NOT_INITIALISED);

    if( nIndex != 0 ) delete[] nIndex;
    nIndex = new int[dim];

    if( iResult != 0 ) delete[] iResult;
    iResult = new double[dim];

    int run1, run2;
    int counter0;
    int bound;
    int counter = 0;

    for( run1 = 0; run1 < dim; run1++ ){

        counter0 = counter;
        bound    = dim*(run1+1);

        while( counter < nDense ){

            if( indices[counter] >= bound ) break;
            counter++;
        }

        nIndex[run1] = counter - counter0;
    }

    if( index != 0 ){
        for( run1 = 0; run1 < dim; run1++ )
            delete[] index[run1];
        delete[] index;
    }
    index = new int*[dim];

    counter = 0;

    for( run1 = 0; run1 < dim; run1++ ){
        index[run1] = new int[nIndex[run1]];
        for( run2 = 0; run2 < nIndex[run1]; run2++ ){
            index[run1][run2] = indices[counter];
            counter++;
        }
    }

    return SUCCESSFUL_RETURN;
}

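// Applies the stored sparse matrix twice: the first pass forms
// iResult = A*xx row by row, the second pass accumulates result = A^T*iResult,
// so that result = A^T A xx -- the operator used by the conjugate gradient
// iteration on the normal equations. The offset dim*i converts the linearised
// positions stored in index[i] back into column numbers of row i.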
void NormalConjugateGradientMethod::multiply( double *xx , double *result ){

    int i, j, counter, offset;

    counter = 0;

    for( i = 0; i < dim; i++ ){
        iResult[i] = 0.0;
        result [i] = 0.0;
        offset     = dim*i;
        for( j = 0; j < nIndex[i]; j++ ){
            iResult[i] += A[counter]*xx[index[i][j]-offset];
            counter++;
        }
    }

    counter = 0;

    for( i = 0; i < dim; i++ ){
        offset = dim*i;
        for( j = 0; j < nIndex[i]; j++ ){
            result[index[i][j]-offset] += A[counter]*iResult[i];
            counter++;
        }
    }
}

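// Builds a simple diagonal scaling: condScale[j] collects the 2-norm of
// column j of A_, and the stored matrix A becomes A_ with each row i divided
// by condScale[i]. The matching scaling of the right-hand side is applied in
// applyPreconditioner() below.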
returnValue NormalConjugateGradientMethod::computePreconditioner( double* A_ ){

    int i, j, counter;

    for( i = 0; i < dim; i++ ){
        condScale[i] = 0.0;
    }

    int offset;
    counter = 0;

    for( i = 0; i < dim; i++ ){
        offset = dim*i;
        for( j = 0; j < nIndex[i]; j++ ){
            condScale[index[i][j]-offset] += A_[counter]*A_[counter];
            counter++;
        }
    }

    for( i = 0; i < dim; i++ ){
        condScale[i] = sqrt(condScale[i]);
    }

    counter = 0;

    for( i = 0; i < dim; i++ ){
        for( j = 0; j < nIndex[i]; j++ ){
            A[counter] = A_[counter]/condScale[i];
            counter++;
        }
    }

    return SUCCESSFUL_RETURN;
}

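// Prepares the right-hand side of the scaled normal equations: b is divided
// component-wise by condScale and the base-class vector r is set to
// A^T (b./condScale), where A is the matrix scaled in computePreconditioner().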
returnValue NormalConjugateGradientMethod::applyPreconditioner( double *b ){

    int i, j, counter;

    for( i = 0; i < dim; i++ )
        iResult[i] = b[i]/condScale[i];

    counter = 0;
    int offset;

    for( i = 0; i < dim; i++ )
        r[i] = 0.0;

    for( i = 0; i < dim; i++ ){
        offset = dim*i;
        for( j = 0; j < nIndex[i]; j++ ){
            r[index[i][j]-offset] += A[counter]*iResult[i];
            counter++;
        }
    }
    return SUCCESSFUL_RETURN;
}

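// The scaling acts on the matrix and the right-hand side only, so no
// back-transformation of the iterate appears to be needed: the computed
// solution x is copied out unchanged.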
returnValue NormalConjugateGradientMethod::applyInversePreconditioner( double *x_ ){

    int run1;

    for( run1 = 0; run1 < dim; run1++ )
        x_[run1] = x[run1];

    return SUCCESSFUL_RETURN;
}


CLOSE_NAMESPACE_ACADO