irk_lifted_symmetric_export.cpp
1 /*
2  * This file is part of ACADO Toolkit.
3  *
4  * ACADO Toolkit -- A Toolkit for Automatic Control and Dynamic Optimization.
5  * Copyright (C) 2008-2014 by Boris Houska, Hans Joachim Ferreau,
6  * Milan Vukov, Rien Quirynen, KU Leuven.
7  * Developed within the Optimization in Engineering Center (OPTEC)
8  * under supervision of Moritz Diehl. All rights reserved.
9  *
10  * ACADO Toolkit is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 3 of the License, or (at your option) any later version.
14  *
15  * ACADO Toolkit is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with ACADO Toolkit; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  *
24  */
25 
26 
27 
34 #include <acado/code_generation/integrators/irk_lifted_symmetric_export.hpp>
36 
37 using namespace std;
38 
39 BEGIN_NAMESPACE_ACADO
40 
41 //
42 // PUBLIC MEMBER FUNCTIONS:
43 //
44 
45 SymmetricLiftedIRKExport::SymmetricLiftedIRKExport( UserInteraction* _userInteraction,
46  const std::string& _commonHeaderName
47  ) : ForwardLiftedIRKExport( _userInteraction,_commonHeaderName )
48 {
49 }
50 
51 SymmetricLiftedIRKExport::SymmetricLiftedIRKExport( const SymmetricLiftedIRKExport& arg ) : ForwardLiftedIRKExport( arg )
52 {
53 }
54 
55 
56 SymmetricLiftedIRKExport::~SymmetricLiftedIRKExport( )
57 {
58  if ( solver )
59  delete solver;
60  solver = 0;
61 
62  clear( );
63 }
64 
65 
66 SymmetricLiftedIRKExport& SymmetricLiftedIRKExport::operator=( const SymmetricLiftedIRKExport& arg ){
67 
68  if( this != &arg ){
69 
70  ForwardLiftedIRKExport::operator=( arg );
71  copy( arg );
72  }
73  return *this;
74 }
75 
76 
77 ExportVariable SymmetricLiftedIRKExport::getAuxVariable() const
78 {
79  ExportVariable max;
80  if( NX1 > 0 ) {
81  max = lin_input.getGlobalExportVariable();
82  }
83  if( NX2 > 0 || NXA > 0 ) {
84  if( rhs.getGlobalExportVariable().getDim() >= max.getDim() ) {
85  max = rhs.getGlobalExportVariable();
86  }
87  if( diffs_rhs.getGlobalExportVariable().getDim() >= max.getDim() ) {
88  max = diffs_rhs.getGlobalExportVariable();
89  }
90  if( diffs_sweep.getGlobalExportVariable().getDim() >= max.getDim() ) {
91  max = diffs_sweep.getGlobalExportVariable();
92  }
93  if( forward_sweep.getGlobalExportVariable().getDim() >= max.getDim() ) {
94  max = forward_sweep.getGlobalExportVariable();
95  }
96  if( adjoint_sweep.getGlobalExportVariable().getDim() >= max.getDim() ) {
97  max = adjoint_sweep.getGlobalExportVariable();
98  }
99  }
100  if( NX3 > 0 ) {
101  if( rhs3.getGlobalExportVariable().getDim() >= max.getDim() ) {
102  max = rhs3.getGlobalExportVariable();
103  }
104  if( diffs_rhs3.getGlobalExportVariable().getDim() >= max.getDim() ) {
105  max = diffs_rhs3.getGlobalExportVariable();
106  }
107  }
108  uint i;
109  for( i = 0; i < outputs.size(); i++ ) {
110  if( outputs[i].getGlobalExportVariable().getDim() >= max.getDim() ) {
111  max = outputs[i].getGlobalExportVariable();
112  }
113  if( diffs_outputs[i].getGlobalExportVariable().getDim() >= max.getDim() ) {
114  max = diffs_outputs[i].getGlobalExportVariable();
115  }
116  }
117  return max;
118 }
119 
120 
121 returnValue SymmetricLiftedIRKExport::getDataDeclarations( ExportStatementBlock& declarations,
122  ExportStruct dataStruct
123  ) const
124 {
125  ForwardLiftedIRKExport::getDataDeclarations( declarations, dataStruct );
126 
127  declarations.addDeclaration( rk_A_traj,dataStruct );
128  declarations.addDeclaration( rk_aux_traj,dataStruct );
129  declarations.addDeclaration( rk_S_traj,dataStruct );
130 
131  declarations.addDeclaration( rk_kkk_prev,dataStruct );
132  declarations.addDeclaration( rk_kkk_delta,dataStruct );
133  declarations.addDeclaration( rk_delta_full,dataStruct );
134  declarations.addDeclaration( rk_diff_mu,dataStruct );
135  declarations.addDeclaration( rk_diff_lam,dataStruct );
136 
137  declarations.addDeclaration( rk_lambda,dataStruct );
138  declarations.addDeclaration( rk_b_mu,dataStruct );
139 
140  return SUCCESSFUL_RETURN;
141 }
142 
143 
144 returnValue SymmetricLiftedIRKExport::getFunctionDeclarations( ExportStatementBlock& declarations
145  ) const
146 {
147  ForwardLiftedIRKExport::getFunctionDeclarations( declarations );
148 
149  return SUCCESSFUL_RETURN;
150 }
151 
152 
153 returnValue SymmetricLiftedIRKExport::setDifferentialEquation( const Expression& rhs_ )
154 {
155  int sensGen;
156  get( DYNAMIC_SENSITIVITY,sensGen );
157  if( rhs_.getDim() > 0 ) {
158  OnlineData dummy0;
159  Control dummy1;
160  DifferentialState dummy2;
161  AlgebraicState dummy3;
162  DifferentialStateDerivative dummy4;
163  dummy0.clearStaticCounters();
164  dummy1.clearStaticCounters();
165  dummy2.clearStaticCounters();
166  dummy3.clearStaticCounters();
167  dummy4.clearStaticCounters();
168 
169  NX2 = rhs_.getDim() - NXA;
170  x = DifferentialState("", NX1+NX2, 1);
171  z = AlgebraicState("", NXA, 1);
172  u = Control("", NU, 1);
173  od = OnlineData("", NOD, 1);
174 
175  DifferentialEquation f;
176  f << rhs_;
177 
178  NDX2 = f.getNDX();
179  if( NDX2 > 0 && (NDX2 < NX2 || NDX2 > (NX1+NX2)) ) {
180  return ACADOERROR( RET_INVALID_OPTION );
181  }
182  else if( NDX2 > 0 ) NDX2 = NX1+NX2;
183  dx = DifferentialStateDerivative("", NDX2, 1);
184 
185  DifferentialEquation g;
186  for( uint i = 0; i < rhs_.getDim(); i++ ) {
187  g << forwardDerivative( rhs_(i), x );
188  g << forwardDerivative( rhs_(i), z );
189  g << forwardDerivative( rhs_(i), u );
190  g << forwardDerivative( rhs_(i), dx );
191  }
192 
193  // FORWARD SWEEP:
194  DifferentialState sX("", NX,NX+NU);
195  Expression Gx = sX.getCols(0,NX);
196  Expression Gu = sX.getCols(NX,NX+NU);
197  DifferentialEquation forward;
198  for( uint i = 0; i < rhs_.getDim(); i++ ) {
199  // NOT YET IMPLEMENTED FOR DAES OR IMPLICIT ODES
200  if( NDX2 > 0 || NXA > 0 ) return ACADOERROR(RET_NOT_YET_IMPLEMENTED);
201  forward << multipleForwardDerivative( rhs_(i), x, Gx );
202  forward << multipleForwardDerivative( rhs_(i), x, Gu ) + forwardDerivative( rhs_(i), u );
203  }
204 
205  // FIRST ORDER ADJOINT SWEEP:
206  DifferentialState lambda("", NX,1);
207  DifferentialEquation backward, adj_update;
208 // backward << backwardDerivative( rhs_, x, lambda );
209 // adj_update << backwardDerivative( rhs_, x, lambda );
210 
211  // SECOND ORDER ADJOINT SWEEP:
212  Expression arg;
213  arg << x;
214  arg << u;
215  Expression S_tmp = sX;
216  S_tmp.appendRows(zeros<double>(NU,NX).appendCols(eye<double>(NU)));
217 
218  if( NDX2 > 0 || NXA > 0 ) return ACADOERROR(RET_NOT_YET_IMPLEMENTED);
219  Expression dfS, dfL;
220  Expression symmetric = symmetricDerivative( rhs_, arg, S_tmp, lambda, &dfS, &dfL );
221  backward << dfL.getRows(0,NX);
222  backward << returnLowerTriangular( symmetric );
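 // Roughly what the symmetric sweep exports: the first order adjoint dfL = (df/dw)^T * lambda
 // and the contraction H = S^T * ( d^2(lambda^T f)/dw^2 ) * S, with w = (x,u) and the seed
 // S = [Gx, Gu; 0, I]; only the lower triangular part of the symmetric result H is kept
 // (see returnLowerTriangular below).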
223 
224  // GRADIENT UPDATE:
225  int gradientUp;
226  get( LIFTED_GRADIENT_UPDATE, gradientUp );
227  bool gradientUpdate = (bool) gradientUp;
228 
229  uint nHat = 0;
230  if( gradientUpdate ) {
231  DifferentialState hat("",1,NX);
232  Expression tmp = hat.getCols(0,NX)*dfL.getRows(0,NX);
233  backward << multipleForwardDerivative( tmp, arg, S_tmp );
234  nHat = hat.getNumCols();
235  }
236 
237  int linSolver;
238  get( LINEAR_ALGEBRA_SOLVER, linSolver );
239  if( (LinearAlgebraSolver) linSolver == SIMPLIFIED_IRK_NEWTON || (LinearAlgebraSolver) linSolver == SINGLE_IRK_NEWTON ) {
240  adj_update << dfL.getRows(0,NX);
241  }
242  else if( gradientUpdate ) {
243  adj_update << (forwardDerivative( dfL, x )).transpose();
244  }
245 
246  if( f.getNT() > 0 ) timeDependant = true;
247 
248  return (rhs.init( f,"acado_rhs",NX,NXA,NU,NP,NDX,NOD ) &
249  diffs_rhs.init( g,"acado_diffs",NX,NXA,NU,NP,NDX,NOD ) &
250  forward_sweep.init( forward,"acado_forward",NX*(2+NX+NU)+nHat,NXA,NU,NP,NDX,NOD ) &
251  adjoint_sweep.init( backward,"acado_backward",NX*(2+NX+NU)+nHat,NXA,NU,NP,NDX,NOD ) &
252  diffs_sweep.init( adj_update,"acado_adjoint_update",NX*(2+NX+NU)+nHat,NXA,NU,NP,NDX,NOD ));
253  }
254  return SUCCESSFUL_RETURN;
255 }
256 
257 
258 Expression SymmetricLiftedIRKExport::returnLowerTriangular( const Expression& expr ) {
259 // std::cout << "returnLowerTriangular with " << expr.getNumRows() << " rows and " << expr.getNumCols() << " columns\n";
260  ASSERT( expr.getNumRows() == expr.getNumCols() );
261 
262  Expression new_expr;
263  for( uint i = 0; i < expr.getNumRows(); i++ ) {
264  for( uint j = 0; j <= i; j++ ) {
265  new_expr << expr(i,j);
266  }
267  }
268  return new_expr;
269 }
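 // Example: for a symmetric 2x2 input [[a, b], [b, c]] the loops above append the
 // entries (0,0), (1,0), (1,1), i.e. a, b, c, in row-wise lower-triangular order.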
270 
271 
272 returnValue SymmetricLiftedIRKExport::getCode( ExportStatementBlock& code )
273 {
274  int sensGen;
275  get( DYNAMIC_SENSITIVITY, sensGen );
276  int mode;
277  get( IMPLICIT_INTEGRATOR_MODE, mode );
278  int linSolver;
279  get( LINEAR_ALGEBRA_SOLVER, linSolver );
280  int liftMode = 1;
281  if( (LinearAlgebraSolver) linSolver == SIMPLIFIED_IRK_NEWTON || (LinearAlgebraSolver) linSolver == SINGLE_IRK_NEWTON ) liftMode = 4;
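 // liftMode == 1: exact lifting (stage factorizations and sensitivities are stored along
 // the trajectory and a mu/lambda recursion is generated); liftMode == 4: inexact,
 // Newton-type lifting used with the simplified/single Newton solvers.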
284  if( liftMode != 1 && liftMode != 4 ) ACADOERROR( RET_NOT_IMPLEMENTED_YET );
286 
287  int gradientUp;
288  get( LIFTED_GRADIENT_UPDATE, gradientUp );
289  bool gradientUpdate = (bool) gradientUp;
290 
291  // NOTE: liftMode == 4 --> inexact Newton based implementation
292 
294 
295  int useOMP;
296  get(CG_USE_OPENMP, useOMP);
297  if ( useOMP ) ACADOERROR( RET_NOT_IMPLEMENTED_YET );
298 
300 
301  if( exportRhs ) {
302  if( NX2 > 0 || NXA > 0 ) {
303  code.addFunction( rhs );
304  code.addStatement( "\n\n" );
305  code.addFunction( diffs_rhs );
306  code.addStatement( "\n\n" );
307 // if( (LinearAlgebraSolver) linSolver == SIMPLIFIED_IRK_NEWTON || (LinearAlgebraSolver) linSolver == SINGLE_IRK_NEWTON ) {
308  code.addFunction( diffs_sweep );
309  code.addStatement( "\n\n" );
310 // }
311  if( liftMode == 4 ) { // ONLY for the inexact Newton based schemes
312  code.addFunction( forward_sweep );
313  code.addStatement( "\n\n" );
314  }
315  code.addFunction( adjoint_sweep );
316  code.addStatement( "\n\n" );
317  }
318 
320 
322  }
323  if( NX2 > 0 || NXA > 0 ) solver->getCode( code );
324  code.addLinebreak(2);
325 
326  int measGrid;
327  get( MEASUREMENT_GRID, measGrid );
328 
329  // export RK scheme
330  uint run5, run6;
331  std::string tempString;
332 
333  initializeDDMatrix();
334  initializeCoefficients();
335 
336  double h = (grid.getLastTime() - grid.getFirstTime())/grid.getNumIntervals();
337  DMatrix tmp = AA;
338  ExportVariable Ah( "Ah_mat", tmp*=h, STATIC_CONST_REAL );
339  code.addDeclaration( Ah );
340  code.addLinebreak( 2 );
341  // TODO: Ask Milan why this does NOT work properly !!
343 
344  DVector BB( bb );
345  ExportVariable Bh( "Bh_mat", DMatrix( BB*=h ) );
346 
347  DVector CC( cc );
348  ExportVariable C;
349  if( timeDependant ) {
350  C = ExportVariable( "C_mat", DMatrix( CC*=(1.0/grid.getNumIntervals()) ), STATIC_CONST_REAL );
351  code.addDeclaration( C );
352  code.addLinebreak( 2 );
354  }
355 
356  code.addComment(std::string("Fixed step size:") + toString(h));
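 // Sketch of the scheme encoded by these coefficients (s-stage IRK, step size h):
 //   k_i = f( x_n + sum_j Ah(i,j)*k_j, u ),  i = 1..s,  with Ah = h*A,
 //   x_{n+1} = x_n + sum_i Bh(i)*k_i,                   with Bh = h*b,
 // and, for time dependent dynamics, normalized stage times based on C = c/N.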
357 
358  ExportVariable determinant( "det", 1, 1, REAL, ACADO_LOCAL, true );
359  integrate.addDeclaration( determinant );
360 
361  ExportIndex i( "i" );
362  ExportIndex j( "j" );
363  ExportIndex k( "k" );
364  ExportIndex run( "run" );
365  ExportIndex run1( "run1" );
366  ExportIndex tmp_index1("tmp_index1");
367  ExportIndex tmp_index2("tmp_index2");
368  ExportIndex tmp_index3("tmp_index3");
369  ExportIndex tmp_index4("tmp_index4");
370  ExportIndex k_index("k_index");
371  ExportIndex shooting_index("shoot_index");
372  ExportVariable tmp_meas("tmp_meas", 1, outputGrids.size(), INT, ACADO_LOCAL);
373 
374  ExportVariable numInt( "numInts", 1, 1, INT );
375  if( !equidistantControlGrid() ) {
376  ExportVariable numStepsV( "numSteps", numSteps, STATIC_CONST_INT );
377  code.addDeclaration( numStepsV );
378  code.addLinebreak( 2 );
379  integrate.addStatement( std::string( "int " ) + numInt.getName() + " = " + numStepsV.getName() + "[" + rk_index.getName() + "];\n" );
380  }
381 
382  prepareOutputEvaluation( code );
383 
384  integrate.addIndex( i );
385  integrate.addIndex( j );
386  integrate.addIndex( k );
387  integrate.addIndex( run );
388  integrate.addIndex( run1 );
389  integrate.addIndex( tmp_index1 );
390  integrate.addIndex( tmp_index2 );
391  integrate.addIndex( tmp_index3 );
392  integrate.addIndex( shooting_index );
393  integrate.addIndex( k_index );
394  if( rk_outputs.size() > 0 && (grid.getNumIntervals() > 1 || !equidistantControlGrid()) ) {
395  integrate.addIndex( tmp_index4 );
396  }
397  integrate << shooting_index.getFullName() << " = " << rk_index.getFullName() << ";\n";
399  if( (inputDim-diffsDim) > NX+NXA ) {
403  }
405  if( liftMode == 1 || liftMode == 4 ) {
406  integrate.addStatement( rk_delta.getCols( 0,NX ) == rk_eta.getCols( 0,NX ) - rk_Xprev.getRow(shooting_index) );
407  integrate.addStatement( rk_Xprev.getRow(shooting_index) == rk_eta.getCols( 0,NX ) );
408 
411  }
412  if( liftMode == 1 ) {
414  }
416 
417 
418  if( liftMode == 1 ) { // Compute mu variables for the EXACT scheme !!!
419  // FORWARD loop for rk_delta_full:
420  ExportForLoop deltaLoop( run, 0, grid.getNumIntervals() );
421 
422  deltaLoop.addStatement( k_index == (shooting_index*grid.getNumIntervals()+run)*(NX+NXA) );
423  // FIRST update K variables using term from optimization variables:
424  ExportForLoop loopTemp1( i,0,NX+NXA );
425  loopTemp1.addStatement( j == k_index+i );
426  loopTemp1.addStatement( tmp_index1 == j*(NX+NU) );
427  ExportForLoop loopTemp2( run1,0,numStages );
428  loopTemp2.addStatement( rk_kkk.getElement( j,run1 ) += rk_delta*rk_diffK.getSubMatrix( tmp_index1,tmp_index1+NX+NU,run1,run1+1 ) );
429  loopTemp1.addStatement( loopTemp2 );
430  deltaLoop.addStatement( loopTemp1 );
431 
432  deltaLoop.addStatement( tmp_index1 == shooting_index*grid.getNumIntervals()+run );
433  deltaLoop.addStatement( rk_kkk_delta.getRows(run*NX,(run+1)*NX) == rk_kkk.getRows(tmp_index1*(NX+NXA),(tmp_index1+1)*(NX+NXA))-rk_kkk_prev.getRows(tmp_index1*(NX+NXA),(tmp_index1+1)*(NX+NXA)) );
434  deltaLoop.addStatement( rk_kkk_prev.getRows(tmp_index1*(NX+NXA),(tmp_index1+1)*(NX+NXA)) == rk_kkk.getRows(tmp_index1*(NX+NXA),(tmp_index1+1)*(NX+NXA)) );
435  // update rk_delta_full:
436  deltaLoop.addStatement( rk_delta_full.getRow(run+1) == rk_delta_full.getRow(run) );
437  for( run5 = 0; run5 < numStages; run5++ ) {
438  deltaLoop.addStatement( rk_delta_full.getSubMatrix(run+1,run+2,0,NX) += Bh.getRow(run5)*(rk_kkk_delta.getSubMatrix( run*NX,(run+1)*NX,run5,run5+1 ).getTranspose()) );
439  }
440  DMatrix zeroV = zeros<double>(1,numStages*NX);
441  deltaLoop.addStatement( rk_b_mu.getCols(run*numStages*NX,(run+1)*numStages*NX) == zeroV );
442  integrate.addStatement( deltaLoop );
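 // The loop above pushes the current SQP step through the lifted variables:
 //   delta_full(run+1) = delta_full(run) + sum_i Bh(i)*deltaK_i(run),
 // i.e. the change of the stage variables K is accumulated into a change of the
 // state trajectory, and rk_b_mu is reset for the backward sweep that follows.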
443 
444  // BACKWARD loop for lambda and MU variables:
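 // Exact lifting: the interval-wise multipliers mu are recovered by solving the
 // transposed stage systems stored in rk_A_traj (roughly A_traj^T * mu = b_mu),
 // sweeping backwards over the intervals; lambda is then propagated one interval
 // back using the stored stage derivatives rk_diff_lam.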
446  ExportForLoop lambdaLoop( run, grid.getNumIntervals()-1, -1, -1 );
447  lambdaLoop.addStatement( tmp_index1 == shooting_index*grid.getNumIntervals()+run );
448  // Compute \hat{lambda}:
449  // vec(j*NX+1:j*NX+NX) = -Bh_vec(j+1)*dir_tmp;
450  for( run5 = 0; run5 < numStages; run5++ ) {
451  lambdaLoop.addStatement( rk_b_mu.getCols(run*numStages*NX+run5*NX,run*numStages*NX+(run5+1)*NX) -= Bh.getRow(run5)*rk_lambda.getRow(run+1) );
452  }
453  for( run5 = 0; run5 < numStages; run5++ ) {
454  lambdaLoop.addStatement( rk_seed.getCols(0,NX+NU) == rk_delta_full.getRow(run) );
455  ExportForLoop deltaLoop0( i, 0, numStages );
456  deltaLoop0.addStatement( rk_seed.getCols(0,NX) += Ah.getElement(run5,i)*(rk_kkk_delta.getSubMatrix( run*NX,(run+1)*NX,i,i+1 ).getTranspose()) );
457  lambdaLoop.addStatement( deltaLoop0 );
458 
459  for( run6 = 0; run6 < NX; run6++ ) {
460  lambdaLoop.addStatement( rk_seed.getCol(NX+NU+run6) == rk_diff_mu.getSubMatrix(tmp_index1,tmp_index1+1,(run5*NX+run6)*(NX+NU),(run5*NX+run6+1)*(NX+NU))*(rk_seed.getCols(0,NX+NU).getTranspose()) );
461  }
462  for( run6 = 0; run6 < numStages; run6++ ) {
463  lambdaLoop.addStatement( rk_b_mu.getCols(run*numStages*NX+run6*NX,run*numStages*NX+run6*NX+NX) -= Ah.getElement(run5,run6)*rk_seed.getCols(NX+NU,2*NX+NU) );
464  }
465  ExportForLoop deltaLoop3( i, 0, run );
466  for( run6 = 0; run6 < numStages; run6++ ) {
467  deltaLoop3.addStatement( rk_b_mu.getCols(i*numStages*NX+run6*NX,i*numStages*NX+run6*NX+NX) -= Bh.getRow(run6)*rk_seed.getCols(NX+NU,2*NX+NU) );
468  }
469  lambdaLoop.addStatement( deltaLoop3 );
470  }
471  lambdaLoop.addFunctionCall( solver->getNameSolveTransposeReuseFunction(),rk_A_traj.getAddress(tmp_index1*numStages*(NX2+NXA),0),rk_b_mu.getAddress(0,run*numStages*NX),rk_aux_traj.getAddress(tmp_index1,0) );
472  lambdaLoop.addStatement( rk_adj_traj.getRow(tmp_index1) == rk_b_mu.getCols(run*numStages*NX,(run+1)*numStages*NX) );
473 
474  lambdaLoop.addStatement( rk_lambda.getRow(run) == rk_lambda.getRow(run+1) );
475  for( run5 = 0; run5 < numStages; run5++ ) {
476  lambdaLoop.addStatement( rk_lambda.getRow(run) += rk_adj_traj.getSubMatrix(tmp_index1,tmp_index1+1,run5*NX,run5*NX+NX)*rk_diff_lam.getRows(tmp_index1*numStages*NX+run5*NX,tmp_index1*numStages*NX+run5*NX+NX) );
477  }
478  integrate.addStatement( lambdaLoop );
479  }
480 
481  // set current Hessian to zero
482  uint numX = NX*(NX+1)/2.0;
483  uint numU = NU*(NU+1)/2.0;
484  DMatrix zeroM;
485  if( !gradientUpdate ) zeroM = zeros<double>(1,numX+numU+NX*NU);
486  else zeroM = zeros<double>(1,numX+numU+NX*NU+NX+NU);
487  integrate.addStatement( rk_eta.getCols(NX*(2+NX+NU),NX+diffsDim) == zeroM );
488 
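 // Only the lower-triangular blocks of the symmetric Hessian are propagated, so the
 // result written into rk_eta consists of numX = NX*(NX+1)/2 state-state entries,
 // NX*NU mixed entries and numU = NU*(NU+1)/2 control-control entries, plus an extra
 // gradient block of NX+NU entries when the lifted gradient update is enabled.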
489  // integrator FORWARD loop:
490  integrate.addComment("------------ Forward loop ------------:");
491  ExportForLoop tmpLoop( run, 0, grid.getNumIntervals() );
492  ExportStatementBlock *loop;
493  if( equidistantControlGrid() ) {
494  loop = &tmpLoop;
495  }
496  else {
497  loop = &integrate;
498  loop->addStatement( std::string("for(") + run.getName() + " = 0; " + run.getName() + " < " + numInt.getName() + "; " + run.getName() + "++ ) {\n" );
499  }
500 
501 // if( grid.getNumIntervals() > 1 || !equidistantControlGrid() ) {
502  // Set rk_diffsPrev:
503  loop->addStatement( std::string("if( run > 0 ) {\n") );
504  if( NX1 > 0 ) {
505  ExportForLoop loopTemp1( i,0,NX1 );
506  loopTemp1.addStatement( rk_diffsPrev1.getSubMatrix( i,i+1,0,NX1 ) == rk_eta.getCols( i*NX+2*NX+NXA,i*NX+2*NX+NXA+NX1 ) );
507  if( NU > 0 ) loopTemp1.addStatement( rk_diffsPrev1.getSubMatrix( i,i+1,NX1,NX1+NU ) == rk_eta.getCols( i*NU+(NX+NXA)*(NX+1)+NX,i*NU+(NX+NXA)*(NX+1)+NX+NU ) );
508  loop->addStatement( loopTemp1 );
509  }
510  if( NX2 > 0 ) {
511  ExportForLoop loopTemp2( i,0,NX2 );
512  loopTemp2.addStatement( rk_diffsPrev2.getSubMatrix( i,i+1,0,NX1+NX2 ) == rk_eta.getCols( i*NX+2*NX+NXA+NX1*NX,i*NX+2*NX+NXA+NX1*NX+NX1+NX2 ) );
513  if( NU > 0 ) loopTemp2.addStatement( rk_diffsPrev2.getSubMatrix( i,i+1,NX1+NX2,NX1+NX2+NU ) == rk_eta.getCols( i*NU+(NX+NXA)*(NX+1)+NX1*NU+NX,i*NU+(NX+NXA)*(NX+1)+NX1*NU+NU+NX ) );
514  loop->addStatement( loopTemp2 );
515  }
516  if( NX3 > 0 ) {
517  ExportForLoop loopTemp3( i,0,NX3 );
518  loopTemp3.addStatement( rk_diffsPrev3.getSubMatrix( i,i+1,0,NX ) == rk_eta.getCols( i*NX+2*NX+NXA+(NX1+NX2)*NX,i*NX+2*NX+NXA+(NX1+NX2)*NX+NX ) );
519  if( NU > 0 ) loopTemp3.addStatement( rk_diffsPrev3.getSubMatrix( i,i+1,NX,NX+NU ) == rk_eta.getCols( i*NU+(NX+NXA)*(NX+1)+(NX1+NX2)*NU+NX,i*NU+(NX+NXA)*(NX+1)+(NX1+NX2)*NU+NU+NX ) );
520  loop->addStatement( loopTemp3 );
521  }
522  loop->addStatement( std::string("}\nelse{\n") );
523  DMatrix eyeM = eye<double>(NX);
524  eyeM.appendCols(zeros<double>(NX,NU));
525  loop->addStatement( rk_diffsPrev2 == eyeM );
526  loop->addStatement( std::string("}\n") );
527 // }
528 
529  // SAVE rk_diffsPrev2 in the rk_S_traj variable:
530  loop->addStatement( rk_S_traj.getRows(run*NX,(run+1)*NX) == rk_diffsPrev2 );
531 
532  loop->addStatement( k_index == (shooting_index*grid.getNumIntervals()+run)*(NX+NXA) );
533 
534  // FIRST update using term from optimization variables:
535  if( liftMode == 4 ) { // liftMode == 1 --> see earlier!
536  ExportForLoop loopTemp1( i,0,NX+NXA );
537  loopTemp1.addStatement( j == k_index+i );
538  loopTemp1.addStatement( tmp_index1 == j*(NX+NU) );
539  ExportForLoop loopTemp2( run1,0,numStages );
540  loopTemp2.addStatement( rk_kkk.getElement( j,run1 ) += rk_delta*rk_diffK.getSubMatrix( tmp_index1,tmp_index1+NX+NU,run1,run1+1 ) );
541  loopTemp1.addStatement( loopTemp2 );
542  loop->addStatement( loopTemp1 );
543  }
544 
545  // Evaluate all stage values for reuse:
546  evaluateAllStatesImplicitSystem( loop, k_index, Ah, C, run1, j, tmp_index1 );
547 
548  // SAVE rk_stageValues in the rk_xxx_traj variable:
549  loop->addStatement( rk_xxx_traj.getCols(run*numStages*(NX2+NXA),(run+1)*numStages*(NX2+NXA)) == rk_stageValues );
550 
551  solveImplicitSystem( loop, i, run1, j, tmp_index1, k_index, Ah, C, determinant, true );
552 
553  // NEW: UPDATE RK_B WITH THE CONSTANT COMING FROM THE PREVIOUS INTEGRATION STEP
554  if( gradientUpdate) { // LIFTING with GRADIENT UPDATE
555  loop->addStatement( std::string("if( run > 0 ) {\n") );
556  loop->addStatement( rk_Xhat_traj.getRows(run*NX,(run+1)*NX) == rk_Xhat_traj.getRows((run-1)*NX,run*NX) );
557  for( run5 = 0; run5 < numStages; run5++ ) {
558  loop->addStatement( rk_Xhat_traj.getRows(run*NX,(run+1)*NX) += rk_Khat_traj.getSubMatrix( run-1,run,run5*(NX2+NXA),(run5+1)*(NX2+NXA) ).getTranspose()*Bh.getElement(run5,0) );
559  }
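 // rk_Xhat_traj accumulates the constant contribution of the previous integration
 // steps, roughly Xhat(run) = Xhat(run-1) + sum_i Bh(i)*Khat_i(run-1), which is
 // subtracted from the right-hand side rk_b below for the lifted gradient update.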
560  if( (LinearAlgebraSolver) linSolver != SIMPLIFIED_IRK_NEWTON && (LinearAlgebraSolver) linSolver != SINGLE_IRK_NEWTON ) { // YOU NEED THE FULL rk_diffsTemp2 HERE !!
561  ExportForLoop deltaLoop( i,0,numStages );
562  ExportForLoop deltaLoop2( j,0,NX );
563  deltaLoop2.addStatement( tmp_index1 == i*(NX2+NXA)+j );
564  deltaLoop2.addStatement( tmp_index2 == numStages*k_index+tmp_index1 );
565  // SAVE rk_diff_lam:
566  deltaLoop2.addStatement( rk_diff_lam.getRow(tmp_index2) == rk_diffsTemp2.getSubMatrix(i,i+1,j*NVARS2,j*NVARS2+NX) );
567  deltaLoop2.addStatement( rk_b.getElement(tmp_index1,0) -= rk_diffsTemp2.getSubMatrix(i,i+1,j*NVARS2,j*NVARS2+NX)*rk_Xhat_traj.getRows(run*NX,(run+1)*NX) );
568  deltaLoop.addStatement( deltaLoop2 );
569  loop->addStatement( deltaLoop );
570  }
571  loop->addStatement( std::string("}\nelse{\n") );
572  loop->addStatement( rk_Xhat_traj.getRows(0,NX) == zeros<double>(NX,1) );
573  loop->addStatement( std::string("}\n") );
574  }
575 
576  // SAVE rk_A in the rk_A_traj variable:
577  if( (LinearAlgebraSolver) linSolver == SIMPLIFIED_IRK_NEWTON || (LinearAlgebraSolver) linSolver == SINGLE_IRK_NEWTON ) {
578  // YOU DO NOT NEED TO SAVE THE TRAJECTORY FOR THE INEXACT NEWTON SCHEME !
579 // loop->addStatement( rk_A_traj.getRows(run*(NX2+NXA),(run+1)*(NX2+NXA)) == rk_A );
580 // loop->addStatement( rk_aux_traj.getRow(run) == rk_auxSolver.makeRowVector() );
581  }
582  else if( liftMode == 1 ) {
583  loop->addStatement( tmp_index1 == shooting_index*grid.getNumIntervals()+run );
584  loop->addStatement( rk_A_traj.getRows(tmp_index1*numStages*(NX2+NXA),(tmp_index1+1)*numStages*(NX2+NXA)) == rk_A );
585  loop->addStatement( rk_aux_traj.getRow(tmp_index1) == rk_auxSolver.makeRowVector() );
586  }
587  else {
589  loop->addStatement( rk_A_traj.getRows(run*numStages*(NX2+NXA),(run+1)*numStages*(NX2+NXA)) == rk_A );
590  loop->addStatement( rk_aux_traj.getRow(run) == rk_auxSolver.makeRowVector() );
591  }
592 
593  if( liftMode == 1 ) {
594 // // Evaluate sensitivities:
595 // // !! NEW !! Let us propagate the forward sensitivities as in a VDE system
596 // loop->addStatement( rk_seed.getCols(NX,NX+NX*(NX+NU)) == rk_diffsPrev2.makeRowVector() );
597 // ExportForLoop loop_sens( i,0,numStages );
598 // loop_sens.addStatement( rk_seed.getCols(0,NX) == rk_stageValues.getCols(i*(NX+NXA),i*(NX+NXA)+NX) );
599 // loop_sens.addFunctionCall( forward_sweep.getName(), rk_seed, rk_diffsTemp2.getAddress(i,0) );
600 // loop->addStatement( loop_sens );
601 
602  // Let us reuse the derivatives computed to form the linear system (in rk_diffsTemp2) !!
603 
604  evaluateRhsSensitivities( loop, run1, i, j, tmp_index1, tmp_index2 );
605  allSensitivitiesImplicitSystem( loop, run1, i, j, tmp_index1, tmp_index2, tmp_index3, k_index, Bh, false );
606  }
607  else if( liftMode == 4 ) {
608  evaluateRhsInexactSensitivities( loop, run1, i, j, tmp_index1, tmp_index2, tmp_index3, k_index, Ah );
609 
610  // GRADIENT UPDATE INIS SCHEME:
611  if( gradientUpdate ) {
612  loop->addStatement( tmp_index1 == shooting_index*grid.getNumIntervals()+run );
613  loop->addStatement( rk_eta.getCols(NX+diffsDim-NX-NU,NX+diffsDim) += rk_adj_traj.getRow(tmp_index1)*rk_b.getCols(1,1+NX+NU) );
614  }
615 
616  allSensitivitiesImplicitSystem( loop, run1, i, j, tmp_index1, tmp_index2, tmp_index3, k_index, Bh, true );
617  }
618  else return ACADOERROR( RET_NOT_IMPLEMENTED_YET );
619 
620  if( liftMode == 1 ) { // EXACT LIFTING
621  // !!! update rk_xxx_lin (YOU NEED TO UPDATE THE LINEARIZATION POINT BEFORE YOU UPDATE RK_KKK): !!!
622  for( run5 = 0; run5 < NX; run5++ ) {
623  loop->addStatement( rk_xxx_lin.getCol( run5 ) += rk_kkk.getRow( k_index+run5 )*Bh );
624  }
625  }
626 
627  // update rk_kkk:
628  ExportForLoop loopTemp( j,0,numStages );
629  for( run5 = 0; run5 < NX2; run5++ ) {
630  if( (LinearAlgebraSolver) linSolver == SIMPLIFIED_IRK_NEWTON || (LinearAlgebraSolver) linSolver == SINGLE_IRK_NEWTON ) {
631  loopTemp.addStatement( rk_kkk.getElement( k_index+NX1+run5,j ) += rk_b.getElement( j*(NX2+NXA)+run5,0 ) ); // differential states
632  }
633  else {
634  loopTemp.addStatement( rk_kkk.getElement( k_index+NX1+run5,j ) += rk_b.getElement( j*NX2+run5,0 ) ); // differential states
635  }
636  }
637  for( run5 = 0; run5 < NXA; run5++ ) {
638  if( (LinearAlgebraSolver) linSolver == SIMPLIFIED_IRK_NEWTON || (LinearAlgebraSolver) linSolver == SINGLE_IRK_NEWTON ) {
639  loopTemp.addStatement( rk_kkk.getElement( k_index+NX+run5,j ) += rk_b.getElement( j*(NX2+NXA)+NX2+run5,0 ) ); // algebraic states
640  }
641  else {
642  loopTemp.addStatement( rk_kkk.getElement( k_index+NX+run5,j ) += rk_b.getElement( numStages*NX2+j*NXA+run5,0 ) ); // algebraic states
643  }
644  }
645  loop->addStatement( loopTemp );
646  if( gradientUpdate ) { // save the rk_b first column results:
647  loop->addStatement( rk_Khat_traj.getRow(run) == rk_b.getCol(0).getTranspose() );
648  }
649 
650  // update rk_eta:
651  for( run5 = 0; run5 < NX; run5++ ) {
652  loop->addStatement( rk_eta.getCol( run5 ) += rk_kkk.getRow( k_index+run5 )*Bh );
653  }
654  if( NXA > 0) {
655  DMatrix tempCoefs( evaluateDerivedPolynomial( 0.0 ) );
656  if( !equidistantControlGrid() || grid.getNumIntervals() > 1 ) {
657  loop->addStatement( std::string("if( run == 0 ) {\n") );
658  }
659  for( run5 = 0; run5 < NXA; run5++ ) {
660  loop->addStatement( rk_eta.getCol( NX+run5 ) == rk_kkk.getRow( k_index+NX+run5 )*tempCoefs );
661  }
662  if( !equidistantControlGrid() || grid.getNumIntervals() > 1 ) {
663  loop->addStatement( std::string("}\n") );
664  }
665  }
666  if( liftMode != 1 ) { // INEXACT LIFTING
667  // !!! update rk_xxx_lin (YOU NEED TO UPDATE THE LINEARIZATION POINT AFTER YOU UPDATE RK_KKK): !!!
668  loop->addStatement( rk_xxx_lin == rk_eta.getCols(0,NX) );
669  }
670 
671  // Computation of the sensitivities using the CHAIN RULE:
672  updateImplicitSystem(loop, i, j, tmp_index2);
673 
674 // loop->addStatement( std::string( reset_int.get(0,0) ) + " = 0;\n" );
675 
676  loop->addStatement( rk_ttt += DMatrix(1.0/grid.getNumIntervals()) );
677 
678  // end of the forward integrator loop.
679  if( !equidistantControlGrid() ) {
680  loop->addStatement( "}\n" );
681  }
682  else {
683  integrate.addStatement( *loop );
684  }
685 
686  // integrator BACKWARD loop:
687  integrate.addComment("------------ BACKWARD loop ------------:");
688 
689  ExportForLoop tmpLoop2( run, grid.getNumIntervals()-1, -1, -1 );
690  ExportStatementBlock *loop2;
691  if( equidistantControlGrid() ) {
692  loop2 = &tmpLoop2;
693  }
694  else {
696  }
697 
698  loop2->addStatement( k_index == (shooting_index*grid.getNumIntervals()+run)*(NX+NXA)*(NX+NU) );
699 
700 
701  if( (LinearAlgebraSolver) linSolver == SIMPLIFIED_IRK_NEWTON || (LinearAlgebraSolver) linSolver == SINGLE_IRK_NEWTON ) { // INEXACT
702  // Compute \hat{lambda}:
703  // vec(j*NX+1:j*NX+NX) = -Bh_vec(j+1)*dir_tmp;
704  for( run5 = 0; run5 < numStages; run5++ ) {
705  DMatrix zeroV = zeros<double>(1,NX);
706  loop2->addStatement( rk_b_trans.getCols(run5*NX,(run5+1)*NX) == zeroV );
707  loop2->addStatement( rk_b_trans.getCols(run5*NX,(run5+1)*NX) -= Bh.getRow(run5)*rk_eta.getCols(NX,2*NX) );
708  }
709  loop2->addStatement( tmp_index1 == shooting_index*grid.getNumIntervals()+run );
710  for( run5 = 0; run5 < numStages; run5++ ) {
711  loop2->addStatement( rk_seed.getCols(0,NX) == rk_xxx_traj.getCols((run*numStages+run5)*(NX2+NXA),(run*numStages+run5+1)*(NX2+NXA)) );
712  loop2->addStatement( rk_seed.getCols(NX*(1+NX+NU),NX*(2+NX+NU)) == rk_adj_traj.getSubMatrix(tmp_index1,tmp_index1+1,run5*(NX+NXA),(run5+1)*(NX+NXA)) );
713  loop2->addFunctionCall( diffs_sweep.getName(), rk_seed, rk_adj_diffs_tmp );
714  for( run6 = 0; run6 < numStages; run6++ ) {
715  loop2->addStatement( rk_b_trans.getCols(run6*NX,(run6+1)*NX) -= Ah.getElement(run5,run6)*rk_adj_diffs_tmp.getCols(0,NX) );
716  }
717  loop2->addStatement( rk_b_trans.getCols(run5*NX,(run5+1)*NX) += rk_adj_traj.getSubMatrix(tmp_index1,tmp_index1+1,run5*(NX+NXA),(run5+1)*(NX+NXA)) ); // BECAUSE EXPLICIT ODE
718  }
720  loop2->addStatement( rk_adj_traj.getRow(tmp_index1) += rk_b_trans );
721  loop2->addStatement( rk_b_trans == rk_adj_traj.getRow(tmp_index1) );
722  }
723  else { // EXACT SCHEME (liftMode == 1)
724  // (RIEN) THIS IS A NEW PART TO MAKE IT EXACTLY EQUIVALENT TO DIRECT COLLOCATION:
725  // WE SAVE THE SENSITIVITIES TO COMPUTE THE RIGHT-HAND SIDES TO UPDATE THE MU VARIABLES AT THE BEGINNING OF THE INTEGRATOR CALL
726  loop2->addStatement( tmp_index1 == shooting_index*grid.getNumIntervals()+run );
727  if( gradientUpdate ) {
728  for( run5 = 0; run5 < numStages; run5++ ) {
729  loop2->addStatement( rk_seed.getCols(0,NX) == rk_xxx_traj.getCols((run*numStages+run5)*(NX2+NXA),(run*numStages+run5+1)*(NX2+NXA)) );
730  loop2->addStatement( rk_seed.getCols(NX*(1+NX+NU),NX*(2+NX+NU)) == rk_adj_traj.getSubMatrix(tmp_index1,tmp_index1+1,run5*(NX+NXA),(run5+1)*(NX+NXA)) );
731 // loop2->addStatement( rk_seed.getCols(NX*(2+NX+NU),NX*(3+NX+NU)) == rk_delta );
732 // loop2->addStatement( rk_seed.getCols(NX*(2+NX+NU),NX*(3+NX+NU)) += rk_Xhat_traj.getRows(run*NX,(run+1)*NX).getTranspose() );
733 // ExportForLoop gradLoop( i, 0, numStages );
734 // gradLoop.addStatement( rk_seed.getCols(NX*(2+NX+NU),NX*(3+NX+NU)) += Ah.getElement(run5,i)*rk_Khat_traj.getSubMatrix( run,run+1,i*(NX2+NXA),(i+1)*(NX2+NXA) ) );
735 // loop2->addStatement( gradLoop );
736  loop2->addFunctionCall( diffs_sweep.getName(), rk_seed, rk_adj_diffs_tmp );
737  loop2->addStatement( rk_diff_mu.getSubMatrix(tmp_index1,tmp_index1+1,run5*NX*(NX+NU),(run5+1)*NX*(NX+NU)) == rk_adj_diffs_tmp.getCols(0,NX*(NX+NU)) );
738 // for( run6 = 0; run6 < numStages; run6++ ) {
739 // loop2->addStatement( rk_b_trans.getCols(run6*NX,(run6+1)*NX) -= Ah.getElement(run5,run6)*rk_adj_diffs_tmp.getCols(0,NX) );
740 // }
741  // loop2->addStatement( rk_b_trans.getCols(run5*NX,(run5+1)*NX) += rk_adj_traj.getSubMatrix(tmp_index1,tmp_index1+1,run5*(NX+NXA),(run5+1)*(NX+NXA)) ); // BECAUSE EXPLICIT ODE (BUT ZERO FOR SECOND ORDER DERIVATIVES)
742  }
743  }
744  }
745 
746  loop2->addStatement( tmp_index3 == shooting_index*grid.getNumIntervals()+run );
747  for( run5 = 0; run5 < numStages; run5++ ) {
748  loop2->addStatement( rk_seed.getCols(0,NX) == rk_xxx_traj.getCols((run*numStages+run5)*(NX2+NXA),(run*numStages+run5+1)*(NX2+NXA)) );
749  loop2->addStatement( rk_diffsPrev2 == rk_S_traj.getRows(run*NX,(run+1)*NX) );
750 
751  ExportForLoop diffLoop1( i, 0, NX );
752  diffLoop1.addStatement( tmp_index1 == k_index+i*(NX+NU) );
753  ExportForLoop diffLoop2( j, 0, NX+NU );
754  diffLoop2.addStatement( tmp_index2 == tmp_index1+j );
755  for( run6 = 0; run6 < numStages; run6++ ) {
756  diffLoop2.addStatement( rk_diffsPrev2.getElement(i,j) += Ah.getElement(run5,run6)*rk_diffK.getElement( tmp_index2,run6 ) );
757  }
758  diffLoop1.addStatement( diffLoop2 );
759  loop2->addStatement( diffLoop1 );
760 
761  loop2->addStatement( rk_seed.getCols(NX,NX*(1+NX+NU)) == rk_diffsPrev2.makeRowVector() );
762  loop2->addStatement( rk_seed.getCols(NX*(1+NX+NU),NX*(2+NX+NU)) == rk_adj_traj.getSubMatrix(tmp_index3,tmp_index3+1,run5*NX,(run5+1)*NX) );
763  if( gradientUpdate ) {
764  loop2->addStatement( rk_seed.getCols(NX*(2+NX+NU),NX*(3+NX+NU)) == rk_Xhat_traj.getRows(run*NX,(run+1)*NX).getTranspose() );
765  ExportForLoop gradLoop( i, 0, numStages );
766  gradLoop.addStatement( rk_seed.getCols(NX*(2+NX+NU),NX*(3+NX+NU)) += Ah.getElement(run5,i)*rk_Khat_traj.getSubMatrix( run,run+1,i*(NX2+NXA),(i+1)*(NX2+NXA) ) );
767  loop2->addStatement( gradLoop );
768  }
769  loop2->addFunctionCall( adjoint_sweep.getName(), rk_seed, rk_adj_diffs_tmp.getAddress(0,0) );
770  loop2->addStatement( rk_eta.getCols(NX,2*NX) += rk_adj_diffs_tmp.getCols(0,NX) );
771  if( !gradientUpdate ) {
772  loop2->addStatement( rk_eta.getCols(NX*(2+NX+NU),NX+diffsDim) += rk_adj_diffs_tmp.getCols(NX,NX+numX+numU+NX*NU) );
773  }
774  else {
775  loop2->addStatement( rk_eta.getCols(NX*(2+NX+NU),NX+diffsDim) += rk_adj_diffs_tmp.getCols(NX,NX+numX+numU+NX*NU+NX+NU) );
776  }
777  }
778 
779  loop2->addStatement( rk_ttt -= DMatrix(1.0/grid.getNumIntervals()) );
780  // end of the backward integrator loop.
781  if( !equidistantControlGrid() ) {
782  loop2->addStatement( "}\n" );
783  }
784  else {
785  integrate.addStatement( *loop2 );
786  }
787 
788 // integrate.addStatement( std::string( "if( " ) + determinant.getFullName() + " < 1e-12 ) {\n" );
789 // integrate.addStatement( error_code == 2 );
790 // integrate.addStatement( std::string( "} else if( " ) + determinant.getFullName() + " < 1e-6 ) {\n" );
791 // integrate.addStatement( error_code == 1 );
792 // integrate.addStatement( std::string( "} else {\n" ) );
794 // integrate.addStatement( std::string( "}\n" ) );
795 
796  code.addFunction( integrate );
797  code.addLinebreak( 2 );
798 
799  return SUCCESSFUL_RETURN;
800 }
801 
802 
803 returnValue SymmetricLiftedIRKExport::updateImplicitSystem( ExportStatementBlock* block, const ExportIndex& index1, const ExportIndex& index2, const ExportIndex& tmp_index )
804 {
805  if( NX2 > 0 ) {
806  ExportForLoop loop01( index1,NX1,NX1+NX2 );
807  ExportForLoop loop02( index2,0,NX1+NX2 );
808  loop02.addStatement( tmp_index == index2+index1*NX );
809  loop02.addStatement( rk_eta.getCol( tmp_index+2*NX+NXA ) == rk_diffsNew2.getSubMatrix( index1-NX1,index1-NX1+1,index2,index2+1 ) );
810  loop01.addStatement( loop02 );
811 
812  if( NU > 0 ) {
813  ExportForLoop loop03( index2,0,NU );
814  loop03.addStatement( tmp_index == index2+index1*NU );
815  loop03.addStatement( rk_eta.getCol( tmp_index+(NX+NXA)*(1+NX)+NX ) == rk_diffsNew2.getSubMatrix( index1-NX1,index1-NX1+1,NX1+NX2+index2,NX1+NX2+index2+1 ) );
816  loop01.addStatement( loop03 );
817  }
818  block->addStatement( loop01 );
819  }
820  // ALGEBRAIC STATES
821  if( NXA > 0 ) {
823  }
824 
825  return SUCCESSFUL_RETURN;
826 }
827 
828 
829 returnValue SymmetricLiftedIRKExport::setup( )
830 {
832 
833  int gradientUp;
834  get( LIFTED_GRADIENT_UPDATE, gradientUp );
835  bool gradientUpdate = (bool) gradientUp;
836 
837  uint numX = NX*(NX+1)/2.0;
838  uint numU = NU*(NU+1)/2.0;
839  diffsDim = NX + NX*(NX+NU) + numX + NX*NU + numU;
840  if( gradientUpdate ) diffsDim += NX+NU;
841  inputDim = NX + diffsDim + NU + NOD;
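 // Resulting rk_eta layout (sketch for NXA == 0): x (NX) | adjoint seed/result (NX)
 // | forward sensitivities (NX*(NX+NU)) | symmetric Hessian blocks (numX+NX*NU+numU)
 // | optional gradient block (NX+NU) | u (NU) | online data (NOD).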
842 
843  int useOMP;
844  get(CG_USE_OPENMP, useOMP);
845  ExportStruct structWspace;
846  structWspace = useOMP ? ACADO_LOCAL : ACADO_WORKSPACE;
847 
848  uint timeDep = 0;
849  if( timeDependant ) timeDep = 1;
850 
851  rk_eta = ExportVariable( "rk_eta", 1, inputDim, REAL );
852 
853  rk_b_trans = ExportVariable( "rk_b_trans", 1, numStages*(NX+NXA), REAL, structWspace );
854 
855  rk_adj_diffs_tmp = ExportVariable( "rk_adjoint", 1, NX + numX + NX*NU + numU, REAL, structWspace );
856  if( gradientUpdate ) {
857  if( (NX+numX+NX*NU+numU+NX+NU) > NX*(NX+NU) ) {
858  rk_adj_diffs_tmp = ExportVariable( "rk_adjoint", 1, NX + numX + NX*NU + numU + NX+NU, REAL, structWspace );
859  }
860  else {
861  rk_adj_diffs_tmp = ExportVariable( "rk_adjoint", 1, NX*(NX+NU), REAL, structWspace );
862  }
863  rk_Khat_traj = ExportVariable( "rk_Khat", grid.getNumIntervals(), numStages*(NX+NXA), REAL, structWspace );
864  rk_Xhat_traj = ExportVariable( "rk_Xhat", grid.getNumIntervals()*NX, 1, REAL, structWspace );
865  }
866 
867  int linSolver;
868  get( LINEAR_ALGEBRA_SOLVER, linSolver );
869  int liftMode = 1;
870  if( (LinearAlgebraSolver) linSolver == SIMPLIFIED_IRK_NEWTON || (LinearAlgebraSolver) linSolver == SINGLE_IRK_NEWTON ) liftMode = 4;
871 
872  rk_seed = ExportVariable( "rk_seed", 1, NX+NX*(NX+NU)+NX+NU+NOD+timeDep, REAL, structWspace );
873  if( gradientUpdate ) rk_seed = ExportVariable( "rk_seed", 1, NX+NX*(NX+NU)+2*NX+NU+NOD+timeDep, REAL, structWspace );
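 // rk_seed layout (one row, roughly): stage state (NX) | forward seed Gx,Gu
 // (NX*(NX+NU)) | adjoint seed lambda (NX) | gradient seed (NX, only with the
 // gradient update) | controls (NU) | online data (NOD) | optional time.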
874 // rk_A_traj = ExportVariable( "rk_A_traj", grid.getNumIntervals()*numStages*(NX2+NXA), numStages*(NX2+NXA), REAL, structWspace );
875 // rk_aux_traj = ExportVariable( "rk_aux_traj", grid.getNumIntervals(), numStages*(NX2+NXA), INT, structWspace );
876  if( liftMode == 1 ) {
877  rk_A_traj = ExportVariable( "rk_A_traj", N*grid.getNumIntervals()*numStages*(NX2+NXA), numStages*(NX2+NXA), REAL, structWspace );
878  rk_aux_traj = ExportVariable( "rk_aux_traj", N*grid.getNumIntervals(), numStages*(NX2+NXA), INT, structWspace );
879  }
880  rk_xxx_traj = ExportVariable( "rk_stageV_traj", 1, grid.getNumIntervals()*numStages*(NX+NXA), REAL, structWspace );
881  rk_S_traj = ExportVariable( "rk_S_traj", grid.getNumIntervals()*NX, NX+NU, REAL, structWspace );
882 
883  if( (LinearAlgebraSolver) linSolver == SIMPLIFIED_IRK_NEWTON || (LinearAlgebraSolver) linSolver == SINGLE_IRK_NEWTON ) {
884 // rk_A = ExportVariable( "rk_J", NX2+NXA, NX2+NXA, REAL, structWspace );
885 // if(NDX2 > 0) rk_I = ExportVariable( "rk_I", NX2+NXA, NX2+NXA, REAL, structWspace );
886 // rk_A_traj = ExportVariable( "rk_J_traj", grid.getNumIntervals()*(NX2+NXA), NX2+NXA, REAL, structWspace );
887 // rk_aux_traj = ExportVariable( "rk_aux_traj", grid.getNumIntervals(), rk_auxSolver.getNumRows()*rk_auxSolver.getNumCols(), INT, structWspace );
888  rk_diffsTemp2 = ExportVariable( "rk_diffsTemp2", NX2+NXA, NVARS2, REAL, structWspace );
889  }
891 
892  if( liftMode == 1 ) {
893  rk_kkk_prev = ExportVariable( "rk_Ktraj_prev", N*grid.getNumIntervals()*(NX+NXA), numStages, REAL, ACADO_WORKSPACE );
896  rk_diff_mu = ExportVariable( "rk_diff_mu", N*grid.getNumIntervals(), numStages*(NX+NXA)*(NX+NU), REAL, ACADO_WORKSPACE );
897  rk_diff_lam = ExportVariable( "rk_diff_lam", N*grid.getNumIntervals()*numStages*NX, NX, REAL, ACADO_WORKSPACE );
899  rk_b_mu = ExportVariable( "rk_b_mu", 1, grid.getNumIntervals()*numStages*(NX+NXA), REAL, structWspace );
900  }
901 
902  return SUCCESSFUL_RETURN;
903 }
904 
905 
906 
907 // PROTECTED:
908 
909 
910 CLOSE_NAMESPACE_ACADO
911 
912 // end of file.