algebraic_consistency_constraint.cpp
Go to the documentation of this file.
1 /*
2  * This file is part of ACADO Toolkit.
3  *
4  * ACADO Toolkit -- A Toolkit for Automatic Control and Dynamic Optimization.
5  * Copyright (C) 2008-2014 by Boris Houska, Hans Joachim Ferreau,
6  * Milan Vukov, Rien Quirynen, KU Leuven.
7  * Developed within the Optimization in Engineering Center (OPTEC)
8  * under supervision of Moritz Diehl. All rights reserved.
9  *
10  * ACADO Toolkit is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 3 of the License, or (at your option) any later version.
14  *
15  * ACADO Toolkit is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with ACADO Toolkit; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  *
24  */
25 
26 
27 
36 
37 
39 
40 
41 //
42 // PUBLIC MEMBER FUNCTIONS:
43 //
44 
45 
// Default constructor. NOTE(review): the signature (listing lines 46-47) was a
// hyperlink in the generated documentation and is missing from this dump.
// The visible body zero-initialises the object: no stages, no break points.
48 
49  numberOfStages = 0;
50  counter = 0;
// NOTE(review): listing lines 51-52 are missing here — presumably they null
// the per-stage state-count arrays (numberOfDifferentialStates /
// numberOfAlgebraicStates, which the destructor deletes); verify against the
// original source.
53  breakPoints = 0;
54 }
55 
56 
// Constructor taking a grid and a number of stages. NOTE(review): the
// signature (listing line 57) is missing from this dump; only the
// initializer list is visible. Delegates to ConstraintElement with 0 extra
// constraint components.
58  :ConstraintElement(grid_, numberOfStages_, 0 ){
59 
60  numberOfStages = numberOfStages_;
61  counter = 0;
// NOTE(review): listing lines 62-63 are missing — presumably the allocations
// of the per-stage numberOfDifferentialStates / numberOfAlgebraicStates
// arrays; verify against the original source.
// breakPoints[i] stores the grid index at which stage i ends; one extra slot
// because numberOfStages+1 boundaries delimit numberOfStages stages.
64  breakPoints = new int[numberOfStages+1];
65 }
66 
67 
// Copy constructor (deep copy). NOTE(review): the signature (listing line 68)
// is missing from this dump; only the initializer list is visible.
69  :ConstraintElement(rhs){
70 
71  int run1;
72 
// NOTE(review): listing line 73 is missing — presumably
// numberOfStages = rhs.numberOfStages; the branch below reads numberOfStages,
// so it must be assigned before this point. Verify against the original.
74  counter = rhs.counter ;
75 
76  if( numberOfStages != 0 ){
// NOTE(review): listing lines 77-78 are missing — presumably the allocations
// of the per-stage state-count arrays that the loop below fills.
79  breakPoints = new int[numberOfStages+1];
80  for( run1 = 0; run1 < counter; run1++ ){
// NOTE(review): listing lines 81-82 (this loop's body) are missing —
// presumably the element-wise copies of the per-stage state counts.
83  }
// counter+1 break points are valid: one boundary per initialised stage plus
// the trailing end marker.
84  for( run1 = 0; run1 < counter+1; run1++ )
85  breakPoints[run1] = rhs.breakPoints[run1];
86  }
87  else{
// NOTE(review): listing lines 88-89 are missing — presumably nulling the
// per-stage arrays, mirroring the default constructor.
90  breakPoints = 0;
91  }
92 }
93 
94 
// Destructor: releases the heap arrays owned by this object.
// NOTE(review): the destructor signature (listing line 95) and lines 97-98
// (presumably the matching delete[] of numberOfDifferentialStates) are
// missing from this dump; verify against the original source.
96 
99 
100  if( numberOfAlgebraicStates != 0 )
101  delete[] numberOfAlgebraicStates;
102 
103  if( breakPoints != 0 )
104  delete[] breakPoints;
105 }
106 
107 
// Assignment operator: frees the currently owned arrays, then deep-copies the
// stage bookkeeping from rhs (mirrors the copy constructor).
// NOTE(review): the signature (listing line 108) is missing from this dump.
109 
110  int run1;
111 
// Standard self-assignment guard.
112  if( this != &rhs ){
113 
114  if( numberOfDifferentialStates != 0 )
// NOTE(review): listing line 115 (presumably
// delete[] numberOfDifferentialStates;) is missing from this dump.
116 
117  if( numberOfAlgebraicStates != 0 )
118  delete[] numberOfAlgebraicStates;
119 
120  if( breakPoints != 0 )
121  delete[] breakPoints;
122 
// NOTE(review): listing line 123 is missing — presumably the base-class
// assignment ConstraintElement::operator=(rhs).
124 
// NOTE(review): listing line 125 is missing — presumably
// numberOfStages = rhs.numberOfStages; the branch below depends on it.
126  counter = rhs.counter ;
127 
128  if( numberOfStages != 0 ){
// NOTE(review): listing lines 129-130 are missing — presumably re-allocation
// of the per-stage state-count arrays.
131  breakPoints = new int[numberOfStages+1];
132  for( run1 = 0; run1 < counter; run1++ ){
// NOTE(review): listing lines 133-134 (loop body) are missing — presumably
// the element-wise copies of the per-stage state counts.
135  }
136  for( run1 = 0; run1 < counter+1; run1++ )
137  breakPoints[run1] = rhs.breakPoints[run1];
138  }
139  else{
// NOTE(review): listing lines 140-141 are missing — presumably nulling the
// per-stage arrays.
142  breakPoints = 0;
143  }
144  }
145  return *this;
146 }
147 
148 
149 
// Evaluates the algebraic consistency residuum on every grid point.
// NOTE(review): the signature (listing line 150, presumably
// returnValue AlgebraicConsistencyConstraint::evaluate(const OCPiterate&iter))
// and line 155 are missing from this dump.
// Returns SUCCESSFUL_RETURN, or RET_MEMBER_NOT_INITIALISED if no model
// functions have been set.
151 
152  int run1, run2;
153 
154  if( fcn == 0 ) return ACADOERROR(RET_MEMBER_NOT_INITIALISED);
156 
157  int stageIdx = 0;
158  const int T = grid.getLastIndex();
159 
// One residuum block per grid point; lower == upper below, i.e. this is an
// equality constraint (algebraic equations must vanish).
160  residuumL.init(T+1,1);
161  residuumU.init(T+1,1);
162 
163 
164  for( run1 = 0; run1 <= T; run1++ ){
165 
// Number of algebraic equations active on the current stage.
166  int nc = numberOfAlgebraicStates[stageIdx];
167  DMatrix res( nc, 1 );
168 
// Load the iterate's values at grid point run1 into the stage's evaluation
// point, then evaluate the stage's model function there.
169  z[stageIdx].setZ( run1, iter );
170  DVector result = fcn[stageIdx].evaluate( z[stageIdx],run1 );
171 
// The algebraic components follow the differential ones in the function
// output; the residuum is their negation.
172  for( run2 = 0; run2 < nc; run2++ )
173  res( run2, 0 ) = -result(numberOfDifferentialStates[stageIdx]+run2);
174 
175  // STORE THE RESULTS:
176  // ------------------
177  residuumL.setDense( run1, 0, res );
178  residuumU.setDense( run1, 0, res );
179 
180  // INCREASE THE STAGE COUNTER IF NECESSARY:
181  // ----------------------------------------
// breakPoints[stageIdx+1] is the last grid index belonging to this stage.
182  if( run1 == breakPoints[stageIdx+1] ) stageIdx++;
183  }
184 
185  return SUCCESSFUL_RETURN;
186 }
187 
188 
// Evaluates first-order sensitivities of the consistency constraint, either
// in backward (adjoint) mode when bSeed is set, or in forward mode when any
// of the forward seeds (xSeed/xaSeed/pSeed/uSeed/wSeed) is set.
// NOTE(review): the signature (listing line 189) and listing lines 199, 210,
// 275 and 322 are missing from this dump; 210/275 presumably return
// RET_WRONG_DEFINITION_OF_SEEDS for mixed seeds, and 322 is the final
// return of the function. Verify against the original source.
190 
191 
192  // EVALUATION OF THE SENSITIVITIES:
193  // --------------------------------
194 
195  int run1=0, run3;
196  returnValue returnvalue;
197 
198  if( fcn == 0 ) return ACADOERROR(RET_MEMBER_NOT_INITIALISED);
200 
201  int stageIdx = 0;
202  const int N = grid.getNumPoints();
203 
204 
// BACKWARD (ADJOINT) MODE:
205  if( bSeed != 0 )
206  {
207 
// Backward and forward seeds are mutually exclusive; a mixed definition is an
// error (the error return on listing line 210 is missing from this dump).
208  if( xSeed != 0 || xaSeed != 0 || pSeed != 0 || uSeed != 0 || wSeed != 0 ||
209  xSeed2 != 0 || xaSeed2 != 0 || pSeed2 != 0 || uSeed2 != 0 || wSeed2 != 0 )
211 
212  int nBDirs;
213 
// Block layout: columns [0..N) = x, [N..2N) = xa, [2N..3N) = p,
// [3N..4N) = u, [4N..5N) = w.
214  dBackward.init( N, 5*N );
215 
216 
217  for( run3 = 0; run3 < N; run3++ )
218  {
219  DMatrix bseed_;
220  bSeed->getSubBlock( 0, run3, bseed_ );
221 
// The seed only addresses the algebraic components; prepend zeros for the
// differential components of the stage function.
222  DVector bseedTmp( numberOfDifferentialStates[stageIdx] );
223  bseedTmp.setZero();
224 
225  nBDirs = bSeed->getNumRows( 0, run3 );
226 
227  DMatrix Dx ( nBDirs, nx );
228  DMatrix Dxa( nBDirs, na );
229  DMatrix Dp ( nBDirs, np );
230  DMatrix Du ( nBDirs, nu );
231  DMatrix Dw ( nBDirs, nw );
232 
// One adjoint sweep per backward direction.
233  for( run1 = 0; run1 < nBDirs; run1++ )
234  {
235  bseedTmp.append( bseed_.getRow(run1) );
236  ACADO_TRY( fcn[stageIdx].AD_backward( bseedTmp,JJ[stageIdx],run3 ) );
237 
238  if( nx > 0 ) Dx .setRow( run1, JJ[stageIdx].getX () );
239  if( na > 0 ) Dxa.setRow( run1, JJ[stageIdx].getXA() );
240  if( np > 0 ) Dp .setRow( run1, JJ[stageIdx].getP () );
241  if( nu > 0 ) Du .setRow( run1, JJ[stageIdx].getU () );
242  if( nw > 0 ) Dw .setRow( run1, JJ[stageIdx].getW () );
243 
// Reset the accumulator so directions do not mix.
244  JJ[stageIdx].setZero( );
245  }
246 
247  if( nx > 0 )
248  dBackward.setDense( run3, run3, Dx );
249 
250  if( na > 0 )
251  dBackward.setDense( run3, N+run3, Dxa );
252 
253  if( np > 0 )
254  dBackward.setDense( run3, 2*N+run3, Dp );
255 
256  if( nu > 0 )
257  dBackward.setDense( run3, 3*N+run3, Du );
258 
259  if( nw > 0 )
260  dBackward.setDense( run3, 4*N+run3, Dw );
261 
// Advance to the next stage once its last grid index has been processed.
262  if( run3 == breakPoints[stageIdx+1] ) stageIdx++;
263  }
264 
265  return SUCCESSFUL_RETURN;
266  }
267 
268 
269  // COMPUTATION OF DIRECTIONAL SENSITIVITIES IN FORWARD MODE:
270  // ---------------------------------------------------------
271 
272  if( xSeed != 0 || xaSeed != 0 || pSeed != 0 || uSeed != 0 || wSeed != 0 ){
273 
274  if( bSeed != 0 ) return ACADOERROR( RET_WRONG_DEFINITION_OF_SEEDS );
276 
277  dForward.init( N, 5*N );
278 
279  for( run3 = 0; run3 < N; run3++ ){
280 
// For each supplied seed, delegate to computeForwardSensitivityBlock with
// the block column (run3, N+run3, ...) and the variable offset
// (0, nx, nx+na, ...) identifying the seeded variable group.
281  if( xSeed != 0 ){
282  DMatrix tmp;
283  xSeed->getSubBlock(0,0,tmp);
284  returnvalue = computeForwardSensitivityBlock( run3, run3, 0, stageIdx, &tmp );
285  if( returnvalue != SUCCESSFUL_RETURN ) return ACADOERROR(returnvalue);
286  }
287 
288  if( xaSeed != 0 ){
289  DMatrix tmp;
290  xaSeed->getSubBlock(0,0,tmp);
291  returnvalue = computeForwardSensitivityBlock( run3, 1*N+run3, nx, stageIdx, &tmp );
292  if( returnvalue != SUCCESSFUL_RETURN ) return ACADOERROR(returnvalue);
293  }
294 
295  if( pSeed != 0 ){
296  DMatrix tmp;
297  pSeed->getSubBlock(0,0,tmp);
298  returnvalue = computeForwardSensitivityBlock( run3, 2*N+run3, nx+na, stageIdx, &tmp );
299  if( returnvalue != SUCCESSFUL_RETURN ) return ACADOERROR(returnvalue);
300  }
301 
302  if( uSeed != 0 ){
303  DMatrix tmp;
304  uSeed->getSubBlock(0,0,tmp);
305  returnvalue = computeForwardSensitivityBlock( run3, 3*N+run3, nx+na+np, stageIdx, &tmp );
306  if( returnvalue != SUCCESSFUL_RETURN ) return ACADOERROR(returnvalue);
307  }
308 
309  if( wSeed != 0 ){
310  DMatrix tmp;
311  wSeed->getSubBlock(0,0,tmp);
312  returnvalue = computeForwardSensitivityBlock( run3, 4*N+run3, nx+na+np+nu, stageIdx, &tmp );
313  if( returnvalue != SUCCESSFUL_RETURN ) return ACADOERROR(returnvalue);
314  }
315 
316  if( run3 == breakPoints[stageIdx+1] ) stageIdx++;
317  }
318  return SUCCESSFUL_RETURN;
319  }
320 
321 
// NOTE(review): listing line 322 (the return reached when no seed at all was
// defined) is missing from this dump.
323 }
324 
325 
326 
// Hessian-accumulating overload: for every grid point, computes the first-
// order sensitivities (Jacobian blocks Dx/Dxa/Dp/Du/Dw) and accumulates the
// seeded second-order contributions into 'hessian' via second-order adjoint
// (AD_backward2) sweeps, one variable group (x, xa, p, u, w) at a time.
// NOTE(review): the function-name line (listing 327) and lines 334 are
// missing from this dump; 'count' is a member read/incremented here to walk
// through seed_ row blocks.
328  const BlockMatrix &seed_,
329  BlockMatrix &hessian ){
330 
331  int run3;
332  const int N = grid.getNumPoints();
333  if( fcn == 0 ) return ACADOERROR(RET_MEMBER_NOT_INITIALISED);
335 
336  int stageIdx = 0;
337 
// Block layout (same as the first-order overload): columns
// [0..N)=x, [N..2N)=xa, [2N..3N)=p, [3N..4N)=u, [4N..5N)=w.
338  dBackward.init( N, 5*N );
339 
340  for( run3 = 0; run3 < N; run3++ ){
341 
// Number of algebraic equations on the current stage.
342  int nc = numberOfAlgebraicStates[stageIdx];
343 
344  DMatrix seed;
345  seed_.getSubBlock( count, 0, seed, nc, 1 );
346  count++;
347 
348  // EVALUATION OF THE SENSITIVITIES:
349  // --------------------------------
350 
351  int run1, run2;
// Raw work buffers for the AD sweeps, sized by the stage function; released
// at the end of each grid-point iteration below.
353  double *bseed1 = new double[fcn[stageIdx].getDim()];
354  double *bseed2 = new double[fcn[stageIdx].getDim()];
355  double *R = new double[fcn[stageIdx].getDim()];
356  double *J = new double[fcn[stageIdx].getNumberOfVariables() +1];
357  double *H = new double[fcn[stageIdx].getNumberOfVariables() +1];
358  double *fseed = new double[fcn[stageIdx].getNumberOfVariables() +1];
// NOTE(review): listing line 352 (a blank) is absent; spacing here follows
// the dump.
359 
// The backward seed addresses only the trailing nc (algebraic) components;
// the leading differential components are seeded with zero.
360  for( run1 = 0; run1 < fcn[stageIdx].getDim()-nc; run1++ ){
361  bseed1[run1] = 0.0;
362  bseed2[run1] = 0.0;
363  }
364 
365  for( run1 = 0; run1 < nc; run1++ ){
366  bseed1[fcn[stageIdx].getDim() - nc + run1] = seed(run1,0);
367  bseed2[fcn[stageIdx].getDim() - nc + run1] = 0.0;
368  }
369 
370  for( run1 = 0; run1 < fcn[stageIdx].getNumberOfVariables()+1; run1++ )
371  fseed[run1] = 0.0;
372 
373  DMatrix Dx ( nc, nx );
374  DMatrix Dxa( nc, na );
375  DMatrix Dp ( nc, np );
376  DMatrix Du ( nc, nu );
377  DMatrix Dw ( nc, nw );
378 
// Hessian rows for the x-group; re-init'ed for each later group below.
379  DMatrix Hx ( nx, nx );
380  DMatrix Hxa( nx, na );
381  DMatrix Hp ( nx, np );
382  DMatrix Hu ( nx, nu );
383  DMatrix Hw ( nx, nw );
384 
// ---- Group 1: differential states x (forward direction run2 in [0,nx)). ----
385  for( run2 = 0; run2 < nx; run2++ ){
386 
387  // FIRST ORDER DERIVATIVES:
388  // ------------------------
// Unit forward seed on variable run2, then reset it after the sweep.
389  fseed[y_index[stageIdx][run2]] = 1.0;
390  fcn[stageIdx].AD_forward( run3, fseed, R );
391  for( run1 = 0; run1 < nc; run1++ )
392  Dx( run1, run2 ) = R[fcn[stageIdx].getDim() - nc + run1];
393  fseed[y_index[stageIdx][run2]] = 0.0;
394 
395  // SECOND ORDER DERIVATIVES:
396  // -------------------------
397  for( run1 = 0; run1 <= fcn[stageIdx].getNumberOfVariables(); run1++ ){
398  J[run1] = 0.0;
399  H[run1] = 0.0;
400  }
401 
402  fcn[stageIdx].AD_backward2( run3, bseed1, bseed2, J, H );
403 
// Negated, matching the negated residuum of evaluate().
404  for( run1 = 0 ; run1 < nx ; run1++ ) Hx ( run2, run1 ) = -H[y_index[stageIdx][run1]];
405  for( run1 = nx ; run1 < nx+na ; run1++ ) Hxa( run2, run1-nx ) = -H[y_index[stageIdx][run1]];
406  for( run1 = nx+na ; run1 < nx+na+np ; run1++ ) Hp ( run2, run1-nx-na ) = -H[y_index[stageIdx][run1]];
407  for( run1 = nx+na+np ; run1 < nx+na+np+nu ; run1++ ) Hu ( run2, run1-nx-na-np ) = -H[y_index[stageIdx][run1]];
408  for( run1 = nx+na+np+nu; run1 < nx+na+np+nu+nw; run1++ ) Hw ( run2, run1-nx-na-np-nu ) = -H[y_index[stageIdx][run1]];
409  }
410 
411  if( nx > 0 ){
412 
413  dBackward.setDense( run3, run3, Dx );
414 
415  if( nx > 0 ) hessian.addDense( run3, run3, Hx );
416  if( na > 0 ) hessian.addDense( run3, N + run3, Hxa );
417  if( np > 0 ) hessian.addDense( run3, 2*N + run3, Hp );
418  if( nu > 0 ) hessian.addDense( run3, 3*N + run3, Hu );
419  if( nw > 0 ) hessian.addDense( run3, 4*N + run3, Hw );
420  }
421 
// ---- Group 2: algebraic states xa (run2 in [nx,nx+na)). ----
422  Hx.init ( na, nx );
423  Hxa.init( na, na );
424  Hp.init ( na, np );
425  Hu.init ( na, nu );
426  Hw.init ( na, nw );
427 
428  for( run2 = nx; run2 < nx+na; run2++ ){
429 
430  // FIRST ORDER DERIVATIVES:
431  // ------------------------
432  fseed[y_index[stageIdx][run2]] = 1.0;
433  fcn[stageIdx].AD_forward( run3, fseed, R );
434  for( run1 = 0; run1 < nc; run1++ )
435  Dxa( run1, run2-nx ) = R[fcn[stageIdx].getDim() - nc + run1];
436  fseed[y_index[stageIdx][run2]] = 0.0;
437 
438  // SECOND ORDER DERIVATIVES:
439  // -------------------------
440  for( run1 = 0; run1 <= fcn[stageIdx].getNumberOfVariables(); run1++ ){
441  J[run1] = 0.0;
442  H[run1] = 0.0;
443  }
444 
445  fcn[stageIdx].AD_backward2( run3, bseed1, bseed2, J, H );
446 
447  for( run1 = 0 ; run1 < nx ; run1++ ) Hx ( run2-nx, run1 ) = -H[y_index[stageIdx][run1]];
448  for( run1 = nx ; run1 < nx+na ; run1++ ) Hxa( run2-nx, run1-nx ) = -H[y_index[stageIdx][run1]];
449  for( run1 = nx+na ; run1 < nx+na+np ; run1++ ) Hp ( run2-nx, run1-nx-na ) = -H[y_index[stageIdx][run1]];
450  for( run1 = nx+na+np ; run1 < nx+na+np+nu ; run1++ ) Hu ( run2-nx, run1-nx-na-np ) = -H[y_index[stageIdx][run1]];
451  for( run1 = nx+na+np+nu; run1 < nx+na+np+nu+nw; run1++ ) Hw ( run2-nx, run1-nx-na-np-nu ) = -H[y_index[stageIdx][run1]];
452  }
453 
454  if( na > 0 ){
455 
456  dBackward.setDense( run3, N+run3, Dxa );
457 
458  if( nx > 0 ) hessian.addDense( N+run3, run3, Hx );
459  if( na > 0 ) hessian.addDense( N+run3, N + run3, Hxa );
460  if( np > 0 ) hessian.addDense( N+run3, 2*N + run3, Hp );
461  if( nu > 0 ) hessian.addDense( N+run3, 3*N + run3, Hu );
462  if( nw > 0 ) hessian.addDense( N+run3, 4*N + run3, Hw );
463  }
464 
// ---- Group 3: parameters p (run2 in [nx+na,nx+na+np)). ----
465  Hx.init ( np, nx );
466  Hxa.init( np, na );
467  Hp.init ( np, np );
468  Hu.init ( np, nu );
469  Hw.init ( np, nw );
470 
471  for( run2 = nx+na; run2 < nx+na+np; run2++ ){
472 
473  // FIRST ORDER DERIVATIVES:
474  // ------------------------
475  fseed[y_index[stageIdx][run2]] = 1.0;
476  fcn[stageIdx].AD_forward( run3, fseed, R );
477  for( run1 = 0; run1 < nc; run1++ )
478  Dp( run1, run2-nx-na ) = R[fcn[stageIdx].getDim() - nc + run1];
479  fseed[y_index[stageIdx][run2]] = 0.0;
480 
481  // SECOND ORDER DERIVATIVES:
482  // -------------------------
483  for( run1 = 0; run1 <= fcn[stageIdx].getNumberOfVariables(); run1++ ){
484  J[run1] = 0.0;
485  H[run1] = 0.0;
486  }
487 
488  fcn[stageIdx].AD_backward2( run3, bseed1, bseed2, J, H );
489 
490  for( run1 = 0 ; run1 < nx ; run1++ ) Hx ( run2-nx-na, run1 ) = -H[y_index[stageIdx][run1]];
491  for( run1 = nx ; run1 < nx+na ; run1++ ) Hxa( run2-nx-na, run1-nx ) = -H[y_index[stageIdx][run1]];
492  for( run1 = nx+na ; run1 < nx+na+np ; run1++ ) Hp ( run2-nx-na, run1-nx-na ) = -H[y_index[stageIdx][run1]];
493  for( run1 = nx+na+np ; run1 < nx+na+np+nu ; run1++ ) Hu ( run2-nx-na, run1-nx-na-np ) = -H[y_index[stageIdx][run1]];
494  for( run1 = nx+na+np+nu; run1 < nx+na+np+nu+nw; run1++ ) Hw ( run2-nx-na, run1-nx-na-np-nu ) = -H[y_index[stageIdx][run1]];
495  }
496 
497  if( np > 0 ){
498 
499  dBackward.setDense( run3, 2*N+run3, Dp );
500 
501  if( nx > 0 ) hessian.addDense( 2*N+run3, run3, Hx );
502  if( na > 0 ) hessian.addDense( 2*N+run3, N + run3, Hxa );
503  if( np > 0 ) hessian.addDense( 2*N+run3, 2*N + run3, Hp );
504  if( nu > 0 ) hessian.addDense( 2*N+run3, 3*N + run3, Hu );
505  if( nw > 0 ) hessian.addDense( 2*N+run3, 4*N + run3, Hw );
506  }
507 
508 
// ---- Group 4: controls u (run2 in [nx+na+np,nx+na+np+nu)). ----
509  Hx.init ( nu, nx );
510  Hxa.init( nu, na );
511  Hp.init ( nu, np );
512  Hu.init ( nu, nu );
513  Hw.init ( nu, nw );
514 
515  for( run2 = nx+na+np; run2 < nx+na+np+nu; run2++ ){
516 
517  // FIRST ORDER DERIVATIVES:
518  // ------------------------
519  fseed[y_index[stageIdx][run2]] = 1.0;
520  fcn[stageIdx].AD_forward( run3, fseed, R );
521  for( run1 = 0; run1 < nc; run1++ )
522  Du( run1, run2-nx-na-np ) = R[fcn[stageIdx].getDim() - nc + run1];
523  fseed[y_index[stageIdx][run2]] = 0.0;
524 
525  // SECOND ORDER DERIVATIVES:
526  // -------------------------
527  for( run1 = 0; run1 <= fcn[stageIdx].getNumberOfVariables(); run1++ ){
528  J[run1] = 0.0;
529  H[run1] = 0.0;
530  }
531 
532  fcn[stageIdx].AD_backward2( run3, bseed1, bseed2, J, H );
533 
534  for( run1 = 0 ; run1 < nx ; run1++ ) Hx ( run2-nx-na-np, run1 ) = -H[y_index[stageIdx][run1]];
535  for( run1 = nx ; run1 < nx+na ; run1++ ) Hxa( run2-nx-na-np, run1-nx ) = -H[y_index[stageIdx][run1]];
536  for( run1 = nx+na ; run1 < nx+na+np ; run1++ ) Hp ( run2-nx-na-np, run1-nx-na ) = -H[y_index[stageIdx][run1]];
537  for( run1 = nx+na+np ; run1 < nx+na+np+nu ; run1++ ) Hu ( run2-nx-na-np, run1-nx-na-np ) = -H[y_index[stageIdx][run1]];
538  for( run1 = nx+na+np+nu; run1 < nx+na+np+nu+nw; run1++ ) Hw ( run2-nx-na-np, run1-nx-na-np-nu ) = -H[y_index[stageIdx][run1]];
539  }
540 
541  if( nu > 0 ){
542 
543  dBackward.setDense( run3, 3*N+run3, Du );
544 
545  if( nx > 0 ) hessian.addDense( 3*N+run3, run3, Hx );
546  if( na > 0 ) hessian.addDense( 3*N+run3, N + run3, Hxa );
547  if( np > 0 ) hessian.addDense( 3*N+run3, 2*N + run3, Hp );
548  if( nu > 0 ) hessian.addDense( 3*N+run3, 3*N + run3, Hu );
549  if( nw > 0 ) hessian.addDense( 3*N+run3, 4*N + run3, Hw );
550  }
551 
// ---- Group 5: disturbances w (run2 in [nx+na+np+nu,nx+na+np+nu+nw)). ----
552  Hx.init ( nw, nx );
553  Hxa.init( nw, na );
554  Hp.init ( nw, np );
555  Hu.init ( nw, nu );
556  Hw.init ( nw, nw );
557 
558  for( run2 = nx+na+np+nu; run2 < nx+na+np+nu+nw; run2++ ){
559 
560  // FIRST ORDER DERIVATIVES:
561  // ------------------------
562  fseed[y_index[stageIdx][run2]] = 1.0;
563  fcn[stageIdx].AD_forward( run3, fseed, R );
564  for( run1 = 0; run1 < nc; run1++ )
565  Dw( run1, run2-nx-na-np-nu ) = R[fcn[stageIdx].getDim() - nc + run1];
566  fseed[y_index[stageIdx][run2]] = 0.0;
567 
568  // SECOND ORDER DERIVATIVES:
569  // -------------------------
570  for( run1 = 0; run1 <= fcn[stageIdx].getNumberOfVariables(); run1++ ){
571  J[run1] = 0.0;
572  H[run1] = 0.0;
573  }
574 
575  fcn[stageIdx].AD_backward2( run3, bseed1, bseed2, J, H );
576 
577  for( run1 = 0 ; run1 < nx ; run1++ ) Hx ( run2-nx-na-np-nu, run1 ) = -H[y_index[stageIdx][run1]];
578  for( run1 = nx ; run1 < nx+na ; run1++ ) Hxa( run2-nx-na-np-nu, run1-nx ) = -H[y_index[stageIdx][run1]];
579  for( run1 = nx+na ; run1 < nx+na+np ; run1++ ) Hp ( run2-nx-na-np-nu, run1-nx-na ) = -H[y_index[stageIdx][run1]];
580  for( run1 = nx+na+np ; run1 < nx+na+np+nu ; run1++ ) Hu ( run2-nx-na-np-nu, run1-nx-na-np ) = -H[y_index[stageIdx][run1]];
581  for( run1 = nx+na+np+nu; run1 < nx+na+np+nu+nw; run1++ ) Hw ( run2-nx-na-np-nu, run1-nx-na-np-nu ) = -H[y_index[stageIdx][run1]];
582  }
583 
584  if( nw > 0 ){
585 
586  dBackward.setDense( run3, 4*N+run3, Dw );
587 
588  if( nx > 0 ) hessian.addDense( 4*N+run3, run3, Hx );
589  if( na > 0 ) hessian.addDense( 4*N+run3, N + run3, Hxa );
590  if( np > 0 ) hessian.addDense( 4*N+run3, 2*N + run3, Hp );
591  if( nu > 0 ) hessian.addDense( 4*N+run3, 3*N + run3, Hu );
592  if( nw > 0 ) hessian.addDense( 4*N+run3, 4*N + run3, Hw );
593  }
594 
// Release the per-iteration AD work buffers.
595  delete[] bseed1;
596  delete[] bseed2;
597  delete[] R ;
598  delete[] J ;
599  delete[] H ;
600  delete[] fseed ;
601 
602  if( run3 == breakPoints[stageIdx+1] ) stageIdx++;
603  }
604 
605  return SUCCESSFUL_RETURN;
606 }
607 
608 
609 
610 //
611 // PROTECTED MEMBER FUNCTIONS:
612 //
613 
// (Re)creates the per-function evaluation points z (forward) and JJ
// (backward/Jacobian accumulator), one pair per entry in fcn, each
// initialised from the given iterate.
// NOTE(review): the signature line (listing 614,
// initializeEvaluationPoints(const OCPiterate &iter) per the documentation
// index) is missing from this dump. Also note stageIdx is maintained in the
// loop but never read in the visible body — fcn is indexed by i, not
// stageIdx; possibly dead code or an omitted line. Verify against the
// original source before removing.
615 {
616  if ( z != 0 )
617  delete[] z;
618 
619  if ( JJ != 0 )
620  delete[] JJ;
621 
622  z = new EvaluationPoint[nFcn];
623  JJ = new EvaluationPoint[nFcn];
624 
625  int stageIdx = 0;
626  for( int i=0; i<nFcn; ++i )
627  {
628  z[i].init( fcn[i], iter );
629  JJ[i].init( fcn[i], iter );
630 
631  if( i == breakPoints[stageIdx+1] )
632  stageIdx++;
633  }
634 
635  return SUCCESSFUL_RETURN;
636 }
637 
638 
639 
640 
642 
643 // end of file.
#define N
virtual returnValue initializeEvaluationPoints(const OCPiterate &iter)
Data class for storing generic optimization variables.
Definition: ocp_iterate.hpp:57
Implements a very rudimentary block sparse matrix class.
EvaluationPoint * z
USING_NAMESPACE_ACADO typedef TaylorVariable< Interval > T
ConstraintElement & operator=(const ConstraintElement &rhs)
void init(unsigned _nRows=0, unsigned _nCols=0)
Definition: matrix.hpp:135
returnValue evaluate(const OCPiterate &iter)
returnValue setDense(uint rowIdx, uint colIdx, const DMatrix &value)
returnValue computeForwardSensitivityBlock(int offset1, int offset2, int offset3, int stageIdx, DMatrix *seed)
Allows to pass back messages to the calling function.
DVector evaluate(const EvaluationPoint &x, const int &number=0)
Definition: function.cpp:520
AlgebraicConsistencyConstraint & operator=(const AlgebraicConsistencyConstraint &rhs)
returnValue setZ(const uint &idx, const OCPiterate &iter)
Allows to setup function evaluation points.
BEGIN_NAMESPACE_ACADO typedef unsigned int uint
Definition: acado_types.hpp:42
Allows to conveniently handle (one-dimensional) grids consisting of time points.
Definition: grid.hpp:58
uint getLastIndex() const
#define CLOSE_NAMESPACE_ACADO
returnValue init(const Function &f, uint nx_=0, uint na_=0, uint np_=0, uint nu_=0, uint nw_=0, uint nd_=0, uint N_=0)
#define ACADO_TRY(X)
Deals with algebraic consistency constraints within optimal control problems.
returnValue getSubBlock(uint rowIdx, uint colIdx, DMatrix &value) const
Base class for all kind of constraints (except for bounds) within optimal control problems...
returnValue init(uint _nRows, uint _nCols)
EvaluationPoint * JJ
int getDim() const
GenericMatrix & setRow(unsigned _idx, const GenericVector< T > &_values)
Definition: matrix.hpp:213
returnValue setZero()
Derived & setZero(Index size)
void rhs(const real_t *x, real_t *f)
GenericVector & append(const GenericVector &_arg)
Definition: vector.cpp:42
uint getNumPoints() const
#define BEGIN_NAMESPACE_ACADO
returnValue addDense(uint rowIdx, uint colIdx, const DMatrix &value)
#define R
GenericVector< T > getRow(unsigned _idx) const
Definition: matrix.hpp:197
#define ACADOERROR(retval)
uint getNumRows() const


acado
Author(s): Milan Vukov, Rien Quirynen
autogenerated on Mon Jun 10 2019 12:34:27