boundary_constraint.cpp
Go to the documentation of this file.
1 /*
2  * This file is part of ACADO Toolkit.
3  *
4  * ACADO Toolkit -- A Toolkit for Automatic Control and Dynamic Optimization.
5  * Copyright (C) 2008-2014 by Boris Houska, Hans Joachim Ferreau,
6  * Milan Vukov, Rien Quirynen, KU Leuven.
7  * Developed within the Optimization in Engineering Center (OPTEC)
8  * under supervision of Moritz Diehl. All rights reserved.
9  *
10  * ACADO Toolkit is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 3 of the License, or (at your option) any later version.
14  *
15  * ACADO Toolkit is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with ACADO Toolkit; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  *
24  */
25 
26 
27 
36 
37 
38 
40 
41 
42 
43 //
44 // PUBLIC MEMBER FUNCTIONS:
45 //
46 
47 
50 
51 }
52 
53 
// NOTE(review): the constructor signature (doxygen line 54) was stripped by the
// documentation-to-text conversion; per the symbol index this is
// BoundaryConstraint::BoundaryConstraint( const Grid& grid_ ) — TODO confirm in repo.
// Delegates to the ConstraintElement base; the arguments (2, 1) presumably request
// two evaluation points (start/end of the horizon, cf. fcn[0]/fcn[1] below) and one
// constraint block — verify against ConstraintElement's constructor documentation.
55  :ConstraintElement(grid_, 2, 1 ){
56 
57 }
58 
59 
// NOTE(review): copy-constructor signature (doxygen line 60) was stripped by the
// conversion; this is the initializer list of
// BoundaryConstraint::BoundaryConstraint( const BoundaryConstraint& rhs ).
// All state is copied by the ConstraintElement base copy constructor.
61  :ConstraintElement(rhs){
62 
63 
64 }
65 
66 
68 
69 }
70 
71 
// NOTE(review): assignment-operator signature (doxygen line 72) was stripped;
// per the symbol index this is
// BoundaryConstraint& BoundaryConstraint::operator=( const BoundaryConstraint& rhs ).
// Doxygen line 76 (inside the self-assignment guard) is also missing — presumably
// the base-class assignment ConstraintElement::operator=(rhs); TODO confirm in repo.
73 
74  if( this != &rhs ){
75 
77 
78  }
79 
80  return *this;
81 }
82 
83 
84 
85 
// NOTE(review): the signature (doxygen line 86) was stripped by the conversion;
// per the symbol index this is the body of
// returnValue BoundaryConstraint::evaluate( const OCPiterate& iter ).
// Evaluates the boundary-constraint residuals at the current iterate.
87 
88  int run1;
89 
// nc: number of constraint components; T: index of the last grid point.
90  const int nc = getNC();
91  const int T = grid.getLastIndex();
92 
// No constraint function has been set up yet -> report and bail out.
93  if( nc == 0 ) return ACADOERROR(RET_MEMBER_NOT_INITIALISED);
94 
// Lower/upper residuals, one entry per constraint component.
95  DMatrix resL( nc, 1 );
96  DMatrix resU( nc, 1 );
97 
98 
99  // EVALUATION AT THE START POINT:
100  // ------------------------------
101 
// Bind evaluation point z[0] to the first grid point and z[1] to the last one.
102  z[0].setZ( 0, iter );
103  z[1].setZ( T, iter );
104 
105  DVector result = fcn[0].evaluate( z[0] );
106 
// After the start-point pass: res = bound - f0(start). The end-point value is
// subtracted below, so the stored residual is  bound - f0(start) - f1(end).
107  for( run1 = 0; run1 < nc; run1++ ){
108  resL( run1, 0 ) = lb[0][run1] - result(run1);
109  resU( run1, 0 ) = ub[0][run1] - result(run1);
110  }
111 
112 
113  // EVALUATION AT THE END POINT:
114  // ------------------------------
115 
116  result = fcn[1].evaluate( z[1] );
117 
118  for( run1 = 0; run1 < nc; run1++ ){
119  resL( run1, 0 ) -= result(run1);
120  resU( run1, 0 ) -= result(run1);
121  }
122 
123 
124  // STORE THE RESULTS:
125  // ------------------
126 
// Residuals are published as 1x1 block matrices holding the dense nc x 1 results.
127  residuumL.init(1,1);
128  residuumU.init(1,1);
129 
130  residuumL.setDense( 0, 0, resL );
131  residuumU.setDense( 0, 0, resU );
132 
133 
134  return SUCCESSFUL_RETURN;
135 }
136 
137 
// NOTE(review): the signature (doxygen line 138) was stripped by the conversion;
// per the symbol index this is the body of
// returnValue BoundaryConstraint::evaluateSensitivities( ).
// Computes first-order backward (adjoint) sensitivities of the boundary constraint
// with respect to x, xa, p, u, w, at both the start and the end grid point.
139 
140  int run1;
141 
142  const int N = grid.getNumPoints();
143 
144  // EVALUATION OF THE SENSITIVITIES:
145  // --------------------------------
146 
147  if( bSeed != 0 )
148  {
149 
// Backward seeds must be exclusive: mixing them with any forward (xSeed..wSeed)
// or second-order (xSeed2..wSeed2) seed is rejected here.
// NOTE(review): doxygen line 152 — the statement guarded by this condition — was
// stripped by the conversion; presumably an ACADOERROR return. TODO confirm in repo.
150  if( xSeed != 0 || pSeed != 0 || uSeed != 0 || wSeed != 0 ||
151  xSeed2 != 0 || pSeed2 != 0 || uSeed2 != 0 || wSeed2 != 0 )
153 
154  int nBDirs = bSeed->getNumRows( 0, 0 );
155 
156  DMatrix bseed_;
157  bSeed->getSubBlock( 0, 0, bseed_);
158 
// dBackward layout: 1 x 5N block row; column blocks [0,N) = x, [N,2N) = xa,
// [2N,3N) = p, [3N,4N) = u, [4N,5N) = w. Start-point results go to the first
// column of each block, end-point results to the last one.
159  dBackward.init( 1, 5*N );
160 
161  DMatrix Dx ( nBDirs, nx );
162  DMatrix Dxa( nBDirs, na );
163  DMatrix Dp ( nBDirs, np );
164  DMatrix Du ( nBDirs, nu );
165  DMatrix Dw ( nBDirs, nw );
166 
// Sweep each backward direction through fcn[0] (start-point function); JJ[0]
// accumulates the adjoint derivatives and is cleared after each direction.
167  // evaluate at start
168  for( run1 = 0; run1 < nBDirs; run1++ )
169  {
170  ACADO_TRY( fcn[0].AD_backward( bseed_.getRow(run1), JJ[0], 0 ) );
171 
172  if( nx > 0 ) Dx .setRow( run1, JJ[0].getX () );
173  if( na > 0 ) Dxa.setRow( run1, JJ[0].getXA() );
174  if( np > 0 ) Dp .setRow( run1, JJ[0].getP () );
175  if( nu > 0 ) Du .setRow( run1, JJ[0].getU () );
176  if( nw > 0 ) Dw .setRow( run1, JJ[0].getW () );
177 
178  JJ[0].setZero( );
179  }
180 
// Store start-point blocks at the first column of each variable block.
181  if( nx > 0 )
182  dBackward.setDense( 0, 0 , Dx );
183 
184  if( na > 0 )
185  dBackward.setDense( 0, N, Dxa );
186 
187  if( np > 0 )
188  dBackward.setDense( 0, 2*N, Dp );
189 
190  if( nu > 0 )
191  dBackward.setDense( 0, 3*N, Du );
192 
193  if( nw > 0 )
194  dBackward.setDense( 0, 4*N, Dw );
195 
// Same sweep for fcn[1] (end-point function); the D* matrices are reused.
196  // evaluate at end
197  for( run1 = 0; run1 < nBDirs; run1++ )
198  {
199  ACADO_TRY( fcn[1].AD_backward( bseed_.getRow(run1), JJ[1], 0 ) );
200 
201  if( nx > 0 ) Dx .setRow( run1, JJ[1].getX () );
202  if( na > 0 ) Dxa.setRow( run1, JJ[1].getXA() );
203  if( np > 0 ) Dp .setRow( run1, JJ[1].getP () );
204  if( nu > 0 ) Du .setRow( run1, JJ[1].getU () );
205  if( nw > 0 ) Dw .setRow( run1, JJ[1].getW () );
206 
207  JJ[1].setZero( );
208  }
209 
// Store end-point blocks at the last column of each variable block.
210  if( nx > 0 )
211  dBackward.setDense( 0, N-1, Dx );
212 
213  if( na > 0 )
214  dBackward.setDense( 0, 2*N-1, Dxa );
215 
216  if( np > 0 )
217  dBackward.setDense( 0, 3*N-1, Dp );
218 
219  if( nu > 0 )
220  dBackward.setDense( 0, 4*N-1, Du );
221 
222  if( nw > 0 )
223  dBackward.setDense( 0, 5*N-1, Dw );
224 
225  return SUCCESSFUL_RETURN;
226  }
227 
// NOTE(review): doxygen lines 229-230 were stripped by the conversion — the
// fall-through path (forward seeds) presumably returns a not-implemented error,
// consistent with the TODO above. TODO confirm in repo.
228  // TODO: implement forward mode
229 
231 }
232 
233 
// NOTE(review): the signature (doxygen line 234) was stripped by the conversion;
// per the symbol index this is the body of the second-order overload
// returnValue BoundaryConstraint::evaluateSensitivities( const DMatrix &seed,
//                                                        BlockMatrix &hessian ).
// Computes the constraint Jacobian (via unit forward AD seeds, one variable at a
// time) and adds the seed-weighted Hessian contributions (via AD_backward2) for
// both boundary functions fcn[0] (start point) and fcn[1] (end point).
// Block layout convention (same as the first-order routine): variable blocks are
// ordered x, xa, p, u, w; start-point results use block index i*N, end-point
// results use block index (i+1)*N-1.
235 
236  // EVALUATION OF THE SENSITIVITIES:
237  // --------------------------------
238 
239  int run1, run2;
240 
241  const int nc = getNC();
242  const int N = grid.getNumPoints();
243 
// One backward-seed entry per constraint component is required.
244  ASSERT( (int) seed.getNumRows() == nc );
245 
// Scratch buffers; bseed2 stays zero (no second backward seed). All of these are
// released with delete[] at the end of the routine.
246  double *bseed1 = new double[nc];
247  double *bseed2 = new double[nc];
248  double *R = new double[nc];
249 
250  double *J1 = new double[fcn[0].getNumberOfVariables() +1];
251  double *H1 = new double[fcn[0].getNumberOfVariables() +1];
252  double *fseed1 = new double[fcn[0].getNumberOfVariables() +1];
253 
254  double *J2 = new double[fcn[1].getNumberOfVariables() +1];
255  double *H2 = new double[fcn[1].getNumberOfVariables() +1];
256  double *fseed2 = new double[fcn[1].getNumberOfVariables() +1];
257 
258  for( run1 = 0; run1 < nc; run1++ ){
259  bseed1[run1] = seed(run1,0);
260  bseed2[run1] = 0.0;
261  }
262 
263  for( run1 = 0; run1 < fcn[0].getNumberOfVariables()+1; run1++ )
264  fseed1[run1] = 0.0;
265 
266  for( run1 = 0; run1 < fcn[1].getNumberOfVariables()+1; run1++ )
267  fseed2[run1] = 0.0;
268 
269  dBackward.init( 1, 5*N );
270 
// D* hold Jacobian blocks (nc rows); H* hold Hessian rows for the current
// variable group and are re-init'ed before each group below.
271  DMatrix Dx ( nc, nx );
272  DMatrix Dxa( nc, na );
273  DMatrix Dp ( nc, np );
274  DMatrix Du ( nc, nu );
275  DMatrix Dw ( nc, nw );
276 
277  DMatrix Hx ( nx, nx );
278  DMatrix Hxa( nx, na );
279  DMatrix Hp ( nx, np );
280  DMatrix Hu ( nx, nu );
281  DMatrix Hw ( nx, nw );
282 
// ---- x block, START point (fcn[0]) -----------------------------------------
// For each variable: forward sweep with a unit seed gives one Jacobian column;
// AD_backward2 with the constraint seed gives one Hessian row. The minus signs
// match the residual convention  bound - f  used in evaluate().
283  for( run2 = 0; run2 < nx; run2++ ){
284 
285  // FIRST ORDER DERIVATIVES:
286  // ------------------------
287  fseed1[y_index[0][run2]] = 1.0;
288  fcn[0].AD_forward( 0, fseed1, R );
289  for( run1 = 0; run1 < nc; run1++ )
290  Dx( run1, run2 ) = R[run1];
291  fseed1[y_index[0][run2]] = 0.0;
292 
293  // SECOND ORDER DERIVATIVES:
294  // -------------------------
295  for( run1 = 0; run1 <= fcn[0].getNumberOfVariables(); run1++ ){
296  J1[run1] = 0.0;
297  H1[run1] = 0.0;
298  }
299 
300  fcn[0].AD_backward2( 0, bseed1, bseed2, J1, H1 );
301 
302  for( run1 = 0 ; run1 < nx ; run1++ ) Hx ( run2, run1 ) = -H1[y_index[0][run1]];
303  for( run1 = nx ; run1 < nx+na ; run1++ ) Hxa( run2, run1-nx ) = -H1[y_index[0][run1]];
304  for( run1 = nx+na ; run1 < nx+na+np ; run1++ ) Hp ( run2, run1-nx-na ) = -H1[y_index[0][run1]];
305  for( run1 = nx+na+np ; run1 < nx+na+np+nu ; run1++ ) Hu ( run2, run1-nx-na-np ) = -H1[y_index[0][run1]];
306  for( run1 = nx+na+np+nu; run1 < nx+na+np+nu+nw; run1++ ) Hw ( run2, run1-nx-na-np-nu ) = -H1[y_index[0][run1]];
307  }
308 
309  if( nx > 0 ){
310 
311  dBackward.setDense( 0, 0, Dx );
312 
313  if( nx > 0 ) hessian.addDense( 0, 0, Hx );
314  if( na > 0 ) hessian.addDense( 0, N, Hxa );
315  if( np > 0 ) hessian.addDense( 0, 2*N, Hp );
316  if( nu > 0 ) hessian.addDense( 0, 3*N, Hu );
317  if( nw > 0 ) hessian.addDense( 0, 4*N, Hw );
318  }
319 
320  Hx.init ( nx, nx );
321  Hxa.init( nx, na );
322  Hp.init ( nx, np );
323  Hu.init ( nx, nu );
324  Hw.init ( nx, nw );
325 
326 
// ---- x block, END point (fcn[1]); blocks stored at indices *N-1 -------------
327  for( run2 = 0; run2 < nx; run2++ ){
328 
329  // FIRST ORDER DERIVATIVES:
330  // ------------------------
331  fseed2[y_index[1][run2]] = 1.0;
332  fcn[1].AD_forward( 0, fseed2, R );
333  for( run1 = 0; run1 < nc; run1++ )
334  Dx( run1, run2 ) = R[run1];
335  fseed2[y_index[1][run2]] = 0.0;
336 
337  // SECOND ORDER DERIVATIVES:
338  // -------------------------
339  for( run1 = 0; run1 <= fcn[1].getNumberOfVariables(); run1++ ){
340  J2[run1] = 0.0;
341  H2[run1] = 0.0;
342  }
343 
344  fcn[1].AD_backward2( 0, bseed1, bseed2, J2, H2 );
345 
346  for( run1 = 0 ; run1 < nx ; run1++ ) Hx ( run2, run1 ) = -H2[y_index[1][run1]];
347  for( run1 = nx ; run1 < nx+na ; run1++ ) Hxa( run2, run1-nx ) = -H2[y_index[1][run1]];
348  for( run1 = nx+na ; run1 < nx+na+np ; run1++ ) Hp ( run2, run1-nx-na ) = -H2[y_index[1][run1]];
349  for( run1 = nx+na+np ; run1 < nx+na+np+nu ; run1++ ) Hu ( run2, run1-nx-na-np ) = -H2[y_index[1][run1]];
350  for( run1 = nx+na+np+nu; run1 < nx+na+np+nu+nw; run1++ ) Hw ( run2, run1-nx-na-np-nu ) = -H2[y_index[1][run1]];
351  }
352 
353  if( nx > 0 ){
354 
355  dBackward.setDense( 0, N-1, Dx );
356 
357  if( nx > 0 ) hessian.addDense( N-1, N-1, Hx );
358  if( na > 0 ) hessian.addDense( N-1, 2*N-1, Hxa );
359  if( np > 0 ) hessian.addDense( N-1, 3*N-1, Hp );
360  if( nu > 0 ) hessian.addDense( N-1, 4*N-1, Hu );
361  if( nw > 0 ) hessian.addDense( N-1, 5*N-1, Hw );
362  }
363 
364  Hx.init ( na, nx );
365  Hxa.init( na, na );
366  Hp.init ( na, np );
367  Hu.init ( na, nu );
368  Hw.init ( na, nw );
369 
370 
// ---- xa (algebraic states) block, START point -------------------------------
371  for( run2 = nx; run2 < nx+na; run2++ ){
372 
373  // FIRST ORDER DERIVATIVES:
374  // ------------------------
375  fseed1[y_index[0][run2]] = 1.0;
376  fcn[0].AD_forward( 0, fseed1, R );
377  for( run1 = 0; run1 < nc; run1++ )
378  Dxa( run1, run2-nx ) = R[run1];
379  fseed1[y_index[0][run2]] = 0.0;
380 
381  // SECOND ORDER DERIVATIVES:
382  // -------------------------
383  for( run1 = 0; run1 <= fcn[0].getNumberOfVariables(); run1++ ){
384  J1[run1] = 0.0;
385  H1[run1] = 0.0;
386  }
387 
388  fcn[0].AD_backward2( 0, bseed1, bseed2, J1, H1 );
389 
390  for( run1 = 0 ; run1 < nx ; run1++ ) Hx ( run2-nx, run1 ) = -H1[y_index[0][run1]];
391  for( run1 = nx ; run1 < nx+na ; run1++ ) Hxa( run2-nx, run1-nx ) = -H1[y_index[0][run1]];
392  for( run1 = nx+na ; run1 < nx+na+np ; run1++ ) Hp ( run2-nx, run1-nx-na ) = -H1[y_index[0][run1]];
393  for( run1 = nx+na+np ; run1 < nx+na+np+nu ; run1++ ) Hu ( run2-nx, run1-nx-na-np ) = -H1[y_index[0][run1]];
394  for( run1 = nx+na+np+nu; run1 < nx+na+np+nu+nw; run1++ ) Hw ( run2-nx, run1-nx-na-np-nu ) = -H1[y_index[0][run1]];
395  }
396 
397  if( na > 0 ){
398 
399  dBackward.setDense( 0, N, Dxa );
400 
401  if( nx > 0 ) hessian.addDense( N, 0, Hx );
402  if( na > 0 ) hessian.addDense( N, N, Hxa );
403  if( np > 0 ) hessian.addDense( N, 2*N, Hp );
404  if( nu > 0 ) hessian.addDense( N, 3*N, Hu );
405  if( nw > 0 ) hessian.addDense( N, 4*N, Hw );
406  }
407 
408  Hx.init ( na, nx );
409  Hxa.init( na, na );
410  Hp.init ( na, np );
411  Hu.init ( na, nu );
412  Hw.init ( na, nw );
413 
414 
// ---- xa block, END point -----------------------------------------------------
415  for( run2 = nx; run2 < nx+na; run2++ ){
416 
417  // FIRST ORDER DERIVATIVES:
418  // ------------------------
419  fseed2[y_index[1][run2]] = 1.0;
420  fcn[1].AD_forward( 0, fseed2, R );
421  for( run1 = 0; run1 < nc; run1++ )
422  Dxa( run1, run2-nx ) = R[run1];
423  fseed2[y_index[1][run2]] = 0.0;
424 
425  // SECOND ORDER DERIVATIVES:
426  // -------------------------
427  for( run1 = 0; run1 <= fcn[1].getNumberOfVariables(); run1++ ){
428  J2[run1] = 0.0;
429  H2[run1] = 0.0;
430  }
431 
432  fcn[1].AD_backward2( 0, bseed1, bseed2, J2, H2 );
433 
434  for( run1 = 0 ; run1 < nx ; run1++ ) Hx ( run2-nx, run1 ) = -H2[y_index[1][run1]];
435  for( run1 = nx ; run1 < nx+na ; run1++ ) Hxa( run2-nx, run1-nx ) = -H2[y_index[1][run1]];
436  for( run1 = nx+na ; run1 < nx+na+np ; run1++ ) Hp ( run2-nx, run1-nx-na ) = -H2[y_index[1][run1]];
437  for( run1 = nx+na+np ; run1 < nx+na+np+nu ; run1++ ) Hu ( run2-nx, run1-nx-na-np ) = -H2[y_index[1][run1]];
438  for( run1 = nx+na+np+nu; run1 < nx+na+np+nu+nw; run1++ ) Hw ( run2-nx, run1-nx-na-np-nu ) = -H2[y_index[1][run1]];
439  }
440 
441  if( na > 0 ){
442 
443  dBackward.setDense( 0, 2*N-1, Dxa );
444 
445  if( nx > 0 ) hessian.addDense( 2*N-1, N-1, Hx );
446  if( na > 0 ) hessian.addDense( 2*N-1, 2*N-1, Hxa );
447  if( np > 0 ) hessian.addDense( 2*N-1, 3*N-1, Hp );
448  if( nu > 0 ) hessian.addDense( 2*N-1, 4*N-1, Hu );
449  if( nw > 0 ) hessian.addDense( 2*N-1, 5*N-1, Hw );
450  }
451 
452  Hx.init ( np, nx );
453  Hxa.init( np, na );
454  Hp.init ( np, np );
455  Hu.init ( np, nu );
456  Hw.init ( np, nw );
457 
458 
// ---- p (parameters) block, START point --------------------------------------
459  for( run2 = nx+na; run2 < nx+na+np; run2++ ){
460 
461  // FIRST ORDER DERIVATIVES:
462  // ------------------------
463  fseed1[y_index[0][run2]] = 1.0;
464  fcn[0].AD_forward( 0, fseed1, R );
465  for( run1 = 0; run1 < nc; run1++ )
466  Dp( run1, run2-nx-na ) = R[run1];
467  fseed1[y_index[0][run2]] = 0.0;
468 
469  // SECOND ORDER DERIVATIVES:
470  // -------------------------
471  for( run1 = 0; run1 <= fcn[0].getNumberOfVariables(); run1++ ){
472  J1[run1] = 0.0;
473  H1[run1] = 0.0;
474  }
475 
476  fcn[0].AD_backward2( 0, bseed1, bseed2, J1, H1 );
477 
478  for( run1 = 0 ; run1 < nx ; run1++ ) Hx ( run2-nx-na, run1 ) = -H1[y_index[0][run1]];
479  for( run1 = nx ; run1 < nx+na ; run1++ ) Hxa( run2-nx-na, run1-nx ) = -H1[y_index[0][run1]];
480  for( run1 = nx+na ; run1 < nx+na+np ; run1++ ) Hp ( run2-nx-na, run1-nx-na ) = -H1[y_index[0][run1]];
481  for( run1 = nx+na+np ; run1 < nx+na+np+nu ; run1++ ) Hu ( run2-nx-na, run1-nx-na-np ) = -H1[y_index[0][run1]];
482  for( run1 = nx+na+np+nu; run1 < nx+na+np+nu+nw; run1++ ) Hw ( run2-nx-na, run1-nx-na-np-nu ) = -H1[y_index[0][run1]];
483  }
484 
485  if( np > 0 ){
486 
487  dBackward.setDense( 0, 2*N, Dp );
488 
489  if( nx > 0 ) hessian.addDense( 2*N, 0, Hx );
490  if( na > 0 ) hessian.addDense( 2*N, N, Hxa );
491  if( np > 0 ) hessian.addDense( 2*N, 2*N, Hp );
492  if( nu > 0 ) hessian.addDense( 2*N, 3*N, Hu );
493  if( nw > 0 ) hessian.addDense( 2*N, 4*N, Hw );
494  }
495 
496 
497  Hx.init ( np, nx );
498  Hxa.init( np, na );
499  Hp.init ( np, np );
500  Hu.init ( np, nu );
501  Hw.init ( np, nw );
502 
503 
// ---- p block, END point -------------------------------------------------------
504  for( run2 = nx+na; run2 < nx+na+np; run2++ ){
505 
506  // FIRST ORDER DERIVATIVES:
507  // ------------------------
508  fseed2[y_index[1][run2]] = 1.0;
509  fcn[1].AD_forward( 0, fseed2, R );
510  for( run1 = 0; run1 < nc; run1++ )
511  Dp( run1, run2-nx-na ) = R[run1];
512  fseed2[y_index[1][run2]] = 0.0;
513 
514  // SECOND ORDER DERIVATIVES:
515  // -------------------------
516  for( run1 = 0; run1 <= fcn[1].getNumberOfVariables(); run1++ ){
517  J2[run1] = 0.0;
518  H2[run1] = 0.0;
519  }
520 
521  fcn[1].AD_backward2( 0, bseed1, bseed2, J2, H2 );
522 
523  for( run1 = 0 ; run1 < nx ; run1++ ) Hx ( run2-nx-na, run1 ) = -H2[y_index[1][run1]];
524  for( run1 = nx ; run1 < nx+na ; run1++ ) Hxa( run2-nx-na, run1-nx ) = -H2[y_index[1][run1]];
525  for( run1 = nx+na ; run1 < nx+na+np ; run1++ ) Hp ( run2-nx-na, run1-nx-na ) = -H2[y_index[1][run1]];
526  for( run1 = nx+na+np ; run1 < nx+na+np+nu ; run1++ ) Hu ( run2-nx-na, run1-nx-na-np ) = -H2[y_index[1][run1]];
527  for( run1 = nx+na+np+nu; run1 < nx+na+np+nu+nw; run1++ ) Hw ( run2-nx-na, run1-nx-na-np-nu ) = -H2[y_index[1][run1]];
528  }
529 
530  if( np > 0 ){
531 
532  dBackward.setDense( 0, 3*N-1, Dp );
533 
534  if( nx > 0 ) hessian.addDense( 3*N-1, N-1, Hx );
535  if( na > 0 ) hessian.addDense( 3*N-1, 2*N-1, Hxa );
536  if( np > 0 ) hessian.addDense( 3*N-1, 3*N-1, Hp );
537  if( nu > 0 ) hessian.addDense( 3*N-1, 4*N-1, Hu );
538  if( nw > 0 ) hessian.addDense( 3*N-1, 5*N-1, Hw );
539  }
540 
541 
542  Hx.init ( nu, nx );
543  Hxa.init( nu, na );
544  Hp.init ( nu, np );
545  Hu.init ( nu, nu );
546  Hw.init ( nu, nw );
547 
548 
// ---- u (controls) block, START point ----------------------------------------
549  for( run2 = nx+na+np; run2 < nx+na+np+nu; run2++ ){
550 
551  // FIRST ORDER DERIVATIVES:
552  // ------------------------
553  fseed1[y_index[0][run2]] = 1.0;
554  fcn[0].AD_forward( 0, fseed1, R );
555  for( run1 = 0; run1 < nc; run1++ )
556  Du( run1, run2-nx-na-np ) = R[run1];
557  fseed1[y_index[0][run2]] = 0.0;
558 
559  // SECOND ORDER DERIVATIVES:
560  // -------------------------
561  for( run1 = 0; run1 <= fcn[0].getNumberOfVariables(); run1++ ){
562  J1[run1] = 0.0;
563  H1[run1] = 0.0;
564  }
565 
566  fcn[0].AD_backward2( 0, bseed1, bseed2, J1, H1 );
567 
568  for( run1 = 0 ; run1 < nx ; run1++ ) Hx ( run2-nx-na-np, run1 ) = -H1[y_index[0][run1]];
569  for( run1 = nx ; run1 < nx+na ; run1++ ) Hxa( run2-nx-na-np, run1-nx ) = -H1[y_index[0][run1]];
570  for( run1 = nx+na ; run1 < nx+na+np ; run1++ ) Hp ( run2-nx-na-np, run1-nx-na ) = -H1[y_index[0][run1]];
571  for( run1 = nx+na+np ; run1 < nx+na+np+nu ; run1++ ) Hu ( run2-nx-na-np, run1-nx-na-np ) = -H1[y_index[0][run1]];
572  for( run1 = nx+na+np+nu; run1 < nx+na+np+nu+nw; run1++ ) Hw ( run2-nx-na-np, run1-nx-na-np-nu ) = -H1[y_index[0][run1]];
573  }
574 
575  if( nu > 0 ){
576 
577  dBackward.setDense( 0, 3*N, Du );
578 
579  if( nx > 0 ) hessian.addDense( 3*N, 0, Hx );
580  if( na > 0 ) hessian.addDense( 3*N, N, Hxa );
581  if( np > 0 ) hessian.addDense( 3*N, 2*N, Hp );
582  if( nu > 0 ) hessian.addDense( 3*N, 3*N, Hu );
583  if( nw > 0 ) hessian.addDense( 3*N, 4*N, Hw );
584  }
585 
586  Hx.init ( nu, nx );
587  Hxa.init( nu, na );
588  Hp.init ( nu, np );
589  Hu.init ( nu, nu );
590  Hw.init ( nu, nw );
591 
592 
// ---- u block, END point -------------------------------------------------------
593  for( run2 = nx+na+np; run2 < nx+na+np+nu; run2++ ){
594 
595  // FIRST ORDER DERIVATIVES:
596  // ------------------------
597  fseed2[y_index[1][run2]] = 1.0;
598  fcn[1].AD_forward( 0, fseed2, R );
599  for( run1 = 0; run1 < nc; run1++ )
600  Du( run1, run2-nx-na-np ) = R[run1];
601  fseed2[y_index[1][run2]] = 0.0;
602 
603  // SECOND ORDER DERIVATIVES:
604  // -------------------------
605  for( run1 = 0; run1 <= fcn[1].getNumberOfVariables(); run1++ ){
606  J2[run1] = 0.0;
607  H2[run1] = 0.0;
608  }
609 
610  fcn[1].AD_backward2( 0, bseed1, bseed2, J2, H2 );
611 
612  for( run1 = 0 ; run1 < nx ; run1++ ) Hx ( run2-nx-na-np, run1 ) = -H2[y_index[1][run1]];
613  for( run1 = nx ; run1 < nx+na ; run1++ ) Hxa( run2-nx-na-np, run1-nx ) = -H2[y_index[1][run1]];
614  for( run1 = nx+na ; run1 < nx+na+np ; run1++ ) Hp ( run2-nx-na-np, run1-nx-na ) = -H2[y_index[1][run1]];
615  for( run1 = nx+na+np ; run1 < nx+na+np+nu ; run1++ ) Hu ( run2-nx-na-np, run1-nx-na-np ) = -H2[y_index[1][run1]];
616  for( run1 = nx+na+np+nu; run1 < nx+na+np+nu+nw; run1++ ) Hw ( run2-nx-na-np, run1-nx-na-np-nu ) = -H2[y_index[1][run1]];
617  }
618 
619  if( nu > 0 ){
620 
621  dBackward.setDense( 0, 4*N-1, Du );
622 
623  if( nx > 0 ) hessian.addDense( 4*N-1, N-1, Hx );
624  if( na > 0 ) hessian.addDense( 4*N-1, 2*N-1, Hxa );
625  if( np > 0 ) hessian.addDense( 4*N-1, 3*N-1, Hp );
626  if( nu > 0 ) hessian.addDense( 4*N-1, 4*N-1, Hu );
627  if( nw > 0 ) hessian.addDense( 4*N-1, 5*N-1, Hw );
628  }
629 
630  Hx.init ( nw, nx );
631  Hxa.init( nw, na );
632  Hp.init ( nw, np );
633  Hu.init ( nw, nu );
634  Hw.init ( nw, nw );
635 
636 
// ---- w (disturbances) block, START point ------------------------------------
637  for( run2 = nx+na+np+nu; run2 < nx+na+np+nu+nw; run2++ ){
638 
639  // FIRST ORDER DERIVATIVES:
640  // ------------------------
641  fseed1[y_index[0][run2]] = 1.0;
642  fcn[0].AD_forward( 0, fseed1, R );
643  for( run1 = 0; run1 < nc; run1++ )
644  Dw( run1, run2-nx-na-np-nu ) = R[run1];
645  fseed1[y_index[0][run2]] = 0.0;
646 
647  // SECOND ORDER DERIVATIVES:
648  // -------------------------
649  for( run1 = 0; run1 <= fcn[0].getNumberOfVariables(); run1++ ){
650  J1[run1] = 0.0;
651  H1[run1] = 0.0;
652  }
653 
654  fcn[0].AD_backward2( 0, bseed1, bseed2, J1, H1 );
655 
656  for( run1 = 0 ; run1 < nx ; run1++ ) Hx ( run2-nx-na-np-nu, run1 ) = -H1[y_index[0][run1]];
657  for( run1 = nx ; run1 < nx+na ; run1++ ) Hxa( run2-nx-na-np-nu, run1-nx ) = -H1[y_index[0][run1]];
658  for( run1 = nx+na ; run1 < nx+na+np ; run1++ ) Hp ( run2-nx-na-np-nu, run1-nx-na ) = -H1[y_index[0][run1]];
659  for( run1 = nx+na+np ; run1 < nx+na+np+nu ; run1++ ) Hu ( run2-nx-na-np-nu, run1-nx-na-np ) = -H1[y_index[0][run1]];
660  for( run1 = nx+na+np+nu; run1 < nx+na+np+nu+nw; run1++ ) Hw ( run2-nx-na-np-nu, run1-nx-na-np-nu ) = -H1[y_index[0][run1]];
661  }
662 
663  if( nw > 0 ){
664 
665  dBackward.setDense( 0, 4*N, Dw );
666 
667  if( nx > 0 ) hessian.addDense( 4*N, 0, Hx );
668  if( na > 0 ) hessian.addDense( 4*N, N, Hxa );
669  if( np > 0 ) hessian.addDense( 4*N, 2*N, Hp );
670  if( nu > 0 ) hessian.addDense( 4*N, 3*N, Hu );
671  if( nw > 0 ) hessian.addDense( 4*N, 4*N, Hw );
672  }
673 
674  Hx.init ( nw, nx );
675  Hxa.init( nw, na );
676  Hp.init ( nw, np );
677  Hu.init ( nw, nu );
678  Hw.init ( nw, nw );
679 
680 
// ---- w block, END point -------------------------------------------------------
681  for( run2 = nx+na+np+nu; run2 < nx+na+np+nu+nw; run2++ ){
682 
683  // FIRST ORDER DERIVATIVES:
684  // ------------------------
685  fseed2[y_index[1][run2]] = 1.0;
686  fcn[1].AD_forward( 0, fseed2, R );
687  for( run1 = 0; run1 < nc; run1++ )
688  Dw( run1, run2-nx-na-np-nu ) = R[run1];
689  fseed2[y_index[1][run2]] = 0.0;
690 
691  // SECOND ORDER DERIVATIVES:
692  // -------------------------
693  for( run1 = 0; run1 <= fcn[1].getNumberOfVariables(); run1++ ){
694  J2[run1] = 0.0;
695  H2[run1] = 0.0;
696  }
697 
698  fcn[1].AD_backward2( 0, bseed1, bseed2, J2, H2 );
699 
700  for( run1 = 0 ; run1 < nx ; run1++ ) Hx ( run2-nx-na-np-nu, run1 ) = -H2[y_index[1][run1]];
701  for( run1 = nx ; run1 < nx+na ; run1++ ) Hxa( run2-nx-na-np-nu, run1-nx ) = -H2[y_index[1][run1]];
702  for( run1 = nx+na ; run1 < nx+na+np ; run1++ ) Hp ( run2-nx-na-np-nu, run1-nx-na ) = -H2[y_index[1][run1]];
703  for( run1 = nx+na+np ; run1 < nx+na+np+nu ; run1++ ) Hu ( run2-nx-na-np-nu, run1-nx-na-np ) = -H2[y_index[1][run1]];
704  for( run1 = nx+na+np+nu; run1 < nx+na+np+nu+nw; run1++ ) Hw ( run2-nx-na-np-nu, run1-nx-na-np-nu ) = -H2[y_index[1][run1]];
705  }
706 
707  if( nw > 0 ){
708 
709  dBackward.setDense( 0, 5*N-1, Dw );
710 
711  if( nx > 0 ) hessian.addDense( 5*N-1, N-1, Hx );
712  if( na > 0 ) hessian.addDense( 5*N-1, 2*N-1, Hxa );
713  if( np > 0 ) hessian.addDense( 5*N-1, 3*N-1, Hp );
714  if( nu > 0 ) hessian.addDense( 5*N-1, 4*N-1, Hu );
715  if( nw > 0 ) hessian.addDense( 5*N-1, 5*N-1, Hw );
716  }
717 
// Release all AD scratch buffers allocated at the top of the routine.
718  delete[] bseed1;
719  delete[] bseed2;
720  delete[] R ;
721  delete[] J1 ;
722  delete[] H1 ;
723  delete[] fseed1;
724  delete[] J2 ;
725  delete[] H2 ;
726  delete[] fseed2;
727 
728  return SUCCESSFUL_RETURN;
729 }
730 
731 
732 
734 
735 // end of file.
#define N
Data class for storing generic optimization variables.
Definition: ocp_iterate.hpp:57
Implements a very rudimentary block sparse matrix class.
EvaluationPoint * z
USING_NAMESPACE_ACADO typedef TaylorVariable< Interval > T
ConstraintElement & operator=(const ConstraintElement &rhs)
void init(unsigned _nRows=0, unsigned _nCols=0)
Definition: matrix.hpp:135
returnValue setDense(uint rowIdx, uint colIdx, const DMatrix &value)
Allows to pass back messages to the calling function.
DVector evaluate(const EvaluationPoint &x, const int &number=0)
Definition: function.cpp:520
returnValue setZ(const uint &idx, const OCPiterate &iter)
Allows to conveniently handle (one-dimensional) grids consisting of time points.
Definition: grid.hpp:58
Stores and evaluates boundary constraints within optimal control problems.
uint getLastIndex() const
#define CLOSE_NAMESPACE_ACADO
#define ACADO_TRY(X)
returnValue evaluate(const OCPiterate &iter)
returnValue getSubBlock(uint rowIdx, uint colIdx, DMatrix &value) const
int getNC() const
BoundaryConstraint & operator=(const BoundaryConstraint &rhs)
Base class for all kind of constraints (except for bounds) within optimal control problems...
returnValue init(uint _nRows, uint _nCols)
EvaluationPoint * JJ
GenericMatrix & setRow(unsigned _idx, const GenericVector< T > &_values)
Definition: matrix.hpp:213
returnValue setZero()
void rhs(const real_t *x, real_t *f)
unsigned getNumRows() const
Definition: matrix.hpp:185
#define ASSERT(x)
uint getNumPoints() const
returnValue evaluateSensitivities()
#define BEGIN_NAMESPACE_ACADO
returnValue addDense(uint rowIdx, uint colIdx, const DMatrix &value)
#define R
int getNumberOfVariables() const
Definition: function.cpp:264
GenericVector< T > getRow(unsigned _idx) const
Definition: matrix.hpp:197
#define ACADOERROR(retval)
uint getNumRows() const


acado
Author(s): Milan Vukov, Rien Quirynen
autogenerated on Mon Jun 10 2019 12:34:29