coupled_path_constraint.cpp
Go to the documentation of this file.
1 /*
2  * This file is part of ACADO Toolkit.
3  *
4  * ACADO Toolkit -- A Toolkit for Automatic Control and Dynamic Optimization.
5  * Copyright (C) 2008-2014 by Boris Houska, Hans Joachim Ferreau,
6  * Milan Vukov, Rien Quirynen, KU Leuven.
7  * Developed within the Optimization in Engineering Center (OPTEC)
8  * under supervision of Moritz Diehl. All rights reserved.
9  *
10  * ACADO Toolkit is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 3 of the License, or (at your option) any later version.
14  *
15  * ACADO Toolkit is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with ACADO Toolkit; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  *
24  */
25 
26 
27 
36 
37 
38 
40 
41 
42 
43 //
44 // PUBLIC MEMBER FUNCTIONS:
45 //
46 
47 
50 
51 }
52 
54  :ConstraintElement(grid_, grid_.getNumPoints(), 1 ){
 // Grid constructor: registers grid_.getNumPoints() constraint functions
 // (one per grid point) with block dimension 1 in the ConstraintElement base.
 // NOTE(review): the signature line is not visible in this doxygen listing.
55 
56 }
57 
59  :ConstraintElement(rhs){
 // Copy constructor: all member copying is delegated to the
 // ConstraintElement base-class copy constructor; nothing extra to do here.
60 
61 }
62 
64 
 // Empty body -- presumably the destructor (its signature line is stripped
 // from this listing); any cleanup happens in the base class. TODO confirm
 // against the repository source.
65 }
66 
68 
 // Assignment operator with the usual self-assignment guard.
69  if( this != &rhs ){
70 
 // NOTE(review): listing line 71 is omitted from this doxygen view --
 // presumably the ConstraintElement base-class assignment call; verify
 // against the repository source.
72  }
73  return *this;
74 }
75 
76 
77 
79 
 // Evaluate the coupled path constraint residuum for the given OCP iterate.
 // A coupled constraint couples ALL grid points: the nc-dimensional residua
 // are built as
 //   resL = lb - sum_k fcn[k](z_k)   and   resU = ub - sum_k fcn[k](z_k),
 // where the sum runs over every point of the time grid.
80  int run1, run2;
81 
 // nc: number of constraint rows; T: index of the last grid point.
82  const int nc = getNC();
83  const int T = grid.getLastIndex();
84 
 // Fail early if no constraint function has been initialised.
85  if( nc == 0 ) return ACADOERROR(RET_MEMBER_NOT_INITIALISED);
86 
87  DMatrix resL( nc, 1 );
88  DMatrix resU( nc, 1 );
89 
 // Start from the bounds; the accumulated function values are subtracted below.
90  for( run1 = 0; run1 < nc; run1++ ){
91  resL( run1, 0 ) = lb[0][run1];
92  resU( run1, 0 ) = ub[0][run1];
93  }
94 
95 
96  DVector result;
97 
 // Accumulate every grid point's contribution: load the iterate's variables
 // at point run2 into the evaluation point z[run2], evaluate fcn[run2] there,
 // and subtract the result from both residua.
98  for( run2 = 0; run2 <= T; run2++ ){
99 
100  z[run2].setZ( run2, iter );
101  result = fcn[run2].evaluate( z[run2] );
102 
103  for( run1 = 0; run1 < nc; run1++ ){
104  resL( run1, 0 ) -= result(run1);
105  resU( run1, 0 ) -= result(run1);
106  }
107  }
108 
109  // STORE THE RESULTS:
110  // ------------------
111 
 // The coupled constraint contributes a single 1x1 block to the sparse
 // residuum structure.
112  residuumL.init(1,1);
113  residuumU.init(1,1);
114 
115  residuumL.setDense( 0, 0, resL );
116  residuumU.setDense( 0, 0, resU );
117 
118  return SUCCESSFUL_RETURN;
119 }
120 
121 
123 
124 
125  // EVALUATION OF THE SENSITIVITIES:
126  // --------------------------------
 // First-order sensitivities of the coupled constraint. Only backward
 // (adjoint) mode is implemented: it requires a backward seed bSeed and
 // rejects any simultaneously supplied forward seeds.
127 
128  int run1,/*run2, */run3;
129 
130 // const int nc = getNC();
131  const int N = grid.getNumPoints();
132 
133 
134  // EVALUATION OF THE SENSITIVITIES:
135  // --------------------------------
136 
137  if( bSeed != 0 )
138  {
139 
 // Mixed seeding is not supported for coupled constraints.
140  if( xSeed != 0 || pSeed != 0 || uSeed != 0 || wSeed != 0 ||
141  xSeed2 != 0 || pSeed2 != 0 || uSeed2 != 0 || wSeed2 != 0 )
 // NOTE(review): listing line 142 (the statement governed by the if above,
 // presumably an ACADOERROR return) is omitted from this doxygen view.
143 
144 // double* bseed1 = new double[nc];
145 
146  DMatrix bseed_;
147  bSeed->getSubBlock( 0, 0, bseed_);
148 
 // dBackward layout: one block row, 5*N block columns, grouped as
 // [0..N) x, [N..2N) xa, [2N..3N) p, [3N..4N) u, [4N..5N) w.
149  dBackward.init( 1, 5*N );
150 
151  int nBDirs = bSeed->getNumRows( 0, 0 );
152 
153  DMatrix Dx ( nBDirs, nx );
154  DMatrix Dxa( nBDirs, na );
155  DMatrix Dp ( nBDirs, np );
156  DMatrix Du ( nBDirs, nu );
157  DMatrix Dw ( nBDirs, nw );
158 
 // For every grid point run3 and every backward direction run1: one
 // adjoint sweep through fcn[run3] accumulates the derivative into the
 // evaluation point JJ[run3], whose variable groups are copied out row by
 // row. JJ[run3] is zeroed afterwards so directions do not accumulate.
159  for( run3 = 0; run3 < N; run3++ )
160  {
161 
162 // double* dresult1 = new double[fcn[run3].getNumberOfVariables()+1];
163 //
164  for( run1 = 0; run1 < nBDirs; run1++ )
165  {
166 //
167 // for( run2 = 0; run2 < nc; run2++ )
168 // bseed1[run2] = bseed_(run1,run2);
169 //
170 // for( run2 = 0; run2 <= fcn[run3].getNumberOfVariables(); run2++ )
171 // dresult1[run2] = 0.0;
172 
173 // fcn[run3].AD_backward( 0, bseed1, dresult1 );
174  ACADO_TRY( fcn[run3].AD_backward( bseed_.getRow(run1),JJ[run3],0 ) );
175 
176  if( nx > 0 ) Dx .setRow( run1, JJ[run3].getX () );
177  if( na > 0 ) Dxa.setRow( run1, JJ[run3].getXA() );
178  if( np > 0 ) Dp .setRow( run1, JJ[run3].getP () );
179  if( nu > 0 ) Du .setRow( run1, JJ[run3].getU () );
180  if( nw > 0 ) Dw .setRow( run1, JJ[run3].getW () );
181 
182  JJ[run3].setZero( );
183 
184 
185 // for( run2 = 0; run2 < nx; run2++ ){
186 // Dx ( run1, run2 ) = dresult1[y_index[run3][run2]];
187 // }
188 // for( run2 = nx; run2 < nx+na; run2++ ){
189 // Dxa( run1, run2-nx ) = dresult1[y_index[run3][run2]];
190 // }
191 // for( run2 = nx+na; run2 < nx+na+np; run2++ ){
192 // Dp ( run1, run2-nx-na ) = dresult1[y_index[run3][run2]];
193 // }
194 // for( run2 = nx+na+np; run2 < nx+na+np+nu; run2++ ){
195 // Du ( run1, run2-nx-na-np ) = dresult1[y_index[run3][run2]];
196 // }
197 // for( run2 = nx+na+np+nu; run2 < nx+na+np+nu+nw; run2++ ){
198 // Dw ( run1, run2-nx-na-np-nu ) = dresult1[y_index[run3][run2]];
199 // }
200  }
201 
 // Store this grid point's Jacobian blocks in their variable-group columns.
202  if( nx > 0 )
203  dBackward.setDense( 0, run3, Dx );
204 
205  if( na > 0 )
206  dBackward.setDense( 0, N+run3, Dxa );
207 
208  if( np > 0 )
209  dBackward.setDense( 0, 2*N+run3, Dp );
210 
211  if( nu > 0 )
212  dBackward.setDense( 0, 3*N+run3, Du );
213 
214  if( nw > 0 )
215  dBackward.setDense( 0, 4*N+run3, Dw );
216 
217 // delete[] dresult1;
218  }
219 
220 // delete[] bseed1 ;
221  return SUCCESSFUL_RETURN;
222  }
223 
224  // TODO: implement forward mode
225 
 // NOTE(review): listing line 226 (the fall-through return when bSeed == 0,
 // presumably an error code) is omitted from this doxygen view.
227 }
228 
229 // addition of second order derivatives --> see file test.cpp in constraint directory
230 
232  // EVALUATION OF THE SENSITIVITIES:
233  // --------------------------------
 // Second-order sensitivities: for each grid point this computes the
 // first-order Jacobian rows (forward mode, one unit seed per variable)
 // and the Hessian contraction with the multiplier `seed` (backward mode
 // of second order, AD_backward2), accumulated with a NEGATIVE sign into
 // `hessian`. Block layout of both dBackward and hessian follows the
 // [x | xa | p | u | w] grouping, N columns per group.
234 
235  int run1, run2, run3;
236 
237  const int nc = getNC();
238  const int N = grid.getNumPoints();
239 
 // The multiplier seed must provide one entry per constraint row.
240  ASSERT( (int) seed.getNumRows() == nc );
241 
 // Left-over debug loop (body commented out); has no effect.
242  for( run3 = 0; run3 < N; run3++ ){
243  // printf("Test: ,%d \n",fcn[run3].getNumberOfVariables() +1);
244 
245  }
246 
247  dBackward.init( 1, 5*N );
248 
 // NOTE(review): the work arrays below are new-ed and delete-d once per
 // grid point; hoisting them out of the loop would avoid the churn, but the
 // sizes depend on fcn[run3], so this is left as-is here.
249  for( run3 = 0; run3 < N; run3++ ){
250 
251  double *bseed1 = new double[nc];
252  double *bseed2 = new double[nc];
253  double *R = new double[nc];
254 
255  double *J1 = new double[fcn[run3].getNumberOfVariables() +1];
256  double *H1 = new double[fcn[run3].getNumberOfVariables() +1];
257  double *fseed1 = new double[fcn[run3].getNumberOfVariables() +1];
258 
 // bseed1 carries the constraint multipliers (first column of `seed`);
 // bseed2 (second-order backward seed) stays zero.
259  for( run1 = 0; run1 < nc; run1++ ){
260  bseed1[run1] = seed(run1,0);
261  bseed2[run1] = 0.0;
262  }
263  for( run1 = 0; run1 < fcn[run3].getNumberOfVariables()+1; run1++ )
264  fseed1[run1] = 0.0;
265 
266  DMatrix Dx ( nc, nx );
267  DMatrix Dxa( nc, na );
268  DMatrix Dp ( nc, np );
269  DMatrix Du ( nc, nu );
270  DMatrix Dw ( nc, nw );
271 
272  DMatrix Hx ( nx, nx );
273  DMatrix Hxa( nx, na );
274  DMatrix Hp ( nx, np );
275  DMatrix Hu ( nx, nu );
276  DMatrix Hw ( nx, nw );
277 
 // NOTE(review): throughout this routine the seeds/results of fcn[run3]
 // are addressed via y_index[0][...], not y_index[run3][...]; the
 // commented-out code in the first-order routine used y_index[run3]. This
 // is only correct if every grid point's function shares one variable
 // index map -- TODO confirm against the repository source.
278  // DERIVATIVES W.R.T. STATES
279  for( run2 = 0; run2 < nx; run2++ ){
280 
281  // FIRST ORDER DERIVATIVES:
282  // ------------------------
 // Unit forward seed on state run2 -> column run2 of the Jacobian.
283  fseed1[y_index[0][run2]] = 1.0;
284  fcn[run3].AD_forward( 0, fseed1, R );
285  for( run1 = 0; run1 < nc; run1++ )
286  Dx( run1, run2 ) = R[run1];
287  fseed1[y_index[0][run2]] = 0.0;
288 
289 
290  // SECOND ORDER DERIVATIVES:
291  // -------------------------
292  for( run1 = 0; run1 <= fcn[run3].getNumberOfVariables(); run1++ ){
293  J1[run1] = 0.0;
294  H1[run1] = 0.0;
295  }
296 
 // Second-order backward sweep on top of the forward seed still set by
 // call 284: H1 holds one Hessian row (multiplier-weighted).
297  fcn[run3].AD_backward2( 0, bseed1, bseed2, J1, H1 );
298 
 // Scatter -H1 into the five variable-group Hessian blocks (row run2).
299  for( run1 = 0 ; run1 < nx ; run1++ ) Hx ( run2, run1 ) = -H1[y_index[0][run1]];
300  for( run1 = nx ; run1 < nx+na ; run1++ ) Hxa( run2, run1-nx ) = -H1[y_index[0][run1]];
301  for( run1 = nx+na ; run1 < nx+na+np ; run1++ ) Hp ( run2, run1-nx-na ) = -H1[y_index[0][run1]];
302  for( run1 = nx+na+np ; run1 < nx+na+np+nu ; run1++ ) Hu ( run2, run1-nx-na-np ) = -H1[y_index[0][run1]];
303  for( run1 = nx+na+np+nu; run1 < nx+na+np+nu+nw; run1++ ) Hw ( run2, run1-nx-na-np-nu ) = -H1[y_index[0][run1]];
304  }
305 
306  if( nx > 0 ){
307 
308  dBackward.setDense( 0, run3, Dx );
309 
310  if( nx > 0 ) hessian.addDense( run3, run3, Hx );
311  if( na > 0 ) hessian.addDense( run3, N + run3, Hxa );
312  if( np > 0 ) hessian.addDense( run3, 2*N + run3, Hp );
313  if( nu > 0 ) hessian.addDense( run3, 3*N + run3, Hu );
314  if( nw > 0 ) hessian.addDense( run3, 4*N + run3, Hw );
315  }
316 
 // Re-initialise the H* work matrices for the next row-block (na rows).
317  Hx.init ( na, nx );
318  Hxa.init( na, na );
319  Hp.init ( na, np );
320  Hu.init ( na, nu );
321  Hw.init ( na, nw );
322 
323  // DERIVATIVES W.R.T. ALGEBRAIC STATES
 // Same forward/backward2 pattern as above, offset by nx.
324 
325  for( run2 = nx; run2 < nx+na; run2++ ){
326 
327  // FIRST ORDER DERIVATIVES:
328  // ------------------------
329  fseed1[y_index[0][run2]] = 1.0;
330  fcn[run3].AD_forward( 0, fseed1, R );
331  for( run1 = 0; run1 < nc; run1++ )
332  Dxa( run1, run2-nx ) = R[run1];
333  fseed1[y_index[0][run2]] = 0.0;
334 
335  // SECOND ORDER DERIVATIVES:
336  // -------------------------
337  for( run1 = 0; run1 <= fcn[run3].getNumberOfVariables(); run1++ ){
338  J1[run1] = 0.0;
339  H1[run1] = 0.0;
340  }
341 
342  fcn[run3].AD_backward2( 0, bseed1, bseed2, J1, H1 );
343 
344  for( run1 = 0 ; run1 < nx ; run1++ ) Hx ( run2-nx, run1 ) = -H1[y_index[0][run1]];
345  for( run1 = nx ; run1 < nx+na ; run1++ ) Hxa( run2-nx, run1-nx ) = -H1[y_index[0][run1]];
346  for( run1 = nx+na ; run1 < nx+na+np ; run1++ ) Hp ( run2-nx, run1-nx-na ) = -H1[y_index[0][run1]];
347  for( run1 = nx+na+np ; run1 < nx+na+np+nu ; run1++ ) Hu ( run2-nx, run1-nx-na-np ) = -H1[y_index[0][run1]];
348  for( run1 = nx+na+np+nu; run1 < nx+na+np+nu+nw; run1++ ) Hw ( run2-nx, run1-nx-na-np-nu ) = -H1[y_index[0][run1]];
349  }
350 
351  if( na > 0 ){
352 
353  dBackward.setDense( 0, N+run3, Dxa );
354 
355  if( nx > 0 ) hessian.addDense( N+run3, run3, Hx );
356  if( na > 0 ) hessian.addDense( N+run3, N + run3, Hxa );
357  if( np > 0 ) hessian.addDense( N+run3, 2*N + run3, Hp );
358  if( nu > 0 ) hessian.addDense( N+run3, 3*N + run3, Hu );
359  if( nw > 0 ) hessian.addDense( N+run3, 4*N + run3, Hw );
360  }
361 
362 
 // Next row-block: np rows, for derivatives w.r.t. the parameters.
363  Hx.init ( np, nx );
364  Hxa.init( np, na );
365  Hp.init ( np, np );
366  Hu.init ( np, nu );
367  Hw.init ( np, nw );
368 
369  //DERIVATIVES W.R.T. THE PARAMETERS
370 
371  for( run2 = nx+na; run2 < nx+na+np; run2++ ){
372 
373  // FIRST ORDER DERIVATIVES:
374  // ------------------------
375  fseed1[y_index[0][run2]] = 1.0;
376  fcn[run3].AD_forward( 0, fseed1, R );
377  for( run1 = 0; run1 < nc; run1++ )
378  Dp( run1, run2-nx-na ) = R[run1];
379  fseed1[y_index[0][run2]] = 0.0;
380 
381  // SECOND ORDER DERIVATIVES:
382  // -------------------------
383  for( run1 = 0; run1 <= fcn[run3].getNumberOfVariables(); run1++ ){
384  J1[run1] = 0.0;
385  H1[run1] = 0.0;
386  }
387 
388  fcn[run3].AD_backward2( 0, bseed1, bseed2, J1, H1 );
389 
390  for( run1 = 0 ; run1 < nx ; run1++ ) Hx ( run2-nx-na, run1 ) = -H1[y_index[0][run1]];
391  for( run1 = nx ; run1 < nx+na ; run1++ ) Hxa( run2-nx-na, run1-nx ) = -H1[y_index[0][run1]];
392  for( run1 = nx+na ; run1 < nx+na+np ; run1++ ) Hp ( run2-nx-na, run1-nx-na ) = -H1[y_index[0][run1]];
393  for( run1 = nx+na+np ; run1 < nx+na+np+nu ; run1++ ) Hu ( run2-nx-na, run1-nx-na-np ) = -H1[y_index[0][run1]];
394  for( run1 = nx+na+np+nu; run1 < nx+na+np+nu+nw; run1++ ) Hw ( run2-nx-na, run1-nx-na-np-nu ) = -H1[y_index[0][run1]];
395  }
396 
397  if( np > 0 ){
398  dBackward.setDense( 0, 2*N+run3, Dp );
399  if( nx > 0 ) hessian.addDense( 2*N+run3, run3, Hx );
400  if( na > 0 ) hessian.addDense( 2*N+run3, N + run3, Hxa );
401  if( np > 0 ) hessian.addDense( 2*N+run3, 2*N + run3, Hp );
402  if( nu > 0 ) hessian.addDense( 2*N+run3, 3*N + run3, Hu );
403  if( nw > 0 ) hessian.addDense( 2*N+run3, 4*N + run3, Hw );
404  }
405 
 // Next row-block: nu rows, for derivatives w.r.t. the controls.
406  Hx.init ( nu, nx );
407  Hxa.init( nu, na );
408  Hp.init ( nu, np );
409  Hu.init ( nu, nu );
410  Hw.init ( nu, nw );
411 
412  //DERIVATIVES W.R.T. THE CONTROLS
413 
414  for( run2 = nx+na+np; run2 < nx+na+np+nu; run2++ ){
415 
416  // FIRST ORDER DERIVATIVES:
417  // ------------------------
418  fseed1[y_index[0][run2]] = 1.0;
419  fcn[run3].AD_forward( 0, fseed1, R );
420  for( run1 = 0; run1 < nc; run1++ )
421  Du( run1, run2-nx-na-np ) = R[run1];
422  fseed1[y_index[0][run2]] = 0.0;
423 
424  // SECOND ORDER DERIVATIVES:
425  // -------------------------
426  for( run1 = 0; run1 <= fcn[run3].getNumberOfVariables(); run1++ ){
427  J1[run1] = 0.0;
428  H1[run1] = 0.0;
429  }
430 
431  fcn[run3].AD_backward2( 0, bseed1, bseed2, J1, H1 );
432 
433  for( run1 = 0 ; run1 < nx ; run1++ ) Hx ( run2-nx-na-np, run1 ) = -H1[y_index[0][run1]];
434  for( run1 = nx ; run1 < nx+na ; run1++ ) Hxa( run2-nx-na-np, run1-nx ) = -H1[y_index[0][run1]];
435  for( run1 = nx+na ; run1 < nx+na+np ; run1++ ) Hp ( run2-nx-na-np, run1-nx-na ) = -H1[y_index[0][run1]];
436  for( run1 = nx+na+np ; run1 < nx+na+np+nu ; run1++ ) Hu ( run2-nx-na-np, run1-nx-na-np ) = -H1[y_index[0][run1]];
437  for( run1 = nx+na+np+nu; run1 < nx+na+np+nu+nw; run1++ ) Hw ( run2-nx-na-np, run1-nx-na-np-nu ) = -H1[y_index[0][run1]];
438  }
439 
440  if( nu > 0 ){
441 
442  dBackward.setDense( 0, 3*N+run3, Du );
443 
444  if( nx > 0 ) hessian.addDense( 3*N+run3, run3, Hx );
445  if( na > 0 ) hessian.addDense( 3*N+run3, N + run3, Hxa );
446  if( np > 0 ) hessian.addDense( 3*N+run3, 2*N + run3, Hp );
447  if( nu > 0 ) hessian.addDense( 3*N+run3, 3*N + run3, Hu );
448  if( nw > 0 ) hessian.addDense( 3*N+run3, 4*N + run3, Hw );
449  }
450 
451 
 // Last row-block: nw rows, for derivatives w.r.t. the disturbances.
452  Hx.init ( nw, nx );
453  Hxa.init( nw, na );
454  Hp.init ( nw, np );
455  Hu.init ( nw, nu );
456  Hw.init ( nw, nw );
457 
458 
459  for( run2 = nx+na+np+nu; run2 < nx+na+np+nu+nw; run2++ ){
460 
461  // FIRST ORDER DERIVATIVES:
462  // ------------------------
463  fseed1[y_index[0][run2]] = 1.0;
464  fcn[run3].AD_forward( 0, fseed1, R );
465  for( run1 = 0; run1 < nc; run1++ )
466  Dw( run1, run2-nx-na-np-nu ) = R[run1];
467  fseed1[y_index[0][run2]] = 0.0;
468 
469  // SECOND ORDER DERIVATIVES:
470  // -------------------------
471  for( run1 = 0; run1 <= fcn[run3].getNumberOfVariables(); run1++ ){
472  J1[run1] = 0.0;
473  H1[run1] = 0.0;
474  }
475 
476  fcn[run3].AD_backward2( 0, bseed1, bseed2, J1, H1 );
477 
478  for( run1 = 0 ; run1 < nx ; run1++ ) Hx ( run2-nx-na-np-nu, run1 ) = -H1[y_index[0][run1]];
479  for( run1 = nx ; run1 < nx+na ; run1++ ) Hxa( run2-nx-na-np-nu, run1-nx ) = -H1[y_index[0][run1]];
480  for( run1 = nx+na ; run1 < nx+na+np ; run1++ ) Hp ( run2-nx-na-np-nu, run1-nx-na ) = -H1[y_index[0][run1]];
481  for( run1 = nx+na+np ; run1 < nx+na+np+nu ; run1++ ) Hu ( run2-nx-na-np-nu, run1-nx-na-np ) = -H1[y_index[0][run1]];
482  for( run1 = nx+na+np+nu; run1 < nx+na+np+nu+nw; run1++ ) Hw ( run2-nx-na-np-nu, run1-nx-na-np-nu ) = -H1[y_index[0][run1]];
483  }
484 
485  if( nw > 0 ){
486 
487  dBackward.setDense( 0, 4*N+run3, Dw );
488 
489  if( nx > 0 ) hessian.addDense( 4*N+run3, run3, Hx );
490  if( na > 0 ) hessian.addDense( 4*N+run3, N + run3, Hxa );
491  if( np > 0 ) hessian.addDense( 4*N+run3, 2*N + run3, Hp );
492  if( nu > 0 ) hessian.addDense( 4*N+run3, 3*N + run3, Hu );
493  if( nw > 0 ) hessian.addDense( 4*N+run3, 4*N + run3, Hw );
494  }
495 
496 
 // Release the per-grid-point work arrays allocated at the loop head.
497  delete[] bseed1;
498  delete[] bseed2;
499  delete[] R ;
500  delete[] J1 ;
501  delete[] H1 ;
502  delete[] fseed1;
503 
504  }
505 
506  return SUCCESSFUL_RETURN;
507 }
508 
509 
510 
512 
513 // end of file.
#define N
Data class for storing generic optimization variables.
Definition: ocp_iterate.hpp:57
Implements a very rudimentary block sparse matrix class.
EvaluationPoint * z
USING_NAMESPACE_ACADO typedef TaylorVariable< Interval > T
ConstraintElement & operator=(const ConstraintElement &rhs)
void init(unsigned _nRows=0, unsigned _nCols=0)
Definition: matrix.hpp:135
returnValue setDense(uint rowIdx, uint colIdx, const DMatrix &value)
Allows to pass back messages to the calling function.
DVector evaluate(const EvaluationPoint &x, const int &number=0)
Definition: function.cpp:520
returnValue setZ(const uint &idx, const OCPiterate &iter)
CoupledPathConstraint & operator=(const CoupledPathConstraint &rhs)
Allows to conveniently handle (one-dimensional) grids consisting of time points.
Definition: grid.hpp:58
uint getLastIndex() const
#define CLOSE_NAMESPACE_ACADO
#define ACADO_TRY(X)
returnValue getSubBlock(uint rowIdx, uint colIdx, DMatrix &value) const
Base class for all kind of constraints (except for bounds) within optimal control problems...
returnValue init(uint _nRows, uint _nCols)
EvaluationPoint * JJ
GenericMatrix & setRow(unsigned _idx, const GenericVector< T > &_values)
Definition: matrix.hpp:213
returnValue setZero()
Stores and evaluates coupled path constraints within optimal control problems.
returnValue evaluate(const OCPiterate &iter)
void rhs(const real_t *x, real_t *f)
unsigned getNumRows() const
Definition: matrix.hpp:185
#define ASSERT(x)
uint getNumPoints() const
#define BEGIN_NAMESPACE_ACADO
returnValue addDense(uint rowIdx, uint colIdx, const DMatrix &value)
#define R
int getNumberOfVariables() const
Definition: function.cpp:264
GenericVector< T > getRow(unsigned _idx) const
Definition: matrix.hpp:197
#define ACADOERROR(retval)
uint getNumRows() const


acado
Author(s): Milan Vukov, Rien Quirynen
autogenerated on Mon Jun 10 2019 12:34:31