path_constraint.cpp
Go to the documentation of this file.
1 /*
2  * This file is part of ACADO Toolkit.
3  *
4  * ACADO Toolkit -- A Toolkit for Automatic Control and Dynamic Optimization.
5  * Copyright (C) 2008-2014 by Boris Houska, Hans Joachim Ferreau,
6  * Milan Vukov, Rien Quirynen, KU Leuven.
7  * Developed within the Optimization in Engineering Center (OPTEC)
8  * under supervision of Moritz Diehl. All rights reserved.
9  *
10  * ACADO Toolkit is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 3 of the License, or (at your option) any later version.
14  *
15  * ACADO Toolkit is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with ACADO Toolkit; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  *
24  */
25 
26 
27 
36 
37 
39 
40 
41 //
42 // PUBLIC MEMBER FUNCTIONS:
43 //
44 
45 
// NOTE(review): this is a Doxygen source listing; the declaration lines of the
// special member functions (upstream source lines 46-47, 51, 56, 61, 65) were
// hyperlink targets in the HTML and are missing from this extraction. The
// numeric prefix on each line is the Doxygen source line number, not code.
//
// Default constructor body (declaration lost in extraction).
48 
49 }
50 
// Constructor taking a time grid: registers this element with the base class
// as one constraint function block with grid_.getNumPoints() grid points.
52  :ConstraintElement(grid_, 1, grid_.getNumPoints() ){
53 
54 }
55 
// Copy constructor: all copying is delegated to the ConstraintElement base.
57  :ConstraintElement(rhs){
58 
59 }
60 
// Destructor (declaration lost in extraction); no members are released here,
// so cleanup presumably happens in the base class -- TODO confirm upstream.
62 
63 }
64 
// Assignment operator: guarded against self-assignment. The statement inside
// the guard (upstream source line 69) was lost in extraction -- presumably a
// call to ConstraintElement::operator=(rhs); TODO confirm against upstream.
66 
67  if( this != &rhs ){
68 
70  }
71  return *this;
72 }
73 
74 
75 
// PathConstraint::evaluate( const OCPiterate &iter ) -- the signature line
// (upstream source line 76) was lost in the Doxygen extraction; TODO confirm.
//
// Evaluates the path-constraint function c(z) at every point of the time grid
// and stores the bound residuals
//     resL = lb - c(z)   and   resU = ub - c(z)
// as dense (nc x 1) blocks of the block matrices residuumL / residuumU.
// A point is feasible iff resL <= 0 <= resU component-wise.
77 
78  int run1, run2;
79 
// Guard: the constraint function must have been set up before evaluation.
80  if( fcn == 0 ) return ACADOERROR(RET_MEMBER_NOT_INITIALISED);
81 
// nc: number of constraint components; T: index of the last grid point.
82  const int nc = fcn[0].getDim();
83  const int T = grid.getLastIndex();
84 
85  if( nc == 0 ) return ACADOERROR(RET_MEMBER_NOT_INITIALISED);
86 
// One (nc x 1) residual block per grid point 0..T.
87  residuumL.init(T+1,1);
88  residuumU.init(T+1,1);
89 
90  for( run1 = 0; run1 <= T; run1++ ){
91 
92  DMatrix resL( nc, 1 );
93  DMatrix resU( nc, 1 );
94 
// Load the iterate's variables at grid point run1 into the evaluation
// point z[0], then evaluate the constraint function at that point.
95  z[0].setZ( run1, iter );
96  DVector result = fcn[0].evaluate( z[0],run1 );
97 
98  for( run2 = 0; run2 < nc; run2++ ){
99  resL( run2, 0 ) = lb[run1][run2] - result(run2);
100  resU( run2, 0 ) = ub[run1][run2] - result(run2);
101  }
102 
103  // STORE THE RESULTS:
104  // ------------------
105  residuumL.setDense( run1, 0, resL );
106  residuumU.setDense( run1, 0, resU );
107  }
108 
109  return SUCCESSFUL_RETURN;
110 }
111 
112 
// PathConstraint::evaluateSensitivities( ) -- the signature line (upstream
// source line 113) was lost in the Doxygen extraction; TODO confirm.
//
// First-order sensitivity evaluation via backward (adjoint) automatic
// differentiation. For each grid point run3, each row of the backward seed
// block bseed_ is propagated through fcn[0] with AD_backward; the resulting
// derivative rows w.r.t. x, xa, p, u, w are collected into dense blocks and
// stored in the (N x 5N) block matrix dBackward, whose block-column layout is
// [ x | xa | p | u | w ] (offsets 0, N, 2N, 3N, 4N).
114 
115 
// NOTE(review): 'returnvalue' is declared but never used in the visible lines.
116  int run1, run3;
117  returnValue returnvalue;
118 
119  if( fcn == 0 ) return ACADOERROR(RET_MEMBER_NOT_INITIALISED);
120 
121  const int N = grid.getNumPoints();
122 
123  // EVALUATION OF THE SENSITIVITIES:
124  // --------------------------------
125 
// Only the backward mode is implemented: bSeed must be set, and no forward
// or second-order seeds may be mixed in.
126  if( bSeed != 0 )
127  {
128 
// The statement guarded by this check (upstream source line 131) was lost in
// extraction -- presumably an ACADOERROR return rejecting the mixed-seed
// configuration; TODO confirm against upstream.
129  if( xSeed != 0 || pSeed != 0 || uSeed != 0 || wSeed != 0 ||
130  xSeed2 != 0 || pSeed2 != 0 || uSeed2 != 0 || wSeed2 != 0 )
132 
133  int nBDirs;
134 
135  dBackward.init( N, 5*N );
136 
137 
138  for( run3 = 0; run3 < N; run3++ )
139  {
// Fetch the seed block and the number of backward directions for this point.
140  DMatrix bseed_;
141  bSeed->getSubBlock( 0, run3, bseed_);
142 
143  nBDirs = bSeed->getNumRows( 0, run3 );
144 
// One derivative row per backward direction, one matrix per variable group.
145  DMatrix Dx ( nBDirs, nx );
146  DMatrix Dxa( nBDirs, na );
147  DMatrix Dp ( nBDirs, np );
148  DMatrix Du ( nBDirs, nu );
149  DMatrix Dw ( nBDirs, nw );
150 
151  for( run1 = 0; run1 < nBDirs; run1++ )
152  {
// Adjoint sweep: accumulates derivatives into the evaluation point JJ[0].
153  ACADO_TRY( fcn[0].AD_backward( bseed_.getRow(run1),JJ[0],run3 ) );
154 
155  if( nx > 0 ) Dx .setRow( run1, JJ[0].getX () );
156  if( na > 0 ) Dxa.setRow( run1, JJ[0].getXA() );
157  if( np > 0 ) Dp .setRow( run1, JJ[0].getP () );
158  if( nu > 0 ) Du .setRow( run1, JJ[0].getU () );
159  if( nw > 0 ) Dw .setRow( run1, JJ[0].getW () );
160 
// Reset the accumulator so directions do not sum into each other.
161  JJ[0].setZero( );
162 
163  }
164 
// Store the per-group blocks at block-column offsets 0, N, 2N, 3N, 4N.
165  if( nx > 0 )
166  dBackward.setDense( run3, run3, Dx );
167 
168  if( na > 0 )
169  dBackward.setDense( run3, N+run3, Dxa );
170 
171  if( np > 0 )
172  dBackward.setDense( run3, 2*N+run3, Dp );
173 
174  if( nu > 0 )
175  dBackward.setDense( run3, 3*N+run3, Du );
176 
177  if( nw > 0 )
178  dBackward.setDense( run3, 4*N+run3, Dw );
179  }
180 
181  return SUCCESSFUL_RETURN;
182  }
183 
184  // TODO: implement forward mode
185 
// The final statement (upstream source line 186) was lost in extraction --
// presumably a not-implemented error return for the forward case; TODO confirm.
187 }
188 
189 
190 
// Second-order variant of evaluateSensitivities -- the signature line
// (upstream source line 191) was lost in the Doxygen extraction. From the
// visible uses it takes at least: a running row counter 'count', a seed block
// matrix 'seed_', and a Hessian block matrix 'hessian' -- TODO confirm.
//
// For each grid point run3:
//   * an (nc x 1) backward seed is read from seed_ at row 'count' (advanced),
//   * first-order derivatives are computed by unit forward sweeps
//     (AD_forward) over every variable, filling the Jacobian blocks
//     Dx/Dxa/Dp/Du/Dw of dBackward (block columns 0, N, 2N, 3N, 4N),
//   * second-order derivatives are computed by forward-over-backward sweeps
//     (AD_backward2), filling one block row of 'hessian' per variable group
//     (x, xa, p, u, w), the Hx/Hxa/Hp/Hu/Hw matrices being re-init'ed with
//     the proper row count before each group.
// Hessian entries are accumulated with a sign flip (-H[...]) -- presumably to
// match the Lagrangian sign convention of the calling SQP layer; TODO confirm.
192 
193  int run3;
194  const int N = grid.getNumPoints();
195  if( fcn == 0 ) return ACADOERROR(RET_MEMBER_NOT_INITIALISED);
196 
197  const int nc = fcn[0].getDim();
198 
199  dBackward.init( N, 5*N );
200 
201  for( run3 = 0; run3 < N; run3++ ){
202 
// Backward seed for this grid point; 'count' walks down the seed_ rows.
203  DMatrix seed;
204  seed_.getSubBlock( count, 0, seed, nc, 1 );
205  count++;
206 
207  // EVALUATION OF THE SENSITIVITIES:
208  // --------------------------------
209 
210  int run1, run2;
211 
// AD work arrays, sized by the function's variable count.
// NOTE(review): these are heap-allocated and freed on every grid point; they
// could be hoisted out of the run3 loop -- left unchanged here.
212  double *bseed1 = new double[nc];
213  double *bseed2 = new double[nc];
214  double *R = new double[nc];
215  double *J = new double[fcn[0].getNumberOfVariables() +1];
216  double *H = new double[fcn[0].getNumberOfVariables() +1];
217  double *fseed = new double[fcn[0].getNumberOfVariables() +1];
218 
// First-order backward seed taken from seed_; second-order seed is zero.
219  for( run1 = 0; run1 < nc; run1++ ){
220  bseed1[run1] = seed(run1,0);
221  bseed2[run1] = 0.0;
222  }
223 
224  for( run1 = 0; run1 < fcn[0].getNumberOfVariables()+1; run1++ )
225  fseed[run1] = 0.0;
226 
// Jacobian blocks (nc rows) and Hessian blocks for the x-row (nx rows).
227  DMatrix Dx ( nc, nx );
228  DMatrix Dxa( nc, na );
229  DMatrix Dp ( nc, np );
230  DMatrix Du ( nc, nu );
231  DMatrix Dw ( nc, nw );
232 
233  DMatrix Hx ( nx, nx );
234  DMatrix Hxa( nx, na );
235  DMatrix Hp ( nx, np );
236  DMatrix Hu ( nx, nu );
237  DMatrix Hw ( nx, nw );
238 
// --- Variable group x: one unit forward seed per differential state. ---
239  for( run2 = 0; run2 < nx; run2++ ){
240 
241  // FIRST ORDER DERIVATIVES:
242  // ------------------------
243  fseed[y_index[0][run2]] = 1.0;
244  fcn[0].AD_forward( run3, fseed, R );
245  for( run1 = 0; run1 < nc; run1++ )
246  Dx( run1, run2 ) = R[run1];
247  fseed[y_index[0][run2]] = 0.0;
248 
249  // SECOND ORDER DERIVATIVES:
250  // -------------------------
251  for( run1 = 0; run1 <= fcn[0].getNumberOfVariables(); run1++ ){
252  J[run1] = 0.0;
253  H[run1] = 0.0;
254  }
255 
256  fcn[0].AD_backward2( run3, bseed1, bseed2, J, H );
257 
// Scatter H (indexed through y_index) into the per-group Hessian rows;
// the variable ordering within y_index is [x | xa | p | u | w].
258  for( run1 = 0 ; run1 < nx ; run1++ ) Hx ( run2, run1 ) = -H[y_index[0][run1]];
259  for( run1 = nx ; run1 < nx+na ; run1++ ) Hxa( run2, run1-nx ) = -H[y_index[0][run1]];
260  for( run1 = nx+na ; run1 < nx+na+np ; run1++ ) Hp ( run2, run1-nx-na ) = -H[y_index[0][run1]];
261  for( run1 = nx+na+np ; run1 < nx+na+np+nu ; run1++ ) Hu ( run2, run1-nx-na-np ) = -H[y_index[0][run1]];
262  for( run1 = nx+na+np+nu; run1 < nx+na+np+nu+nw; run1++ ) Hw ( run2, run1-nx-na-np-nu ) = -H[y_index[0][run1]];
263  }
264 
// NOTE(review): the inner 'if( nx > 0 )' below is redundant inside the outer
// nx-guard, but it mirrors the uniform five-way pattern of the other groups.
265  if( nx > 0 ){
266 
267  dBackward.setDense( run3, run3, Dx );
268 
269  if( nx > 0 ) hessian.addDense( run3, run3, Hx );
270  if( na > 0 ) hessian.addDense( run3, N + run3, Hxa );
271  if( np > 0 ) hessian.addDense( run3, 2*N + run3, Hp );
272  if( nu > 0 ) hessian.addDense( run3, 3*N + run3, Hu );
273  if( nw > 0 ) hessian.addDense( run3, 4*N + run3, Hw );
274  }
275 
// --- Variable group xa (algebraic states): re-size Hessian rows to na. ---
276  Hx.init ( na, nx );
277  Hxa.init( na, na );
278  Hp.init ( na, np );
279  Hu.init ( na, nu );
280  Hw.init ( na, nw );
281 
282  for( run2 = nx; run2 < nx+na; run2++ ){
283 
284  // FIRST ORDER DERIVATIVES:
285  // ------------------------
286  fseed[y_index[0][run2]] = 1.0;
287  fcn[0].AD_forward( run3, fseed, R );
288  for( run1 = 0; run1 < nc; run1++ )
289  Dxa( run1, run2-nx ) = R[run1];
290  fseed[y_index[0][run2]] = 0.0;
291 
292  // SECOND ORDER DERIVATIVES:
293  // -------------------------
294  for( run1 = 0; run1 <= fcn[0].getNumberOfVariables(); run1++ ){
295  J[run1] = 0.0;
296  H[run1] = 0.0;
297  }
298 
299  fcn[0].AD_backward2( run3, bseed1, bseed2, J, H );
300 
301  for( run1 = 0 ; run1 < nx ; run1++ ) Hx ( run2-nx, run1 ) = -H[y_index[0][run1]];
302  for( run1 = nx ; run1 < nx+na ; run1++ ) Hxa( run2-nx, run1-nx ) = -H[y_index[0][run1]];
303  for( run1 = nx+na ; run1 < nx+na+np ; run1++ ) Hp ( run2-nx, run1-nx-na ) = -H[y_index[0][run1]];
304  for( run1 = nx+na+np ; run1 < nx+na+np+nu ; run1++ ) Hu ( run2-nx, run1-nx-na-np ) = -H[y_index[0][run1]];
305  for( run1 = nx+na+np+nu; run1 < nx+na+np+nu+nw; run1++ ) Hw ( run2-nx, run1-nx-na-np-nu ) = -H[y_index[0][run1]];
306  }
307 
308  if( na > 0 ){
309 
310  dBackward.setDense( run3, N+run3, Dxa );
311 
312  if( nx > 0 ) hessian.addDense( N+run3, run3, Hx );
313  if( na > 0 ) hessian.addDense( N+run3, N + run3, Hxa );
314  if( np > 0 ) hessian.addDense( N+run3, 2*N + run3, Hp );
315  if( nu > 0 ) hessian.addDense( N+run3, 3*N + run3, Hu );
316  if( nw > 0 ) hessian.addDense( N+run3, 4*N + run3, Hw );
317  }
318 
// --- Variable group p (parameters): Hessian rows re-sized to np. ---
319  Hx.init ( np, nx );
320  Hxa.init( np, na );
321  Hp.init ( np, np );
322  Hu.init ( np, nu );
323  Hw.init ( np, nw );
324 
325  for( run2 = nx+na; run2 < nx+na+np; run2++ ){
326 
327  // FIRST ORDER DERIVATIVES:
328  // ------------------------
329  fseed[y_index[0][run2]] = 1.0;
330  fcn[0].AD_forward( run3, fseed, R );
331  for( run1 = 0; run1 < nc; run1++ )
332  Dp( run1, run2-nx-na ) = R[run1];
333  fseed[y_index[0][run2]] = 0.0;
334 
335  // SECOND ORDER DERIVATIVES:
336  // -------------------------
337  for( run1 = 0; run1 <= fcn[0].getNumberOfVariables(); run1++ ){
338  J[run1] = 0.0;
339  H[run1] = 0.0;
340  }
341 
342  fcn[0].AD_backward2( run3, bseed1, bseed2, J, H );
343 
344  for( run1 = 0 ; run1 < nx ; run1++ ) Hx ( run2-nx-na, run1 ) = -H[y_index[0][run1]];
345  for( run1 = nx ; run1 < nx+na ; run1++ ) Hxa( run2-nx-na, run1-nx ) = -H[y_index[0][run1]];
346  for( run1 = nx+na ; run1 < nx+na+np ; run1++ ) Hp ( run2-nx-na, run1-nx-na ) = -H[y_index[0][run1]];
347  for( run1 = nx+na+np ; run1 < nx+na+np+nu ; run1++ ) Hu ( run2-nx-na, run1-nx-na-np ) = -H[y_index[0][run1]];
348  for( run1 = nx+na+np+nu; run1 < nx+na+np+nu+nw; run1++ ) Hw ( run2-nx-na, run1-nx-na-np-nu ) = -H[y_index[0][run1]];
349  }
350 
351  if( np > 0 ){
352 
353  dBackward.setDense( run3, 2*N+run3, Dp );
354 
355  if( nx > 0 ) hessian.addDense( 2*N+run3, run3, Hx );
356  if( na > 0 ) hessian.addDense( 2*N+run3, N + run3, Hxa );
357  if( np > 0 ) hessian.addDense( 2*N+run3, 2*N + run3, Hp );
358  if( nu > 0 ) hessian.addDense( 2*N+run3, 3*N + run3, Hu );
359  if( nw > 0 ) hessian.addDense( 2*N+run3, 4*N + run3, Hw );
360  }
361 
362 
// --- Variable group u (controls): Hessian rows re-sized to nu. ---
363  Hx.init ( nu, nx );
364  Hxa.init( nu, na );
365  Hp.init ( nu, np );
366  Hu.init ( nu, nu );
367  Hw.init ( nu, nw );
368 
369  for( run2 = nx+na+np; run2 < nx+na+np+nu; run2++ ){
370 
371  // FIRST ORDER DERIVATIVES:
372  // ------------------------
373  fseed[y_index[0][run2]] = 1.0;
374  fcn[0].AD_forward( run3, fseed, R );
375  for( run1 = 0; run1 < nc; run1++ )
376  Du( run1, run2-nx-na-np ) = R[run1];
377  fseed[y_index[0][run2]] = 0.0;
378 
379  // SECOND ORDER DERIVATIVES:
380  // -------------------------
381  for( run1 = 0; run1 <= fcn[0].getNumberOfVariables(); run1++ ){
382  J[run1] = 0.0;
383  H[run1] = 0.0;
384  }
385 
386  fcn[0].AD_backward2( run3, bseed1, bseed2, J, H );
387 
388  for( run1 = 0 ; run1 < nx ; run1++ ) Hx ( run2-nx-na-np, run1 ) = -H[y_index[0][run1]];
389  for( run1 = nx ; run1 < nx+na ; run1++ ) Hxa( run2-nx-na-np, run1-nx ) = -H[y_index[0][run1]];
390  for( run1 = nx+na ; run1 < nx+na+np ; run1++ ) Hp ( run2-nx-na-np, run1-nx-na ) = -H[y_index[0][run1]];
391  for( run1 = nx+na+np ; run1 < nx+na+np+nu ; run1++ ) Hu ( run2-nx-na-np, run1-nx-na-np ) = -H[y_index[0][run1]];
392  for( run1 = nx+na+np+nu; run1 < nx+na+np+nu+nw; run1++ ) Hw ( run2-nx-na-np, run1-nx-na-np-nu ) = -H[y_index[0][run1]];
393  }
394 
395  if( nu > 0 ){
396 
397  dBackward.setDense( run3, 3*N+run3, Du );
398 
399  if( nx > 0 ) hessian.addDense( 3*N+run3, run3, Hx );
400  if( na > 0 ) hessian.addDense( 3*N+run3, N + run3, Hxa );
401  if( np > 0 ) hessian.addDense( 3*N+run3, 2*N + run3, Hp );
402  if( nu > 0 ) hessian.addDense( 3*N+run3, 3*N + run3, Hu );
403  if( nw > 0 ) hessian.addDense( 3*N+run3, 4*N + run3, Hw );
404  }
405 
// --- Variable group w (disturbances): Hessian rows re-sized to nw. ---
406  Hx.init ( nw, nx );
407  Hxa.init( nw, na );
408  Hp.init ( nw, np );
409  Hu.init ( nw, nu );
410  Hw.init ( nw, nw );
411 
412  for( run2 = nx+na+np+nu; run2 < nx+na+np+nu+nw; run2++ ){
413 
414  // FIRST ORDER DERIVATIVES:
415  // ------------------------
416  fseed[y_index[0][run2]] = 1.0;
417  fcn[0].AD_forward( run3, fseed, R );
418  for( run1 = 0; run1 < nc; run1++ )
419  Dw( run1, run2-nx-na-np-nu ) = R[run1];
420  fseed[y_index[0][run2]] = 0.0;
421 
422  // SECOND ORDER DERIVATIVES:
423  // -------------------------
424  for( run1 = 0; run1 <= fcn[0].getNumberOfVariables(); run1++ ){
425  J[run1] = 0.0;
426  H[run1] = 0.0;
427  }
428 
429  fcn[0].AD_backward2( run3, bseed1, bseed2, J, H );
430 
431  for( run1 = 0 ; run1 < nx ; run1++ ) Hx ( run2-nx-na-np-nu, run1 ) = -H[y_index[0][run1]];
432  for( run1 = nx ; run1 < nx+na ; run1++ ) Hxa( run2-nx-na-np-nu, run1-nx ) = -H[y_index[0][run1]];
433  for( run1 = nx+na ; run1 < nx+na+np ; run1++ ) Hp ( run2-nx-na-np-nu, run1-nx-na ) = -H[y_index[0][run1]];
434  for( run1 = nx+na+np ; run1 < nx+na+np+nu ; run1++ ) Hu ( run2-nx-na-np-nu, run1-nx-na-np ) = -H[y_index[0][run1]];
435  for( run1 = nx+na+np+nu; run1 < nx+na+np+nu+nw; run1++ ) Hw ( run2-nx-na-np-nu, run1-nx-na-np-nu ) = -H[y_index[0][run1]];
436  }
437 
438  if( nw > 0 ){
439 
440  dBackward.setDense( run3, 4*N+run3, Dw );
441 
442  if( nx > 0 ) hessian.addDense( 4*N+run3, run3, Hx );
443  if( na > 0 ) hessian.addDense( 4*N+run3, N + run3, Hxa );
444  if( np > 0 ) hessian.addDense( 4*N+run3, 2*N + run3, Hp );
445  if( nu > 0 ) hessian.addDense( 4*N+run3, 3*N + run3, Hu );
446  if( nw > 0 ) hessian.addDense( 4*N+run3, 4*N + run3, Hw );
447  }
448 
// Release the per-grid-point AD work arrays.
449  delete[] bseed1;
450  delete[] bseed2;
451  delete[] R ;
452  delete[] J ;
453  delete[] H ;
454  delete[] fseed ;
455  }
456 
457 
458  return SUCCESSFUL_RETURN;
459 }
460 
462 
463 // end of file.
#define N
Data class for storing generic optimization variables.
Definition: ocp_iterate.hpp:57
Implements a very rudimentary block sparse matrix class.
returnValue evaluateSensitivities()
EvaluationPoint * z
USING_NAMESPACE_ACADO typedef TaylorVariable< Interval > T
ConstraintElement & operator=(const ConstraintElement &rhs)
void init(unsigned _nRows=0, unsigned _nCols=0)
Definition: matrix.hpp:135
returnValue setDense(uint rowIdx, uint colIdx, const DMatrix &value)
Allows to pass back messages to the calling function.
DVector evaluate(const EvaluationPoint &x, const int &number=0)
Definition: function.cpp:520
returnValue setZ(const uint &idx, const OCPiterate &iter)
Allows to conveniently handle (one-dimensional) grids consisting of time points.
Definition: grid.hpp:58
uint getLastIndex() const
#define CLOSE_NAMESPACE_ACADO
Stores and evaluates path constraints within optimal control problems.
#define ACADO_TRY(X)
returnValue getSubBlock(uint rowIdx, uint colIdx, DMatrix &value) const
Base class for all kind of constraints (except for bounds) within optimal control problems...
returnValue init(uint _nRows, uint _nCols)
EvaluationPoint * JJ
int getDim() const
GenericMatrix & setRow(unsigned _idx, const GenericVector< T > &_values)
Definition: matrix.hpp:213
returnValue setZero()
void rhs(const real_t *x, real_t *f)
returnValue evaluate(const OCPiterate &iter)
PathConstraint & operator=(const PathConstraint &rhs)
uint getNumPoints() const
virtual ~PathConstraint()
#define BEGIN_NAMESPACE_ACADO
returnValue addDense(uint rowIdx, uint colIdx, const DMatrix &value)
#define R
int getNumberOfVariables() const
Definition: function.cpp:264
GenericVector< T > getRow(unsigned _idx) const
Definition: matrix.hpp:197
#define ACADOERROR(retval)
uint getNumRows() const


acado
Author(s): Milan Vukov, Rien Quirynen
autogenerated on Mon Jun 10 2019 12:34:58