for( run1 = 0; run1 < counter; run1++ ){
for( run1 = 0; run1 < counter+1; run1++ )
for( run1 = 0; run1 < counter; run1++ ){
for( run1 = 0; run1 < counter+1; run1++ )
for( run1 = 0; run1 <= T; run1++ ){
z[stageIdx].setZ( run1, iter );
for( run2 = 0; run2 < nc; run2++ )
for( run3 = 0; run3 < N; run3++ )
for( run1 = 0; run1 < nBDirs; run1++ )
ACADO_TRY( fcn[stageIdx].AD_backward( bseedTmp, JJ[stageIdx], run3 ) );
if( nx > 0 ) Dx .setRow( run1, JJ[stageIdx].getX () );
if( na > 0 ) Dxa.setRow( run1, JJ[stageIdx].getXA() );
if( np > 0 ) Dp .setRow( run1, JJ[stageIdx].getP () );
if( nu > 0 ) Du .setRow( run1, JJ[stageIdx].getU () );
if( nw > 0 ) Dw .setRow( run1, JJ[stageIdx].getW () );
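The loop above performs one reverse (adjoint) sweep per backward direction and copies the returned derivatives into rows of Dx, Dxa, Dp, Du and Dw. A minimal standalone sketch of that row-by-row pattern, using unit backward seeds and a hand-coded adjoint of a toy constraint in place of ACADO's Function::AD_backward (all names and the constraint itself are illustrative, not the ACADO API):

// Illustrative only: one reverse sweep per constraint row, as in the listing.
#include <cstdio>
#include <vector>

// Toy constraint c(x) = ( x0*x1, x1 + x2 ), mapping R^3 -> R^2.
// The adjoint returns xBar = (dc/dx)^T * bseed.
static void toyAdjoint( const std::vector<double>& x,
                        const std::vector<double>& bseed,
                        std::vector<double>&       xBar )
{
    xBar[0] = bseed[0]*x[1];
    xBar[1] = bseed[0]*x[0] + bseed[1];
    xBar[2] = bseed[1];
}

int main( )
{
    const int m = 2, n = 3;
    std::vector<double> x = { 1.0, 2.0, 3.0 };
    std::vector<double> D( m*n, 0.0 );                 // Jacobian dc/dx, row-major

    // Unit backward seed e_run1 yields row run1 of the Jacobian,
    // analogous to Dx.setRow( run1, JJ[stageIdx].getX() ).
    for( int run1 = 0; run1 < m; run1++ ){
        std::vector<double> bseedTmp( m, 0.0 ), xBar( n, 0.0 );
        bseedTmp[run1] = 1.0;
        toyAdjoint( x, bseedTmp, xBar );
        for( int j = 0; j < n; j++ ) D[run1*n + j] = xBar[j];
    }

    for( int i = 0; i < m; i++ )
        printf( "row %d: %6.2f %6.2f %6.2f\n", i, D[i*n], D[i*n+1], D[i*n+2] );
    return 0;
}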
for( run3 = 0; run3 < N; run3++ ){
for( run3 = 0; run3 < N; run3++ ){
double *bseed1 = new double[fcn[stageIdx].getDim()];
double *bseed2 = new double[fcn[stageIdx].getDim()];
double *R      = new double[fcn[stageIdx].getDim()];
double *J      = new double[fcn[stageIdx].getNumberOfVariables() +1];
double *H      = new double[fcn[stageIdx].getNumberOfVariables() +1];
double *fseed  = new double[fcn[stageIdx].getNumberOfVariables() +1];
for( run1 = 0; run1 < fcn[stageIdx].getDim()-nc; run1++ ){
for( run1 = 0; run1 < nc; run1++ ){
bseed1[fcn[stageIdx].getDim() - nc + run1] = seed(run1,0);
bseed2[fcn[stageIdx].getDim() - nc + run1] = 0.0;
for( run1 = 0; run1 < fcn[stageIdx].getNumberOfVariables()+1; run1++ )
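The block above allocates the seed and result buffers and places the backward seed seed(run1,0) only on the last nc outputs of the stage function, i.e. on the constraint components. A minimal sketch of the same setup with std::vector instead of raw new[] (dim, nVar, nc and multiplier are placeholder names and sizes, not ACADO identifiers):

// Sketch of the workspace setup with automatic storage management.
#include <cstdio>
#include <vector>

int main( )
{
    const int dim = 6, nVar = 9, nc = 2;             // placeholder sizes
    std::vector<double> multiplier = { 0.5, -1.0 };  // plays the role of seed(run1,0)

    std::vector<double> bseed1( dim, 0.0 ), bseed2( dim, 0.0 ), R( dim, 0.0 );
    std::vector<double> J( nVar + 1, 0.0 ), H( nVar + 1, 0.0 ), fseed( nVar + 1, 0.0 );

    // Only the last nc outputs are the constraint residuals,
    // so the backward seed carries the multipliers exactly there.
    for( int run1 = 0; run1 < nc; run1++ )
        bseed1[dim - nc + run1] = multiplier[run1];

    printf( "seeded entries %d..%d of bseed1\n", dim - nc, dim - 1 );
    (void)bseed2; (void)R; (void)J; (void)H; (void)fseed;  // filled by later AD sweeps
    return 0;
}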
for( run2 = 0; run2 < nx; run2++ ){
fseed[y_index[stageIdx][run2]] = 1.0;
fcn[stageIdx].AD_forward( run3, fseed, R );
for( run1 = 0; run1 < nc; run1++ )
Dx( run1, run2 ) = R[fcn[stageIdx].getDim() - nc + run1];
fseed[y_index[stageIdx][run2]] = 0.0;
for( run1 = 0; run1 <= fcn[stageIdx].getNumberOfVariables(); run1++ ){
fcn[stageIdx].AD_backward2( run3, bseed1, bseed2, J, H );
for( run1 = 0 ; run1 < nx ; run1++ ) Hx ( run2, run1 ) = -H[y_index[stageIdx][run1]];
for( run1 = nx ; run1 < nx+na ; run1++ ) Hxa( run2, run1-nx ) = -H[y_index[stageIdx][run1]];
for( run1 = nx+na ; run1 < nx+na+np ; run1++ ) Hp ( run2, run1-nx-na ) = -H[y_index[stageIdx][run1]];
for( run1 = nx+na+np ; run1 < nx+na+np+nu ; run1++ ) Hu ( run2, run1-nx-na-np ) = -H[y_index[stageIdx][run1]];
for( run1 = nx+na+np+nu; run1 < nx+na+np+nu+nw; run1++ ) Hw ( run2, run1-nx-na-np-nu ) = -H[y_index[stageIdx][run1]];
if( nx > 0 ) hessian.addDense( run3, run3, Hx );
if( na > 0 ) hessian.addDense( run3, N + run3, Hxa );
if( np > 0 ) hessian.addDense( run3, 2*N + run3, Hp );
if( nu > 0 ) hessian.addDense( run3, 3*N + run3, Hu );
if( nw > 0 ) hessian.addDense( run3, 4*N + run3, Hw );
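For each differential state the code sets a unit forward seed on y_index[stageIdx][run2], runs AD_forward, reads off one column of the constraint Jacobian, and then calls AD_backward2 so that H holds the second-order derivative row belonging to the seeded variable; that row is split (with a sign flip) into Hx, Hxa, Hp, Hu, Hw and accumulated into the block Hessian. A compact standalone sketch of this "one forward seed per Hessian row" pattern, with a hand-coded second-order sweep for a toy constraint in place of ACADO's AD_forward/AD_backward2 (names and the constraint are illustrative):

// Illustrative only: unit forward seeds fill a dense Hessian block row by row.
#include <cstdio>
#include <vector>

// Toy constraint c(x) = ( x0*x1, x1 + x2 ); the Hessian of lambda^T c(x) is
// [[0, l0, 0], [l0, 0, 0], [0, 0, 0]]. This routine returns (d^2(lambda^T c)/dx^2)*d,
// playing the role of one AD_forward + AD_backward2 pass with forward seed d.
static std::vector<double> hessTimesDir( const std::vector<double>& lambda,
                                         const std::vector<double>& d )
{
    return { lambda[0]*d[1], lambda[0]*d[0], 0.0 };
}

int main( )
{
    const int n = 3;
    std::vector<double> lambda = { 2.0, 5.0 };
    std::vector<double> Hx( n*n, 0.0 );              // dense n-by-n block, row-major

    // One second-order sweep per seeded variable: unit seed e_run2 fills row run2,
    // just as the listing fills Hx( run2, run1 ) from H[y_index[...][run1]].
    for( int run2 = 0; run2 < n; run2++ ){
        std::vector<double> fseed( n, 0.0 );
        fseed[run2] = 1.0;
        std::vector<double> Hrow = hessTimesDir( lambda, fseed );
        for( int run1 = 0; run1 < n; run1++ ) Hx[run2*n + run1] = Hrow[run1];
    }

    for( int i = 0; i < n; i++ )
        printf( "%6.2f %6.2f %6.2f\n", Hx[i*n], Hx[i*n+1], Hx[i*n+2] );
    return 0;
}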
for( run2 = nx; run2 < nx+na; run2++ ){
fseed[y_index[stageIdx][run2]] = 1.0;
fcn[stageIdx].AD_forward( run3, fseed, R );
for( run1 = 0; run1 < nc; run1++ )
Dxa( run1, run2-nx ) = R[fcn[stageIdx].getDim() - nc + run1];
fseed[y_index[stageIdx][run2]] = 0.0;
for( run1 = 0; run1 <= fcn[stageIdx].getNumberOfVariables(); run1++ ){
fcn[stageIdx].AD_backward2( run3, bseed1, bseed2, J, H );
for( run1 = 0 ; run1 < nx ; run1++ ) Hx ( run2-nx, run1 ) = -H[y_index[stageIdx][run1]];
for( run1 = nx ; run1 < nx+na ; run1++ ) Hxa( run2-nx, run1-nx ) = -H[y_index[stageIdx][run1]];
for( run1 = nx+na ; run1 < nx+na+np ; run1++ ) Hp ( run2-nx, run1-nx-na ) = -H[y_index[stageIdx][run1]];
for( run1 = nx+na+np ; run1 < nx+na+np+nu ; run1++ ) Hu ( run2-nx, run1-nx-na-np ) = -H[y_index[stageIdx][run1]];
for( run1 = nx+na+np+nu; run1 < nx+na+np+nu+nw; run1++ ) Hw ( run2-nx, run1-nx-na-np-nu ) = -H[y_index[stageIdx][run1]];
if( nx > 0 ) hessian.addDense( N+run3, run3, Hx );
if( na > 0 ) hessian.addDense( N+run3, N + run3, Hxa );
if( np > 0 ) hessian.addDense( N+run3, 2*N + run3, Hp );
if( nu > 0 ) hessian.addDense( N+run3, 3*N + run3, Hu );
if( nw > 0 ) hessian.addDense( N+run3, 4*N + run3, Hw );
for( run2 = nx+na; run2 < nx+na+np; run2++ ){
fseed[y_index[stageIdx][run2]] = 1.0;
fcn[stageIdx].AD_forward( run3, fseed, R );
for( run1 = 0; run1 < nc; run1++ )
Dp( run1, run2-nx-na ) = R[fcn[stageIdx].getDim() - nc + run1];
fseed[y_index[stageIdx][run2]] = 0.0;
for( run1 = 0; run1 <= fcn[stageIdx].getNumberOfVariables(); run1++ ){
fcn[stageIdx].AD_backward2( run3, bseed1, bseed2, J, H );
for( run1 = 0 ; run1 < nx ; run1++ ) Hx ( run2-nx-na, run1 ) = -H[y_index[stageIdx][run1]];
for( run1 = nx ; run1 < nx+na ; run1++ ) Hxa( run2-nx-na, run1-nx ) = -H[y_index[stageIdx][run1]];
for( run1 = nx+na ; run1 < nx+na+np ; run1++ ) Hp ( run2-nx-na, run1-nx-na ) = -H[y_index[stageIdx][run1]];
for( run1 = nx+na+np ; run1 < nx+na+np+nu ; run1++ ) Hu ( run2-nx-na, run1-nx-na-np ) = -H[y_index[stageIdx][run1]];
for( run1 = nx+na+np+nu; run1 < nx+na+np+nu+nw; run1++ ) Hw ( run2-nx-na, run1-nx-na-np-nu ) = -H[y_index[stageIdx][run1]];
if( nx > 0 ) hessian.addDense( 2*N+run3, run3, Hx );
if( na > 0 ) hessian.addDense( 2*N+run3, N + run3, Hxa );
if( np > 0 ) hessian.addDense( 2*N+run3, 2*N + run3, Hp );
if( nu > 0 ) hessian.addDense( 2*N+run3, 3*N + run3, Hu );
if( nw > 0 ) hessian.addDense( 2*N+run3, 4*N + run3, Hw );
for( run2 = nx+na+np; run2 < nx+na+np+nu; run2++ ){
fseed[y_index[stageIdx][run2]] = 1.0;
fcn[stageIdx].AD_forward( run3, fseed, R );
for( run1 = 0; run1 < nc; run1++ )
Du( run1, run2-nx-na-np ) = R[fcn[stageIdx].getDim() - nc + run1];
fseed[y_index[stageIdx][run2]] = 0.0;
for( run1 = 0; run1 <= fcn[stageIdx].getNumberOfVariables(); run1++ ){
fcn[stageIdx].AD_backward2( run3, bseed1, bseed2, J, H );
for( run1 = 0 ; run1 < nx ; run1++ ) Hx ( run2-nx-na-np, run1 ) = -H[y_index[stageIdx][run1]];
for( run1 = nx ; run1 < nx+na ; run1++ ) Hxa( run2-nx-na-np, run1-nx ) = -H[y_index[stageIdx][run1]];
for( run1 = nx+na ; run1 < nx+na+np ; run1++ ) Hp ( run2-nx-na-np, run1-nx-na ) = -H[y_index[stageIdx][run1]];
for( run1 = nx+na+np ; run1 < nx+na+np+nu ; run1++ ) Hu ( run2-nx-na-np, run1-nx-na-np ) = -H[y_index[stageIdx][run1]];
for( run1 = nx+na+np+nu; run1 < nx+na+np+nu+nw; run1++ ) Hw ( run2-nx-na-np, run1-nx-na-np-nu ) = -H[y_index[stageIdx][run1]];
if( nx > 0 ) hessian.addDense( 3*N+run3, run3, Hx );
if( na > 0 ) hessian.addDense( 3*N+run3, N + run3, Hxa );
if( np > 0 ) hessian.addDense( 3*N+run3, 2*N + run3, Hp );
if( nu > 0 ) hessian.addDense( 3*N+run3, 3*N + run3, Hu );
if( nw > 0 ) hessian.addDense( 3*N+run3, 4*N + run3, Hw );
for( run2 = nx+na+np+nu; run2 < nx+na+np+nu+nw; run2++ ){
fseed[y_index[stageIdx][run2]] = 1.0;
fcn[stageIdx].AD_forward( run3, fseed, R );
for( run1 = 0; run1 < nc; run1++ )
Dw( run1, run2-nx-na-np-nu ) = R[fcn[stageIdx].getDim() - nc + run1];
fseed[y_index[stageIdx][run2]] = 0.0;
for( run1 = 0; run1 <= fcn[stageIdx].getNumberOfVariables(); run1++ ){
fcn[stageIdx].AD_backward2( run3, bseed1, bseed2, J, H );
for( run1 = 0 ; run1 < nx ; run1++ ) Hx ( run2-nx-na-np-nu, run1 ) = -H[y_index[stageIdx][run1]];
for( run1 = nx ; run1 < nx+na ; run1++ ) Hxa( run2-nx-na-np-nu, run1-nx ) = -H[y_index[stageIdx][run1]];
for( run1 = nx+na ; run1 < nx+na+np ; run1++ ) Hp ( run2-nx-na-np-nu, run1-nx-na ) = -H[y_index[stageIdx][run1]];
for( run1 = nx+na+np ; run1 < nx+na+np+nu ; run1++ ) Hu ( run2-nx-na-np-nu, run1-nx-na-np ) = -H[y_index[stageIdx][run1]];
for( run1 = nx+na+np+nu; run1 < nx+na+np+nu+nw; run1++ ) Hw ( run2-nx-na-np-nu, run1-nx-na-np-nu ) = -H[y_index[stageIdx][run1]];
if( nx > 0 ) hessian.addDense( 4*N+run3, run3, Hx );
if( na > 0 ) hessian.addDense( 4*N+run3, N + run3, Hxa );
if( np > 0 ) hessian.addDense( 4*N+run3, 2*N + run3, Hp );
if( nu > 0 ) hessian.addDense( 4*N+run3, 3*N + run3, Hu );
if( nw > 0 ) hessian.addDense( 4*N+run3, 4*N + run3, Hw );
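The addDense calls in the five sections above address one block-sparse Hessian whose block rows and columns appear to be grouped by variable type in the order x, xa, p, u, w; stage run3 therefore contributes dense blocks at block coordinates (a*N+run3, b*N+run3) with a, b in {0,...,4}. A small sketch of that indexing with a toy block container standing in for ACADO's BlockMatrix (container, sizes and values below are illustrative only):

// Illustrative only: addressing a 5N-by-5N block matrix by (blockRow, blockCol).
#include <cstdio>
#include <map>
#include <utility>
#include <vector>

struct ToyBlockMatrix
{
    std::map< std::pair<int,int>, std::vector<double> > blocks;

    // Accumulate a dense block at block position (rowIdx, colIdx).
    void addDense( int rowIdx, int colIdx, const std::vector<double>& value )
    {
        std::vector<double>& b = blocks[{rowIdx, colIdx}];
        if( b.empty() ) b = value;
        else for( size_t i = 0; i < b.size(); i++ ) b[i] += value[i];
    }
};

int main( )
{
    const int N = 10, run3 = 3;                        // stage index within the horizon
    std::vector<double> Hx = { 1.0 }, Hu = { 2.0 };    // 1x1 blocks for brevity

    ToyBlockMatrix hessian;
    hessian.addDense(       run3,       run3, Hx );    // (x, x) block of stage run3
    hessian.addDense(       run3, 3*N + run3, Hu );    // (x, u) block of stage run3
    hessian.addDense( 3*N + run3,       run3, Hu );    // (u, x) block, the transposed slot

    printf( "stored %zu non-zero blocks\n", hessian.blocks.size() );
    return 0;
}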
for( int i=0; i<nFcn; ++i )