92 S[run1] = rhs.
S[run1];
99 r[run1] = rhs.
r[run1];
107 for( run2 = 0; (int) run2 <
fcn.
getDim(); run2++ ){
175 S[run1] = rhs.
S[run1];
182 r[run1] = rhs.
r[run1];
190 for( run2 = 0; (int) run2 <
fcn.
getDim(); run2++ ){
203 uint run1, run2, run3;
217 for( run1 = 0; run1 <
N; run1++ ){
228 if( run1 == 0 || run1 == 1 ){
231 r[run1].
print(
"reference");
246 if ( !(
S[run1].getNumCols() == nh &&
S[run1].getNumRows() == nh) )
248 The weighting matrix in the LSQ objective has a wrong dimension.);
250 for( run2 = 0; run2 < nh; run2++ ){
252 for( run3 = 0; run3 < nh; run3++ )
253 S_h_res[run1][run2] +=
S[run1].
operator()(run2,run3)*h_res(run3);
256 for( run2 = 0; run2 < nh; run2++ ){
257 currentValue += 0.5*h_res(run2)*
S_h_res[run1][run2];
261 for( run2 = 0; run2 < nh; run2++ ){
262 S_h_res[run1][run2] = h_res(run2);
263 currentValue += 0.5*h_res(run2)*h_res(run2);
267 allValues( run1,0 ) = currentValue;
275 obj = allValues(0, 0);
287 int run1, run2, run3, run4;
297 double *bseed =
new double [nh];
298 double **J =
new double*[nh];
300 for( run2 = 0; run2 < nh; run2++ )
316 for( run1 = 0; run1 <
N; run1++ ){
324 for( run2 = 0; run2 < nh; run2++ ) bseed[run2] = 0;
326 for( run2 = 0; run2 < nh; run2++ ){
334 for( run3 = 0; run3 <
nx; run3++ ){
335 Dx( 0, run3 ) += bseed_(0,0)*J[run2][
y_index[run3]]*
S_h_res[run1][run2];
337 for( run3 = nx; run3 < nx+
na; run3++ ){
338 Dxa( 0, run3-nx ) += bseed_(0,0)*J[run2][
y_index[run3]]*
S_h_res[run1][run2];
340 for( run3 = nx+na; run3 < nx+na+
np; run3++ ){
341 Dp( 0, run3-nx-na ) += bseed_(0,0)*J[run2][
y_index[run3]]*
S_h_res[run1][run2];
343 for( run3 = nx+na+np; run3 < nx+na+np+
nu; run3++ ){
344 Du( 0, run3-nx-na-np ) += bseed_(0,0)*J[run2][
y_index[run3]]*
S_h_res[run1][run2];
346 for( run3 = nx+na+np+nu; run3 < nx+na+np+nu+
nw; run3++ ){
347 Dw( 0, run3-nx-na-np-nu ) += bseed_(0,0)*J[run2][
y_index[run3]]*
S_h_res[run1][run2];
363 for( run3 = 0; run3 < nnn; run3++ ){
364 for( run2 = 0; run2 < nh; run2++ ){
366 tmp( run2, run3 ) = 0.0;
367 for( run4 = 0; run4 < nh; run4++ ){
368 tmp( run2, run3 ) +=
S[run1].operator()(run2,run4)*J[run4][
y_index[run3]];
372 tmp( run2, run3 ) = J[run2][
y_index[run3]];
378 int *Sidx =
new int[6];
379 int *Hidx =
new int[5];
385 Sidx[4] =
nx+na+np+
nu;
386 Sidx[5] =
nx+na+np+nu+
nw;
394 for( i = 0; i < 5; i++ ){
395 for( j = 0; j < 5; j++ ){
397 tmp2.
init(Sidx[i+1]-Sidx[i],Sidx[j+1]-Sidx[j]);
400 for( run3 = Sidx[i]; run3 < Sidx[i+1]; run3++ )
401 for( run4 = Sidx[j]; run4 < Sidx[j+1]; run4++ )
402 for( run2 = 0; run2 < nh; run2++ )
403 tmp2(run3-Sidx[i],run4-Sidx[j]) += J[run2][
y_index[run3]]*tmp(run2,run4);
412 for( run2 = 0; run2 < nh; run2++ )
424 int run1, run2, run3, run4;
434 double *bseed =
new double [nh];
435 double **J =
new double*[nh];
437 for( run2 = 0; run2 < nh; run2++ )
453 for( run1 = 0; run1 <
N; run1++ ){
461 for( run2 = 0; run2 < nh; run2++ ) bseed[run2] = 0;
473 for( run2 = 0; run2 < nh; run2++ )
474 J[run2][run3] = bseed[run2];
479 for( run2 = 0; run2 < nh; run2++ ){
489 for( run3 = 0; run3 <
nx; run3++ ){
490 Dx( 0, run3 ) += bseed_(0,0)*J[run2][
y_index[run3]]*
S_h_res[run1][run2];
492 for( run3 = nx; run3 < nx+
na; run3++ ){
493 Dxa( 0, run3-nx ) += bseed_(0,0)*J[run2][
y_index[run3]]*
S_h_res[run1][run2];
495 for( run3 = nx+na; run3 < nx+na+
np; run3++ ){
496 Dp( 0, run3-nx-na ) += bseed_(0,0)*J[run2][
y_index[run3]]*
S_h_res[run1][run2];
498 for( run3 = nx+na+np; run3 < nx+na+np+
nu; run3++ ){
499 Du( 0, run3-nx-na-np ) += bseed_(0,0)*J[run2][
y_index[run3]]*
S_h_res[run1][run2];
501 for( run3 = nx+na+np+nu; run3 < nx+na+np+nu+
nw; run3++ ){
502 Dw( 0, run3-nx-na-np-nu ) += bseed_(0,0)*J[run2][
y_index[run3]]*
S_h_res[run1][run2];
514 if( GNhessian != 0 ){
519 for( run3 = 0; run3 < nnn; run3++ ){
520 for( run2 = 0; run2 < nh; run2++ ){
522 tmp( run2, run3 ) = 0.0;
523 for( run4 = 0; run4 < nh; run4++ ){
524 tmp( run2, run3 ) +=
S[run1].operator()(run2,run4)*J[run4][
y_index[run3]];
528 tmp( run2, run3 ) = J[run2][
y_index[run3]];
534 int *Sidx =
new int[6];
535 int *Hidx =
new int[5];
541 Sidx[4] =
nx+na+np+
nu;
542 Sidx[5] =
nx+na+np+nu+
nw;
550 for( i = 0; i < 5; i++ ){
551 for( j = 0; j < 5; j++ ){
553 tmp2.
init(Sidx[i+1]-Sidx[i],Sidx[j+1]-Sidx[j]);
556 for( run3 = Sidx[i]; run3 < Sidx[i+1]; run3++ )
557 for( run4 = Sidx[j]; run4 < Sidx[j+1]; run4++ )
558 for( run2 = 0; run2 < nh; run2++ )
559 tmp2(run3-Sidx[i],run4-Sidx[j]) += J[run2][
y_index[run3]]*tmp(run2,run4);
561 if( tmp2.
getDim() != 0 ) GNhessian->
addDense(Hidx[i],Hidx[j],tmp2);
569 for( run2 = 0; run2 < nh; run2++ )
598 for( run1 = 0; run1 <
N; run1++ )
603 for( run1 = 0; run1 <
N; run1++ ) {
Data class for storing generic optimization variables.
returnValue init(const OCPiterate &x)
Implements a very rudimentary block sparse matrix class.
Base class for all kind of objective function terms within optimal control problems.
Allows to set up and evaluate a general function based on SymbolicExpressions.
void init(unsigned _nRows=0, unsigned _nCols=0)
double getTime(uint pointIdx) const
double getFirstTime() const
returnValue evaluate(double t, double *result) const
returnValue setDense(uint rowIdx, uint colIdx, const DMatrix &value)
returnValue setReference(const VariablesGrid &ref)
returnValue add(double tStart, double tEnd, const DVector constant)
Provides a time grid consisting of vector-valued optimization variables at each grid point...
returnValue getWeigthingtMatrix(const unsigned _index, DMatrix &_matrix) const
Allows to pass back messages to the calling function.
DVector evaluate(const EvaluationPoint &x, const int &number=0)
returnValue evaluateSensitivitiesGN(BlockMatrix *GNhessian)
returnValue setZ(const uint &idx, const OCPiterate &iter)
BEGIN_NAMESPACE_ACADO typedef unsigned int uint
#define CLOSE_NAMESPACE_ACADO
returnValue getSubBlock(uint rowIdx, uint colIdx, DMatrix &value) const
returnValue evaluateSensitivities(BlockMatrix *hessian)
MatrixVariablesGrid * S_temp
Provides a time grid consisting of matrix-valued optimization variables at each grid point...
returnValue init(uint _nRows, uint _nCols)
virtual returnValue print(std::ostream &stream=std::cout, const std::string &name=DEFAULT_LABEL, const std::string &startString=DEFAULT_START_STRING, const std::string &endString=DEFAULT_END_STRING, uint width=DEFAULT_WIDTH, uint precision=DEFAULT_PRECISION, const std::string &colSeparator=DEFAULT_COL_SEPARATOR, const std::string &rowSeparator=DEFAULT_ROW_SEPARATOR) const
Derived & setZero(Index size)
Allows to work with piecewise-continuous functions defined over a scalar time interval.
BooleanType ADisSupported() const
returnValue evaluate(const OCPiterate &x)
void rhs(const real_t *x, real_t *f)
DMatrix getMatrix(uint pointIdx) const
returnValue AD_backward(const DVector &seed, EvaluationPoint &df, const int &number=0)
DVector AD_forward(const EvaluationPoint &x, const int &number=0)
uint getNumPoints() const
DVector getVector(uint pointIdx) const
#define ACADOWARNING(retval)
#define BEGIN_NAMESPACE_ACADO
Stores and evaluates LSQ terms within optimal control problems.
returnValue getIntegral(InterpolationMode mode, DVector &value) const
returnValue addDense(uint rowIdx, uint colIdx, const DMatrix &value)
int getNumberOfVariables() const
LSQTerm & operator=(const LSQTerm &rhs)
ObjectiveElement & operator=(const ObjectiveElement &rhs)
#define ACADOERROR(retval)
#define ACADOERRORTEXT(retval, text)