119 for( run2 = 0; run2 < nh; run2++ )
120 h_res(run2) -=
r.operator()(run2);
122 for( run2 = 0; run2 < nh; run2++ ){
124 for( run3 = 0; run3 < nh; run3++ )
125 S_h_res[run2] +=
S.operator()(run2,run3)*h_res(run3);
128 for( run2 = 0; run2 < nh; run2++ )
145 int run2, run3, run4;
155 double *bseed =
new double [nh];
156 double **J =
new double*[nh];
158 for( run2 = 0; run2 < nh; run2++ )
180 for( run2 = 0; run2 < nh; run2++ ) bseed[run2] = 0;
182 for( run2 = 0; run2 < nh; run2++ ){
190 for( run3 = 0; run3 <
nx; run3++ ){
191 Dx( 0, run3 ) += bseed_(0,0)*J[run2][
y_index[run3]]*
S_h_res[run2];
193 for( run3 = nx; run3 < nx+
na; run3++ ){
194 Dxa( 0, run3-nx ) += bseed_(0,0)*J[run2][
y_index[run3]]*
S_h_res[run2];
196 for( run3 = nx+na; run3 < nx+na+
np; run3++ ){
197 Dp( 0, run3-nx-na ) += bseed_(0,0)*J[run2][
y_index[run3]]*
S_h_res[run2];
199 for( run3 = nx+na+np; run3 < nx+na+np+
nu; run3++ ){
200 Du( 0, run3-nx-na-np ) += bseed_(0,0)*J[run2][
y_index[run3]]*
S_h_res[run2];
202 for( run3 = nx+na+np+nu; run3 < nx+na+np+nu+
nw; run3++ ){
203 Dw( 0, run3-nx-na-np-nu ) += bseed_(0,0)*J[run2][
y_index[run3]]*
S_h_res[run2];
216 if( GNhessian != 0 ){
221 for( run3 = 0; run3 < nnn; run3++ ){
222 for( run2 = 0; run2 < nh; run2++ ){
223 tmp( run2, run3 ) = 0.0;
224 for( run4 = 0; run4 < nh; run4++ ){
225 tmp( run2, run3 ) +=
S.operator()(run2,run4)*J[run4][
y_index[run3]];
231 int *Sidx =
new int[6];
232 int *Hidx =
new int[5];
238 Sidx[4] =
nx+na+np+
nu;
239 Sidx[5] =
nx+na+np+nu+
nw;
247 for( i = 0; i < 5; i++ ){
248 for( j = 0; j < 5; j++ ){
250 tmp2.
init(Sidx[i+1]-Sidx[i],Sidx[j+1]-Sidx[j]);
253 for( run3 = Sidx[i]; run3 < Sidx[i+1]; run3++ )
254 for( run4 = Sidx[j]; run4 < Sidx[j+1]; run4++ )
255 for( run2 = 0; run2 < nh; run2++ )
256 tmp2(run3-Sidx[i],run4-Sidx[j]) += J[run2][
y_index[run3]]*tmp(run2,run4);
258 if( tmp2.
getDim() != 0 ) GNhessian->
addDense(Hidx[i],Hidx[j],tmp2);
266 for( run2 = 0; run2 < nh; run2++ )
Data class for storing generic optimization variables.
returnValue init(const OCPiterate &x)
Implements a very rudimentary block sparse matrix class.
Base class for all kinds of objective function terms within optimal control problems.
LSQEndTerm & operator=(const LSQEndTerm &rhs)
Allows setting up and evaluating a general function based on SymbolicExpressions.
void init(unsigned _nRows=0, unsigned _nCols=0)
returnValue setDense(uint rowIdx, uint colIdx, const DMatrix &value)
Allows passing messages back to the calling function.
DVector evaluate(const EvaluationPoint &x, const int &number=0)
returnValue setZ(const uint &idx, const OCPiterate &iter)
BEGIN_NAMESPACE_ACADO typedef unsigned int uint
Allows convenient handling of (one-dimensional) grids consisting of time points.
uint getLastIndex() const
#define CLOSE_NAMESPACE_ACADO
returnValue evaluateSensitivitiesGN(BlockMatrix *GNhessian)
returnValue getSubBlock(uint rowIdx, uint colIdx, DMatrix &value) const
Stores and evaluates LSQ Mayer terms within optimal control problems.
returnValue evaluateSensitivities(BlockMatrix *hessian)
returnValue init(uint _nRows, uint _nCols)
Derived & setZero(Index size)
void rhs(const real_t *x, real_t *f)
returnValue AD_backward(const DVector &seed, EvaluationPoint &df, const int &number=0)
uint getNumPoints() const
#define ACADOWARNING(retval)
#define BEGIN_NAMESPACE_ACADO
returnValue evaluate(const OCPiterate &x)
returnValue addDense(uint rowIdx, uint colIdx, const DMatrix &value)
int getNumberOfVariables() const
ObjectiveElement & operator=(const ObjectiveElement &rhs)
#define ACADOERROR(retval)