optimization_problem_interface.cpp
1 /*********************************************************************
2  *
3  * Software License Agreement
4  *
5  * Copyright (c) 2020,
6  * TU Dortmund - Institute of Control Theory and Systems Engineering.
7  * All rights reserved.
8  *
9  * This program is free software: you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation, either version 3 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17  * GNU General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program. If not, see <https://www.gnu.org/licenses/>.
21  *
22  * Authors: Christoph Rösmann
23  *********************************************************************/
24 
25 #include <corbo-optimization/optimization_problem_interface.h>
26 
27 #include <corbo-numerics/finite_differences.h>
28 
29 namespace corbo {
30 
31 double OptimizationProblemInterface::computeValueObjective()
32 {
33  double value = computeValueNonLsqObjective();
34  if (getLsqObjectiveDimension() > 0)
35  {
36  Eigen::VectorXd values(getLsqObjectiveDimension());
37  computeValuesLsqObjective(values);
38  value += values.squaredNorm();
39  }
40  return value;
41 }
42 
43 void OptimizationProblemInterface::getParameterVector(Eigen::Ref<Eigen::VectorXd> x)
44 {
45  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized, "OptimizationProblemInterface::getParameterVector(): default implementation might be slow.");
46  assert(x.size() == getParameterDimension());
47  for (int i = 0; i < getParameterDimension(); ++i) x[i] = getParameterValue(i);
48 }
49 
50 void OptimizationProblemInterface::setParameterVector(const Eigen::Ref<const Eigen::VectorXd>& x)
51 {
52  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized, "OptimizationProblemInterface::setParameterVector(): default implementation might be slow.");
53  for (int i = 0; i < getParameterDimension(); ++i) setParameterValue(i, x[i]);
54 }
55 
56 void OptimizationProblemInterface::applyIncrement(const Eigen::Ref<const Eigen::VectorXd>& increment)
57 {
58  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized, "OptimizationProblemInterface::applyIncrement(): default implementation might be slow.");
59  assert(increment.size() == getParameterDimension());
60  for (int i = 0; i < increment.size(); ++i) setParameterValue(i, getParameterValue(i) + increment[i]);
61 }
62 
63 void OptimizationProblemInterface::applyIncrement(int idx, double increment)
64 {
65  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized, "OptimizationProblemInterface::applyIncrement(): default implementation might be slow.");
66  setParameterValue(idx, getParameterValue(idx) + increment);
67 }
68 
69 void OptimizationProblemInterface::setBounds(const Eigen::Ref<const Eigen::VectorXd>& lb, const Eigen::Ref<const Eigen::VectorXd>& ub)
70 {
71  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized, "OptimizationProblemInterface::setBounds(): default implementation might be slow.");
72  assert(lb.size() == getParameterDimension());
73  assert(ub.size() == getParameterDimension());
74  for (int i = 0; i < getParameterDimension(); ++i)
75  {
76  setLowerBound(i, lb[i]);
77  setUpperBound(i, ub[i]);
78  }
79 }
80 
81 void OptimizationProblemInterface::getBounds(Eigen::Ref<Eigen::VectorXd> lb, Eigen::Ref<Eigen::VectorXd> ub)
82 {
83  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
84  "OptimizationProblemInterface::getBounds(): default implementation might be slow.");
85  assert(lb.size() == getParameterDimension());
86  assert(ub.size() == getParameterDimension());
87  for (int i = 0; i < getParameterDimension(); ++i)
88  {
89  lb[i] = getLowerBound(i);
90  ub[i] = getUpperBound(i);
91  }
92 }
93 
94 int OptimizationProblemInterface::finiteCombinedBoundsDimension()
95 {
96  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
97  "OptimizationProblemInterface::finiteCombinedBoundsDimension(): default implementation might be slow.");
98  int dim = 0;
99  for (int i = 0; i < getParameterDimension(); ++i)
100  {
101  if (getLowerBound(i) > -CORBO_INF_DBL || getUpperBound(i) < CORBO_INF_DBL) ++dim;
102  }
103  return dim;
104 }
105 
106 int OptimizationProblemInterface::finiteBoundsDimension()
107 {
108  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized, "OptimizationProblemInterface::finiteBoundsDimension(): default implementation might be slow.");
109  int dim = 0;
110  for (int i = 0; i < getParameterDimension(); ++i)
111  {
112  if (getLowerBound(i) > -CORBO_INF_DBL) ++dim;
113  if (getUpperBound(i) < CORBO_INF_DBL) ++dim;
114  }
115  return dim;
116 }
117 
118 void OptimizationProblemInterface::computeValuesActiveInequality(Eigen::Ref<Eigen::VectorXd> values, double weight)
119 {
120  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
121  "OptimizationProblemInterface::computeValuesActiveInequality(): default implementation might be slow.");
122  computeValuesInequality(values);
123  for (int i = 0; i < values.size(); ++i)
124  {
125  if (values[i] < 0)
126  values[i] = 0;
127  else
128  values[i] *= weight;
129  }
130 }
131 
132 void OptimizationProblemInterface::computeDistanceFiniteCombinedBounds(Eigen::Ref<Eigen::VectorXd> values)
133 {
134  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
135  "OptimizationProblemInterface::computeDistanceFiniteCombinedBounds(): default implementation might be slow.");
136  double lb, ub, x;
137  int idx = 0;
138  for (int i = 0; i < getParameterDimension(); ++i)
139  {
140  lb = getLowerBound(i);
141  ub = getUpperBound(i);
142  if (lb > -CORBO_INF_DBL || ub < CORBO_INF_DBL)
143  {
144  assert(idx < finiteBoundsDimension());
145  x = getParameterValue(i);
146  if (x < lb)
147  values[idx] = lb - x;
148  else if (x > ub)
149  values[idx] = x - ub;
150  else
151  values[idx] = 0;
152  ++idx;
153  }
154  }
155 }
156 
157 void OptimizationProblemInterface::computeLowerAndUpperBoundDiff(Eigen::Ref<Eigen::VectorXd> lb_minus_x, Eigen::Ref<Eigen::VectorXd> ub_minus_x)
158 {
159  getBounds(lb_minus_x, ub_minus_x);
160  Eigen::VectorXd x(getParameterDimension());
161  getParameterVector(x);
162  lb_minus_x -= x;
163  ub_minus_x -= x;
164 }
165 
166 void OptimizationProblemInterface::getParametersAndBoundsFinite(Eigen::Ref<Eigen::VectorXd> lb_finite_bounds,
167  Eigen::Ref<Eigen::VectorXd> ub_finite_bounds,
168  Eigen::Ref<Eigen::VectorXd> x_finite_bounds)
169 {
170  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
171  "OptimizationProblemInterface::getParametersAndBoundsFinite(): default implementation might be slow.");
172  double lb, ub;
173  int idx = 0;
174  for (int i = 0; i < getParameterDimension(); ++i)
175  {
176  lb = getLowerBound(i);
177  ub = getUpperBound(i);
178  if (lb > -CORBO_INF_DBL || ub < CORBO_INF_DBL)
179  {
180  assert(idx < finiteBoundsDimension());
181  lb_finite_bounds(idx) = lb;
182  ub_finite_bounds(idx) = ub;
183  x_finite_bounds(idx) = getParameterValue(i);
184  ++idx;
185  }
186  }
187 }
188 
189 void OptimizationProblemInterface::computeGradientObjective(Eigen::Ref<Eigen::VectorXd> gradient)
190 {
191  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
192  "OptimizationProblemInterface::computeGradientObjective(): default implementation might be slow.");
193  if (getObjectiveDimension() < 1) return;
194  int dim_x = getParameterDimension();
195  assert(gradient.size() == dim_x);
196 
197  // TODO(roesmann) generic interface
198  auto inc = [this](int idx, double inc) { applyIncrement(idx, inc); };
199  auto eval = [this](Eigen::Ref<Eigen::VectorXd> values) { values[0] = computeValueObjective(); };
200  CentralDifferences::jacobian(inc, eval, gradient.transpose());
201 }
202 
203 void OptimizationProblemInterface::computeGradientNonLsqObjective(Eigen::Ref<Eigen::VectorXd> gradient)
204 {
205  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
206  "OptimizationProblemInterface::computeGradientNonLsqObjective(): default implementation might be slow.");
207  if (getNonLsqObjectiveDimension() < 1) return;
208  int dim_x = getParameterDimension();
209  assert(gradient.size() == dim_x);
210 
211  // TODO(roesmann) generic interface
212  auto inc = [this](int idx, double inc) { applyIncrement(idx, inc); };
213  auto eval = [this](Eigen::Ref<Eigen::VectorXd> values) { values[0] = computeValueNonLsqObjective(); };
214  CentralDifferences::jacobian(inc, eval, gradient.transpose());
215 }
216 
217 void OptimizationProblemInterface::computeDenseJacobianLsqObjective(Eigen::Ref<Eigen::MatrixXd> jacobian, const double* multipliers)
218 {
219  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
220  "OptimizationProblemInterface::computeDenseJacobianLsqObjective(): default implementation might be slow.");
221  int dim_obj = getLsqObjectiveDimension();
222  if (dim_obj < 1) return;
223  int dim_x = getParameterDimension();
224  assert(jacobian.rows() == dim_obj);
225  assert(jacobian.cols() == dim_x);
226 
227  // TODO(roesmann) generic interface
228  auto inc = [this](int idx, double inc) { applyIncrement(idx, inc); };
229  auto eval = [this](Eigen::Ref<Eigen::VectorXd> values) { computeValuesLsqObjective(values); };
230  CentralDifferences::jacobian(inc, eval, jacobian);
231 
232  if (multipliers) jacobian.array().colwise() *= Eigen::Map<const Eigen::ArrayXd>(multipliers, dim_obj);
233 }
234 
235 int OptimizationProblemInterface::computeSparseJacobianLsqObjectiveNNZ()
236 {
237  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
238  "OptimizationProblemInterface::computeSparseJacobianLsqObjectiveNNZ(): default implementation might be slow.");
239  // the default implementation is not aware of any sparse structure, hence we assume worst case
240  return getLsqObjectiveDimension() * getParameterDimension();
241 }
242 void OptimizationProblemInterface::computeSparseJacobianLsqObjectiveStructure(Eigen::Ref<Eigen::VectorXi> i_row, Eigen::Ref<Eigen::VectorXi> j_col)
243 {
244  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
245  "OptimizationProblemInterface::computeSparseJacobianLsqObjectiveStructure(): default implementation might be slow.");
246  if (getLsqObjectiveDimension() < 1) return;
247  // worst-case implementation
248  int nz_idx = 0;
249  for (int i = 0; i < getLsqObjectiveDimension(); ++i)
250  {
251  for (int j = 0; j < getParameterDimension(); ++j)
252  {
253  i_row[nz_idx] = i;
254  j_col[nz_idx] = j;
255  ++nz_idx;
256  }
257  }
258 }
259 void OptimizationProblemInterface::computeSparseJacobianLsqObjectiveValues(Eigen::Ref<Eigen::VectorXd> values, const double* multipliers)
260 {
261  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
262  "OptimizationProblemInterface::computeSparseJacobianLsqObjectiveValues(): default implementation might be slow.");
263  if (getLsqObjectiveDimension() < 1) return;
264  // worst-case implementation
265  int dim_obj = getLsqObjectiveDimension();
266  int dim_x = getParameterDimension();
267  Eigen::MatrixXd dense_jacob(dim_obj, dim_x);
268  computeDenseJacobianLsqObjective(dense_jacob, multipliers);
269 
270  int nz_idx = 0;
271  for (int i = 0; i < getLsqObjectiveDimension(); ++i)
272  {
273  for (int j = 0; j < getParameterDimension(); ++j)
274  {
275  values[nz_idx] = dense_jacob(i, j);
276  ++nz_idx;
277  }
278  }
279 }
280 
281 void OptimizationProblemInterface::computeSparseJacobianLsqObjective(Eigen::SparseMatrix<double>& jacobian, const double* multipliers)
282 {
283  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
284  "OptimizationProblemInterface::computeSparseJacobianLsqObjective(): default implementation might be slow.");
285  int dim_obj = getLsqObjectiveDimension();
286  if (dim_obj < 1) return;
287  int dim_x = getParameterDimension();
288  Eigen::MatrixXd dense_jacob(dim_obj, dim_x);
289  computeDenseJacobianLsqObjective(dense_jacob, multipliers);
290  jacobian = dense_jacob.sparseView();
291 }
292 
293 void OptimizationProblemInterface::computeDenseJacobianEqualities(Eigen::Ref<Eigen::MatrixXd> jacobian, const double* multipliers)
294 {
295  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
296  "OptimizationProblemInterface::computeDenseJacobianEqualities(): default implementation might be slow.");
297  int dim_eq = getEqualityDimension();
298  int dim_x = getParameterDimension();
299  assert(jacobian.rows() == dim_eq);
300  assert(jacobian.cols() == dim_x);
301 
302  // TODO(roesmann) generic interface
303  CentralDifferences diff;
304  auto inc = [this](int idx, double inc) { applyIncrement(idx, inc); };
305  auto eval = [this](Eigen::Ref<Eigen::VectorXd> values) { computeValuesEquality(values); };
306  diff.computeJacobian(inc, eval, jacobian);
307 
308  if (multipliers) jacobian.array().colwise() *= Eigen::Map<const Eigen::ArrayXd>(multipliers, dim_eq);
309 }
310 
311 int OptimizationProblemInterface::computeSparseJacobianEqualitiesNNZ()
312 {
313  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
314  "OptimizationProblemInterface::computeSparseJacobianEqualitiesNNZ(): default implementation might be slow.");
315  // the default implementation is not aware of any sparse structure, hence we assume worst case
316  return getEqualityDimension() * getParameterDimension();
317 }
318 void OptimizationProblemInterface::computeSparseJacobianEqualitiesStructure(Eigen::Ref<Eigen::VectorXi> i_row, Eigen::Ref<Eigen::VectorXi> j_col)
319 {
320  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
321  "OptimizationProblemInterface::computeSparseJacobianEqualitiesStructure(): default implementation might be slow.");
322  // worst-case implementation
323  int nz_idx = 0;
324  for (int i = 0; i < getEqualityDimension(); ++i)
325  {
326  for (int j = 0; j < getParameterDimension(); ++j)
327  {
328  i_row[nz_idx] = i;
329  j_col[nz_idx] = j;
330  ++nz_idx;
331  }
332  }
333 }
334 void OptimizationProblemInterface::computeSparseJacobianEqualitiesValues(Eigen::Ref<Eigen::VectorXd> values, const double* multipliers)
335 {
336  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
337  "OptimizationProblemInterface::computeSparseJacobianEqualitiesValues(): default implementation might be slow.");
338  // worst-case implementation
339  int dim_eq = getEqualityDimension();
340  int dim_x = getParameterDimension();
341  Eigen::MatrixXd dense_jacob(dim_eq, dim_x);
342  computeDenseJacobianEqualities(dense_jacob, multipliers);
343 
344  int nz_idx = 0;
345  for (int i = 0; i < getEqualityDimension(); ++i)
346  {
347  for (int j = 0; j < getParameterDimension(); ++j)
348  {
349  values[nz_idx] = dense_jacob(i, j);
350  ++nz_idx;
351  }
352  }
353 }
354 
355 void OptimizationProblemInterface::computeSparseJacobianEqualities(Eigen::SparseMatrix<double>& jacobian, const double* multipliers)
356 {
357  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
358  "OptimizationProblemInterface::computeSparseJacobianEqualities(): default implementation might be slow.");
359  int dim_eq = getEqualityDimension();
360  int dim_x = getParameterDimension();
361  Eigen::MatrixXd dense_jacob(dim_eq, dim_x);
362  computeDenseJacobianEqualities(dense_jacob, multipliers);
363  jacobian = dense_jacob.sparseView();
364 }
365 
366 void OptimizationProblemInterface::computeDenseJacobianInequalities(Eigen::Ref<Eigen::MatrixXd> jacobian, const double* multipliers)
367 {
368  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
369  "OptimizationProblemInterface::computeDenseJacobianInequalities(): default implementation might be slow.");
370  int dim_ineq = getInequalityDimension();
371  int dim_x = getParameterDimension();
372  assert(jacobian.rows() == dim_ineq);
373  assert(jacobian.cols() == dim_x);
374 
375  // TODO(roesmann) generic interface
376  CentralDifferences diff;
377  auto inc = [this](int idx, double inc) { applyIncrement(idx, inc); };
378  auto eval = [this](Eigen::Ref<Eigen::VectorXd> values) { computeValuesInequality(values); };
379  diff.computeJacobian(inc, eval, jacobian);
380 
381  if (multipliers) jacobian.array().colwise() *= Eigen::Map<const Eigen::ArrayXd>(multipliers, dim_ineq);
382 }
383 
384 int OptimizationProblemInterface::computeSparseJacobianInequalitiesNNZ()
385 {
386  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
387  "OptimizationProblemInterface::computeSparseJacobianInequalitiesNNZ(): default implementation might be slow.");
388  // the default implementation is not aware of any sparse structure, hence we assume worst case
389  return getInequalityDimension() * getParameterDimension();
390 }
391 void OptimizationProblemInterface::computeSparseJacobianInequalitiesStructure(Eigen::Ref<Eigen::VectorXi> i_row, Eigen::Ref<Eigen::VectorXi> j_col)
392 {
393  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
394  "OptimizationProblemInterface::computeSparseJacobianInequalitiesStructure(): default implementation might be slow.");
395  // worst-case implementation
396  int nz_idx = 0;
397  for (int i = 0; i < getInequalityDimension(); ++i)
398  {
399  for (int j = 0; j < getParameterDimension(); ++j)
400  {
401  i_row[nz_idx] = i;
402  j_col[nz_idx] = j;
403  ++nz_idx;
404  }
405  }
406 }
407 void OptimizationProblemInterface::computeSparseJacobianInequalitiesValues(Eigen::Ref<Eigen::VectorXd> values, const double* multipliers)
408 {
409  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
410  "OptimizationProblemInterface::computeSparseJacobianInequalitiesValues(): default implementation might be slow.");
411  // worst-case implementation
412  int dim_ineq = getInequalityDimension();
413  int dim_x = getParameterDimension();
414  Eigen::MatrixXd dense_jacob(dim_ineq, dim_x);
415  computeDenseJacobianInequalities(dense_jacob, multipliers);
416 
417  int nz_idx = 0;
418  for (int i = 0; i < getInequalityDimension(); ++i)
419  {
420  for (int j = 0; j < getParameterDimension(); ++j)
421  {
422  values[nz_idx] = dense_jacob(i, j);
423  ++nz_idx;
424  }
425  }
426 }
427 
428 void OptimizationProblemInterface::computeSparseJacobianInequalities(Eigen::SparseMatrix<double>& jacobian, const double* multipliers)
429 {
430  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
431  "OptimizationProblemInterface::computeSparseJacobianInequalities(): default implementation might be slow.");
432  int dim_ineq = getInequalityDimension();
433  int dim_x = getParameterDimension();
434  Eigen::MatrixXd dense_jacob(dim_ineq, dim_x);
435  computeDenseJacobianInequalities(dense_jacob, multipliers);
436  jacobian = dense_jacob.sparseView();
437 }
438 
439 void OptimizationProblemInterface::computeDenseJacobianActiveInequalities(Eigen::Ref<Eigen::MatrixXd> jacobian, double weight)
440 {
441  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
442  "OptimizationProblemInterface::computeDenseJacobianActiveInequalities(): default implementation might be slow.");
443  int dim_ineq = getInequalityDimension();
444  int dim_x = getParameterDimension();
445  assert(jacobian.rows() == dim_ineq);
446  assert(jacobian.cols() == dim_x);
447 
448  // TODO(roesmann) generic interface
449  CentralDifferences diff;
450  auto inc = [this](int idx, double inc) { applyIncrement(idx, inc); };
451  auto eval = [this](Eigen::Ref<Eigen::VectorXd> values) { computeValuesActiveInequality(values); };
452  // TODO(roesmann) inefficient! We can better iterate all values, check if active, and compute n x 1 subjacobians each
453  diff.computeJacobian(inc, eval, jacobian);
454 
455  if (weight != 1) jacobian *= weight;
456 }
457 void OptimizationProblemInterface::computeSparseJacobianActiveInequalitiesValues(Eigen::Ref<Eigen::VectorXd> values, double weight)
458 {
459  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
460  "OptimizationProblemInterface::computeSparseJacobianActiveInequalitiesValues(): default implementation might be slow.");
461  // worst-case implementation
462  int dim_ineq = getInequalityDimension();
463  int dim_x = getParameterDimension();
464  Eigen::MatrixXd dense_jacob(dim_ineq, dim_x);
465  computeDenseJacobianActiveInequalities(dense_jacob, weight);
466 
467  int nz_idx = 0;
468  for (int i = 0; i < getInequalityDimension(); ++i)
469  {
470  for (int j = 0; j < getParameterDimension(); ++j)
471  {
472  values[nz_idx] = dense_jacob(i, j);
473  ++nz_idx;
474  }
475  }
476 }
477 
478 void OptimizationProblemInterface::computeSparseJacobianActiveInequalities(Eigen::SparseMatrix<double>& jacobian, double weight)
479 {
480  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
481  "OptimizationProblemInterface::computeSparseJacobianActiveInequalities(): default implementation might be slow.");
482  int dim_ineq = getInequalityDimension();
483  int dim_x = getParameterDimension();
484  Eigen::MatrixXd dense_jacob(dim_ineq, dim_x);
485  computeDenseJacobianActiveInequalities(dense_jacob, weight);
486  jacobian = dense_jacob.sparseView();
487 }
488 
489 void OptimizationProblemInterface::computeDenseJacobians(Eigen::Ref<Eigen::VectorXd> gradient_non_lsq_obj,
490  Eigen::Ref<Eigen::MatrixXd> jacobian_lsq_obj, Eigen::Ref<Eigen::MatrixXd> jacobian_eq,
491  Eigen::Ref<Eigen::MatrixXd> jacobian_ineq, const double* multipliers_lsq_obj,
492  const double* multipliers_eq, const double* multipliers_ineq, bool active_ineq,
493  double active_ineq_weight)
494 {
495  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized, "OptimizationProblemInterface::computeDenseJacobians(): default implementation might be slow.");
496  int non_lsq_obj_dim = getNonLsqObjectiveDimension();
497  int lsq_obj_dim = getLsqObjectiveDimension();
498  int eq_dim = getEqualityDimension();
499  int ineq_dim = getInequalityDimension();
500  if (non_lsq_obj_dim)
501  {
502  computeGradientNonLsqObjective(gradient_non_lsq_obj);
503  }
504  if (lsq_obj_dim > 0)
505  {
506  computeDenseJacobianLsqObjective(jacobian_lsq_obj, multipliers_lsq_obj);
507  }
508  if (eq_dim > 0)
509  {
510  computeDenseJacobianEqualities(jacobian_eq, multipliers_eq);
511  }
512  if (ineq_dim > 0)
513  {
514  if (active_ineq)
515  computeDenseJacobianActiveInequalities(jacobian_ineq, active_ineq_weight);
516  else
517  computeDenseJacobianInequalities(jacobian_ineq, multipliers_ineq);
518  }
519 }
520 
521 void OptimizationProblemInterface::computeSparseJacobians(Eigen::SparseMatrix<double>& jacobian_lsq_obj, Eigen::SparseMatrix<double>& jacobian_eq,
522  Eigen::SparseMatrix<double>& jacobian_ineq, const double* multipliers_lsq_obj,
523  const double* multipliers_eq, const double* multipliers_ineq, bool active_ineq,
524  double active_ineq_weight)
525 {
526  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized, "OptimizationProblemInterface::computeSparseJacobians(): default implementation might be slow.");
527  computeSparseJacobianLsqObjective(jacobian_lsq_obj, multipliers_lsq_obj);
528  computeSparseJacobianEqualities(jacobian_eq, multipliers_eq);
529  if (active_ineq)
530  computeSparseJacobianActiveInequalities(jacobian_ineq, active_ineq_weight);
531  else
532  computeSparseJacobianInequalities(jacobian_ineq, multipliers_ineq);
533 }
534 
535 void OptimizationProblemInterface::computeSparseJacobiansNNZ(int& nnz_lsq_obj, int& nnz_eq, int& nnz_ineq)
536 {
537  nnz_lsq_obj = computeSparseJacobianLsqObjectiveNNZ();
538  nnz_eq = computeSparseJacobianEqualitiesNNZ();
539  nnz_ineq = computeSparseJacobianInequalitiesNNZ();
540 }
541 
542 void OptimizationProblemInterface::computeSparseJacobiansStructure(Eigen::Ref<Eigen::VectorXi> i_row_obj, Eigen::Ref<Eigen::VectorXi> j_col_obj,
543  Eigen::Ref<Eigen::VectorXi> i_row_eq, Eigen::Ref<Eigen::VectorXi> j_col_eq,
544  Eigen::Ref<Eigen::VectorXi> i_row_ineq, Eigen::Ref<Eigen::VectorXi> j_col_ineq)
545 {
546  computeSparseJacobianLsqObjectiveStructure(i_row_obj, j_col_obj);
547  computeSparseJacobianEqualitiesStructure(i_row_eq, j_col_eq);
548  computeSparseJacobianInequalitiesStructure(i_row_ineq, j_col_ineq);
549 }
550 
551 void OptimizationProblemInterface::computeSparseJacobiansValues(Eigen::Ref<Eigen::VectorXd> values_obj, Eigen::Ref<Eigen::VectorXd> values_eq,
552  Eigen::Ref<Eigen::VectorXd> values_ineq, const double* multipliers_obj,
553  const double* multipliers_eq, const double* multipliers_ineq, bool active_ineq,
554  double active_ineq_weight)
555 {
556  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
557  "OptimizationProblemInterface::computeSparseJacobiansValues(): default implementation might be slow.");
558  computeSparseJacobianLsqObjectiveValues(values_obj, multipliers_obj);
559  computeSparseJacobianEqualitiesValues(values_eq, multipliers_eq);
560  if (active_ineq)
561  computeSparseJacobianActiveInequalitiesValues(values_ineq, active_ineq_weight);
562  else
563  computeSparseJacobianInequalitiesValues(values_ineq, multipliers_ineq);
564 }
565 
566 void OptimizationProblemInterface::computeDenseJacobianFiniteCombinedBounds(Eigen::Ref<Eigen::MatrixXd> jacobian, double weight)
567 {
568  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
569  "OptimizationProblemInterface::computeDenseJacobianFiniteCombinedBounds(): default implementation might be slow.");
570  jacobian.setZero();
571 
572  int row_idx = 0;
573  for (int i = 0; i < getParameterDimension(); ++i)
574  {
575  // check if finite
576  double lb = getLowerBound(i);
577  double ub = getUpperBound(i);
578  if (lb > -CORBO_INF_DBL || ub < CORBO_INF_DBL)
579  {
580  double x = getParameterValue(i);
581  if (x < lb)
582  {
583  jacobian(row_idx, i) = -weight;
584  }
585  else if (x > ub)
586  {
587  jacobian(row_idx, i) = weight;
588  }
589  ++row_idx; // increase row index in jacobian
590  }
591  }
592 }
593 
594 int OptimizationProblemInterface::computeSparseJacobianFiniteCombinedBoundsNNZ()
595 {
596  // we have a single value per row
597  return finiteCombinedBoundsDimension();
598 }
599 
600 void OptimizationProblemInterface::computeSparseJacobianFiniteCombinedBoundsStructure(Eigen::Ref<Eigen::VectorXi> i_row,
601  Eigen::Ref<Eigen::VectorXi> j_col)
602 {
603  PRINT_DEBUG_COND_ONCE(
604  _warn_if_not_specialized,
605  "OptimizationProblemInterface::computeSparseJacobianFiniteCombinedBoundsStructure(): default implementation might be slow.");
606  // we have a single value per row
607  int jac_row_idx = 0;
608  for (int i = 0; i < getParameterDimension(); ++i)
609  {
610  // check if finite
611  if (getLowerBound(i) > -CORBO_INF_DBL || getUpperBound(i) < CORBO_INF_DBL)
612  {
613  i_row[jac_row_idx] = jac_row_idx;
614  j_col[jac_row_idx] = i;
615  ++jac_row_idx; // increase row index in jacobian
616  }
617  }
618 }
619 void OptimizationProblemInterface::computeSparseJacobianFiniteCombinedBoundsValues(Eigen::Ref<Eigen::VectorXd> values, double weight)
620 {
621  int jac_row_idx = 0;
622  for (int i = 0; i < getParameterDimension(); ++i)
623  {
624  // check if finite
625  double lb = getLowerBound(i);
626  double ub = getUpperBound(i);
627  if (lb > -CORBO_INF_DBL || ub < CORBO_INF_DBL)
628  {
629  double x = getParameterValue(i);
630  if (x < lb)
631  {
632  values[jac_row_idx] = -weight;
633  }
634  else if (x > ub)
635  {
636  values[jac_row_idx] = weight;
637  }
638  else
639  {
640  values[jac_row_idx] = 0.0;
641  }
642  ++jac_row_idx; // increase row index in jacobian
643  }
644  }
645 }
646 
647 void OptimizationProblemInterface::computeSparseJacobianFiniteCombinedBounds(Eigen::SparseMatrix<double>& jacobian, double weight)
648 {
649  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
650  "OptimizationProblemInterface::computeSparseJacobianFiniteCombinedBounds(): default implementation might be slow.");
651  // jacobian.reserve(finiteCombinedBoundsDimension());
652  jacobian.setZero();
653 
654  // we have a single value per row
655  int jac_row_idx = 0;
656  for (int i = 0; i < getParameterDimension(); ++i)
657  {
658  // check if finite
659  double lb = getLowerBound(i);
660  double ub = getUpperBound(i);
661  if (lb > -CORBO_INF_DBL || ub < CORBO_INF_DBL)
662  {
663  double x = getParameterValue(i);
664  if (x < lb)
665  {
666  jacobian.insert(jac_row_idx, i) = -weight;
667  }
668  else if (x > ub)
669  {
670  jacobian.insert(jac_row_idx, i) = weight;
671  }
672  // else
673  //{
674  // jacobian.insert(jac_row_idx, i) = 0.0;
675  //}
676  ++jac_row_idx; // increase row index in jacobian
677  }
678  }
679  // jacobian.makeCompressed();
680 }
681 
682 void OptimizationProblemInterface::computeDenseJacobianFiniteCombinedBoundsIdentity(Eigen::Ref<Eigen::MatrixXd> jacobian)
683 {
684  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
685  "OptimizationProblemInterface::computeDenseJacobianFiniteCombinedBoundsIdentity(): default implementation might be slow.");
686  jacobian.setZero();
687 
688  int row_idx = 0;
689  for (int i = 0; i < getParameterDimension(); ++i)
690  {
691  // check if finite
692  double lb = getLowerBound(i);
693  double ub = getUpperBound(i);
694  if (lb > -CORBO_INF_DBL || ub < CORBO_INF_DBL)
695  {
696  jacobian(row_idx, i) = 1;
697  ++row_idx; // increase row index in jacobian
698  }
699  }
700 }
701 
702 void OptimizationProblemInterface::computeCombinedSparseJacobian(Eigen::SparseMatrix<double>& jacobian, bool objective_lsq, bool equality,
703  bool inequality, bool finite_combined_bounds, bool active_ineq, double weight_eq,
704  double weight_ineq, double weight_bounds, const Eigen::VectorXd* values,
705  const Eigen::VectorXi* col_nnz)
706 {
707  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
708  "OptimizationProblemInterface::computeCombinedSparseJacobian(): default implementation might be slow.");
709  int no_cols = getParameterDimension();
710  int no_rows = 0;
711  // int nnz = 0;
712  int dim_obj = 0;
713  int dim_eq = 0;
714  int dim_ineq = 0;
715  int dim_bounds = 0;
716  if (objective_lsq)
717  {
718  dim_obj = getLsqObjectiveDimension();
719  no_rows += dim_obj;
720  // nnz += computeSparseJacobianObjectiveNNZ();
721  }
722  if (equality)
723  {
724  dim_eq = getEqualityDimension();
725  no_rows += dim_eq;
726  // nnz += computeSparseJacobianEqualitiesNNZ();
727  }
728  if (inequality)
729  {
730  dim_ineq = getInequalityDimension();
731  no_rows += dim_ineq;
732  // nnz += computeSparseJacobianInequalitiesNNZ();
733  }
734  if (finite_combined_bounds)
735  {
736  dim_bounds = finiteCombinedBoundsDimension();
737  no_rows += dim_bounds;
738  // nnz += computeSparseJacobianFiniteCombinedBoundsNNZ();
739  }
740 
741  assert(jacobian.rows() == no_rows);
742  assert(jacobian.cols() == no_cols);
743 
744  jacobian.setZero();
745  // jacobian.reserve(nnz);
746 
747  // default implementation, just concatenate submatrices
748  // this is inefficient due to the temporary matrices
749  // TODO(roesmann) maybe we should use row-major format for vertical concatenation?
750  // e.g., https://stackoverflow.com/questions/41756428/concatenate-sparse-matrix-eigen
751 
752  int row_offset = 0;
753 
754  if (objective_lsq && dim_obj > 0)
755  {
756  Eigen::SparseMatrix<double> temp_jacob(dim_obj, no_cols);
757  computeSparseJacobianLsqObjective(temp_jacob);
758  for (int k = 0; k < temp_jacob.outerSize(); ++k)
759  {
760  for (Eigen::SparseMatrix<double>::InnerIterator it(temp_jacob, k); it; ++it)
761  {
762  jacobian.insert(it.row(), it.col()) = it.value();
763  }
764  }
765  row_offset += dim_obj;
766  }
767 
768  if (equality && dim_eq > 0)
769  {
770  Eigen::SparseMatrix<double> temp_jacob(dim_eq, no_cols);
771  computeSparseJacobianEqualities(temp_jacob);
772  for (int k = 0; k < temp_jacob.outerSize(); ++k)
773  {
774  for (Eigen::SparseMatrix<double>::InnerIterator it(temp_jacob, k); it; ++it)
775  {
776  jacobian.insert(row_offset + it.row(), it.col()) = it.value() * weight_eq;
777  }
778  }
779  row_offset += dim_eq;
780  }
781 
782  if (inequality && dim_ineq > 0)
783  {
784  Eigen::SparseMatrix<double> temp_jacob(dim_ineq, no_cols);
785  if (active_ineq)
786  computeSparseJacobianActiveInequalities(temp_jacob);
787  else
788  computeSparseJacobianInequalities(temp_jacob);
789  for (int k = 0; k < temp_jacob.outerSize(); ++k)
790  {
791  for (Eigen::SparseMatrix<double>::InnerIterator it(temp_jacob, k); it; ++it)
792  {
793  jacobian.insert(row_offset + it.row(), it.col()) = it.value() * weight_ineq;
794  }
795  }
796  row_offset += dim_ineq;
797  }
798 
799  if (finite_combined_bounds && dim_bounds > 0)
800  {
801  Eigen::SparseMatrix<double> temp_jacob(dim_bounds, no_cols);
802  computeSparseJacobianFiniteCombinedBounds(temp_jacob);
803  for (int k = 0; k < temp_jacob.outerSize(); ++k)
804  {
805  for (Eigen::SparseMatrix<double>::InnerIterator it(temp_jacob, k); it; ++it)
806  {
807  jacobian.insert(row_offset + it.row(), it.col()) = it.value() * weight_bounds;
808  }
809  }
810  }
811 }
812 
813 int OptimizationProblemInterface::computeCombinedSparseJacobiansNNZ(bool objective_lsq, bool equality, bool inequality)
814 {
815  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
816  "OptimizationProblemInterface::computeCombinedSparseJacobiansNNZ(): default implementation might be slow.");
817  int nnz = 0;
818  // worst case nnz
819  if (objective_lsq) nnz += getLsqObjectiveDimension() * getParameterDimension();
820  if (equality) nnz += getEqualityDimension() * getParameterDimension();
821  if (inequality) nnz += getInequalityDimension() * getParameterDimension();
822  return nnz;
823 }
824 void OptimizationProblemInterface::computeCombinedSparseJacobiansStructure(Eigen::Ref<Eigen::VectorXi> i_row, Eigen::Ref<Eigen::VectorXi> j_col,
825  bool objective_lsq, bool equality, bool inequality)
826 {
827  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
828  "OptimizationProblemInterface::computeCombinedSparseJacobiansStructure(): default implementation might be slow.");
829  // worst-case implementation
830  int nz_idx = 0;
831  int row_idx = 0;
832  if (objective_lsq)
833  {
834  for (int i = 0; i < getLsqObjectiveDimension(); ++i, ++row_idx)
835  {
836  for (int j = 0; j < getParameterDimension(); ++j)
837  {
838  i_row[nz_idx] = row_idx;
839  j_col[nz_idx] = j;
840  ++nz_idx;
841  }
842  }
843  }
844 
845  if (equality)
846  {
847  for (int i = 0; i < getEqualityDimension(); ++i, ++row_idx)
848  {
849  for (int j = 0; j < getParameterDimension(); ++j)
850  {
851  i_row[nz_idx] = row_idx;
852  j_col[nz_idx] = j;
853  ++nz_idx;
854  }
855  }
856  }
857 
858  if (inequality)
859  {
860  for (int i = 0; i < getInequalityDimension(); ++i, ++row_idx)
861  {
862  for (int j = 0; j < getParameterDimension(); ++j)
863  {
864  i_row[nz_idx] = row_idx;
865  j_col[nz_idx] = j;
866  ++nz_idx;
867  }
868  }
869  }
870 }
871 
872 void OptimizationProblemInterface::computeCombinedSparseJacobiansValues(Eigen::Ref<Eigen::VectorXd> values, bool objective_lsq, bool equality,
873  bool inequality, const double* multipliers_obj, const double* multipliers_eq,
874  const double* multipliers_ineq)
875 {
876  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
877  "OptimizationProblemInterface::computeCombinedSparseJacobiansValues(): default implementation might be slow.");
878  int dim_x = getParameterDimension();
879  int nz_idx = 0;
880 
881  // worst-case implementation
882 
883  if (objective_lsq)
884  {
885  int dim_obj = getLsqObjectiveDimension();
886  Eigen::MatrixXd dense_jacob(dim_obj, dim_x);
887  computeDenseJacobianLsqObjective(dense_jacob, multipliers_obj);
888 
889  for (int i = 0; i < dim_obj; ++i)
890  {
891  for (int j = 0; j < getParameterDimension(); ++j)
892  {
893  values[nz_idx] = dense_jacob(i, j);
894  ++nz_idx;
895  }
896  }
897  }
898 
899  if (equality)
900  {
901  int dim_eq = getEqualityDimension();
902  Eigen::MatrixXd dense_jacob(dim_eq, dim_x);
903  computeDenseJacobianEqualities(dense_jacob, multipliers_eq);
904 
905  for (int i = 0; i < dim_eq; ++i)
906  {
907  for (int j = 0; j < getParameterDimension(); ++j)
908  {
909  values[nz_idx] = dense_jacob(i, j);
910  ++nz_idx;
911  }
912  }
913  }
914 
915  if (inequality)
916  {
917  int dim_ineq = getInequalityDimension();
918  Eigen::MatrixXd dense_jacob(dim_ineq, dim_x);
919  computeDenseJacobianInequalities(dense_jacob, multipliers_ineq);
920 
921  for (int i = 0; i < dim_ineq; ++i)
922  {
923  for (int j = 0; j < getParameterDimension(); ++j)
924  {
925  values[nz_idx] = dense_jacob(i, j);
926  ++nz_idx;
927  }
928  }
929  }
930 }
931 
932 void OptimizationProblemInterface::computeGradientObjectiveAndCombinedSparseJacobiansValues(Eigen::Ref<Eigen::VectorXd> gradient,
933  Eigen::Ref<Eigen::VectorXd> jac_values, bool equality,
934  bool inequality, const double* multipliers_eq,
935  const double* multipliers_ineq)
936 {
937  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
938  "OptimizationProblemInterface::computeGradientObjectiveAndCombinedSparseJacobiansValues(): default implementation might be slow.");
939  computeGradientObjective(gradient);
940  computeCombinedSparseJacobiansValues(jac_values, false, equality, inequality, nullptr, multipliers_eq, multipliers_ineq);
941 }
942 
943 void OptimizationProblemInterface::computeSparseJacobianTwoSideBoundedLinearForm(Eigen::SparseMatrix<double, Eigen::ColMajor, long long>& A,
944  bool include_finite_bounds, const Eigen::VectorXi* col_nnz)
945 {
946  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
947  "OptimizationProblemInterface::computeSparseJacobianTwoSideBoundedLinearForm(): default implementation might be slow.");
948 
949  int no_cols = getParameterDimension();
950  // int nnz = 0;
951  int dim_eq = getEqualityDimension();
952  int dim_ineq = getInequalityDimension();
953  int dim_bounds = 0;
954 
955  int no_rows = dim_eq + dim_ineq;
956 
957  if (include_finite_bounds)
958  {
959  dim_bounds = finiteCombinedBoundsDimension();
960  no_rows += dim_bounds;
961  // nnz += computeSparseJacobianFiniteCombinedBoundsNNZ();
962  }
963 
964  assert(A.rows() == no_rows);
965  assert(A.cols() == no_cols);
966 
967  A.setZero();
968  if (col_nnz) A.reserve(*col_nnz);
969  // jacobian.reserve(nnz);
970 
971  // default implementation, just concatenate submatrices
972  // this is inefficient due to the temporary matrices
973  // TODO(roesmann) maybe we should use row-major format for vertical concatenation?
974  // e.g., https://stackoverflow.com/questions/41756428/concatenate-sparse-matrix-eigen
975 
976  int row_offset = 0;
977 
978  if (dim_eq > 0)
979  {
980  Eigen::SparseMatrix<double> temp_jacob(dim_eq, no_cols);
981  computeSparseJacobianEqualities(temp_jacob);
982  for (int k = 0; k < temp_jacob.outerSize(); ++k)
983  {
984  for (Eigen::SparseMatrix<double>::InnerIterator it(temp_jacob, k); it; ++it)
985  {
986  A.insert(row_offset + it.row(), it.col()) = it.value();
987  }
988  }
989  row_offset += dim_eq;
990  }
991 
992  if (dim_ineq > 0)
993  {
994  Eigen::SparseMatrix<double> temp_jacob(dim_ineq, no_cols);
995  computeSparseJacobianInequalities(temp_jacob);
996  for (int k = 0; k < temp_jacob.outerSize(); ++k)
997  {
998  for (Eigen::SparseMatrix<double>::InnerIterator it(temp_jacob, k); it; ++it)
999  {
1000  A.insert(row_offset + it.row(), it.col()) = it.value();
1001  }
1002  }
1003  row_offset += dim_ineq;
1004  }
1005 
1006  if (include_finite_bounds && dim_bounds > 0)
1007  {
1008  // TODO(roesmann): inefficient
1009  Eigen::MatrixXd temp_jacob_dense(dim_bounds, no_cols);
1010  computeDenseJacobianFiniteCombinedBoundsIdentity(temp_jacob_dense);
1011  Eigen::SparseMatrix<double> temp_jacob = temp_jacob_dense.sparseView();
1012  for (int k = 0; k < temp_jacob.outerSize(); ++k)
1013  {
1014  for (Eigen::SparseMatrix<double>::InnerIterator it(temp_jacob, k); it; ++it)
1015  {
1016  A.insert(row_offset + it.row(), it.col()) = it.value();
1017  }
1018  }
1019  }
1020 }
1021 
1022 void OptimizationProblemInterface::computeSparseJacobianTwoSideBoundedLinearFormNNZPerColumn(Eigen::Ref<Eigen::VectorXi> col_nnz,
1023  bool include_finite_bounds)
1024 {
1025  PRINT_DEBUG_COND_ONCE(
1026  _warn_if_not_specialized,
1027  "OptimizationProblemInterface::computeSparseJacobianTwoSideBoundedLinearFormNNZPerColumn(): default implementation might be slow.");
1028 
1029  assert(col_nnz.size() == getParameterDimension());
1030 
1031  // worst case nnz
1032  int dim_eq = getEqualityDimension();
1033  int dim_ineq = getInequalityDimension();
1034  int dim_bounds = include_finite_bounds ? finiteCombinedBoundsDimension() : 0;
1035  col_nnz.setConstant(dim_eq + dim_ineq + dim_bounds); // full matrix
1036 }
1037 
1038 int OptimizationProblemInterface::computeSparseJacobianTwoSideBoundedLinearFormNNZ(bool include_finite_bounds)
1039 {
1040  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
1041  "OptimizationProblemInterface::computeSparseJacobianTwoSideBoundedLinearFormNNZ(): default implementation might be slow.");
1042  int nnz = 0;
1043  // worst case nnz
1044  nnz += getEqualityDimension() * getParameterDimension();
1045  nnz += getInequalityDimension() * getParameterDimension();
1046  if (include_finite_bounds) nnz += finiteCombinedBoundsDimension();
1047  return nnz;
1048 }
1049 
1050 void OptimizationProblemInterface::computeSparseJacobianTwoSideBoundedLinearFormStructure(Eigen::Ref<Eigen::VectorXi> i_row,
1051  Eigen::Ref<Eigen::VectorXi> j_col,
1052  bool include_finite_bounds)
1053 {
1054  PRINT_DEBUG_COND_ONCE(
1055  _warn_if_not_specialized,
1056  "OptimizationProblemInterface::computeSparseJacobianTwoSideBoundedLinearFormStructure(): default implementation might be slow.");
1057  // worst-case implementation
1058  int nz_idx = 0;
1059  int row_idx = 0;
1060 
1061  for (int i = 0; i < getEqualityDimension(); ++i, ++row_idx)
1062  {
1063  for (int j = 0; j < getParameterDimension(); ++j)
1064  {
1065  i_row[nz_idx] = row_idx;
1066  j_col[nz_idx] = j;
1067  ++nz_idx;
1068  }
1069  }
1070 
1071  for (int i = 0; i < getInequalityDimension(); ++i, ++row_idx)
1072  {
1073  for (int j = 0; j < getParameterDimension(); ++j)
1074  {
1075  i_row[nz_idx] = row_idx;
1076  j_col[nz_idx] = j;
1077  ++nz_idx;
1078  }
1079  }
1080 
1081  if (include_finite_bounds)
1082  {
1083  for (int i = 0; i < getParameterDimension(); ++i)
1084  {
1085  // check if finite
1086  double lb = getLowerBound(i);
1087  double ub = getUpperBound(i);
1088  if (lb > -CORBO_INF_DBL || ub < CORBO_INF_DBL)
1089  {
1090  i_row[nz_idx] = row_idx;
1091  j_col[nz_idx] = i;
1092  ++nz_idx;
1093  ++row_idx; // increase row index in jacobian
1094  }
1095  }
1096  }
1097 }
1098 
1099 void OptimizationProblemInterface::computeSparseJacobianTwoSideBoundedLinearFormValues(Eigen::Ref<Eigen::VectorXd> values, bool include_finite_bounds)
1100 {
1101  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
1102  "OptimizationProblemInterface::computeSparseJacobianTwoSideBoundedLinearFormValues(): default implementation might be slow.");
1103  int dim_x = getParameterDimension();
1104  int nz_idx = 0;
1105 
1106  // worst-case implementation
1107 
1108  int dim_eq = getEqualityDimension();
1109  Eigen::MatrixXd dense_jacob_eq(dim_eq, dim_x);
1110  computeDenseJacobianEqualities(dense_jacob_eq);
1111 
1112  for (int i = 0; i < dim_eq; ++i)
1113  {
1114  for (int j = 0; j < getParameterDimension(); ++j)
1115  {
1116  values[nz_idx] = dense_jacob_eq(i, j);
1117  ++nz_idx;
1118  }
1119  }
1120 
1121  int dim_ineq = getInequalityDimension();
1122  Eigen::MatrixXd dense_jacob_ineq(dim_ineq, dim_x);
1123  computeDenseJacobianInequalities(dense_jacob_ineq);
1124 
1125  for (int i = 0; i < dim_ineq; ++i)
1126  {
1127  for (int j = 0; j < getParameterDimension(); ++j)
1128  {
1129  values[nz_idx] = dense_jacob_ineq(i, j);
1130  ++nz_idx;
1131  }
1132  }
1133 
1134  int dim_bounds = finiteCombinedBoundsDimension();
1135  if (include_finite_bounds && dim_bounds > 0)
1136  {
1137  values.tail(dim_bounds).setOnes();
1138  }
1139 }
1140 
1141 void OptimizationProblemInterface::computeBoundsForTwoSideBoundedLinearForm(Eigen::Ref<Eigen::VectorXd> lbA, Eigen::Ref<Eigen::VectorXd> ubA,
1142  bool include_finite_bounds)
1143 {
1144  int dim_eq = getEqualityDimension();
1145  int dim_ineq = getInequalityDimension();
1146  int dim_bounds = include_finite_bounds ? finiteCombinedBoundsDimension() : 0;
1147 
1148  assert(lbA.size() == dim_eq + dim_ineq + dim_bounds);
1149  assert(ubA.size() == dim_eq + dim_ineq + dim_bounds);
1150 
1151  if (dim_eq > 0)
1152  {
1153  computeValuesEquality(lbA.head(dim_eq));
1154  lbA.head(dim_eq) *= -1;
1155  ubA.head(dim_eq) = lbA.head(dim_eq);
1156  }
1157 
1158  if (dim_ineq > 0)
1159  {
1160  lbA.segment(dim_eq, dim_ineq).setConstant(-CORBO_INF_DBL); // should be smaller than -OSQP_INFTY
1161  computeValuesInequality(ubA.segment(dim_eq, dim_ineq));
1162  ubA.segment(dim_eq, dim_ineq) *= -1;
1163  }
1164 
1165  if (dim_bounds > 0)
1166  {
1167  double lb, ub, x;
1168  int idx = dim_eq + dim_ineq;
1169  for (int i = 0; i < getParameterDimension(); ++i)
1170  {
1171  lb = getLowerBound(i);
1172  ub = getUpperBound(i);
1173  if (lb > -CORBO_INF_DBL || ub < CORBO_INF_DBL)
1174  {
1175  assert(idx < dim_eq + dim_ineq + dim_bounds);
1176  x = getParameterValue(i);
1177  lbA(idx) = lb - x;
1178  ubA(idx) = ub - x;
1179  ++idx;
1180  }
1181  }
1182  }
1183 }
1184 
1185 void OptimizationProblemInterface::computeDenseHessianObjective(const Eigen::Ref<const Eigen::MatrixXd>& jacobian,
1186  Eigen::Ref<Eigen::MatrixXd> hessian, const double* multipliers, bool jacob_scaled)
1187 {
1188  PRINT_ERROR("OptimizationProblemInterface::computeDenseHessianObjective(): NOT_YET_IMPLEMENTED");
1189 }
1190 
1191 void OptimizationProblemInterface::computeDenseHessianObjective(Eigen::Ref<Eigen::MatrixXd> hessian, double multiplier)
1192 {
1193  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
1194  "OptimizationProblemInterface::computeDenseHessianObjective(): default implementation might be slow.");
1195  if (getObjectiveDimension() == 0) return;
1196 
1197  int dim_x = getParameterDimension();
1198  assert(hessian.rows() == dim_x);
1199  assert(hessian.cols() == dim_x);
1200 
1201  // TODO(roesmann) generic interface
1202  CentralDifferences diff;
1203  auto inc = [this](int idx, double inc) { applyIncrement(idx, inc); };
1204  auto eval = [this](Eigen::Ref<Eigen::VectorXd> values) { values[0] = computeValueObjective(); };
1205  diff.computeHessian(inc, eval, 1, hessian, nullptr);
1206  if (multiplier != 1.0) hessian *= multiplier;
1207 }
1208 
1209 int OptimizationProblemInterface::computeSparseHessianObjectiveNNZ(bool lower_part_only)
1210 {
1211  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
1212  "OptimizationProblemInterface::computeSparseHessianObjectiveNNZ(): default implementation might be slow.");
1213  // the default implementation is not aware of any sparse structure, hence we assume worst case
1214  if (lower_part_only) return (int)(0.5 * getParameterDimension() * (getParameterDimension() + 1));
1215  return getParameterDimension() * getParameterDimension();
1216 }
1217 void OptimizationProblemInterface::computeSparseHessianObjectiveStructure(Eigen::Ref<Eigen::VectorXi> i_row, Eigen::Ref<Eigen::VectorXi> j_col,
1218  bool lower_part_only)
1219 {
1220  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
1221  "OptimizationProblemInterface::computeSparseHessianObjectiveStructure(): default implementation might be slow.");
1222  if (getObjectiveDimension() == 0) return;
1223 
1224  // worst-case implementation
1225  int nz_idx = 0;
1226  for (int col = 0; col < getParameterDimension(); ++col)
1227  {
1228  int row_start = lower_part_only ? col : 0;
1229  for (int row = row_start; row < getParameterDimension(); ++row)
1230  {
1231  i_row[nz_idx] = row;
1232  j_col[nz_idx] = col;
1233  ++nz_idx;
1234  }
1235  }
1236 }
1237 void OptimizationProblemInterface::computeSparseHessianObjectiveValues(Eigen::Ref<Eigen::VectorXd> values, double multiplier, bool lower_part_only)
1238 {
1239  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
1240  "OptimizationProblemInterface::computeSparseHessianObjectiveValues(): default implementation might be slow.");
1241  if (getObjectiveDimension() == 0) return;
1242 
1243  // worst-case implementation
1244  int dim_x = getParameterDimension();
1245  Eigen::MatrixXd dense_hessian(dim_x, dim_x);
1246  computeDenseHessianObjective(dense_hessian);
1247 
1248  int nz_idx = 0;
1249  for (int col = 0; col < getParameterDimension(); ++col)
1250  {
1251  int row_start = lower_part_only ? col : 0;
1252  for (int row = row_start; row < getParameterDimension(); ++row)
1253  {
1254  values[nz_idx] = dense_hessian(row, col);
1255  ++nz_idx;
1256  }
1257  }
1258 
1259  if (multiplier != 1.0) values *= multiplier;
1260 }
1261 
1262 void OptimizationProblemInterface::computeSparseHessianObjective(Eigen::SparseMatrix<double>& hessian, double multiplier)
1263 {
1264  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
1265  "OptimizationProblemInterface::computeSparseHessianObjective(): default implementation might be slow.");
1266  if (getObjectiveDimension() == 0) return;
1267 
1268  int dim_x = getParameterDimension();
1269  Eigen::MatrixXd dense_hessian(dim_x, dim_x);
1270  computeDenseHessianObjective(dense_hessian);
1271  hessian = dense_hessian.sparseView();
1272 
1273  if (multiplier != 1.0) hessian *= multiplier;
1274 }
1275 
1276 void OptimizationProblemInterface::computeSparseHessianObjectiveLL(Eigen::SparseMatrix<double, Eigen::ColMajor, long long>& H,
1277  const Eigen::VectorXi* col_nnz, bool upper_part_only)
1278 {
1279  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
1280  "OptimizationProblemInterface::computeSparseHessianObjectiveLL(): default implementation might be slow.");
1281 
1282  int dim_x = getParameterDimension();
1283 
1284  H.setZero();
1285 
1286  // TODO(roesmann): can be parallelized
1287 
1288  if (upper_part_only)
1289  {
1290  // we cannot use .selfadjointView<>() with mixed datatypes (SparseMatrix with long long and int indices)
1291  Eigen::SparseMatrix<double> H_obj(dim_x, dim_x);
1292  computeSparseHessianObjective(H_obj, 1.0);
1293 
1293 
1294  Eigen::SparseMatrix<double, Eigen::ColMajor, long long> H_temp(dim_x, dim_x);
1295  H_temp = H_obj;
1296  H.selfadjointView<Eigen::Upper>() = H_temp.selfadjointView<Eigen::Upper>();
1297  }
1298  else
1299  {
1300  Eigen::SparseMatrix<double> H_obj(dim_x, dim_x);
1301  computeSparseHessianObjective(H_obj, 1.0);
1302  H = H_obj; // different format (TODO(roesmann): templates...)
1303  }
1304 }
1305 
1306 void OptimizationProblemInterface::computeSparseHessianObjectiveNNZperCol(Eigen::Ref<Eigen::VectorXi> col_nnz, bool upper_part_only)
1307 {
1308  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
1309  "OptimizationProblemInterface::computeSparseHessianObjectiveNNZperCol(): default implementation might be slow.");
1310 
1311  assert(col_nnz.size() == getParameterDimension());
1312 
1313  if (upper_part_only)
1314  {
1315  for (int i = 0; i < col_nnz.size(); ++i)
1316  {
1317  col_nnz(i) = i + 1; // worst case nnz
1318  }
1319  }
1320  else
1321  col_nnz.setConstant(getParameterDimension()); // worst case nnz
1322 }
1323 
1324 void OptimizationProblemInterface::computeDenseHessianEqualities(Eigen::Ref<Eigen::MatrixXd> hessian, const double* multipliers)
1325 {
1326  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
1327  "OptimizationProblemInterface::computeDenseHessianEqualities(): default implementation might be slow.");
1328  int dim_eq = getEqualityDimension();
1329  int dim_x = getParameterDimension();
1330 
1331  if (dim_eq == 0) return;
1332 
1333  assert(hessian.rows() == dim_x);
1334  assert(hessian.cols() == dim_x);
1335 
1336  // TODO(roesmann) generic interface
1337  CentralDifferences diff;
1338  auto inc = [this](int idx, double inc) { applyIncrement(idx, inc); };
1339  auto eval = [this](Eigen::Ref<Eigen::VectorXd> values) { computeValuesEquality(values); };
1340  diff.computeHessian(inc, eval, dim_eq, hessian, multipliers);
1341 }
1342 
1343 int OptimizationProblemInterface::computeSparseHessianEqualitiesNNZ(bool lower_part_only)
1344 {
1345  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
1346  "OptimizationProblemInterface::computeSparseHessianEqualitiesNNZ(): default implementation might be slow.");
1347  // the default implementation is not aware of any sparse structure, hence we assume worst case
1348  if (lower_part_only) return (int)(0.5 * getParameterDimension() * (getParameterDimension() + 1));
1349  return getParameterDimension() * getParameterDimension();
1350 }
1351 void OptimizationProblemInterface::computeSparseHessianEqualitiesStructure(Eigen::Ref<Eigen::VectorXi> i_row, Eigen::Ref<Eigen::VectorXi> j_col,
1352  bool lower_part_only)
1353 {
1354  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
1355  "OptimizationProblemInterface::computeSparseHessianEqualitiesStructure(): default implementation might be slow.");
1356  // worst-case implementation
1357  int nz_idx = 0;
1358  for (int col = 0; col < getParameterDimension(); ++col)
1359  {
1360  int row_start = lower_part_only ? col : 0;
1361  for (int row = row_start; row < getParameterDimension(); ++row)
1362  {
1363  i_row[nz_idx] = row;
1364  j_col[nz_idx] = col;
1365  ++nz_idx;
1366  }
1367  }
1368 }
1369 void OptimizationProblemInterface::computeSparseHessianEqualitiesValues(Eigen::Ref<Eigen::VectorXd> values, const double* multipliers,
1370  bool lower_part_only)
1371 {
1372  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
1373  "OptimizationProblemInterface::computeSparseHessianEqualitiesValues(): default implementation might be slow.");
1374  // worst-case implementation
1375  int dim_x = getParameterDimension();
1376  Eigen::MatrixXd dense_hessian(dim_x, dim_x);
1377  computeDenseHessianEqualities(dense_hessian, multipliers);
1378 
1379  int nz_idx = 0;
1380  for (int col = 0; col < getParameterDimension(); ++col)
1381  {
1382  int row_start = lower_part_only ? col : 0;
1383  for (int row = row_start; row < getParameterDimension(); ++row)
1384  {
1385  values[nz_idx] = dense_hessian(row, col);
1386  ++nz_idx;
1387  }
1388  }
1389 }
1390 
1391 void OptimizationProblemInterface::computeSparseHessianEqualities(Eigen::SparseMatrix<double>& hessian, const double* multipliers)
1392 {
1393  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
1394  "OptimizationProblemInterface::computeSparseHessianEqualities(): default implementation might be slow.");
1395  int dim_x = getParameterDimension();
1396  Eigen::MatrixXd dense_hessian(dim_x, dim_x);
1397  computeDenseHessianEqualities(dense_hessian, multipliers);
1398  hessian = dense_hessian.sparseView();
1399 }
1400 
1401 void OptimizationProblemInterface::computeDenseHessianInequalities(Eigen::Ref<Eigen::MatrixXd> hessian, const double* multipliers)
1402 {
1403  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
1404  "OptimizationProblemInterface::computeDenseHessianInequalities(): default implementation might be slow.");
1405  int dim_ineq = getInequalityDimension();
1406  int dim_x = getParameterDimension();
1407 
1408  if (dim_ineq == 0) return;
1409 
1410  assert(hessian.rows() == dim_x);
1411  assert(hessian.cols() == dim_x);
1412 
1413  // TODO(roesmann) generic interface
1414  CentralDifferences diff;
1415  auto inc = [this](int idx, double inc) { applyIncrement(idx, inc); };
1416  auto eval = [this](Eigen::Ref<Eigen::VectorXd> values) { computeValuesInequality(values); };
1417  diff.computeHessian(inc, eval, dim_ineq, hessian, multipliers);
1418 }
1419 
1420 int OptimizationProblemInterface::computeSparseHessianInequalitiesNNZ(bool lower_part_only)
1421 {
1422  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
1423  "OptimizationProblemInterface::computeSparseHessianInequalitiesNNZ(): default implementation might be slow.");
1424  // the default implementation is not aware of any sparse structure, hence we assume worst case
1425  if (lower_part_only) return (int)(0.5 * getParameterDimension() * (getParameterDimension() + 1));
1426  return getParameterDimension() * getParameterDimension();
1427 }
1428 void OptimizationProblemInterface::computeSparseHessianInequalitiesStructure(Eigen::Ref<Eigen::VectorXi> i_row, Eigen::Ref<Eigen::VectorXi> j_col,
1429  bool lower_part_only)
1430 {
1431  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
1432  "OptimizationProblemInterface::computeSparseHessianInequalitiesStructure(): default implementation might be slow.");
1433  // worst-case implementation
1434  int nz_idx = 0;
1435  for (int col = 0; col < getParameterDimension(); ++col)
1436  {
1437  int row_start = lower_part_only ? col : 0;
1438  for (int row = row_start; row < getParameterDimension(); ++row)
1439  {
1440  i_row[nz_idx] = row;
1441  j_col[nz_idx] = col;
1442  ++nz_idx;
1443  }
1444  }
1445 }
1446 void OptimizationProblemInterface::computeSparseHessianInequalitiesValues(Eigen::Ref<Eigen::VectorXd> values, const double* multipliers,
1447  bool lower_part_only)
1448 {
1449  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
1450  "OptimizationProblemInterface::computeSparseHessianInequalitiesValues(): default implementation might be slow.");
1451  // worst-case implementation
1452  int dim_x = getParameterDimension();
1453  Eigen::MatrixXd dense_hessian(dim_x, dim_x);
1454  computeDenseHessianInequalities(dense_hessian, multipliers);
1455 
1456  int nz_idx = 0;
1457  for (int col = 0; col < getParameterDimension(); ++col)
1458  {
1459  int row_start = lower_part_only ? col : 0;
1460  for (int row = row_start; row < getParameterDimension(); ++row)
1461  {
1462  values[nz_idx] = dense_hessian(row, col);
1463  ++nz_idx;
1464  }
1465  }
1466 }
1467 
1468 void OptimizationProblemInterface::computeSparseHessianInequalities(Eigen::SparseMatrix<double>& hessian, const double* multipliers)
1469 {
1470  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
1471  "OptimizationProblemInterface::computeSparseHessianInequalities(): default implementation might be slow.");
1472  int dim_x = getParameterDimension();
1473  Eigen::MatrixXd dense_hessian(dim_x, dim_x);
1474  computeDenseHessianInequalities(dense_hessian, multipliers);
1475  hessian = dense_hessian.sparseView();
1476 }
1477 
1478 // void OptimizationProblemInterface::computeDenseJacobianHessianObjective(Eigen::Ref<Eigen::MatrixXd> jacobian, Eigen::Ref<Eigen::MatrixXd> hessian,
1479 // const double* multipliers)
1480 //{
1481 // computeDenseJacobianLsqObjective(jacobian, multipliers);
1482 // computeDenseHessianObjective(hessian, multipliers);
1483 //}
1484 // void OptimizationProblemInterface::computeDenseJacobianHessianEqualities(Eigen::Ref<Eigen::MatrixXd> jacobian, Eigen::Ref<Eigen::MatrixXd> hessian,
1485 // const double* multipliers)
1486 //{
1487 // computeDenseJacobianEqualities(jacobian, multipliers);
1488 // computeDenseHessianEqualities(hessian, multipliers);
1489 //}
1490 // void OptimizationProblemInterface::computeDenseJacobianHessianInequalities(Eigen::Ref<Eigen::MatrixXd> jacobian, Eigen::Ref<Eigen::MatrixXd>
1491 // hessian,
1492 // const double* multipliers)
1493 //{
1494 // computeDenseJacobianInequalities(jacobian, multipliers);
1495 // computeDenseHessianInequalities(hessian, multipliers);
1496 //}
1497 
1498 void OptimizationProblemInterface::computeDenseHessians(Eigen::Ref<Eigen::MatrixXd> hessian_obj, Eigen::Ref<Eigen::MatrixXd> hessian_eq,
1499  Eigen::Ref<Eigen::MatrixXd> hessian_ineq, double multiplier_obj, const double* multipliers_eq,
1500  const double* multipliers_ineq)
1501 {
1502  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized, "OptimizationProblemInterface::computeDenseHessians(): default implementation might be slow.");
1503  int obj_dim = getObjectiveDimension();
1504  int eq_dim = getEqualityDimension();
1505  int ineq_dim = getInequalityDimension();
1506  int idx = 0;
1507  if (obj_dim > 0)
1508  {
1509  computeDenseHessianObjective(hessian_obj, multiplier_obj);
1510  idx += obj_dim;
1511  }
1512  if (eq_dim > 0)
1513  {
1514  computeDenseHessianEqualities(hessian_eq, multipliers_eq);
1515  idx += eq_dim;
1516  }
1517  if (ineq_dim > 0)
1518  {
1519  computeDenseHessianInequalities(hessian_ineq, multipliers_ineq);
1520  }
1521 }
1522 
1523 void OptimizationProblemInterface::computeSparseHessians(Eigen::SparseMatrix<double>& hessian_obj, Eigen::SparseMatrix<double>& hessian_eq,
1524  Eigen::SparseMatrix<double>& hessian_ineq, double multiplier_obj,
1525  const double* multipliers_eq, const double* multipliers_ineq)
1526 {
1527  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized, "OptimizationProblemInterface::computeSparseHessians(): default implementation might be slow.");
1528  computeSparseHessianObjective(hessian_obj, multiplier_obj);
1529  computeSparseHessianEqualities(hessian_eq, multipliers_eq);
1530  computeSparseHessianInequalities(hessian_ineq, multipliers_ineq);
1531 }
1532 
1533 void OptimizationProblemInterface::computeSparseHessiansNNZ(int& nnz_obj, int& nnz_eq, int& nnz_ineq, bool lower_part_only)
1534 {
1535  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
1536  "OptimizationProblemInterface::computeSparseHessiansNNZ(): default implementation might be slow.");
1537  nnz_obj = computeSparseHessianObjectiveNNZ(lower_part_only);
1538  nnz_eq = computeSparseHessianEqualitiesNNZ(lower_part_only);
1539  nnz_ineq = computeSparseHessianInequalitiesNNZ(lower_part_only);
1540 }
1541 
1542 void OptimizationProblemInterface::computeSparseHessiansStructure(Eigen::Ref<Eigen::VectorXi> i_row_obj, Eigen::Ref<Eigen::VectorXi> j_col_obj,
1543  Eigen::Ref<Eigen::VectorXi> i_row_eq, Eigen::Ref<Eigen::VectorXi> j_col_eq,
1544  Eigen::Ref<Eigen::VectorXi> i_row_ineq, Eigen::Ref<Eigen::VectorXi> j_col_ineq,
1545  bool lower_part_only)
1546 {
1547  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
1548  "OptimizationProblemInterface::computeSparseHessiansStructure(): default implementation might be slow.");
1549  computeSparseHessianObjectiveStructure(i_row_obj, j_col_obj, lower_part_only);
1550  computeSparseHessianEqualitiesStructure(i_row_eq, j_col_eq, lower_part_only);
1551  computeSparseHessianInequalitiesStructure(i_row_ineq, j_col_ineq, lower_part_only);
1552 }
1553 
1554 void OptimizationProblemInterface::computeSparseHessiansValues(Eigen::Ref<Eigen::VectorXd> values_obj, Eigen::Ref<Eigen::VectorXd> values_eq,
1555  Eigen::Ref<Eigen::VectorXd> values_ineq, double multiplier_obj,
1556  const double* multipliers_eq, const double* multipliers_ineq, bool lower_part_only)
1557 {
1558  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
1559  "OptimizationProblemInterface::computeSparseHessiansValues(): default implementation might be slow.");
1560  computeSparseHessianObjectiveValues(values_obj, multiplier_obj, lower_part_only);
1561  computeSparseHessianEqualitiesValues(values_eq, multipliers_eq, lower_part_only);
1562  computeSparseHessianInequalitiesValues(values_ineq, multipliers_ineq, lower_part_only);
1563 }
1564 
1565 void OptimizationProblemInterface::computeSparseHessianLagrangian(Eigen::SparseMatrix<double, Eigen::ColMajor, long long>& H,
1566  const double* multipliers_eq, const double* multipliers_ineq,
1567  const Eigen::VectorXi* col_nnz, bool upper_part_only)
1568 {
1569  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
1570  "OptimizationProblemInterface::computeSparseHessianLagrangian(): default implementation might be slow.");
1571 
1572  int dim_x = getParameterDimension();
1573  int dim_eq = getEqualityDimension();
1574  int dim_ineq = getInequalityDimension();
1575 
1576  H.setZero();
1577 
1578  // TODO(roesmann): can be parallelized
1579 
1580  Eigen::SparseMatrix<double> H_obj(dim_x, dim_x), H_eq(dim_x, dim_x), H_ineq(dim_x, dim_x);
1581  computeSparseHessianObjective(H_obj, 1.0);
1582  if (dim_eq > 0) computeSparseHessianEqualities(H_eq, multipliers_eq);
1583  if (dim_ineq > 0) computeSparseHessianInequalities(H_ineq, multipliers_ineq);
1584 
1585  if (upper_part_only)
1586  {
1587  // we cannot use .selfadjointView<>() with mixed datatypes (SparseMatrix with long long and int indices)
1588  Eigen::SparseMatrix<double, Eigen::ColMajor, long long> H_temp(dim_x, dim_x);
1589 
1590  if (dim_eq > 0 && dim_ineq > 0)
1591  {
1592  H_temp = H_obj + H_eq + H_ineq;
1593  }
1594  else if (dim_eq > 0)
1595  {
1596  H_temp = H_obj + H_eq;
1597  }
1598  else if (dim_ineq > 0)
1599  {
1600  H_temp = H_obj + H_ineq;
1601  }
1602  else
1603  {
1604  // if (upper_part_only)
1605  // H.selfadjointView<Eigen::Upper>() = H_obj.selfadjointView<Eigen::Upper>();
1606  // else
1607  H_temp = H_obj;
1608  }
1609  H.selfadjointView<Eigen::Upper>() = H_temp.selfadjointView<Eigen::Upper>(); // see note above
1610  }
1611  else
1612  {
1613  if (dim_eq > 0 && dim_ineq > 0)
1614  {
1615  H = H_obj + H_eq + H_ineq;
1616  }
1617  else if (dim_eq > 0)
1618  {
1619  H = H_obj + H_eq;
1620  }
1621  else if (dim_ineq > 0)
1622  {
1623  H = H_obj + H_ineq;
1624  }
1625  else
1626  {
1627  H = H_obj;
1628  }
1629  }
1630 }
1631 
1632 void OptimizationProblemInterface::computeSparseHessianLagrangianNNZperCol(Eigen::Ref<Eigen::VectorXi> col_nnz, bool upper_part_only)
1633 {
1634  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized,
1635  "OptimizationProblemInterface::computeSparseHessianLagrangianNNZperCol(): default implementation might be slow.");
1636 
1637  assert(col_nnz.size() == getParameterDimension());
1638 
1639  if (upper_part_only)
1640  {
1641  for (int i = 0; i < col_nnz.size(); ++i)
1642  {
1643  col_nnz(i) = i + 1; // worst case nnz
1644  }
1645  }
1646  else
1647  col_nnz.setConstant(getParameterDimension()); // worst case nnz
1648 }
1649 
1650 void OptimizationProblemInterface::computeSparseJacobianTwoSideBoundedLinearFormAndHessianLagrangian(
1651  Eigen::SparseMatrix<double, Eigen::ColMajor, long long>& H, const double* multipliers_eq, const double* multipliers_ineq,
1652  Eigen::SparseMatrix<double, Eigen::ColMajor, long long>& A, bool include_finite_bounds, const Eigen::VectorXi* col_nnz_H,
1653  const Eigen::VectorXi* col_nnz_A, bool upper_part_only_H)
1654 {
1655  computeSparseJacobianTwoSideBoundedLinearForm(A, include_finite_bounds, col_nnz_A);
1656  computeSparseHessianLagrangian(H, multipliers_eq, multipliers_ineq, col_nnz_H, upper_part_only_H);
1657 }
1658 
1659 void OptimizationProblemInterface::computeSparseJacobianTwoSideBoundedLinearFormAndHessianObjective(
1660  Eigen::SparseMatrix<double, Eigen::ColMajor, long long>& H, Eigen::SparseMatrix<double, Eigen::ColMajor, long long>& A,
1661  bool include_finite_bounds, const Eigen::VectorXi* col_nnz_H, const Eigen::VectorXi* col_nnz_A, bool upper_part_only_H)
1662 {
1663  computeSparseJacobianTwoSideBoundedLinearForm(A, include_finite_bounds, col_nnz_A);
1664  computeSparseHessianObjectiveLL(H, col_nnz_H, upper_part_only_H);
1665 }
1666 
1667 bool OptimizationProblemInterface::checkIfAllUnfixedParam(std::function<bool(double, int)> fun)
1668 {
1669  PRINT_DEBUG_COND_ONCE(_warn_if_not_specialized, "OptimizationProblemInterface::checkIfAllUnfixedParam(): default implementation might be slow.");
1670 
1671  for (int i = 0; i < getParameterDimension(); ++i)
1672  {
1673  if (!fun(getParameterValue(i), i)) return false;
1674  }
1675  return true;
1676 }
1677 
1678 } // namespace corbo
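Usage note: the default implementations above only rely on a small set of virtual accessors (parameter values, bounds, objective and constraint values) and fall back to central finite differences (CentralDifferences) and dense worst-case sparsity patterns. Below is a minimal sketch of a derived problem, assuming the methods used throughout this file are declared virtual in optimization_problem_interface.h with the signatures used here; the example class and its member names are hypothetical, and the real interface may declare additional pure virtual methods that are omitted in this sketch.

#include <corbo-optimization/optimization_problem_interface.h>

#include <Eigen/Core>

// Hypothetical example: least-squares residuals r(x) = [10*(x1 - x0^2), 1 - x0],
// one equality constraint x0 + x1 - 1 = 0 and box bounds -2 <= x <= 2.
class ExampleProblem : public corbo::OptimizationProblemInterface
{
 public:
    // parameter vector access used by the generic routines above
    int getParameterDimension() override { return 2; }
    double getParameterValue(int idx) override { return _x[idx]; }
    void setParameterValue(int idx, double x) override { _x[idx] = x; }

    // bounds
    double getLowerBound(int idx) override { return _lb[idx]; }
    double getUpperBound(int idx) override { return _ub[idx]; }
    void setLowerBound(int idx, double lb) override { _lb[idx] = lb; }
    void setUpperBound(int idx, double ub) override { _ub[idx] = ub; }

    // objective: purely least-squares in this example
    int getNonLsqObjectiveDimension() override { return 0; }
    double computeValueNonLsqObjective() override { return 0.0; }
    int getLsqObjectiveDimension() override { return 2; }
    int getObjectiveDimension() override { return 1; }
    void computeValuesLsqObjective(Eigen::Ref<Eigen::VectorXd> values) override
    {
        values[0] = 10.0 * (_x[1] - _x[0] * _x[0]);
        values[1] = 1.0 - _x[0];
    }

    // constraints
    int getEqualityDimension() override { return 1; }
    void computeValuesEquality(Eigen::Ref<Eigen::VectorXd> values) override { values[0] = _x[0] + _x[1] - 1.0; }
    int getInequalityDimension() override { return 0; }
    void computeValuesInequality(Eigen::Ref<Eigen::VectorXd> values) override { (void)values; }

 private:
    Eigen::Vector2d _x  = Eigen::Vector2d(0.5, 0.5);
    Eigen::Vector2d _lb = Eigen::Vector2d(-2.0, -2.0);
    Eigen::Vector2d _ub = Eigen::Vector2d(2.0, 2.0);
};

With only these hooks, the generic routines in this file already provide computeValueObjective(), computeGradientObjective(), the dense and sparse constraint Jacobians and the Hessian approximations via finite differences and worst-case sparsity; the PRINT_DEBUG_COND_ONCE warnings indicate which methods a specialized problem should override for efficiency.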
virtual void computeSparseJacobianTwoSideBoundedLinearFormValues(Eigen::Ref< Eigen::VectorXd > values, bool include_finite_bounds)
virtual double getUpperBound(int idx)=0
Return specific upper bound of a parameter.
virtual void computeDenseHessians(Eigen::Ref< Eigen::MatrixXd > hessian_obj, Eigen::Ref< Eigen::MatrixXd > hessian_eq, Eigen::Ref< Eigen::MatrixXd > hessian_ineq, double multiplier_obj=1.0, const double *multipliers_eq=nullptr, const double *multipliers_ineq=nullptr)
Index cols() const
Definition: SparseMatrix.h:138
virtual int finiteCombinedBoundsDimension()
Dimension of the set of finite bounds (combined such that each ub and lb component define a single di...
virtual void computeSparseHessiansStructure(Eigen::Ref< Eigen::VectorXi > i_row_obj, Eigen::Ref< Eigen::VectorXi > j_col_obj, Eigen::Ref< Eigen::VectorXi > i_row_eq, Eigen::Ref< Eigen::VectorXi > j_col_eq, Eigen::Ref< Eigen::VectorXi > i_row_ineq, Eigen::Ref< Eigen::VectorXi > j_col_ineq, bool lower_part_only=false)
Scalar * x
virtual void computeSparseJacobianInequalities(Eigen::SparseMatrix< double > &jacobian, const double *multipliers=nullptr)
#define PRINT_DEBUG_COND_ONCE(cond, msg)
Print msg-stream only if cond == true, only once and only if project is compiled in Debug-mode...
Definition: console.h:77
virtual void computeSparseJacobianLsqObjectiveStructure(Eigen::Ref< Eigen::VectorXi > i_row, Eigen::Ref< Eigen::VectorXi > j_col)
virtual void computeBoundsForTwoSideBoundedLinearForm(Eigen::Ref< Eigen::VectorXd > lbA, Eigen::Ref< Eigen::VectorXd > ubA, bool include_finite_bounds)
Compute lower and upper bounds lbA and ubA for the linear form lbA <= A x <= ubA. ...
virtual void setBounds(const Eigen::Ref< const Eigen::VectorXd > &lb, const Eigen::Ref< const Eigen::VectorXd > &ub)
Set lower and upper bound vector.
virtual int getInequalityDimension()=0
Total dimension of general inequality constraints.
virtual void computeDenseJacobianEqualities(Eigen::Ref< Eigen::MatrixXd > jacobian, const double *multipliers=nullptr)
Compute the equality constraint Jacobian Jceq(x) for the current parameter set.
virtual void setParameterValue(int idx, double x)=0
Set specific value of the parameter vector.
static void jacobian(IncFun inc_fun, EvalFun eval_fun, Eigen::Ref< Eigen::MatrixXd > jacobian)
Compute Jacobian of a desired function.
Index rows() const
Definition: SparseMatrix.h:136
A matrix or vector expression mapping an existing array of data.
Definition: Map.h:94
virtual int finiteBoundsDimension()
Dimension of the set of finite bounds (individual bounds ub and lb)
virtual void computeSparseHessiansNNZ(int &nnz_obj, int &nnz_eq, int &nnz_ineq, bool lower_part_only=false)
virtual void computeSparseHessians(Eigen::SparseMatrix< double > &hessian_obj, Eigen::SparseMatrix< double > &hessian_eq, Eigen::SparseMatrix< double > &hessian_ineq, double multiplier_obj=1.0, const double *multipliers_eq=nullptr, const double *multipliers_ineq=nullptr)
EIGEN_DEVICE_FUNC ColXpr col(Index i)
This is the const version of col().
Definition: BlockMethods.h:838
virtual void computeSparseJacobianTwoSideBoundedLinearFormStructure(Eigen::Ref< Eigen::VectorXi > i_row, Eigen::Ref< Eigen::VectorXi > j_col, bool include_finite_bounds)
virtual int computeSparseHessianObjectiveNNZ(bool lower_part_only=false)
virtual void computeSparseJacobianFiniteCombinedBounds(Eigen::SparseMatrix< double > &jacobian, double weight=1.0)
virtual void computeDenseJacobianFiniteCombinedBounds(Eigen::Ref< Eigen::MatrixXd > jacobian, double weight=1.0)
Compute the Jacobian for finite combined bounds.
virtual void computeDenseJacobianActiveInequalities(Eigen::Ref< Eigen::MatrixXd > jacobian, double weight=1.0)
Compute the Jacobian Jc(x) with non-zeros for active constraints c(x)>= 0 and zeros for inactive ones...
virtual void computeDistanceFiniteCombinedBounds(Eigen::Ref< Eigen::VectorXd > values)
Compute the distance to finite bound values (combined lower and upper)
virtual void computeSparseHessianEqualitiesValues(Eigen::Ref< Eigen::VectorXd > values, const double *multipliers=nullptr, bool lower_part_only=false)
virtual void computeCombinedSparseJacobiansValues(Eigen::Ref< Eigen::VectorXd > values, bool objective_lsq=true, bool equality=true, bool inequality=true, const double *multipliers_obj=nullptr, const double *multipliers_eq=nullptr, const double *multipliers_ineq=nullptr)
virtual void computeSparseJacobianEqualities(Eigen::SparseMatrix< double > &jacobian, const double *multipliers=nullptr)
virtual void computeSparseJacobianFiniteCombinedBoundsStructure(Eigen::Ref< Eigen::VectorXi > i_row, Eigen::Ref< Eigen::VectorXi > j_col)
virtual int getEqualityDimension()=0
Total dimension of equality constraints.
virtual void computeSparseJacobianTwoSideBoundedLinearFormAndHessianObjective(Eigen::SparseMatrix< double, Eigen::ColMajor, long long > &H, Eigen::SparseMatrix< double, Eigen::ColMajor, long long > &A, bool include_finite_bounds, const Eigen::VectorXi *col_nnz_H, const Eigen::VectorXi *col_nnz_A, bool upper_part_only_H)
virtual void computeSparseJacobianLsqObjectiveValues(Eigen::Ref< Eigen::VectorXd > values, const double *multipliers=nullptr)
virtual void computeSparseJacobianEqualitiesValues(Eigen::Ref< Eigen::VectorXd > values, const double *multipliers=nullptr)
virtual void computeSparseJacobianLsqObjective(Eigen::SparseMatrix< double > &jacobian, const double *multipliers=nullptr)
virtual void computeDenseJacobianFiniteCombinedBoundsIdentity(Eigen::Ref< Eigen::MatrixXd > jacobian)
Compute the Jacobian for finite combined bounds (identity variant).
virtual void applyIncrement(const Eigen::Ref< const Eigen::VectorXd > &increment)
Apply increment to the current parameter set.
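Illustrative usage: a plain damped gradient step fed through applyIncrement(), with the objective gradient obtained via computeGradientObjective() listed further below; the helper name and the step size are assumptions.
// Sketch (assumptions: 'problem' is a concrete implementation; 'alpha' is purely illustrative).
#include <corbo-optimization/optimization_problem_interface.h>
#include <Eigen/Core>

void gradientStep(corbo::OptimizationProblemInterface& problem, double alpha = 1e-3)
{
    Eigen::VectorXd gradient(problem.getParameterDimension());
    problem.computeGradientObjective(gradient);
    problem.applyIncrement(-alpha * gradient);  // x <- x + increment
}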
virtual void computeSparseHessianObjectiveStructure(Eigen::Ref< Eigen::VectorXi > i_row, Eigen::Ref< Eigen::VectorXi > j_col, bool lower_part_only=false)
MatrixType A(a, *n, *n, *lda)
virtual void computeSparseHessianInequalitiesValues(Eigen::Ref< Eigen::VectorXd > values, const double *multipliers=nullptr, bool lower_part_only=false)
virtual void setLowerBound(int idx, double lb)=0
Set specific lower bound of a parameter.
virtual int computeCombinedSparseJacobiansNNZ(bool objective_lsq=true, bool equality=true, bool inequality=true)
virtual int getLsqObjectiveDimension()=0
Total dimension of least-squares objective function terms.
virtual double getParameterValue(int idx)=0
Return specific value of the parameter vector.
#define PRINT_WARNING_COND_ONCE(cond, msg)
Print msg-stream only if cond == true, only once.
Definition: console.h:163
virtual void computeSparseHessianObjective(Eigen::SparseMatrix< double > &hessian, double multiplier=1.0)
virtual int computeSparseHessianInequalitiesNNZ(bool lower_part_only=false)
virtual void setUpperBound(int idx, double ub)=0
Set specific upper bound of a parameter.
Finite differences via central differences.
virtual void computeSparseHessianLagrangianNNZperCol(Eigen::Ref< Eigen::VectorXi > col_nnz, bool upper_part_only)
virtual void computeLowerAndUpperBoundDiff(Eigen::Ref< Eigen::VectorXd > lb_minus_x, Eigen::Ref< Eigen::VectorXd > ub_minus_x)
Compute the distance between parameters and bounds.
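Illustrative usage: a simple bound-feasibility check built on the signed differences lb - x and ub - x; the helper name and the sizing by the parameter dimension are assumptions.
// Sketch (assumption: 'problem' is a concrete implementation).
#include <corbo-optimization/optimization_problem_interface.h>
#include <Eigen/Core>

bool withinBounds(corbo::OptimizationProblemInterface& problem)
{
    const int n = problem.getParameterDimension();
    Eigen::VectorXd lb_minus_x(n), ub_minus_x(n);
    problem.computeLowerAndUpperBoundDiff(lb_minus_x, ub_minus_x);
    // Bound-feasible if lb - x <= 0 and ub - x >= 0 componentwise.
    return (lb_minus_x.array() <= 0.0).all() && (ub_minus_x.array() >= 0.0).all();
}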
virtual int computeSparseHessianEqualitiesNNZ(bool lower_part_only=false)
virtual void computeValuesActiveInequality(Eigen::Ref< Eigen::VectorXd > values, double weight=1.0)
Compute the values of the active inequality constraints (elementwise max(0, c(x))) ...
virtual void computeSparseJacobianTwoSideBoundedLinearFormAndHessianLagrangian(Eigen::SparseMatrix< double, Eigen::ColMajor, long long > &H, const double *multipliers_eq, const double *multipliers_ineq, Eigen::SparseMatrix< double, Eigen::ColMajor, long long > &A, bool include_finite_bounds, const Eigen::VectorXi *col_nnz_H=nullptr, const Eigen::VectorXi *col_nnz_A=nullptr, bool upper_part_only_H=false)
Compute the Jacobian and Hessian of the Lagrangian.
virtual void computeSparseJacobianInequalitiesStructure(Eigen::Ref< Eigen::VectorXi > i_row, Eigen::Ref< Eigen::VectorXi > j_col)
virtual void computeSparseJacobiansNNZ(int &nnz_lsq_obj, int &nnz_eq, int &nnz_ineq)
virtual void computeSparseJacobianEqualitiesStructure(Eigen::Ref< Eigen::VectorXi > i_row, Eigen::Ref< Eigen::VectorXi > j_col)
virtual void computeSparseHessianObjectiveValues(Eigen::Ref< Eigen::VectorXd > values, double multiplier=1.0, bool lower_part_only=false)
virtual void computeSparseJacobians(Eigen::SparseMatrix< double > &jacobian_lsq_obj, Eigen::SparseMatrix< double > &jacobian_eq, Eigen::SparseMatrix< double > &jacobian_ineq, const double *multipliers_lsq_obj=nullptr, const double *multipliers_eq=nullptr, const double *multipliers_ineq=nullptr, bool active_ineq=false, double active_ineq_weight=1.0)
Scalar & insert(Index row, Index col)
virtual void getParameterVector(Eigen::Ref< Eigen::VectorXd > x)
Return deep copy of the complete parameter vector.
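Illustrative usage: a snapshot/restore pattern around a trial step, combining getParameterVector(), applyIncrement(), computeValueObjective() and setParameterVector(); the helper name and the caller-provided increment are assumptions.
// Sketch (assumption: 'problem' is a concrete implementation).
#include <corbo-optimization/optimization_problem_interface.h>
#include <Eigen/Core>

double evaluateTrialStep(corbo::OptimizationProblemInterface& problem, const Eigen::VectorXd& trial_increment)
{
    Eigen::VectorXd backup(problem.getParameterDimension());
    problem.getParameterVector(backup);        // deep copy of the current parameters
    problem.applyIncrement(trial_increment);   // trial step
    const double trial_cost = problem.computeValueObjective();
    problem.setParameterVector(backup);        // restore the original parameters
    return trial_cost;                         // e.g. for a line-search acceptance test
}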
virtual double computeValueNonLsqObjective()=0
virtual void computeSparseHessianObjectiveLL(Eigen::SparseMatrix< double, Eigen::ColMajor, long long > &H, const Eigen::VectorXi *col_nnz=nullptr, bool upper_part_only=false)
virtual void computeValuesEquality(Eigen::Ref< Eigen::VectorXd > values)=0
Compute the equality constraint values ceq(x) for the current parameter set.
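Illustrative usage: evaluate the equality residuals ceq(x) and summarize them with an infinity norm; the helper name is an assumption, and the vector is sized by getEqualityDimension().
// Sketch (assumption: 'problem' is a concrete implementation).
#include <corbo-optimization/optimization_problem_interface.h>
#include <Eigen/Core>

double equalityResidualNorm(corbo::OptimizationProblemInterface& problem)
{
    Eigen::VectorXd ceq(problem.getEqualityDimension());
    problem.computeValuesEquality(ceq);
    return ceq.size() > 0 ? ceq.lpNorm<Eigen::Infinity>() : 0.0;
}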
virtual void computeSparseJacobianInequalitiesValues(Eigen::Ref< Eigen::VectorXd > values, const double *multipliers=nullptr)
constexpr const double CORBO_INF_DBL
Representation for infinity (double version)
virtual void getBounds(Eigen::Ref< Eigen::VectorXd > lb, Eigen::Ref< Eigen::VectorXd > ub)
Get lower and upper bound vector.
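Illustrative usage: clamp the current parameters into their box bounds using getBounds() together with the parameter accessors; the helper name is an assumption.
// Sketch (assumption: 'problem' is a concrete implementation).
#include <corbo-optimization/optimization_problem_interface.h>
#include <Eigen/Core>

void clampIntoBounds(corbo::OptimizationProblemInterface& problem)
{
    const int n = problem.getParameterDimension();
    Eigen::VectorXd lb(n), ub(n), x(n);
    problem.getBounds(lb, ub);
    problem.getParameterVector(x);
    problem.setParameterVector(x.cwiseMax(lb).cwiseMin(ub));  // componentwise projection onto [lb, ub]
}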
virtual double getLowerBound(int idx)=0
Return specific lower bound value of a parameter.
virtual void computeValuesInequality(Eigen::Ref< Eigen::VectorXd > values)=0
Compute the inequality constraint values c(x) for the current parameter set.
virtual int computeSparseJacobianTwoSideBoundedLinearFormNNZ(bool include_finite_bounds)
A matrix or vector expression mapping an existing expression.
Definition: Ref.h:192
virtual void computeGradientNonLsqObjective(Eigen::Ref< Eigen::VectorXd > gradient)
ConstSelfAdjointViewReturnType< UpLo >::Type selfadjointView() const
virtual void computeSparseJacobianFiniteCombinedBoundsValues(Eigen::Ref< Eigen::VectorXd > values, double weight=1.0)
virtual void computeGradientObjectiveAndCombinedSparseJacobiansValues(Eigen::Ref< Eigen::VectorXd > gradient, Eigen::Ref< Eigen::VectorXd > jac_values, bool equality=true, bool inequality=true, const double *multipliers_eq=nullptr, const double *multipliers_ineq=nullptr)
virtual void computeSparseJacobianTwoSideBoundedLinearFormNNZPerColumn(Eigen::Ref< Eigen::VectorXi > col_nnz, bool include_finite_bounds)
virtual void computeSparseJacobianActiveInequalities(Eigen::SparseMatrix< double > &jacobian, double weight=1.0)
virtual int getNonLsqObjectiveDimension()=0
Get dimension of the objective (should be zero or one)
virtual void computeDenseHessianInequalities(Eigen::Ref< Eigen::MatrixXd > hessian, const double *multipliers=nullptr)
virtual void computeSparseJacobiansStructure(Eigen::Ref< Eigen::VectorXi > i_row_obj, Eigen::Ref< Eigen::VectorXi > j_col_obj, Eigen::Ref< Eigen::VectorXi > i_row_eq, Eigen::Ref< Eigen::VectorXi > j_col_eq, Eigen::Ref< Eigen::VectorXi > i_row_ineq, Eigen::Ref< Eigen::VectorXi > j_col_ineq)
virtual void computeSparseJacobiansValues(Eigen::Ref< Eigen::VectorXd > values_obj, Eigen::Ref< Eigen::VectorXd > values_eq, Eigen::Ref< Eigen::VectorXd > values_ineq, const double *multipliers_obj=nullptr, const double *multipliers_eq=nullptr, const double *multipliers_ineq=nullptr, bool active_ineq=false, double active_ineq_weight=1.0)
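Illustrative usage of the three-pass sparse workflow exposed by computeSparseJacobiansNNZ(), computeSparseJacobiansStructure() and computeSparseJacobiansValues(): query the nonzero counts once, cache the fixed structure, and refresh only the values afterwards; the helper name is an assumption.
// Sketch (assumption: 'problem' is a concrete implementation).
#include <corbo-optimization/optimization_problem_interface.h>
#include <Eigen/Core>

void assembleSparseJacobians(corbo::OptimizationProblemInterface& problem)
{
    int nnz_lsq = 0, nnz_eq = 0, nnz_ineq = 0;
    problem.computeSparseJacobiansNNZ(nnz_lsq, nnz_eq, nnz_ineq);

    Eigen::VectorXi i_lsq(nnz_lsq), j_lsq(nnz_lsq), i_eq(nnz_eq), j_eq(nnz_eq), i_ineq(nnz_ineq), j_ineq(nnz_ineq);
    problem.computeSparseJacobiansStructure(i_lsq, j_lsq, i_eq, j_eq, i_ineq, j_ineq);

    Eigen::VectorXd v_lsq(nnz_lsq), v_eq(nnz_eq), v_ineq(nnz_ineq);
    problem.computeSparseJacobiansValues(v_lsq, v_eq, v_ineq);
    // The (row, col, value) triplets can now be copied into a solver-specific sparse container.
}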
virtual void computeSparseHessiansValues(Eigen::Ref< Eigen::VectorXd > values_obj, Eigen::Ref< Eigen::VectorXd > values_eq, Eigen::Ref< Eigen::VectorXd > values_ineq, double multiplier_obj=1.0, const double *multipliers_eq=nullptr, const double *multipliers_ineq=nullptr, bool lower_part_only=false)
virtual void computeDenseHessianEqualities(Eigen::Ref< Eigen::MatrixXd > hessian, const double *multipliers=nullptr)
virtual void computeSparseHessianLagrangian(Eigen::SparseMatrix< double, Eigen::ColMajor, long long > &H, const double *multipliers_eq, const double *multipliers_ineq, const Eigen::VectorXi *col_nnz=nullptr, bool upper_part_only=false)
Compute the Hessian of the Lagrangian L(x) = f(x) + lambda1 * c(x) + lambda2 * ceq(x) ...
virtual void getParametersAndBoundsFinite(Eigen::Ref< Eigen::VectorXd > lb_finite_bounds, Eigen::Ref< Eigen::VectorXd > ub_finite_bounds, Eigen::Ref< Eigen::VectorXd > x_finite_bounds)
Return bound and parameter vectors only for finite bounds.
virtual void computeValuesLsqObjective(Eigen::Ref< Eigen::VectorXd > values)=0
Compute the objective function values f(x) for the current parameter set.
virtual int getObjectiveDimension()=0
Get dimension of the objective (should be zero or one, includes Lsq objectives if present) ...
EIGEN_DEVICE_FUNC RowXpr row(Index i)
This is the const version of row().
Definition: BlockMethods.h:859
void reserve(Index reserveSize)
Definition: SparseMatrix.h:262
Index outerSize() const
Definition: SparseMatrix.h:143
virtual void computeSparseHessianEqualitiesStructure(Eigen::Ref< Eigen::VectorXi > i_row, Eigen::Ref< Eigen::VectorXi > j_col, bool lower_part_only=false)
virtual int getParameterDimension()=0
Effective dimension of the optimization parameter set (changeable, non-fixed part) ...
virtual void computeCombinedSparseJacobiansStructure(Eigen::Ref< Eigen::VectorXi > i_row, Eigen::Ref< Eigen::VectorXi > j_col, bool objective_lsq=true, bool equality=true, bool inequality=true)
virtual void computeDenseJacobianInequalities(Eigen::Ref< Eigen::MatrixXd > jacobian, const double *multipliers=nullptr)
Compute the inequality constraint Jacobian Jc(x) for the current parameter set.
virtual void computeSparseHessianInequalitiesStructure(Eigen::Ref< Eigen::VectorXi > i_row, Eigen::Ref< Eigen::VectorXi > j_col, bool lower_part_only=false)
virtual void computeSparseHessianObjectiveNNZperCol(Eigen::Ref< Eigen::VectorXi > col_nnz, bool upper_part_only=false)
virtual void setParameterVector(const Eigen::Ref< const Eigen::VectorXd > &x)
Set complete parameter vector.
virtual bool checkIfAllUnfixedParam(std::function< bool(double, int)> fun)
Check if a function taking the parameter value and unfixed-idx is true for all unfixed parameter valu...
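Illustrative usage: run a predicate over all unfixed parameters, here plain finiteness; the helper name is an assumption.
// Sketch (assumption: 'problem' is a concrete implementation).
#include <corbo-optimization/optimization_problem_interface.h>
#include <cmath>

bool allUnfixedParametersFinite(corbo::OptimizationProblemInterface& problem)
{
    // The callback receives the parameter value and its index among the unfixed parameters.
    return problem.checkIfAllUnfixedParam([](double value, int /*unfixed_idx*/) { return std::isfinite(value); });
}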
virtual void computeDenseHessianObjective(const Eigen::Ref< const Eigen::MatrixXd > &jacobian, Eigen::Ref< Eigen::MatrixXd > hessian, const double *multipliers=nullptr, bool jacob_scaled=true)
Compute the objective Hessian Hf(x) for the current parameter set.
virtual void computeSparseJacobianTwoSideBoundedLinearForm(Eigen::SparseMatrix< double, Eigen::ColMajor, long long > &A, bool include_finite_bounds, const Eigen::VectorXi *col_nnz=nullptr)
Compute the Jacobian A for the linear form lbA <= A x <= ubA.
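Illustrative usage: request the stacked linear-form matrix A with long long indices as declared above; whether finite bounds are appended is controlled by the flag, and it is assumed here that the implementation resizes A internally.
// Sketch (assumptions: 'problem' is a concrete implementation; A is resized by the callee).
#include <corbo-optimization/optimization_problem_interface.h>
#include <Eigen/Sparse>

void assembleLinearForm(corbo::OptimizationProblemInterface& problem)
{
    Eigen::SparseMatrix<double, Eigen::ColMajor, long long> A;
    problem.computeSparseJacobianTwoSideBoundedLinearForm(A, /*include_finite_bounds=*/true);
    // Depending on the implementation, A stacks constraint rows and, optionally, finite-bound rows.
}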
virtual void computeDenseJacobianLsqObjective(Eigen::Ref< Eigen::MatrixXd > jacobian, const double *multipliers=nullptr)
Compute the objective Jacobian Jf(x) for the current parameter set.
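Illustrative usage: allocate the dense least-squares Jacobian with getLsqObjectiveDimension() rows and getParameterDimension() columns before calling the routine; the helper name is an assumption.
// Sketch (assumption: 'problem' is a concrete implementation).
#include <corbo-optimization/optimization_problem_interface.h>
#include <Eigen/Core>

Eigen::MatrixXd denseLsqJacobian(corbo::OptimizationProblemInterface& problem)
{
    Eigen::MatrixXd jacobian(problem.getLsqObjectiveDimension(), problem.getParameterDimension());
    problem.computeDenseJacobianLsqObjective(jacobian);  // multipliers are optional and omitted here
    return jacobian;
}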
virtual void computeSparseHessianEqualities(Eigen::SparseMatrix< double > &hessian, const double *multipliers=nullptr)
virtual void computeGradientObjective(Eigen::Ref< Eigen::VectorXd > gradient)
virtual void computeSparseHessianInequalities(Eigen::SparseMatrix< double > &hessian, const double *multipliers=nullptr)
virtual void computeDenseJacobians(Eigen::Ref< Eigen::VectorXd > gradient_non_lsq_obj, Eigen::Ref< Eigen::MatrixXd > jacobian_lsq_obj, Eigen::Ref< Eigen::MatrixXd > jacobian_eq, Eigen::Ref< Eigen::MatrixXd > jacobian_ineq, const double *multipliers_lsq_obj=nullptr, const double *multipliers_eq=nullptr, const double *multipliers_ineq=nullptr, bool active_ineq=false, double active_ineq_weight=1.0)
Compute the objective and constraint Jacobians at once.
#define PRINT_ERROR(msg)
Print msg-stream as error msg.
Definition: console.h:173
virtual void computeSparseJacobianActiveInequalitiesValues(Eigen::Ref< Eigen::VectorXd > values, double weight=1.0)
virtual void computeCombinedSparseJacobian(Eigen::SparseMatrix< double > &jacobian, bool objective_lsq, bool equality, bool inequality, bool finite_combined_bounds, bool active_ineq=false, double weight_eq=1.0, double weight_ineq=1.0, double weight_bounds=1.0, const Eigen::VectorXd *values=nullptr, const Eigen::VectorXi *col_nnz=nullptr)


control_box_rst
Author(s): Christoph Rösmann
autogenerated on Mon Feb 28 2022 22:07:06