ActiveSetSolver-inl.h
/* ----------------------------------------------------------------------------

 * GTSAM Copyright 2010, Georgia Tech Research Corporation,
 * Atlanta, Georgia 30332-0415
 * All Rights Reserved
 * Authors: Frank Dellaert, et al. (see THANKS for the full author list)

 * See LICENSE for the license information

 * -------------------------------------------------------------------------- */

#pragma once

#include <gtsam_unstable/linear/InfeasibleInitialValues.h>  // InfeasibleInitialValues exception thrown below (header path assumed)

/******************************************************************************/
// Convenient macros to reduce syntactic noise. undef'd at the end of this file.
#define Template template <class PROBLEM, class POLICY, class INITSOLVER>
#define This ActiveSetSolver<PROBLEM, POLICY, INITSOLVER>

/******************************************************************************/

namespace gtsam {

/* We have to make sure the new solution with step size alpha satisfies all INACTIVE
 * inequality constraints. If some inactive inequality constraints complain about the
 * full step (alpha = 1), we have to shrink alpha so that the step stays inside their
 * feasible regions.
 *
 * For each inactive inequality j:
 * - We already have aj'*xk - bj <= 0, since xk satisfies all inequality constraints.
 * - We want aj'*(xk + alpha*p) - bj <= 0.
 * - If aj'*p <= 0, then aj'*(xk + alpha*p) <= aj'*xk <= bj for all alpha > 0,
 *   so the constraint cannot be violated.
 * - We only care when aj'*p > 0. In this case, we need to choose alpha so that
 *   aj'*xk + alpha*aj'*p - bj <= 0  -->  alpha <= (bj - aj'*xk) / (aj'*p).
 *   We want to step as far as possible, so we choose alpha = (bj - aj'*xk) / (aj'*p).
 *
 * The step size is the minimum of these alphas over all inactive inequalities.
 */
Template std::tuple<double, int> This::computeStepSize(
    const InequalityFactorGraph& workingSet, const VectorValues& xk,
    const VectorValues& p, const double& maxAlpha) const {
  double minAlpha = maxAlpha;
  int closestFactorIx = -1;
  for (size_t factorIx = 0; factorIx < workingSet.size(); ++factorIx) {
    const LinearInequality::shared_ptr& factor = workingSet.at(factorIx);
    double b = factor->getb()[0];
    // only check inactive factors
    if (!factor->active()) {
      // Compute a'*p
      double aTp = factor->dotProductRow(p);

      // Check if a'*p > 0. Don't care if it's not.
      if (aTp <= 0)
        continue;

      // Compute a'*xk
      double aTx = factor->dotProductRow(xk);

      // alpha = (b - a'*xk) / (a'*p)
      double alpha = (b - aTx) / aTp;
      // We want the minimum of all those max alphas
      if (alpha < minAlpha) {
        closestFactorIx = factorIx;
        minAlpha = alpha;
      }
    }
  }
  return std::make_tuple(minAlpha, closestFactorIx);
}
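
/* For intuition, a small worked instance of the ratio test above (the numbers are
 * illustrative only, not taken from the library): suppose an inactive constraint
 * a'*x <= b with a = [1 0], b = 2, current iterate xk = [1 0], and step direction
 * p = [2 0]. Then a'*p = 2 > 0, so this constraint limits the step:
 *
 *   alpha = (b - a'*xk) / (a'*p) = (2 - 1) / 2 = 0.5
 *
 * The full step alpha = 1 would land at a'*x = 3 > b, so the loop records 0.5 and
 * this factor's index whenever 0.5 is the smallest bound seen so far.
 */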

/******************************************************************************/
/*
 * The goal of this function is to find currently active inequality constraints
 * that violate the condition to be active. The one that violates the condition
 * the most will be removed from the active set. See Nocedal06book, pg. 469-471.
 *
 * Find the BAD active inequality that pulls x strongest in the wrong direction
 * of its constraint (i.e., it is pulling towards > 0, while its feasible region is <= 0).
 *
 * For active inequality constraints (those that are enforced as equality constraints
 * in the current working set), we want lambda < 0.
 * This is because:
 * - From the Lagrangian L = f - lambda*c, we know that the constraint force
 *   is (lambda * \grad c) = \grad f. Intuitively, to keep the solution x on
 *   the constraint surface, the constraint force has to balance out the
 *   other, unconstrained forces that are pulling x towards the unconstrained
 *   minimum point. Those forces pull x toward (-\grad f), hence the constraint
 *   force has to be exactly \grad f so that the total force is 0.
 * - We also know that at the constraint surface c(x) = 0, \grad c points towards + (>= 0),
 *   while we are solving for the - (<= 0) constraint.
 * - We want the constraint force (lambda * \grad c) to pull x towards the - (<= 0) direction,
 *   i.e., the opposite direction of \grad c, where the inequality constraint <= 0 is satisfied.
 *   That means we want lambda < 0.
 * - Conversely, when the constraint force pulls x towards the infeasible region (+),
 *   the unconstrained force is pulling x in the opposite direction, into the
 *   feasible region (again because the total force has to be 0 to keep x still),
 *   so we can drop this constraint to obtain a lower-error yet still feasible solution.
 *
 * In short, active inequality constraints with lambda > 0 are BAD, because they
 * violate the condition to be active.
 *
 * We remove the worst one, i.e. the one with the largest lambda, from the active set.
 */
Template int This::identifyLeavingConstraint(
    const InequalityFactorGraph& workingSet,
    const VectorValues& lambdas) const {
  int worstFactorIx = -1;
  // preset the maxLambda to 0.0: if lambda is <= 0.0, the constraint is either
  // inactive or a good inequality constraint, so we don't care!
  double maxLambda = 0.0;
  for (size_t factorIx = 0; factorIx < workingSet.size(); ++factorIx) {
    const LinearInequality::shared_ptr& factor = workingSet.at(factorIx);
    if (factor->active()) {
      double lambda = lambdas.at(factor->dualKey())[0];
      if (lambda > maxLambda) {
        worstFactorIx = factorIx;
        maxLambda = lambda;
      }
    }
  }
  return worstFactorIx;
}
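
/* A scalar example of the sign test above (illustrative numbers only): minimize
 * f(x) = 0.5*(x - 2)^2 subject to x - 4 <= 0, with the constraint currently active,
 * so the working-set solution sits at x = 4. Stationarity of L = f - lambda*c gives
 *
 *   lambda = \grad f / \grad c = (4 - 2) / 1 = 2 > 0,
 *
 * so the constraint is holding x away from the unconstrained minimum x = 2, which is
 * already feasible; dropping it lowers the cost while staying feasible. Had the
 * objective been 0.5*(x - 6)^2 instead, lambda = -2 < 0 and the constraint must stay
 * active, because the unconstrained minimum x = 6 is infeasible.
 */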

//******************************************************************************
Template JacobianFactor::shared_ptr This::createDualFactor(
    Key key, const InequalityFactorGraph& workingSet,
    const VectorValues& delta) const {
  // Transpose the A matrices of the constrained factors to obtain the Jacobian
  // wrt the dual key
  TermsContainer Aterms = collectDualJacobians<LinearEquality>(
      key, problem_.equalities, equalityVariableIndex_);
  TermsContainer AtermsInequalities = collectDualJacobians<LinearInequality>(
      key, workingSet, inequalityVariableIndex_);
  Aterms.insert(Aterms.end(), AtermsInequalities.begin(),
                AtermsInequalities.end());

  // Collect the gradients of the unconstrained cost factors into the b vector
  if (Aterms.size() > 0) {
    Vector b = problem_.costGradient(key, delta);
    // to compute the least-squares approximation of the dual variables
    return std::make_shared<JacobianFactor>(Aterms, b);
  } else {
    return nullptr;
  }
}

/******************************************************************************/
/* This function creates a dual graph that solves for the Lagrange multipliers
 * of the current working set.
 * The Lagrange multipliers provide a necessary condition for optimality:
 * the factor graph being solved is f' = -lambda * g',
 * where f is the optimized function and g is the function resulting from
 * aggregating the working set.
 * The lambdas give you information about the feasibility of each constraint:
 *   if lambda < 0, the constraint is OK;
 *   if lambda = 0, you are on the constraint;
 *   if lambda > 0, you are violating the constraint.
 */
Template GaussianFactorGraph This::buildDualGraph(
    const InequalityFactorGraph& workingSet, const VectorValues& delta) const {
  GaussianFactorGraph dualGraph;
  for (Key key : constrainedKeys_) {
    // Each constrained key becomes a factor in the dual graph
    auto dualFactor = createDualFactor(key, workingSet, delta);
    if (dualFactor) dualGraph.push_back(dualFactor);
  }
  return dualGraph;
}
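
/* Concretely (restating what createDualFactor assembles above, not adding a new
 * algorithm): for each constrained key x_j, the dual factor is the least-squares
 * encoding of the stationarity condition restricted to that variable,
 *
 *   sum_i A_ij' * lambda_i  ~=  \grad_j f(delta),
 *
 * with one term per equality or active inequality constraint i touching x_j and
 * right-hand side b = costGradient(key, delta). Optimizing the dual graph therefore
 * returns the least-squares estimate of all multipliers, which
 * identifyLeavingConstraint() then inspects for positive entries.
 */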

//******************************************************************************
Template GaussianFactorGraph
This::buildWorkingGraph(const InequalityFactorGraph& workingSet,
                        const VectorValues& xk) const {
  GaussianFactorGraph workingGraph;
  workingGraph.push_back(POLICY::buildCostFunction(problem_, xk));
  workingGraph.push_back(problem_.equalities);
  for (const LinearInequality::shared_ptr& factor : workingSet)
    if (factor->active()) workingGraph.push_back(factor);
  return workingGraph;
}

//******************************************************************************
Template typename This::State This::iterate(
    const typename This::State& state) const {
  // Algorithm 16.3 from Nocedal06book.
  // Solve with the current working set (eqn 16.39), but solve for x, not p
  auto workingGraph = buildWorkingGraph(state.workingSet, state.values);
  VectorValues newValues = workingGraph.optimize();
  // If we CAN'T move further: the original condition is p_k = 0, modified by Duy
  // to check that the state update is (numerically) zero.
  if (newValues.equals(state.values, 1e-7)) {
    // Compute lambda from the dual graph
    auto dualGraph = buildDualGraph(state.workingSet, newValues);
    VectorValues duals = dualGraph.optimize();
    int leavingFactor = identifyLeavingConstraint(state.workingSet, duals);
    // If all inequality constraints are satisfied: we have the solution!!
    if (leavingFactor < 0) {
      return State(newValues, duals, state.workingSet, true,
                   state.iterations + 1);
    } else {
      // Inactivate the leaving constraint
      InequalityFactorGraph newWorkingSet = state.workingSet;
      newWorkingSet.at(leavingFactor)->inactivate();
      return State(newValues, duals, newWorkingSet, false,
                   state.iterations + 1);
    }
  } else {
    // If we CAN make some progress, i.e. p_k != 0:
    // adapt the step size if some inactive constraints complain about this move
    VectorValues p = newValues - state.values;
    const auto [alpha, factorIx] =  // using 16.41
        computeStepSize(state.workingSet, state.values, p, POLICY::maxAlpha);
    // also add to the working set the one that complains the most
    InequalityFactorGraph newWorkingSet = state.workingSet;
    if (factorIx >= 0)
      newWorkingSet.at(factorIx)->activate();
    // step!
    newValues = state.values + alpha * p;
    return State(newValues, state.duals, newWorkingSet, false,
                 state.iterations + 1);
  }
}

//******************************************************************************
Template InequalityFactorGraph This::identifyActiveConstraints(
    const InequalityFactorGraph& inequalities,
    const VectorValues& initialValues, const VectorValues& duals,
    bool useWarmStart) const {
  InequalityFactorGraph workingSet;
  for (const LinearInequality::shared_ptr& factor : inequalities) {
    LinearInequality::shared_ptr workingFactor(new LinearInequality(*factor));
    if (useWarmStart && duals.size() > 0) {
      if (duals.exists(workingFactor->dualKey())) workingFactor->activate();
      else workingFactor->inactivate();
    } else {
      double error = workingFactor->error(initialValues);
      // Safety guard. This should not happen unless users provide a bad init
      if (error > 0) throw InfeasibleInitialValues();
      if (std::abs(error) < 1e-7)
        workingFactor->activate();
      else
        workingFactor->inactivate();
    }
    workingSet.push_back(workingFactor);
  }
  return workingSet;
}

//******************************************************************************
Template std::pair<VectorValues, VectorValues> This::optimize(
    const VectorValues& initialValues, const VectorValues& duals,
    bool useWarmStart) const {
  // Initialize workingSet from the feasible initialValues
  InequalityFactorGraph workingSet = identifyActiveConstraints(
      problem_.inequalities, initialValues, duals, useWarmStart);
  State state(initialValues, duals, workingSet, false, 0);

  while (!state.converged) state = iterate(state);

  return std::make_pair(state.values, state.duals);
}

//******************************************************************************
Template std::pair<VectorValues, VectorValues> This::optimize() const {
  INITSOLVER initSolver(problem_);
  VectorValues initValues = initSolver.solve();
  return optimize(initValues);
}

}  // namespace gtsam

#undef Template
#undef This
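
/* Example usage (a minimal sketch): the concrete solvers are instantiations of this
 * template, e.g. QPSolver = ActiveSetSolver<QP, QPPolicy, QPInitSolver> declared in
 * QPSolver.h. The QP members used below (cost, equalities, inequalities) are assumed
 * from QP.h and are not defined in this file:
 *
 *   QP qp;
 *   // ... fill qp.cost with Gaussian (least-squares) factors,
 *   //     qp.equalities with LinearEquality factors,
 *   //     qp.inequalities with LinearInequality factors ...
 *
 *   QPSolver solver(qp);
 *   VectorValues solution, duals;
 *   // Let INITSOLVER find a feasible starting point, then run the active-set loop:
 *   std::tie(solution, duals) = solver.optimize();
 *   // Or, if a feasible initial estimate is already known:
 *   // std::tie(solution, duals) = solver.optimize(initialValues);
 */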