test_solver_ipopt.cpp
1 /*********************************************************************
2  *
3  * Software License Agreement
4  *
5  * Copyright (c) 2020,
6  * TU Dortmund - Institute of Control Theory and Systems Engineering.
7  * All rights reserved.
8  *
9  * This program is free software: you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation, either version 3 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17  * GNU General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program. If not, see <https://www.gnu.org/licenses/>.
21  *
22  * Authors: Christoph Rösmann
23  *********************************************************************/
24 
25 #include <corbo-core/console.h>
26 
27 #include "gtest/gtest.h"
28 
29 #ifdef IPOPT
30 
31 #include <corbo-optimization/solvers/nlp_solver_ipopt.h>
32 #include <corbo-optimization/simple_optimization_problem.h>
33 
34 #include <corbo-core/macros.h>
35 #include <corbo-core/utilities.h>
36 #include <corbo-core/value_comparison.h>
37 
38 #include <array>
39 #include <functional>
40 
41 using corbo::SolverIpopt;
42 using corbo::SimpleOptimizationProblemWithCallbacks;
43 using corbo::SolverStatus;
44 
45 class TestSolverIpopt : public testing::Test
46 {
47  protected:
48  // You can do set-up work for each test here.
49  TestSolverIpopt() {}
50  // You can do clean-up work that doesn't throw exceptions here.
51  virtual ~TestSolverIpopt() {}
52  // If the constructor and destructor are not enough for setting up
53  // and cleaning up each test, you can define the following methods:
54 
55  // Code here will be called immediately after the constructor (right
56  // before each test).
57  void SetUp() override
58  {
59  // configure solver
60  EXPECT_TRUE(solver.initialize(&optim)); // we need to initialize the solver before setting parameters
61  solver.setIterations(1000);
62  solver.setRelTolerance(1e-8);
63  }
64  // Code here will be called immediately after each test (right
65  // before the destructor).
66  // virtual void TearDown()
67 
68  SimpleOptimizationProblemWithCallbacks optim;
69 
70  SolverIpopt solver;
71 };
72 
73 TEST_F(TestSolverIpopt, solve_unconstr_1)
74 {
75  // parameters
76  optim.resizeParameterVector(1);
77  Eigen::VectorXd x(1);
78  x.setOnes();
79  optim.setX(x);
80 
81  // create objective function
82  // (x-2)^2
83  auto objective = [](const Eigen::VectorXd& x, Eigen::Ref<Eigen::VectorXd> values) { values[0] = x[0] - 2; };
84  optim.setObjectiveFunction(objective, 1, true); // least-squares type
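 // With the least-squares flag, the callback values are interpreted as residuals f_i and the
 // objective is their sum of squares; here f(x) = x - 2, so the expected minimizer is x = 2.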
85 
86  EXPECT_TRUE(solver.initialize(&optim));
87 
88  SolverStatus status = solver.solve(optim, true);
89 
90  bool success = (status == SolverStatus::Converged || status == SolverStatus::EarlyTerminated);
91 
92  EXPECT_TRUE(success);
93  EXPECT_NEAR(optim.getX()[0], 2.0, 1e-3);
94 }
95 
96 TEST_F(TestSolverIpopt, solve_unconstr_2)
97 {
98  // parameters
99  optim.resizeParameterVector(3);
100  Eigen::VectorXd x(3);
101  x.setOnes();
102  optim.setX(x);
103 
104  // create objective function
105  auto objective = [](const Eigen::VectorXd& x, Eigen::Ref<Eigen::VectorXd> values) {
106  values[0] = x[0] - 5;
107  values[1] = x[1] + 3;
108  values[2] = x[2];
109  };
110  optim.setObjectiveFunction(objective, 3, true); // least-squares type
111 
112  EXPECT_TRUE(solver.initialize(&optim));
113 
114  SolverStatus status = solver.solve(optim, true);
115 
116  bool success = (status == SolverStatus::Converged || status == SolverStatus::EarlyTerminated);
117 
118  EXPECT_TRUE(success);
119  EXPECT_NEAR(optim.getX()[0], 5.0, 1e-3);
120  EXPECT_NEAR(optim.getX()[1], -3.0, 1e-3);
121  EXPECT_NEAR(optim.getX()[2], 0.0, 1e-3);
122 }
123 
124 TEST_F(TestSolverIpopt, solve_rosenbrock_unconstr)
125 {
126  // parameters
127  optim.resizeParameterVector(2);
128  Eigen::VectorXd x(2);
129  x.setOnes();
130  optim.setX(x);
131 
132  // create objective function
133  // Create objective for minimizing 0.5 * (100*(x2-x1^2)^2 + (1-x1)^2) => f1^2 + f2^2
134  auto objective = [](const Eigen::VectorXd& x, Eigen::Ref<Eigen::VectorXd> values) {
135  values[0] = std::sqrt(100) * (x[1] - x[0] * x[0]);
136  values[1] = 1 - x[0];
137  };
138  optim.setObjectiveFunction(objective, 2, true); // least-squares type
139 
140  EXPECT_TRUE(solver.initialize(&optim));
141 
142  // now solve
143  SolverStatus status = solver.solve(optim, true);
144 
145  bool success = (status == SolverStatus::Converged || status == SolverStatus::EarlyTerminated);
146 
147  EXPECT_TRUE(success);
148  EXPECT_NEAR(optim.getX()[0], 1.0, 1e-3);
149  EXPECT_NEAR(optim.getX()[1], 1.0, 1e-3);
150 }
151 
152 TEST_F(TestSolverIpopt, solve_eqconstr_1)
153 {
154  // parameters
155  optim.resizeParameterVector(1);
156  Eigen::VectorXd x(1);
157  x.setOnes();
158  optim.setX(x);
159 
160  // create objective function
161  auto objective = [](const Eigen::VectorXd& x, Eigen::Ref<Eigen::VectorXd> values) { values[0] = x[0] - 2; };
162  optim.setObjectiveFunction(objective, 1, true); // least-squares type
163 
164  // create equality constraint function
165  auto equality = [](const Eigen::VectorXd& x, Eigen::Ref<Eigen::VectorXd> values) { values[0] = x[0] - 3; };
166  optim.setEqualityConstraint(equality, 1);
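 // The equality constraint x - 3 = 0 is feasible only at x = 3, so the solution is fixed
 // at x = 3 regardless of the objective.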
167 
168  EXPECT_TRUE(solver.initialize(&optim));
169 
170  SolverStatus status = solver.solve(optim, true);
171 
172  bool success = (status == SolverStatus::Converged || status == SolverStatus::EarlyTerminated);
173 
174  EXPECT_TRUE(success);
175  EXPECT_NEAR(optim.getX()[0], 3.0, 1e-3);
176 }
177 
178 TEST_F(TestSolverIpopt, solve_ineqconstr_1)
179 {
180  // parameters
181  optim.resizeParameterVector(1);
182  Eigen::VectorXd x(1);
183  x.setOnes();
184  optim.setX(x);
185 
186  // create objective function
187  auto objective = [](const Eigen::VectorXd& x, Eigen::Ref<Eigen::VectorXd> values) { values[0] = x[0] - 2; }; // min -> x = 2
188  optim.setObjectiveFunction(objective, 1, true); // least-squares type
189 
190  // create inequality constraint function
191  auto inequality = [](const Eigen::VectorXd& x, Eigen::Ref<Eigen::VectorXd> values) { values[0] = -x[0] + 3; }; // x >= 3
192  optim.setInequalityConstraint(inequality, 1);
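 // The unconstrained minimizer x = 2 violates x >= 3, so the inequality is active at the
 // optimum and the expected solution is x = 3.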
193 
194  EXPECT_TRUE(solver.initialize(&optim));
195 
196  SolverStatus status = solver.solve(optim, true);
197 
198  bool success = (status == SolverStatus::Converged || status == SolverStatus::EarlyTerminated);
199 
200  EXPECT_TRUE(success);
201  EXPECT_NEAR(optim.getX()[0], 3.0, 1e-3);
202 }
203 
204 TEST_F(TestSolverIpopt, solve_lower_bounds)
205 {
206  // parameters
207  optim.resizeParameterVector(1);
208  Eigen::VectorXd x(1);
209  x.setOnes();
210  optim.setX(x);
211 
212  // set bounds
213  Eigen::VectorXd lb(1);
214  lb[0] = 5;
215  optim.setLowerBounds(lb);
216 
217  // create objective function
218  auto objective = [](const Eigen::VectorXd& x, Eigen::Ref<Eigen::VectorXd> values) { values[0] = x[0] - 2; }; // min -> x = 2
219  optim.setObjectiveFunction(objective, 1, true); // least-squares type
220 
221  EXPECT_TRUE(solver.initialize(&optim));
222 
223  SolverStatus status = solver.solve(optim, true);
224 
225  bool success = (status == SolverStatus::Converged || status == SolverStatus::EarlyTerminated);
226 
227  EXPECT_TRUE(success);
228  EXPECT_NEAR(optim.getX()[0], 5.0, 1e-3);
229 }
230 
231 TEST_F(TestSolverIpopt, solve_upper_bounds)
232 {
233  // parameters
234  optim.resizeParameterVector(1);
235  Eigen::VectorXd x(1);
236  x.setOnes();
237  optim.setX(x);
238 
239  // set bounds
240  Eigen::VectorXd ub(1);
241  ub[0] = -1;
242  optim.setUpperBounds(ub);
243 
244  // create objective function
245  auto objective = [](const Eigen::VectorXd& x, Eigen::Ref<Eigen::VectorXd> values) { values[0] = x[0] - 2; }; // min -> x = 2
246  optim.setObjectiveFunction(objective, 1, true); // least-squares type
247 
248  EXPECT_TRUE(solver.initialize(&optim));
249 
250  SolverStatus status = solver.solve(optim, true);
251 
252  bool success = (status == SolverStatus::Converged || status == SolverStatus::EarlyTerminated);
253 
254  EXPECT_TRUE(success);
255  EXPECT_NEAR(optim.getX()[0], -1.0, 1e-3);
256 }
257 
258 TEST_F(TestSolverIpopt, solve_betts_fun_constr)
259 {
260  // parameters
261  optim.resizeParameterVector(2);
262 
263  // create bounds on x1 and x2
264  optim.setLowerBound(0, 2);
265  optim.setUpperBound(0, 50);
266  optim.setLowerBound(1, -50);
267  optim.setUpperBound(1, 50);
268 
269  // Problem definition
270  // min 0.01 * x1^2 + x2^2 - 100
271  // s.t. 2<=x1<=50,
272  // -50<=x2<=50,
273  // 10*x1-x2>=10
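 // Expected optimum: x1 is pushed to its lower bound 2, x2^2 is minimized at x2 = 0, and the
 // inequality holds there (10*2 - 0 = 20 >= 10), i.e. x* = (2, 0).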
274 
275  // Create objective for minimizing
276  // 0.01 * x1^2 + x2^2 - 100 => f1^2 + f2^2 (the constant -100 does not affect the minimizer)
277  auto objective = [](const Eigen::VectorXd& x, Eigen::Ref<Eigen::VectorXd> values) {
278  values[0] = std::sqrt(0.01) * x[0];
279  values[1] = x[1];
280  };
281  optim.setObjectiveFunction(objective, 2, true); // least-squares type
282 
283  // Create inequality for satisfying
284  // 10*x1-x2>=10 -> 10*x1-x2-10>=0 -> x2-10*x1+10<=0
285  auto inequality = [](const Eigen::VectorXd& x, Eigen::Ref<Eigen::VectorXd> values) {
286  values[0] = x[1] - 10.0 * x[0] + 10.0; // c(x)<=0 convention
287  };
288  optim.setInequalityConstraint(inequality, 1);
289 
290  // feasible start
291  optim.setParameterValue(0, 5);
292  optim.setParameterValue(1, -5);
293 
294  EXPECT_TRUE(solver.initialize(&optim));
295 
296  // now solve
297  SolverStatus status = solver.solve(optim, true);
298 
299  bool success = (status == SolverStatus::Converged || status == SolverStatus::EarlyTerminated);
300 
301  EXPECT_TRUE(success);
302  EXPECT_NEAR(optim.getX()[0], 2.0, 1e-2);
303  EXPECT_NEAR(optim.getX()[1], 0.0, 1e-2);
304 
305  // infeasible start
306  optim.setParameterValue(0, -1);
307  optim.setParameterValue(1, -1);
308 
309  solver.setIterations(5000);
310  EXPECT_TRUE(solver.initialize(&optim));
311 
312  // now solve
313  status = solver.solve(optim, true);
314 
315  success = (status == SolverStatus::Converged || status == SolverStatus::EarlyTerminated);
316 
317  EXPECT_TRUE(success);
318  EXPECT_NEAR(optim.getX()[0], 2.0, 1e-2);
319  EXPECT_NEAR(optim.getX()[1], 0.0, 1e-2);
320 }
321 
322 TEST_F(TestSolverIpopt, solve_betts_fun_constr_weight_adapt)
323 {
324  // parameters
325  optim.resizeParameterVector(2);
326 
327  // create bounds on x1 and x2
328  optim.setLowerBound(0, 2);
329  optim.setUpperBound(0, 50);
330  optim.setLowerBound(1, -50);
331  optim.setUpperBound(1, 50);
332 
333  // Problem definition
334  // min 0.01 * x1^2 + x2^2 - 100
335  // s.t. 2<=x1<=50,
336  // -50<=x2<=50,
337  // 10*x1-x2>=10
338 
339  // Create objective for minimizing
340  // 0.01 * x1^2 + x2^2 - 100 => f1^2 + f2^2 (the constant -100 does not affect the minimizer)
341  auto objective = [](const Eigen::VectorXd& x, Eigen::Ref<Eigen::VectorXd> values) {
342  values[0] = std::sqrt(0.01) * x[0];
343  values[1] = x[1];
344  };
345  optim.setObjectiveFunction(objective, 2, true); // least-squares type
346 
347  // Create inequality for satisfying
348  // 10*x1-x2>=10 -> 10*x1-x2-10>=0 -> x2-10*x1+10<=0
349  auto inequality = [](const Eigen::VectorXd& x, Eigen::Ref<Eigen::VectorXd> values) {
350  values[0] = x[1] - 10.0 * x[0] + 10.0; // c(x)<=0 convention
351  };
352  optim.setInequalityConstraint(inequality, 1);
353 
354  // feasible start
355  optim.setParameterValue(0, 5);
356  optim.setParameterValue(1, -5);
357 
358  EXPECT_TRUE(solver.initialize(&optim));
359 
360  // now solve
361  SolverStatus status;
362  for (int i = 0; i < 5; ++i)
363  {
364  status = solver.solve(optim, true, i == 0);
365  }
366  bool success = (status == SolverStatus::Converged || status == SolverStatus::EarlyTerminated);
367  EXPECT_TRUE(success);
368 
369  EXPECT_NEAR(optim.getX()[0], 2.0, 1e-2);
370  EXPECT_NEAR(optim.getX()[1], 0.0, 1e-2);
371 
372  // infeasible start
373  optim.setParameterValue(0, -1);
374  optim.setParameterValue(1, -1);
375 
376  // now solve
377  for (int i = 0; i < 5; ++i)
378  {
379  status = solver.solve(optim, true, i == 0);
380  }
381  success = (status == SolverStatus::Converged || status == SolverStatus::EarlyTerminated);
382  EXPECT_TRUE(success);
383 
384  EXPECT_NEAR(optim.getX()[0], 2.0, 1e-2);
385  EXPECT_NEAR(optim.getX()[1], 0.0, 1e-2);
386 }
387 
388 #else // IPOPT
389 
390 class TestSolverIpopt : public testing::Test
391 {
392  protected:
393  // You can do set-up work for each test here.
394  TestSolverIpopt() {}
395  // You can do clean-up work that doesn't throw exceptions here.
396  virtual ~TestSolverIpopt() {}
397 };
398 
399 TEST_F(TestSolverIpopt, ipopt_not_found) { PRINT_WARNING("Skipping IPOPT tests, since IPOPT is not found."); }
400 
401 #endif // IPOPT