#include <cmath>
#include <iostream>
#include <algorithm>

#include <Eigen/Dense>

#include "isam/Optimizer.h"
#include "isam/OptimizationInterface.h"

using namespace std;
using namespace Eigen;

namespace isam {

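// Reorders the entries of v into p according to the supplied permutation,
// i.e. p(permutation[i]) = v(i).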
void Optimizer::permute_vector(const VectorXd& v, VectorXd& p,
    const int* permutation) {
  for (int i = 0; i < v.size(); i++) {
    p(permutation[i]) = v(i);
  }
}

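// Computes the Gauss-Newton step by factorizing the (optionally
// lambda-damped) Jacobian with the current Cholesky solver. If R is non-NULL,
// the resulting factor is copied into it. The step is returned in the
// original (unpermuted) variable ordering.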
VectorXd Optimizer::compute_gauss_newton_step(const SparseSystem& jacobian,
    SparseSystem* R, double lambda) {
  VectorXd delta_ordered;
  _cholesky->factorize(jacobian, &delta_ordered, lambda);
  if (R != NULL) {
    _cholesky->get_R(*R);
  }

  // Undo the fill-reducing variable ordering used during factorization.
  int nrows = delta_ordered.size();
  VectorXd delta(nrows);
  permute_vector(delta_ordered, delta, _cholesky->get_order());

  return delta;
}

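// Computes the Powell's dog-leg step from the steepest-descent step h_sd
// (scaled by alpha) and the Gauss-Newton step h_gn, subject to the trust
// region radius delta. The reduction predicted by the linearized model is
// returned through gain_ratio_denominator; it is later used as the
// denominator of the gain ratio rho.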
VectorXd Optimizer::compute_dog_leg(double alpha, const VectorXd& h_sd,
    const VectorXd& h_gn, double delta, double& gain_ratio_denominator) {
  // Case 1: the Gauss-Newton step lies inside the trust region; take it.
  if (h_gn.norm() <= delta) {
    gain_ratio_denominator = current_SSE_at_linpoint;
    return h_gn;
  }

  double h_sd_norm = h_sd.norm();

  // Case 2: even the scaled steepest-descent step leaves the trust region;
  // truncate it to the boundary.
  if ((alpha * h_sd_norm) >= delta) {
    gain_ratio_denominator = delta * (2 * alpha * h_sd_norm - delta)
        / (2 * alpha);
    return (delta / h_sd_norm) * h_sd;
  } else {
    // Case 3: blend the two steps; find beta such that
    // || alpha*h_sd + beta*(h_gn - alpha*h_sd) || = delta,
    // choosing the numerically stable root of the quadratic.
    VectorXd a = alpha * h_sd;
    VectorXd b = h_gn;
    double c = a.dot(b - a);
    double b_a_norm2 = (b - a).squaredNorm();
    double a_norm2 = a.squaredNorm();
    double delta2 = delta * delta;
    double sqrt_term = sqrt(c * c + b_a_norm2 * (delta2 - a_norm2));
    double beta;
    if (c <= 0) {
      beta = (-c + sqrt_term) / b_a_norm2;
    } else {
      beta = (delta2 - a_norm2) / (c + sqrt_term);
    }

    // Predicted reduction of the linearized model for the blended step.
    gain_ratio_denominator = .5 * alpha * (1 - beta) * (1 - beta) * h_sd_norm
        * h_sd_norm + beta * (2 - beta) * current_SSE_at_linpoint;
    return (alpha * h_sd + beta * (h_gn - alpha * h_sd));
  }
}

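// Standard trust region update: shrink the radius when the gain ratio rho is
// poor (< 0.25), enlarge it when the step performed well (> 0.75).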
void Optimizer::update_trust_radius(double rho, double hdl_norm) {
  if (rho < .25) {
    Delta /= 2.0;
  }
  if (rho > .75) {
    Delta = max(Delta, 3 * hdl_norm);
  }
}

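// Relinearizes the system about the current estimate. For Gauss-Newton the
// new step is applied directly; for Powell's dog leg the gradient and the
// current sum of squared errors are cached, and dog-leg steps are tried with
// a shrinking trust region until one is accepted (rho >= 0).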
void Optimizer::relinearize(const Properties& prop) {
  // Relinearize about the current estimate.
  function_system.estimate_to_linpoint();

  // Compute the Jacobian about the new linearization point.
  SparseSystem jac = function_system.jacobian();

  // Compute the Gauss-Newton step and store the updated factor R.
  VectorXd h_gn = compute_gauss_newton_step(jac, &function_system._R);

  if (prop.method == DOG_LEG) {
    // Gradient-direction vector J^T * rhs, used for the steepest-descent
    // part of the dog-leg step.
    gradient = mul_SparseMatrixTrans_Vector(jac, jac.rhs());

    // Cache the sum of squared errors at the linearization point; this is
    // the squared norm of the right-hand side of the Jacobian system.
    current_SSE_at_linpoint = jac.rhs().squaredNorm();

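    // Scaling of the steepest-descent direction: alpha = ||g||^2 / ||J*g||^2
    // minimizes the linearized objective along that direction. A zero
    // denominator means the gradient is (numerically) zero, in which case no
    // step is taken.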
    double alpha_denominator = (jac * gradient).squaredNorm();

    if (alpha_denominator > 0) {
      double alpha_numerator = gradient.squaredNorm();
      double alpha = alpha_numerator / alpha_denominator;

      // Objective value at the linearization point and after the trial step.
      double F_0, F_h;

      F_0 = current_SSE_at_linpoint;

      double rho_denominator, rho;

      VectorXd h_dl;

      do {
        // Compute the dog-leg step for the current trust region radius Delta.
        h_dl = compute_dog_leg(alpha, -gradient, -h_gn, Delta, rho_denominator);

        // Apply the trial step to the estimate.
        function_system.apply_exmap(-h_dl);

        // Evaluate the objective at the new estimate.
        F_h = function_system.weighted_errors(ESTIMATE).squaredNorm();

        // Gain ratio: actual reduction over the reduction predicted by the
        // linearized model.
        rho = (F_0 - F_h) / (rho_denominator);

        update_trust_radius(rho, h_dl.norm());
      } while (rho < 0);

      // Remember the last accepted dog-leg step.
      last_accepted_hdl = h_dl;
    } else {
      function_system.linpoint_to_estimate();
      last_accepted_hdl = VectorXd::Zero(gradient.size());
    }

  } else {
    // Gauss-Newton: apply the full step.
    function_system.apply_exmap(h_gn);
  }
}

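// Recomputes the Jacobian, the weighted residual f_x and the gradient at the
// current linearization point, and reports whether either PDL stopping
// criterion (small residual or small gradient, in the infinity norm) is met.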
bool Optimizer::powells_dog_leg_update(double epsilon1, double epsilon3,
    SparseSystem& jacobian, VectorXd& f_x, VectorXd& grad) {
  jacobian = function_system.jacobian();
  f_x = function_system.weighted_errors(LINPOINT);
  grad = mul_SparseMatrixTrans_Vector(jacobian, f_x);
  return (f_x.lpNorm<Eigen::Infinity>() <= epsilon3)
      || (grad.lpNorm<Eigen::Infinity>() <= epsilon1);
}

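// Incrementally incorporates the new measurement rows W into the factor R via
// Givens rotations. For Powell's dog leg, the cached sum of squared errors
// and the gradient are first extended to account for the new rows and any
// newly introduced variables.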
void Optimizer::augment_sparse_linear_system(SparseSystem& W,
    const Properties& prop) {
  if (prop.method == DOG_LEG) {
    // Residual of the new measurements at the linearization point.
    const VectorXd& f_new = W.rhs();

    // The new measurements add their squared residual to the cached SSE.
    current_SSE_at_linpoint += f_new.squaredNorm();

    // Extended gradient, covering any newly introduced variables.
    VectorXd g_new(W.num_cols());

    // Contribution of the new rows: W^T * f_new.
    VectorXd increment = mul_SparseMatrixTrans_Vector(W, f_new);

    // Old variables: previous gradient plus the new contribution;
    // new variables: the new contribution only.
    g_new.head(gradient.size()) = gradient + increment.head(gradient.size());
    g_new.tail(W.num_cols() - gradient.size()) = increment.tail(
        W.num_cols() - gradient.size());

    gradient = g_new;
  }

  // Add the new rows to the factor R one at a time using Givens rotations.
  for (int i = 0; i < W.num_rows(); i++) {
    SparseVector new_row = W.get_row(i);
    function_system._R.add_row_givens(new_row, W.rhs()(i));
  }
}

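// Recovers a step from the incrementally updated factor R and applies it to
// the estimate. For Gauss-Newton the step is applied directly; for Powell's
// dog leg a trust-region step is computed, and a rejected step is rolled back
// to the last accepted one.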
void Optimizer::update_estimate(const Properties& prop) {
  // Back-substitute on R to obtain the Gauss-Newton step in R's ordering.
  VectorXd h_gn_reordered = function_system._R.solve();

  // Undo the variable ordering of R.
  VectorXd h_gn(h_gn_reordered.size());
  permute_vector(h_gn_reordered, h_gn, function_system._R.r_to_a());

  if (prop.method == GAUSS_NEWTON) {
    function_system.apply_exmap(h_gn);
  } else {
    // Powell's dog leg: the gradient is stored in the original variable
    // ordering, but R uses its own ordering, so permute before multiplying.
    VectorXd reordered_gradient(gradient.size());

    permute_vector(gradient, reordered_gradient, function_system._R.a_to_r());

    double alpha_denominator =
        (function_system._R * reordered_gradient).squaredNorm();

    if (alpha_denominator > 0) {
      double alpha = (gradient.squaredNorm()) / alpha_denominator;

      double rho_denominator;

      VectorXd h_dl = compute_dog_leg(alpha, -gradient, -h_gn, Delta,
          rho_denominator);
      function_system.apply_exmap(-h_dl);

      // Gain ratio: actual reduction of the cached SSE over the reduction
      // predicted by the linearized model.
      double rho = (current_SSE_at_linpoint
          - function_system.weighted_errors(ESTIMATE).squaredNorm())
          / rho_denominator;

      update_trust_radius(rho, h_dl.norm());

      if (rho < 0) {
        // The step increased the error: roll back to the last accepted
        // dog-leg step, zero-padded for any variables added since then.
        VectorXd restore_step(gradient.size());
        restore_step.head(last_accepted_hdl.size()) = last_accepted_hdl;
        restore_step.tail(gradient.size() - last_accepted_hdl.size()).setZero();

        function_system.apply_exmap(-restore_step);
      } else {
        // Accept the step and remember it.
        last_accepted_hdl = h_dl;
      }
    }
  }
}

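// Batch Gauss-Newton: relinearize, solve, and apply full steps until the
// configured stopping criteria are met. With USE_PDL_STOPPING_CRITERIA the
// PDL residual/gradient thresholds are used instead of the absolute and
// relative error changes.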
void Optimizer::gauss_newton(const Properties& prop, int* num_iterations) {
  int num_iter = 0;

  // Batch optimization: relinearize about the current estimate.
  function_system.estimate_to_linpoint();

  SparseSystem jacobian = function_system.jacobian();

  // Weighted residual at the linearization point.
  VectorXd r = function_system.weighted_errors(LINPOINT);

#ifdef USE_PDL_STOPPING_CRITERIA
  // Gradient, used for the PDL-style stopping criteria.
  VectorXd g = mul_SparseMatrixTrans_Vector(jacobian, r);
#else
  double error = r.squaredNorm();
  double error_new;

  // Initialize the error difference so that the first loop test passes.
  double error_diff = prop.epsilon_rel * error + 1;
#endif

  // Compute the first Gauss-Newton step.
  VectorXd delta = compute_gauss_newton_step(jacobian);

  while (
      // Stop once the maximum number of iterations is reached (0 = no limit)
      // or the step becomes too small.
      ((prop.max_iterations <= 0) || (num_iter < prop.max_iterations))
      && (delta.norm() > prop.epsilon2)
#ifdef USE_PDL_STOPPING_CRITERIA
      && (r.lpNorm<Eigen::Infinity>() > prop.epsilon3)
      && (g.lpNorm<Eigen::Infinity>() > prop.epsilon1)
#else
      && (error > prop.epsilon_abs)
      && (fabs(error_diff) > prop.epsilon_rel * error)
#endif
      )
  {
    num_iter++;

    // Apply the step and relinearize about the new estimate.
    function_system.apply_exmap(delta);
    function_system.estimate_to_linpoint();
    jacobian = function_system.jacobian();
    r = function_system.weighted_errors(LINPOINT);

#ifdef USE_PDL_STOPPING_CRITERIA
    g = mul_SparseMatrixTrans_Vector(jacobian, r);
#else
    // Track the change in error for the relative stopping criterion.
    error_new = r.squaredNorm();
    error_diff = error - error_new;
    error = error_new;
#endif

    // Compute the next step.
    delta = compute_gauss_newton_step(jacobian);

    if (!prop.quiet) {
      cout << "Iteration " << num_iter << ": residual ";
#ifdef USE_PDL_STOPPING_CRITERIA
      cout << r.squaredNorm();
#else
      cout << error;
#endif
      cout << endl;
    }
  }

  if (num_iterations != NULL) {
    *num_iterations = num_iter;
  }

  // Store the final factor for later incremental updates.
  _cholesky->get_R(function_system._R);
}

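// Batch Levenberg-Marquardt: like Gauss-Newton, but each step is damped by
// lambda; lambda is decreased after accepted steps and increased after
// rejected ones.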
void Optimizer::levenberg_marquardt(const Properties& prop,
    int* num_iterations) {
  int num_iter = 0;
  double lambda = prop.lm_lambda0;

  function_system.estimate_to_linpoint();

  // Jacobian and weighted residual at the current linearization point.
  SparseSystem jacobian = function_system.jacobian();
  VectorXd r = function_system.weighted_errors(LINPOINT);

  double error = r.squaredNorm();

#ifdef USE_PDL_STOPPING_CRITERIA
  VectorXd g = mul_SparseMatrixTrans_Vector(jacobian, r);
#endif

  double error_diff, error_new;

  // Compute the first damped step.
  VectorXd delta = compute_gauss_newton_step(jacobian, &function_system._R,
      lambda);

  while (
      // Stop once the maximum number of iterations is reached (0 = no limit)
      // or the step becomes too small.
      ((prop.max_iterations <= 0) || (num_iter < prop.max_iterations))
      && (delta.norm() > prop.epsilon2)
#ifdef USE_PDL_STOPPING_CRITERIA
      && (r.lpNorm<Eigen::Infinity>() > prop.epsilon3)
      && (g.lpNorm<Eigen::Infinity>() > prop.epsilon1)
#else
      && (error > prop.epsilon_abs)
#endif
      )
  {
    num_iter++;

    // Evaluate the trial step without committing to it: save the current
    // linearization point in the estimate, then move the linearization point.
    function_system.linpoint_to_estimate();
    function_system.self_exmap(delta);
    error_new = function_system.weighted_errors(LINPOINT).squaredNorm();
    error_diff = error - error_new;

    if (!prop.quiet) {
      cout << "LM Iteration " << num_iter << ": (lambda=" << lambda << ") ";
      if (error_diff > 0.) {
        cout << "residual: " << error_new << endl;
      } else {
        cout << "rejected" << endl;
      }
    }

    // Accept the step if it reduced the error.
    if (error_diff > 0.) {
#ifndef USE_PDL_STOPPING_CRITERIA
      if (error_diff < prop.epsilon_rel * error) {
        break;
      }
#endif

      // Less damping in the next iteration.
      lambda /= prop.lm_lambda_factor;

      error = error_new;

      // Relinearize about the accepted point.
      jacobian = function_system.jacobian();

#ifdef USE_PDL_STOPPING_CRITERIA
      r = function_system.weighted_errors(LINPOINT);
      g = mul_SparseMatrixTrans_Vector(jacobian, r);
#endif
    } else {
      // Reject the step: more damping, and restore the previous
      // linearization point saved in the estimate.
      lambda *= prop.lm_lambda_factor;
      function_system.estimate_to_linpoint();
    }

    // Compute the next damped step.
    delta = compute_gauss_newton_step(jacobian, &function_system._R, lambda);
  }

  if (num_iterations != NULL) {
    *num_iterations = num_iter;
  }

  // Copy the final linearization point back into the estimate.
  function_system.linpoint_to_estimate();
}

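// Batch Powell's dog leg: blends steepest-descent and Gauss-Newton steps
// inside a trust region of radius delta, accepting a step only when it
// reduces the weighted squared error.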
void Optimizer::powells_dog_leg(int* num_iterations, double delta0,
    int max_iterations, double epsilon1, double epsilon2, double epsilon3) {
  int num_iter = 0;

  function_system.estimate_to_linpoint();

  double delta = delta0;
  SparseSystem jacobian(1, 1);
  VectorXd f_x;
  VectorXd grad;

  // Initialize the Jacobian, residual and gradient, and check whether the
  // stopping criteria are already satisfied.
  bool found = powells_dog_leg_update(epsilon1, epsilon3, jacobian, f_x, grad);

  double rho_denominator;

  while ((not found) && (max_iterations == 0 || num_iter < max_iterations)) {
    num_iter++;
    cout << "PDL Iteration " << num_iter << " residual: " << f_x.squaredNorm()
        << endl;

    // Steepest-descent scaling and direction.
    double alpha = grad.squaredNorm() / (jacobian * grad).squaredNorm();
    VectorXd h_sd = -grad;

    // Gauss-Newton step.
    VectorXd h_gn = compute_gauss_newton_step(jacobian, &function_system._R);

    // Back up the current linearization point in the estimate before trying
    // the step.
    function_system.linpoint_to_estimate();
    VectorXd h_dl = compute_dog_leg(alpha, h_sd, h_gn, delta, rho_denominator);

    if (h_dl.norm() <= epsilon2) {
      found = true;
    } else {
      // Apply the dog-leg step to the linearization point and evaluate the
      // gain ratio.
      function_system.self_exmap(h_dl);

      VectorXd f_x_new = function_system.weighted_errors(LINPOINT);
      double rho = (f_x.squaredNorm() - f_x_new.squaredNorm())
          / (rho_denominator);
      if (rho > 0) {
        // Improvement: keep the step and relinearize.
        cout << "accepted" << endl;
        f_x = f_x_new;
        found = powells_dog_leg_update(epsilon1, epsilon3, jacobian, f_x, grad);
      } else {
        // No improvement: restore the previous linearization point.
        cout << "rejected" << endl;
        function_system.estimate_to_linpoint();
      }
      // Update the trust region radius.
      if (rho > 0.75) {
        delta = max(delta, 3.0 * h_dl.norm());
      } else if (rho < 0.25) {
        delta *= 0.5;
        found = (delta <= epsilon2);
      }
    }
  }
  if (num_iterations) {
    *num_iterations = num_iter;
  }

  // The solution is held in the linearization point; swap it into the
  // estimate.
  function_system.swap_estimates();
}

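// Dispatches a batch optimization to the solver selected in prop.method.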
void Optimizer::batch_optimize(const Properties& prop, int* num_iterations) {
  // Initial trust region radius for Powell's dog leg.
  const double delta0 = 1.0;

  switch (prop.method) {
  case GAUSS_NEWTON:
    gauss_newton(prop, num_iterations);
    break;
  case DOG_LEG:
    powells_dog_leg(num_iterations, delta0, prop.max_iterations, prop.epsilon1,
        prop.epsilon2, prop.epsilon3);
    break;
  case LEVENBERG_MARQUARDT:
    levenberg_marquardt(prop, num_iterations);
    break;
  }
}

} // namespace isam