Optimizer.cpp
00001 
00029 #include <Eigen/Dense>
00030 
00031 #include "isam/Optimizer.h"
00032 #include "isam/OptimizationInterface.h"
00033 
00034 using namespace std;
00035 using namespace Eigen;
00036 
00037 namespace isam {
00038 
00039 /* Use Powell's Dog-Leg stopping criteria for all of the batch algorithms? */
00040 // #define USE_PDL_STOPPING_CRITERIA
00041 
00042 void Optimizer::permute_vector(const VectorXd& v, VectorXd& p,
00043     const int* permutation) {
00044   for (int i = 0; i < v.size(); i++) {
00045     p(permutation[i]) = v(i);
00046   }
00047 }
00048 
00049 VectorXd Optimizer::compute_gauss_newton_step(const SparseSystem& jacobian,
00050     SparseSystem* R, double lambda) {
00051   VectorXd delta_ordered;
00052   _cholesky->factorize(jacobian, &delta_ordered, lambda);
00053   if (R != NULL) {
00054     _cholesky->get_R(*R);
00055   }
00056 
00057   // delta has new ordering, need to return result with default ordering
00058   int nrows = delta_ordered.size();
00059   VectorXd delta(nrows);
00060   permute_vector(delta_ordered, delta, _cholesky->get_order());
00061 
00062   return delta;
00063 }
00064 
00065 VectorXd Optimizer::compute_dog_leg(double alpha, const VectorXd& h_sd,
00066     const VectorXd& h_gn, double delta, double& gain_ratio_denominator) {
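  // The dog-leg step blends the Gauss-Newton step h_gn with the steepest-
  // descent step h_sd, constrained to a trust region of radius delta:
  //   1. if h_gn already lies inside the trust region, take it;
  //   2. else, if even the Cauchy point alpha*h_sd lies outside, take the
  //      steepest-descent direction truncated to the boundary;
  //   3. else, walk from alpha*h_sd toward h_gn until the boundary is hit.
  // In every case gain_ratio_denominator is set to the reduction predicted
  // by the linearized model for the chosen step, i.e. the denominator of
  // the gain ratio rho used by the callers.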
00067   if (h_gn.norm() <= delta) {
00068     gain_ratio_denominator = current_SSE_at_linpoint;
00069     return h_gn;
00070   }
00071 
00072   double h_sd_norm = h_sd.norm();
00073 
00074   if ((alpha * h_sd_norm) >= delta) {
00075     gain_ratio_denominator = delta * (2 * alpha * h_sd_norm - delta)
00076         / (2 * alpha);
00077     return (delta / h_sd_norm) * h_sd;
00078   } else {
00079     // complicated case: calculate intersection of trust region with
00080     // line between Gauss-Newton and steepest descent solutions
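    // We need beta in (0,1) such that ||a + beta*(b - a)|| = delta, with
    // a = alpha*h_sd and b = h_gn.  Squaring gives the quadratic
    //   ||b - a||^2 * beta^2 + 2*c*beta + (||a||^2 - delta^2) = 0,
    // where c = a.(b - a).  The two branches below pick the numerically
    // stable form of the positive root (avoiding cancellation when c > 0).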
00081     VectorXd a = alpha * h_sd;
00082     VectorXd b = h_gn;
00083     double c = a.dot(b - a);
00084     double b_a_norm2 = (b - a).squaredNorm();
00085     double a_norm2 = a.squaredNorm();
00086     double delta2 = delta * delta;
00087     double sqrt_term = sqrt(c * c + b_a_norm2 * (delta2 - a_norm2));
00088     double beta;
00089     if (c <= 0) {
00090       beta = (-c + sqrt_term) / b_a_norm2;
00091     } else {
00092       beta = (delta2 - a_norm2) / (c + sqrt_term);
00093     }
00094 
00095     gain_ratio_denominator = .5 * alpha * (1 - beta) * (1 - beta) * h_sd_norm
00096         * h_sd_norm + beta * (2 - beta) * current_SSE_at_linpoint;
00097     return (alpha * h_sd + beta * (h_gn - alpha * h_sd));
00098   }
00099 }
00100 
00101 void Optimizer::update_trust_radius(double rho, double hdl_norm) {
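  // Standard trust-region heuristic: if the gain ratio shows that the linear
  // model predicted poorly (rho < 1/4), halve the radius; if it predicted
  // well (rho > 3/4), allow the radius to grow to at least 3x the length of
  // the step just taken.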
00102   if (rho < .25) {
00103     Delta /= 2.0;
00104   }
00105   if (rho > .75) {
00106     Delta = max(Delta, 3 * hdl_norm);
00107   }
00108 }
00109 
00110 void Optimizer::relinearize(const Properties& prop) {
00111   // We're going to relinearize about the current estimate.
00112   function_system.estimate_to_linpoint();
00113 
00114   // prepare factorization
00115   SparseSystem jac = function_system.jacobian();
00116 
00117   // factorization and new rhs based on new linearization point will be in _R
00118   VectorXd h_gn = compute_gauss_newton_step(jac, &function_system._R); // modifies _R
00119 
00120   if (prop.method == DOG_LEG) {
00121     // Compute the gradient and cache it.
00122     gradient = mul_SparseMatrixTrans_Vector(jac, jac.rhs());
00123 
00125     // Get the value of the sum-of-squared errors at the current linearization point.
00125     current_SSE_at_linpoint = jac.rhs().squaredNorm();
00126 
00127     // NB: alpha's denominator will be zero iff the gradient vector is zero
00128     // (since J is full-rank by hypothesis).  But the gradient is zero iff
00129     // we're already at the minimum, so in that case no update is actually
00130     // needed; we just set the estimate to be the current linearization
00131     // point.
00132 
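    // alpha = ||g||^2 / ||J*g||^2 is the step length that minimizes the
    // linearized objective along the steepest-descent direction, so
    // alpha*h_sd is the Cauchy point used by compute_dog_leg().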
00133     double alpha_denominator = (jac * gradient).squaredNorm();
00134 
00135     if (alpha_denominator > 0) {
00136       double alpha_numerator = gradient.squaredNorm();
00137       double alpha = alpha_numerator / alpha_denominator;
00138 
00139       // These values are used to compute the gain ratio and to update
00140       // the trust-region radius Delta.
00140       double F_0, F_h;
00141 
00142       F_0 = current_SSE_at_linpoint;
00143 
00144       double rho_denominator, rho;
00145 
00146       VectorXd h_dl;
00147 
00148       do {
00149         // We repeat the computation of the dog-leg step, shrinking the
00150         // trust-region radius if necessary, until we generate a sufficiently
00151         // small region of trust that we accept the proposed step.
00152 
00153         // Compute dog-leg step.
00154         // NOTE: Here we use -h_gn because of the weird sign change in the exmap functions.
00155         h_dl = compute_dog_leg(alpha, -gradient, -h_gn, Delta, rho_denominator);
00156 
00157         // Update the estimate.
00158         // NOTE:  Here we use -h_dl because of the weird sign change in the exmap functions.
00159         function_system.apply_exmap(-h_dl);
00160 
00161         // Get the value of the sum-of-squared errors at the new estimate.
00162         F_h = function_system.weighted_errors(ESTIMATE).squaredNorm();
00163 
00164         // Compute gain ratio.
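        // rho compares the reduction actually achieved, F_0 - F_h, with the
        // reduction predicted by the linearized model (rho_denominator, set
        // by compute_dog_leg).  rho close to 1 means the model is reliable;
        // rho < 0 means the step increased the error, so the loop shrinks
        // the trust region and tries again.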
00165         rho = (F_0 - F_h) / (rho_denominator);
00166 
00167         update_trust_radius(rho, h_dl.norm());
00168       } while (rho < 0);
00169 
00170       // Cache last accepted dog-leg step
00171       last_accepted_hdl = h_dl;
00172     } else {
00173       function_system.linpoint_to_estimate();
00174       last_accepted_hdl = VectorXd::Zero(gradient.size());
00175     }
00176 
00177   } else {
00178     // For Gauss-Newton just apply the update directly.
00179     function_system.apply_exmap(h_gn);
00180   }
00181 }
00182 
00183 bool Optimizer::powells_dog_leg_update(double epsilon1, double epsilon3,
00184     SparseSystem& jacobian, VectorXd& f_x, VectorXd& grad) {
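  // Refresh the Jacobian, the weighted residual, and the gradient at the
  // current linearization point, and report convergence using Powell's
  // Dog-Leg criteria: the residual or the gradient is small in the
  // infinity norm.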
00185   jacobian = function_system.jacobian();
00186   f_x = function_system.weighted_errors(LINPOINT);
00187   grad = mul_SparseMatrixTrans_Vector(jacobian, f_x);
00188   return (f_x.lpNorm<Eigen::Infinity>() <= epsilon3)
00189       || (grad.lpNorm<Eigen::Infinity>() <= epsilon1);
00190 }
00191 
00192 void Optimizer::augment_sparse_linear_system(SparseSystem& W,
00193     const Properties& prop) {
00194   if (prop.method == DOG_LEG) {
00195     // We're using the incremental version of Powell's Dog-Leg, so we need
00196     // to form the updated gradient.
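    // The rows already incorporated into the system are unchanged, so the
    // gradient of the augmented system can be formed incrementally: pad the
    // old gradient with zeros for the newly introduced variables and add
    // the contribution W^T * f_new of the new measurement rows.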
00197     const VectorXd& f_new = W.rhs();
00198 
00199     // Add the new terms to the running sum-of-squared errors at the
00200     // current linearization point.
00201     current_SSE_at_linpoint += f_new.squaredNorm();
00202 
00203     // Allocate the new gradient vector
00204     VectorXd g_new(W.num_cols());
00205 
00206     // Compute W^T \cdot f_new
00207     VectorXd increment = mul_SparseMatrixTrans_Vector(W, f_new);
00208 
00209     // Set g_new = (g_old 0)^T + W^T f_new.
00210     g_new.head(gradient.size()) = gradient + increment.head(gradient.size());
00211     g_new.tail(W.num_cols() - gradient.size()) = increment.tail(
00212         W.num_cols() - gradient.size());
00213 
00214     // Cache the new gradient vector
00215     gradient = g_new;
00216   }
00217 
00218   // Apply Givens rotations to fold the new rows into the existing factor R
00219   // (incremental QR update of the augmented sparse system).
00219   for (int i = 0; i < W.num_rows(); i++) {
00220     SparseVector new_row = W.get_row(i);
00221     function_system._R.add_row_givens(new_row, W.rhs()(i));
00222   }
00223 }
00224 
00225 void Optimizer::update_estimate(const Properties& prop) {
00226   // Solve for the Gauss-Newton step.
00227   VectorXd h_gn_reordered = function_system._R.solve();
00228 
00229   // permute from R-ordering to J-ordering
00230   VectorXd h_gn(h_gn_reordered.size());
00231   permute_vector(h_gn_reordered, h_gn, function_system._R.r_to_a());
00232 
00233   if (prop.method == GAUSS_NEWTON) {
00234     function_system.apply_exmap(h_gn);
00235   } else { // method == DOG_LEG
00236     // Compute alpha.  Note that since the variable ordering of the factor
00237     // R differs from that of the original Jacobian J,
00238     // we must first rearrange the ordering of the elements in the gradient.
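    // Up to the column permutation, the Jacobian factors as Q*R with Q
    // orthonormal, so ||J*g|| = ||R*g_R||, where g_R is the gradient
    // permuted into R's variable ordering; this is why the cached factor
    // can stand in for the full Jacobian here.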
00239     VectorXd reordered_gradient(gradient.size());
00240 
00241     // Permute from J-ordering to R-ordering
00242     permute_vector(gradient, reordered_gradient, function_system._R.a_to_r());
00243 
00244     double alpha_denominator =
00245         (function_system._R * reordered_gradient).squaredNorm();
00246 
00247     if (alpha_denominator > 0) {
00248       double alpha = (gradient.squaredNorm()) / alpha_denominator;
00249 
00250       double rho_denominator;
00251 
00252       VectorXd h_dl = compute_dog_leg(alpha, -gradient, -h_gn, Delta,
00253           rho_denominator);
00254       function_system.apply_exmap(-h_dl);
00255 
00256       // Compute the gain ratio
00257       double rho = (current_SSE_at_linpoint
00258           - function_system.weighted_errors(ESTIMATE).squaredNorm())
00259           / rho_denominator;
00260 
00261       update_trust_radius(rho, h_dl.norm());
00262 
00263       if (rho < 0) {
00264         // The proposed update actually /increased/ the value of the
00265         // objective function; restore the last good estimate that we had
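        // apply_exmap() maps a step taken from the (still unchanged)
        // linearization point to a new estimate, so re-applying the last
        // accepted dog-leg step, padded with zeros for any variables added
        // since it was computed, reproduces the last accepted estimate.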
00266         VectorXd restore_step(gradient.size());
00267         restore_step.head(last_accepted_hdl.size()) = last_accepted_hdl;
00268         restore_step.tail(gradient.size() - last_accepted_hdl.size()).setZero();
00269 
00270         function_system.apply_exmap(-restore_step);
00271       } else {
00272         // The proposed update was accepted; cache the dog-leg step used
00273         // to produce it.
00274         last_accepted_hdl = h_dl;
00275       }
00276     }
00277 
00278     // NOTE:  The negatives prepended to "compute_dog_leg()" and "h_gn"
00279     // are due to the weird sign change in the exmap functions.
00280   }
00281 }
00282 
00283 void Optimizer::gauss_newton(const Properties& prop, int* num_iterations) {
00284   // Batch optimization
00285   int num_iter = 0;
00286 
00287   // Set the new linearization point to be the current estimate.
00288   function_system.estimate_to_linpoint();
00289   // Compute Jacobian about current estimate.
00290   SparseSystem jacobian = function_system.jacobian();
00291 
00292   // Get the current error residual vector
00293   VectorXd r = function_system.weighted_errors(LINPOINT);
00294 
00295 #ifdef USE_PDL_STOPPING_CRITERIA
00296   // Compute the current gradient direction vector
00297   VectorXd g = mul_SparseMatrixTrans_Vector(jacobian, r);
00298 #else
00299   double error = r.squaredNorm();
00300   double error_new;
00301   // We haven't computed a step yet, so this initialization ensures that
00302   // we never skip the while loop as a result of failing the
00303   // change-in-error check.
00304   double error_diff = prop.epsilon_rel * error + 1;
00305 #endif
00306 
00307   // Compute the Gauss-Newton step h_{gn} toward the next estimate.
00308   VectorXd delta = compute_gauss_newton_step(jacobian);
00309 
00310   while (
00311   // We ALWAYS use these criteria
00312   ((prop.max_iterations <= 0) || (num_iter < prop.max_iterations))
00313       && (delta.norm() > prop.epsilon2)
00314 
00315 #ifdef USE_PDL_STOPPING_CRITERIA
00316       && (r.lpNorm<Eigen::Infinity>() > prop.epsilon3)
00317       && (g.lpNorm<Eigen::Infinity>() > prop.epsilon1)
00318 
00319 #else // Custom stopping criteria for GN
00320       && (error > prop.epsilon_abs)
00321       && (fabs(error_diff) > prop.epsilon_rel * error)
00322 #endif
00323 
00324   ) // end while conditional
00325   {
00326     num_iter++;
00327 
00328     // Apply the Gauss-Newton step h_{gn} to...
00329     function_system.apply_exmap(delta);
00330     // ...set the new linearization point to be the current estimate.
00331     function_system.estimate_to_linpoint();
00332     // Relinearize about the new current estimate.
00333     jacobian = function_system.jacobian();
00334     // Compute the error residual vector at the new estimate.
00335     r = function_system.weighted_errors(LINPOINT);
00336 
00337 #ifdef USE_PDL_STOPPING_CRITERIA
00338     g = mul_SparseMatrixTrans_Vector(jacobian, r);
00339 #else
00340     // Update the difference in error between the previous and
00341     // current estimates.
00342     error_new = r.squaredNorm();
00343     error_diff = error - error_new;
00344     error = error_new;  // Record the absolute error at the current estimate
00345 #endif
00346 
00347     // Compute the Gauss-Newton step h_{gn} toward the next
00348     // estimate.
00349     delta = compute_gauss_newton_step(jacobian);
00350     if (!prop.quiet) {
00351       cout << "Iteration " << num_iter << ": residual ";
00352 
00353 #ifdef USE_PDL_STOPPING_CRITERIA
00354       cout << r.squaredNorm();
00355 #else
00356       cout << error;
00357 #endif
00358       cout << endl;
00359     }
00360   }  //end while
00361 
00362   if (num_iterations != NULL) {
00363     *num_iterations = num_iter;
00364   }
00365   _cholesky->get_R(function_system._R);
00366 }
00367 
00368 void Optimizer::levenberg_marquardt(const Properties& prop,
00369     int* num_iterations) {
00370   int num_iter = 0;
00371   double lambda = prop.lm_lambda0;
00372   // Using linpoint as current estimate below.
00373   function_system.estimate_to_linpoint();
00374 
00375   // Get the current Jacobian at the linearization point.
00376   SparseSystem jacobian = function_system.jacobian();
00377 
00378   // Get the error residual vector at the current linearization point.
00379   VectorXd r = function_system.weighted_errors(LINPOINT);
00380 
00381   // Record the absolute sum-of-squares error (i.e., objective function value) here.
00382   double error = r.squaredNorm();
00383 
00384 #ifdef USE_PDL_STOPPING_CRITERIA
00385   // Compute the gradient direction vector at the current linearization point
00386   VectorXd g = mul_SparseMatrixTrans_Vector(jacobian, r);
00387 #endif
00388 
00389   double error_diff, error_new;
00390 
00391   // Solve the damped normal equations (Levenberg-Marquardt): J'J + lambda*diag(J'J)
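  // Larger lambda shrinks the step and turns it toward (scaled) steepest
  // descent; smaller lambda approaches the plain Gauss-Newton step.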
00392   VectorXd delta = compute_gauss_newton_step(jacobian, &function_system._R,
00393       lambda);
00394 
00395   while (
00396   // We ALWAYS use these stopping criteria
00397   ((prop.max_iterations <= 0) || (num_iter < prop.max_iterations))
00398       && (delta.norm() > prop.epsilon2)
00399 
00400 #ifdef USE_PDL_STOPPING_CRITERIA
00401       && (r.lpNorm<Eigen::Infinity>() > prop.epsilon3)
00402       && (g.lpNorm<Eigen::Infinity>() > prop.epsilon1)
00403 #else
00404       && (error > prop.epsilon_abs)
00405 #endif
00406   )  // end while conditional
00407   {
00408     num_iter++;
00409 
00410     // remember the last accepted linearization point
00411     function_system.linpoint_to_estimate();
00412     // Apply the delta vector DIRECTLY TO THE LINEARIZATION POINT!
00413     function_system.self_exmap(delta);
00414     error_new = function_system.weighted_errors(LINPOINT).squaredNorm();
00415     error_diff = error - error_new;
00416     // feedback
00417     if (!prop.quiet) {
00418       cout << "LM Iteration " << num_iter << ": (lambda=" << lambda << ") ";
00419       if (error_diff > 0.) {
00420         cout << "residual: " << error_new << endl;
00421       } else {
00422         cout << "rejected" << endl;
00423       }
00424     }
00425     // decide if acceptable
00426     if (error_diff > 0.) {
00427 
00428 #ifndef USE_PDL_STOPPING_CRITERIA
00429       if (error_diff < prop.epsilon_rel * error) {
00430         break;
00431       }
00432 #endif
00433 
00434       // Update lambda
00435       lambda /= prop.lm_lambda_factor;
00436 
00437       // Record the error at the newly-accepted estimate.
00438       error = error_new;
00439 
00440       // Relinearize around the newly-accepted estimate.
00441       jacobian = function_system.jacobian();
00442 
00443 #ifdef USE_PDL_STOPPING_CRITERIA
00444       r = function_system.weighted_errors(LINPOINT);
00445       g = mul_SparseMatrixTrans_Vector(jacobian, r);
00446 #endif
00447     } else {
00448       // reject new estimate
00449       lambda *= prop.lm_lambda_factor;
00450       // restore previous estimate
00451       function_system.estimate_to_linpoint();
00452     }
00453 
00454     // Compute the step for the next iteration.
00455     delta = compute_gauss_newton_step(jacobian, &function_system._R, lambda);
00456 
00457   } // end while
00458 
00459   if (num_iterations != NULL) {
00460     *num_iterations = num_iter;
00461   }
00462   // Copy current estimate contained in linpoint.
00463   function_system.linpoint_to_estimate();
00464 }
00465 
00466 void Optimizer::powells_dog_leg(int* num_iterations, double delta0,
00467     int max_iterations, double epsilon1, double epsilon2, double epsilon3) {
00468   // Batch optimization
00469   int num_iter = 0;
00470   // current estimate is used as new linearization point
00471   function_system.estimate_to_linpoint();
00472 
00473   double delta = delta0;
00474   SparseSystem jacobian(1, 1);
00475   VectorXd f_x;
00476   VectorXd grad;
00477 
00478   bool found = powells_dog_leg_update(epsilon1, epsilon3, jacobian, f_x, grad);
00479 
00480   double rho_denominator;
00481 
00482   while ((not found) && (max_iterations == 0 || num_iter < max_iterations)) {
00483     num_iter++;
00484     cout << "PDL Iteration " << num_iter << " residual: " << f_x.squaredNorm()
00485         << endl;
00486     // compute alpha
00487     double alpha = grad.squaredNorm() / (jacobian * grad).squaredNorm();
00488     // steepest descent
00489     VectorXd h_sd = -grad;
00490     // solve Gauss Newton
00491     VectorXd h_gn = compute_gauss_newton_step(jacobian, &function_system._R);
00492     // compute dog leg h_dl
00493     // x0 = x: save the current linearization point (into the estimate) so it can be restored, and returned, later
00494     function_system.linpoint_to_estimate();
00495     VectorXd h_dl = compute_dog_leg(alpha, h_sd, h_gn, delta, rho_denominator);
00496     // Evaluate new solution, update estimate and trust region.
00497     if (h_dl.norm() <= epsilon2) {
00498       found = true;
00499     } else {
00500       // new estimate
00501       // change linearization point directly (original LP saved in estimate)
00502       function_system.self_exmap(h_dl);
00503       // calculate gain ratio rho
00504       VectorXd f_x_new = function_system.weighted_errors(LINPOINT);
00505       double rho = (f_x.squaredNorm() - f_x_new.squaredNorm())
00506           / (rho_denominator);
00507       if (rho > 0) {
00508         // accept new estimate
00509         cout << "accepted" << endl;
00510         f_x = f_x_new;
00511         found = powells_dog_leg_update(epsilon1, epsilon3, jacobian, f_x, grad);
00512       } else {
00513         // reject new estimate, overwrite with last saved one
00514         cout << "rejected" << endl;
00515         function_system.estimate_to_linpoint();
00516       }
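      // Adapt the trust-region radius from the gain ratio, mirroring
      // update_trust_radius(): grow it when the model predicts well
      // (rho > 0.75), halve it when it predicts poorly (rho < 0.25), and
      // stop if the region collapses below the step tolerance epsilon2.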
00517       if (rho > 0.75) {
00518         delta = max(delta, 3.0 * h_dl.norm());
00519       } else if (rho < 0.25) {
00520         delta *= 0.5;
00521         found = (delta <= epsilon2);
00522       }
00523     }
00524   }
00525   if (num_iterations) {
00526     *num_iterations = num_iter;
00527   }
00528   // Overwrite potentially rejected linearization point with last saved one
00529   // (could be identical if it was accepted in the last iteration).
00530 
00531   function_system.swap_estimates();
00532 
00533 }
00534 
00535 void Optimizer::batch_optimize(const Properties& prop, int* num_iterations) {
00536 
00537   const double delta0 = 1.0;
00538 
00539   switch (prop.method) {
00540   case GAUSS_NEWTON:
00541     gauss_newton(prop, num_iterations);
00542     break;
00543   case DOG_LEG:
00544     powells_dog_leg(num_iterations, delta0, prop.max_iterations, prop.epsilon1,
00545         prop.epsilon2, prop.epsilon3); // modifies x0,R
00546     break;
00547   case LEVENBERG_MARQUARDT:
00548     levenberg_marquardt(prop, num_iterations);
00549     break;
00550   }
00551 
00552 }
00553 
00554 }

