#include "graph_optimizer_sparse_incremental.h"

#include "g2o/apps/g2o_interactive/types_slam2d_online.h"
#include "g2o/apps/g2o_interactive/types_slam3d_online.h"

#include "g2o/core/block_solver.h"
#include "g2o/stuff/macros.h"

#define DIM_TO_SOLVER(p, l) BlockSolver< BlockSolverTraits<p, l> >

#define ALLOC_CHOLMOD(s, p, l) \
  if (1) { \
    std::cerr << "# Using CHOLMOD online poseDim " << p << " landMarkDim " << l << " blockordering 1" << std::endl; \
    LinearSolverCholmodOnline<DIM_TO_SOLVER(p, l)::PoseMatrixType>* linearSolver = new LinearSolverCholmodOnline<DIM_TO_SOLVER(p, l)::PoseMatrixType>(); \
    s = new DIM_TO_SOLVER(p, l)(opt, linearSolver); \
  } else (void)0

using namespace std;
using namespace Eigen;

namespace g2o {

namespace {
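  /**
   * \brief backup of a vertex' temporary index and Hessian memory location
   */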
  struct VertexBackup
  {
    int tempIndex;
    OptimizableGraph::Vertex* vertex;
    double* hessianData;
    bool operator<(const VertexBackup& other) const
    {
      return tempIndex < other.tempIndex;
    }
  };
}

SparseOptimizerIncremental::SparseOptimizerIncremental()
{
  _cholmodSparse = new CholmodExt();
  _cholmodFactor = 0;
  cholmod_start(&_cholmodCommon);

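  // use the natural ordering and a simplicial factorization, so that the
  // factor stays compatible with the incremental updates applied below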
  _cholmodCommon.nmethods = 1;
  _cholmodCommon.method[0].ordering = CHOLMOD_NATURAL;
  _cholmodCommon.postorder = 0;
  _cholmodCommon.supernodal = CHOLMOD_SIMPLICIAL;

  _permutedUpdate = cholmod_allocate_triplet(1000, 1000, 1024, 0, CHOLMOD_REAL, &_cholmodCommon);
  _L = 0;
  _solverInterface = 0;
}

SparseOptimizerIncremental::~SparseOptimizerIncremental()
{
  _updateMat.clear(true);
  delete _cholmodSparse;
  if (_cholmodFactor) {
    cholmod_free_factor(&_cholmodFactor, &_cholmodCommon);
    _cholmodFactor = 0;
  }
  cholmod_free_triplet(&_permutedUpdate, &_cholmodCommon);
  cholmod_finish(&_cholmodCommon);
}

int SparseOptimizerIncremental::optimize(int iterations, bool online)
{
  (void) iterations; // we always perform a single iteration
  Solver* solver = _solver;
  solver->init(online);

  int cjIterations = 0;
  bool ok = true;

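  // either we are not running online or a full batch step was scheduled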
  if (! online || batchStep) {
    if (! online) {
      ok = solver->buildStructure();
      if (! ok) {
        cerr << __PRETTY_FUNCTION__ << ": Failure while building CCS structure" << endl;
        return 0;
      }
    }

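    // copy over the updated estimates as the new linearization point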
    if (slamDimension == 3) {
      for (size_t i = 0; i < indexMapping().size(); ++i) {
        OnlineVertexSE2* v = static_cast<OnlineVertexSE2*>(indexMapping()[i]);
        v->estimate() = v->updatedEstimate;
      }
    }
    else if (slamDimension == 6) {
      for (size_t i = 0; i < indexMapping().size(); ++i) {
        OnlineVertexSE3* v = static_cast<OnlineVertexSE3*>(indexMapping()[i]);
        v->estimate() = v->updatedEstimate;
      }
    }

    SparseOptimizer::computeActiveErrors();
    SparseOptimizer::linearizeSystem();
    solver->buildSystem();

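    // constrain the ordering: vertices connected to the most recently added
    // vertices are eliminated last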
    int numBlocksRequired = _ivMap.size();
    if (_cmember.size() < numBlocksRequired) {
      _cmember.resize(2 * numBlocksRequired);
    }
    memset(_cmember.data(), 0, numBlocksRequired * sizeof(int));
    if (_ivMap.size() > 100) {
      for (size_t i = _ivMap.size() - 20; i < _ivMap.size(); ++i) {
        const HyperGraph::EdgeSet& eset = _ivMap[i]->edges();
        for (HyperGraph::EdgeSet::const_iterator it = eset.begin(); it != eset.end(); ++it) {
          OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
          OptimizableGraph::Vertex* v1 = static_cast<OptimizableGraph::Vertex*>(e->vertices()[0]);
          OptimizableGraph::Vertex* v2 = static_cast<OptimizableGraph::Vertex*>(e->vertices()[1]);
          if (v1->tempIndex() >= 0)
            _cmember(v1->tempIndex()) = 1;
          if (v2->tempIndex() >= 0)
            _cmember(v2->tempIndex()) = 1;
        }
      }
    }

    ok = solver->solve();

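    // cache the Cholesky factor along with its inverse permutation for the incremental updates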
    _L = _solverInterface->L();
    if (_perm.size() < (int)_L->n)
      _perm.resize(2 * _L->n);
    int* p = (int*)_L->Perm;
    for (size_t i = 0; i < _L->n; ++i)
      _perm[p[i]] = i;
  }
  else {

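    // incremental step: update the right-hand side b of the touched vertices and back-substitute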
    for (HyperGraph::VertexSet::iterator it = _touchedVertices.begin(); it != _touchedVertices.end(); ++it) {
      OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*it);
      int iBase = v->colInHessian();
      v->copyB(solver->b() + iBase);
    }
    _solverInterface->solve(solver->x(), solver->b());
  }

  update(solver->x());
  ++cjIterations;

  if (verbose()) {
    computeActiveErrors();
    cerr
      << "nodes = " << vertices().size()
      << "\t edges= " << _activeEdges.size()
      << "\t chi2= " << FIXED(activeChi2())
      << endl << endl;
  }

  if (vizWithGnuplot)
    gnuplotVisualization();

  if (! ok)
    return 0;
  return 1;
}

bool SparseOptimizerIncremental::updateInitialization(HyperGraph::VertexSet& vset, HyperGraph::EdgeSet& eset)
{
  if (batchStep) {
    return SparseOptimizerOnline::updateInitialization(vset, eset);
  }

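  // make sure that b is zero for each vertex that we are about to relinearize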
  for (HyperGraph::VertexSet::iterator it = vset.begin(); it != vset.end(); ++it) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*it);
    v->clearQuadraticForm();
  }

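  // collect the vertices that are touched by the new edges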
  _touchedVertices.clear();
  for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it) {
    OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
    OptimizableGraph::Vertex* v1 = static_cast<OptimizableGraph::Vertex*>(e->vertices()[0]);
    OptimizableGraph::Vertex* v2 = static_cast<OptimizableGraph::Vertex*>(e->vertices()[1]);
    if (! v1->fixed())
      _touchedVertices.insert(v1);
    if (! v2->fixed())
      _touchedVertices.insert(v2);
  }

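  // add the new edges and vertices to the active sets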
  std::vector<HyperGraph::Vertex*> newVertices;
  newVertices.reserve(vset.size());
  _activeVertices.reserve(_activeVertices.size() + vset.size());
  _activeEdges.reserve(_activeEdges.size() + eset.size());
  for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it)
    _activeEdges.push_back(static_cast<OptimizableGraph::Edge*>(*it));

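  // extend the index mapping with the newly inserted vertices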
  size_t next = _ivMap.size();
  for (HyperGraph::VertexSet::iterator it = vset.begin(); it != vset.end(); ++it) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*it);
    if (! v->fixed()) {
      if (! v->marginalized()) {
        v->setTempIndex(next);
        _ivMap.push_back(v);
        newVertices.push_back(v);
        _activeVertices.push_back(v);
        next++;
      }
      else // marginalized vertices are not supported by the incremental optimizer
        abort();
    }
    else {
      v->setTempIndex(-1);
    }
  }

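  // backup the temporary index and the Hessian memory of each touched vertex,
  // then assign consecutive temporary indices for building the update matrix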
  VertexBackup backupIdx[_touchedVertices.size()];
  memset(backupIdx, 0, sizeof(VertexBackup) * _touchedVertices.size());
  int idx = 0;
  for (HyperGraph::VertexSet::iterator it = _touchedVertices.begin(); it != _touchedVertices.end(); ++it) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*it);
    backupIdx[idx].tempIndex = v->tempIndex();
    backupIdx[idx].vertex = v;
    backupIdx[idx].hessianData = v->hessianData();
    ++idx;
  }
  sort(backupIdx, backupIdx + _touchedVertices.size());
  for (int i = 0; i < idx; ++i) {
    backupIdx[i].vertex->setTempIndex(i);
  }

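  // build the structure of the update matrix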
  _updateMat.clear(true);
  _updateMat.rowBlockIndices().clear();
  _updateMat.colBlockIndices().clear();
  _updateMat.blockCols().clear();

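  // allocate the diagonal blocks and map the Hessian memory of the vertices into the update matrix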
  MatrixXd* lastBlock = 0;
  int sizePoses = 0;
  for (int i = 0; i < idx; ++i) {
    OptimizableGraph::Vertex* v = backupIdx[i].vertex;
    int dim = v->dimension();
    sizePoses += dim;
    _updateMat.rowBlockIndices().push_back(sizePoses);
    _updateMat.colBlockIndices().push_back(sizePoses);
    _updateMat.blockCols().push_back(SparseBlockMatrix<MatrixXd>::IntBlockMap());
    int ind = v->tempIndex();
    if (ind >= 0) {
      MatrixXd* m = _updateMat.block(ind, ind, true);
      v->mapHessianMemory(m->data());
      lastBlock = m;
    }
  }
  // regularize the last diagonal block to keep the update matrix positive definite
  lastBlock->diagonal().array() += 1e-6;

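  // map the memory of the off-diagonal Hessian blocks of the new edges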
  for (HyperGraph::EdgeSet::const_iterator it = eset.begin(); it != eset.end(); ++it) {
    OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
    OptimizableGraph::Vertex* v1 = static_cast<OptimizableGraph::Vertex*>(e->vertices()[0]);
    OptimizableGraph::Vertex* v2 = static_cast<OptimizableGraph::Vertex*>(e->vertices()[1]);

    int ind1 = v1->tempIndex();
    if (ind1 == -1)
      continue;
    int ind2 = v2->tempIndex();
    if (ind2 == -1)
      continue;
    bool transposedBlock = ind1 > ind2;
    if (transposedBlock) // make sure that we allocate the block in the upper triangle
      swap(ind1, ind2);

    MatrixXd* m = _updateMat.block(ind1, ind2, true);
    e->mapHessianMemory(m->data(), 0, 1, transposedBlock);
  }

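  // compute the update: build the quadratic form of the new edges into _updateMat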
  for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it) {
    OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
    e->computeError();
  }
  for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it) {
    OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
    e->linearizeOplus();
  }
  for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it) {
    OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
    e->constructQuadraticForm();
  }

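  // restore the original temporary indices and Hessian memory of the vertices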
  for (int i = 0; i < idx; ++i) {
    backupIdx[i].vertex->setTempIndex(backupIdx[i].tempIndex);
    if (backupIdx[i].hessianData)
      backupIdx[i].vertex->mapHessianMemory(backupIdx[i].hessianData);
  }

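  // update the structure of the full system and factorize the update matrix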
  bool solverStatus = _solver->updateStructure(newVertices, eset);

  bool updateStatus = computeCholeskyUpdate();
  if (! updateStatus) {
    cerr << __PRETTY_FUNCTION__ << ": Error while computing the Cholesky update" << endl;
  }

  cholmod_sparse* updateAsSparseFactor = cholmod_factor_to_sparse(_cholmodFactor, &_cholmodCommon);

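  // convert the update factor into a triplet matrix whose indices are permuted to match the ordering of L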
  if (updateAsSparseFactor->nzmax > _permutedUpdate->nzmax) {
    cholmod_reallocate_triplet(updateAsSparseFactor->nzmax, _permutedUpdate, &_cholmodCommon);
  }
  _permutedUpdate->nnz = 0;
  _permutedUpdate->nrow = _permutedUpdate->ncol = _L->n;
  {
    int* Ap = (int*)updateAsSparseFactor->p;
    int* Ai = (int*)updateAsSparseFactor->i;
    double* Ax = (double*)updateAsSparseFactor->x;
    int* Bj = (int*)_permutedUpdate->j;
    int* Bi = (int*)_permutedUpdate->i;
    double* Bx = (double*)_permutedUpdate->x;
    for (size_t c = 0; c < updateAsSparseFactor->ncol; ++c) {
      const int& rbeg = Ap[c];
      const int& rend = Ap[c+1];
      int cc = c / slamDimension;
      int coff = c % slamDimension;
      const int& cbase = backupIdx[cc].vertex->colInHessian();
      const int& ccol = _perm(cbase + coff);
      for (int j = rbeg; j < rend; ++j) {
        const int& r = Ai[j];
        const double& val = Ax[j];
        int rr = r / slamDimension;
        int roff = r % slamDimension;
        const int& rbase = backupIdx[rr].vertex->colInHessian();
        int row = _perm(rbase + roff);
        int col = ccol;
        if (col > row)
          swap(col, row);
        Bi[_permutedUpdate->nnz] = row;
        Bj[_permutedUpdate->nnz] = col;
        Bx[_permutedUpdate->nnz] = val;
        ++_permutedUpdate->nnz;
      }
    }
  }
  cholmod_free_sparse(&updateAsSparseFactor, &_cholmodCommon);
  cholmod_sparse* updatePermuted = cholmod_triplet_to_sparse(_permutedUpdate, _permutedUpdate->nnz, &_cholmodCommon);

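  // apply the update to the Cholesky factor maintained by the linear solver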
  _solverInterface->choleskyUpdate(updatePermuted);
  cholmod_free_sparse(&updatePermuted, &_cholmodCommon);

  return solverStatus;
}

bool SparseOptimizerIncremental::computeCholeskyUpdate()
{
  if (_cholmodFactor) {
    cholmod_free_factor(&_cholmodFactor, &_cholmodCommon);
    _cholmodFactor = 0;
  }

  const SparseBlockMatrix<MatrixXd>& A = _updateMat;
  size_t m = A.rows();
  size_t n = A.cols();

  if (_cholmodSparse->columnsAllocated < n) {
    // pre-allocate more space when re-allocating
    _cholmodSparse->columnsAllocated = _cholmodSparse->columnsAllocated == 0 ? n : 2 * n;
    delete[] (int*)_cholmodSparse->p;
    _cholmodSparse->p = new int[_cholmodSparse->columnsAllocated + 1];
  }
  size_t nzmax = A.nonZeros();
  if (_cholmodSparse->nzmax < nzmax) {
    // pre-allocate more space when re-allocating
    _cholmodSparse->nzmax = _cholmodSparse->nzmax == 0 ? nzmax : 2 * nzmax;
    delete[] (double*)_cholmodSparse->x;
    delete[] (int*)_cholmodSparse->i;
    _cholmodSparse->i = new int[_cholmodSparse->nzmax];
    _cholmodSparse->x = new double[_cholmodSparse->nzmax];
  }
  _cholmodSparse->ncol = n;
  _cholmodSparse->nrow = m;

  A.fillCCS((int*)_cholmodSparse->p, (int*)_cholmodSparse->i, (double*)_cholmodSparse->x, true);

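  // analyze and factorize the update matrix using the natural ordering configured in the constructor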
  _cholmodFactor = cholmod_analyze(_cholmodSparse, &_cholmodCommon);
  cholmod_factorize(_cholmodSparse, _cholmodFactor, &_cholmodCommon);

#if 0
  // sanity check: with the natural ordering the permutation has to be the identity
  int* p = (int*)_cholmodFactor->Perm;
  for (int i = 0; i < (int)n; ++i)
    if (i != p[i])
      cerr << "wrong permutation" << i << " -> " << p[i] << endl;
#endif

  if (_cholmodCommon.status == CHOLMOD_NOT_POSDEF) {
    return false;
  }

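  // convert the factor to a packed, monotonic, simplicial LL' factorization,
  // i.e., the plain lower triangular matrix L expected by the incremental update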
  int change_status = cholmod_change_factor(CHOLMOD_REAL, 1, 0, 1, 1, _cholmodFactor, &_cholmodCommon);
  if (! change_status) {
    return false;
  }

  return true;
}

static Solver* createSolver(SparseOptimizer* opt, const std::string& solverName)
{
  g2o::Solver* s = 0;

  if (solverName == "fix3_2_cholmod") {
    ALLOC_CHOLMOD(s, 3, 2);
    s->setAdditionalVectorSpace(300);
  }
  else if (solverName == "fix6_3_cholmod") {
    ALLOC_CHOLMOD(s, 6, 3);
    s->setAdditionalVectorSpace(600);
  }

  return s;
}

bool SparseOptimizerIncremental::initSolver(int dimension, int batchEveryN)
{
  slamDimension = dimension;
  if (dimension == 3) {
    setSolver(createSolver(this, "fix3_2_cholmod"));
    BlockSolver<BlockSolverTraits<3, 2> >* bs = dynamic_cast<BlockSolver<BlockSolverTraits<3, 2> >*>(solver());
    _solverInterface = bs ? dynamic_cast<LinearSolverCholmodOnline<Matrix3d>*>(bs->linearSolver()) : 0;
  } else {
    setSolver(createSolver(this, "fix6_3_cholmod"));
    BlockSolver<BlockSolverTraits<6, 3> >* bs = dynamic_cast<BlockSolver<BlockSolverTraits<6, 3> >*>(solver());
    _solverInterface = bs ? dynamic_cast<LinearSolverCholmodOnline<Matrix<double, 6, 6> >*>(bs->linearSolver()) : 0;
  }
  if (! solver() || ! _solverInterface) { // check before dereferencing the solver below
    cerr << "Error allocating solver. Allocating CHOLMOD solver failed!" << endl;
    return false;
  }
  _solverInterface->cmember = &_cmember;
  _solverInterface->batchEveryN = batchEveryN;
  solver()->setSchur(false);
  return true;
}

} // end namespace g2o