block_solver.hpp
1 // g2o - General Graph Optimization
2 // Copyright (C) 2011 R. Kuemmerle, G. Grisetti, W. Burgard
3 // All rights reserved.
4 //
5 // Redistribution and use in source and binary forms, with or without
6 // modification, are permitted provided that the following conditions are
7 // met:
8 //
9 // * Redistributions of source code must retain the above copyright notice,
10 // this list of conditions and the following disclaimer.
11 // * Redistributions in binary form must reproduce the above copyright
12 // notice, this list of conditions and the following disclaimer in the
13 // documentation and/or other materials provided with the distribution.
14 //
15 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
16 // IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
17 // TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
18 // PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
19 // HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
20 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
21 // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
22 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
23 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
24 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 
27 #include "sparse_optimizer.h"
28 #include <Eigen/LU>
29 #include <fstream>
30 #include <iomanip>
31 
32 #include "../stuff/timeutil.h"
33 #include "../stuff/macros.h"
34 #include "../stuff/misc.h"
35 
36 namespace g2o {
37 
38 using namespace std;
39 using namespace Eigen;
40 
41 template <typename Traits>
42 BlockSolver<Traits>::BlockSolver(LinearSolverType* linearSolver) :
43  BlockSolverBase(),
44  _linearSolver(linearSolver)
45 {
46  // workspace
47  _Hpp=0;
48  _Hll=0;
49  _Hpl=0;
50  _HplCCS = 0;
51  _HschurTransposedCCS = 0;
52  _Hschur=0;
53  _DInvSchur=0;
54  _coefficients=0;
55  _bschur = 0;
56  _xSize=0;
57  _numPoses=0;
58  _numLandmarks=0;
59  _sizePoses=0;
60  _sizeLandmarks=0;
61  _doSchur=true;
62 }
63 
64 template <typename Traits>
65 void BlockSolver<Traits>::resize(int* blockPoseIndices, int numPoseBlocks,
66  int* blockLandmarkIndices, int numLandmarkBlocks,
67  int s)
68 {
69  deallocate();
70 
71  resizeVector(s);
72 
73  if (_doSchur) {
74  // the following two are only used in schur
75  assert(_sizePoses > 0 && "allocating with wrong size");
76  _coefficients = new double [s];
77  _bschur = new double[_sizePoses];
78  }
79 
80  _Hpp=new PoseHessianType(blockPoseIndices, blockPoseIndices, numPoseBlocks, numPoseBlocks);
81  if (_doSchur) {
82  _Hschur=new PoseHessianType(blockPoseIndices, blockPoseIndices, numPoseBlocks, numPoseBlocks);
83  _Hll=new LandmarkHessianType(blockLandmarkIndices, blockLandmarkIndices, numLandmarkBlocks, numLandmarkBlocks);
84  _DInvSchur = new SparseBlockMatrixDiagonal<LandmarkMatrixType>(_Hll->colBlockIndices());
85  _Hpl=new PoseLandmarkHessianType(blockPoseIndices, blockLandmarkIndices, numPoseBlocks, numLandmarkBlocks);
86  _HplCCS = new SparseBlockMatrixCCS<PoseLandmarkMatrixType>(_Hpl->rowBlockIndices(), _Hpl->colBlockIndices());
87  _HschurTransposedCCS = new SparseBlockMatrixCCS<PoseMatrixType>(_Hschur->colBlockIndices(), _Hschur->rowBlockIndices());
88 #ifdef G2O_OPENMP
89  _coefficientsMutex.resize(numPoseBlocks);
90 #endif
91  }
92 }
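
resize() expects the two index arrays to contain cumulative block offsets rather than per-block sizes; buildStructure() below fills them exactly this way (each entry is the running sum of the block dimensions seen so far). A minimal sketch with hypothetical values, assuming two 6-dof pose blocks and three 3-dof landmark blocks:

  int blockPoseIndices[]     = {6, 12};   // offset after each pose block
  int blockLandmarkIndices[] = {3, 6, 9}; // offset after each landmark block
  int s = 12 + 9;                         // total scalar dimension passed as the last argument
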
93 
94 template <typename Traits>
95 void BlockSolver<Traits>::deallocate()
96 {
97  if (_Hpp){
98  delete _Hpp;
99  _Hpp=0;
100  }
101  if (_Hll){
102  delete _Hll;
103  _Hll=0;
104  }
105  if (_Hpl){
106  delete _Hpl;
107  _Hpl = 0;
108  }
109  if (_Hschur){
110  delete _Hschur;
111  _Hschur=0;
112  }
113  if (_DInvSchur){
114  delete _DInvSchur;
115  _DInvSchur=0;
116  }
117  if (_coefficients) {
118  delete[] _coefficients;
119  _coefficients = 0;
120  }
121  if (_bschur) {
122  delete[] _bschur;
123  _bschur = 0;
124  }
125  if (_HplCCS) {
126  delete _HplCCS;
127  _HplCCS = 0;
128  }
129  if (_HschurTransposedCCS) {
130  delete _HschurTransposedCCS;
131  _HschurTransposedCCS = 0;
132  }
133 }
134 
135 template <typename Traits>
136 BlockSolver<Traits>::~BlockSolver()
137 {
138  delete _linearSolver;
139  deallocate();
140 }
141 
142 template <typename Traits>
143 bool BlockSolver<Traits>::buildStructure(bool zeroBlocks)
144 {
145  assert(_optimizer);
146 
147  size_t sparseDim = 0;
148  _numPoses=0;
149  _numLandmarks=0;
150  _sizePoses=0;
151  _sizeLandmarks=0;
152  int* blockPoseIndices = new int[_optimizer->indexMapping().size()];
153  int* blockLandmarkIndices = new int[_optimizer->indexMapping().size()];
154 
155  for (size_t i = 0; i < _optimizer->indexMapping().size(); ++i) {
156  OptimizableGraph::Vertex* v = _optimizer->indexMapping()[i];
157  int dim = v->dimension();
158  if (! v->marginalized()){
159  v->setColInHessian(_sizePoses);
160  _sizePoses+=dim;
161  blockPoseIndices[_numPoses]=_sizePoses;
162  ++_numPoses;
163  } else {
164  v->setColInHessian(_sizeLandmarks);
165  _sizeLandmarks+=dim;
166  blockLandmarkIndices[_numLandmarks]=_sizeLandmarks;
167  ++_numLandmarks;
168  }
169  sparseDim += dim;
170  }
171  resize(blockPoseIndices, _numPoses, blockLandmarkIndices, _numLandmarks, sparseDim);
172  delete[] blockLandmarkIndices;
173  delete[] blockPoseIndices;
174 
175  // allocate the diagonal on Hpp and Hll
176  int poseIdx = 0;
177  int landmarkIdx = 0;
178  for (size_t i = 0; i < _optimizer->indexMapping().size(); ++i) {
179  OptimizableGraph::Vertex* v = _optimizer->indexMapping()[i];
180  if (! v->marginalized()){
181  //assert(poseIdx == v->hessianIndex());
182  PoseMatrixType* m = _Hpp->block(poseIdx, poseIdx, true);
183  if (zeroBlocks)
184  m->setZero();
185  v->mapHessianMemory(m->data());
186  ++poseIdx;
187  } else {
188  LandmarkMatrixType* m = _Hll->block(landmarkIdx, landmarkIdx, true);
189  if (zeroBlocks)
190  m->setZero();
191  v->mapHessianMemory(m->data());
192  ++landmarkIdx;
193  }
194  }
195  assert(poseIdx == _numPoses && landmarkIdx == _numLandmarks);
196 
197  // temporary structures for building the pattern of the Schur complement
198  SparseBlockMatrixHashMap<PoseMatrixType>* schurMatrixLookup = 0;
199  if (_doSchur) {
200  schurMatrixLookup = new SparseBlockMatrixHashMap<PoseMatrixType>(_Hschur->rowBlockIndices(), _Hschur->colBlockIndices());
201  schurMatrixLookup->blockCols().resize(_Hschur->blockCols().size());
202  }
203 
204  // here we assume that the landmark indices start after the pose ones
205  // create the structure in Hpp, Hll and in Hpl
206  for (SparseOptimizer::EdgeContainer::const_iterator it=_optimizer->activeEdges().begin(); it!=_optimizer->activeEdges().end(); ++it){
207  OptimizableGraph::Edge* e = *it;
208 
209  for (size_t viIdx = 0; viIdx < e->vertices().size(); ++viIdx) {
210  OptimizableGraph::Vertex* v1 = (OptimizableGraph::Vertex*) e->vertex(viIdx);
211  int ind1 = v1->hessianIndex();
212  if (ind1 == -1)
213  continue;
214  int indexV1Bak = ind1;
215  for (size_t vjIdx = viIdx + 1; vjIdx < e->vertices().size(); ++vjIdx) {
216  OptimizableGraph::Vertex* v2 = (OptimizableGraph::Vertex*) e->vertex(vjIdx);
217  int ind2 = v2->hessianIndex();
218  if (ind2 == -1)
219  continue;
220  ind1 = indexV1Bak;
221  bool transposedBlock = ind1 > ind2;
222  if (transposedBlock){ // make sure we allocate the upper triangular block
223  swap(ind1, ind2);
224  }
225  if (! v1->marginalized() && !v2->marginalized()){
226  PoseMatrixType* m = _Hpp->block(ind1, ind2, true);
227  if (zeroBlocks)
228  m->setZero();
229  e->mapHessianMemory(m->data(), viIdx, vjIdx, transposedBlock);
230  if (_Hschur) {// assume this is only needed in case we solve with the schur complement
231  schurMatrixLookup->addBlock(ind1, ind2);
232  }
233  } else if (v1->marginalized() && v2->marginalized()){
234  // RAINER hmm.... should we ever reach this here????
235  LandmarkMatrixType* m = _Hll->block(ind1-_numPoses, ind2-_numPoses, true);
236  if (zeroBlocks)
237  m->setZero();
238  e->mapHessianMemory(m->data(), viIdx, vjIdx, false);
239  } else {
240  if (v1->marginalized()){
241  PoseLandmarkMatrixType* m = _Hpl->block(v2->hessianIndex(), v1->hessianIndex()-_numPoses, true);
242  if (zeroBlocks)
243  m->setZero();
244  e->mapHessianMemory(m->data(), viIdx, vjIdx, true); // transpose the block before writing to it
245  } else {
246  PoseLandmarkMatrixType* m = _Hpl->block(v1->hessianIndex(), v2->hessianIndex()-_numPoses, true);
247  if (zeroBlocks)
248  m->setZero();
249  e->mapHessianMemory(m->data(), viIdx, vjIdx, false); // directly the block
250  }
251  }
252  }
253  }
254  }
255 
256  if (! _doSchur)
257  return true;
258 
259  _DInvSchur->diagonal().resize(landmarkIdx);
260  _Hpl->fillSparseBlockMatrixCCS(*_HplCCS);
261 
262  for (size_t i = 0; i < _optimizer->indexMapping().size(); ++i) {
263  OptimizableGraph::Vertex* v = _optimizer->indexMapping()[i];
264  if (v->marginalized()){
265  const HyperGraph::EdgeSet& vedges=v->edges();
266  for (HyperGraph::EdgeSet::const_iterator it1=vedges.begin(); it1!=vedges.end(); ++it1){
267  for (size_t i=0; i<(*it1)->vertices().size(); ++i)
268  {
269  OptimizableGraph::Vertex* v1= (OptimizableGraph::Vertex*) (*it1)->vertex(i);
270  if (v1->hessianIndex()==-1 || v1==v)
271  continue;
272  for (HyperGraph::EdgeSet::const_iterator it2=vedges.begin(); it2!=vedges.end(); ++it2){
273  for (size_t j=0; j<(*it2)->vertices().size(); ++j)
274  {
275  OptimizableGraph::Vertex* v2= (OptimizableGraph::Vertex*) (*it2)->vertex(j);
276  if (v2->hessianIndex()==-1 || v2==v)
277  continue;
278  int i1=v1->hessianIndex();
279  int i2=v2->hessianIndex();
280  if (i1<=i2) {
281  schurMatrixLookup->addBlock(i1, i2);
282  }
283  }
284  }
285  }
286  }
287  }
288  }
289 
290  _Hschur->takePatternFromHash(*schurMatrixLookup);
291  delete schurMatrixLookup;
292  _Hschur->fillSparseBlockMatrixCCSTransposed(*_HschurTransposedCCS);
293 
294  return true;
295 }
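
buildStructure() orders all non-marginalized (pose) vertices before all marginalized (landmark) vertices, so the linearized system has the 2x2 block form

  H = [ Hpp    Hpl ]      b = [ bp ]
      [ Hpl^T  Hll ]          [ bl ]

where _Hpp, _Hpl and _Hll hold the three stored blocks, and _Hschur holds the sparsity pattern of the reduced pose system Hpp - Hpl * Hll^-1 * Hpl^T that solve() assembles below.
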
296 
297 template <typename Traits>
298 bool BlockSolver<Traits>::updateStructure(const std::vector<HyperGraph::Vertex*>& vset, const HyperGraph::EdgeSet& edges)
299 {
300  for (std::vector<HyperGraph::Vertex*>::const_iterator vit = vset.begin(); vit != vset.end(); ++vit) {
301  OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*vit);
302  int dim = v->dimension();
303  if (! v->marginalized()){
304  v->setColInHessian(_sizePoses);
305  _sizePoses+=dim;
306  _Hpp->rowBlockIndices().push_back(_sizePoses);
307  _Hpp->colBlockIndices().push_back(_sizePoses);
308  _Hpp->blockCols().push_back(typename SparseBlockMatrix<PoseMatrixType>::IntBlockMap());
309  ++_numPoses;
310  int ind = v->hessianIndex();
311  PoseMatrixType* m = _Hpp->block(ind, ind, true);
312  v->mapHessianMemory(m->data());
313  } else {
314  std::cerr << "updateStructure(): Schur not supported" << std::endl;
315  abort();
316  }
317  }
318  resizeVector(_sizePoses + _sizeLandmarks);
319 
320  for (HyperGraph::EdgeSet::const_iterator it = edges.begin(); it != edges.end(); ++it) {
321  OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
322 
323  for (size_t viIdx = 0; viIdx < e->vertices().size(); ++viIdx) {
324  OptimizableGraph::Vertex* v1 = (OptimizableGraph::Vertex*) e->vertex(viIdx);
325  int ind1 = v1->hessianIndex();
326  int indexV1Bak = ind1;
327  if (ind1 == -1)
328  continue;
329  for (size_t vjIdx = viIdx + 1; vjIdx < e->vertices().size(); ++vjIdx) {
330  OptimizableGraph::Vertex* v2 = (OptimizableGraph::Vertex*) e->vertex(vjIdx);
331  int ind2 = v2->hessianIndex();
332  if (ind2 == -1)
333  continue;
334  ind1 = indexV1Bak;
335  bool transposedBlock = ind1 > ind2;
336  if (transposedBlock) // make sure we allocate the upper triangular block
337  swap(ind1, ind2);
338 
339  if (! v1->marginalized() && !v2->marginalized()) {
340  PoseMatrixType* m = _Hpp->block(ind1, ind2, true);
341  e->mapHessianMemory(m->data(), viIdx, vjIdx, transposedBlock);
342  } else {
343  std::cerr << __PRETTY_FUNCTION__ << ": not supported" << std::endl;
344  }
345  }
346  }
347 
348  }
349 
350  return true;
351 }
352 
353 template <typename Traits>
354 bool BlockSolver<Traits>::solve(){
355  //cerr << __PRETTY_FUNCTION__ << endl;
356  if (! _doSchur){
357  double t=get_monotonic_time();
358  bool ok = _linearSolver->solve(*_Hpp, _x, _b);
359  G2OBatchStatistics* globalStats = G2OBatchStatistics::globalStats();
360  if (globalStats) {
361  globalStats->timeLinearSolver = get_monotonic_time() - t;
362  globalStats->hessianDimension = globalStats->hessianPoseDimension = _Hpp->cols();
363  }
364  return ok;
365  }
366 
367  // schur thing
368 
369  // backup the coefficient matrix
370  double t=get_monotonic_time();
371 
372  // _Hschur = _Hpp, but keeping the pattern of _Hschur
373  _Hschur->clear();
374  _Hpp->add(_Hschur);
375 
376  //_DInvSchur->clear();
377  memset (_coefficients, 0, _sizePoses*sizeof(double));
378 # ifdef G2O_OPENMP
379 # pragma omp parallel for default (shared) schedule(dynamic, 10)
380 # endif
381  for (int landmarkIndex = 0; landmarkIndex < static_cast<int>(_Hll->blockCols().size()); ++landmarkIndex) {
382  const typename SparseBlockMatrix<LandmarkMatrixType>::IntBlockMap& marginalizeColumn = _Hll->blockCols()[landmarkIndex];
383  assert(marginalizeColumn.size() == 1 && "more than one block in _Hll column");
384 
385  // calculate inverse block for the landmark
386  const LandmarkMatrixType * D = marginalizeColumn.begin()->second;
387  assert (D && D->rows()==D->cols() && "Error in landmark matrix");
388  LandmarkMatrixType& Dinv = _DInvSchur->diagonal()[landmarkIndex];
389  Dinv = D->inverse();
390 
391  LandmarkVectorType db(D->rows());
392  for (int j=0; j<D->rows(); ++j) {
393  db[j]=_b[_Hll->rowBaseOfBlock(landmarkIndex) + _sizePoses + j];
394  }
395  db=Dinv*db;
396 
397  assert((size_t)landmarkIndex < _HplCCS->blockCols().size() && "Index out of bounds");
398  const typename SparseBlockMatrixCCS<PoseLandmarkMatrixType>::SparseColumn& landmarkColumn = _HplCCS->blockCols()[landmarkIndex];
399 
400  for (typename SparseBlockMatrixCCS<PoseLandmarkMatrixType>::SparseColumn::const_iterator it_outer = landmarkColumn.begin();
401  it_outer != landmarkColumn.end(); ++it_outer) {
402  int i1 = it_outer->row;
403 
404  const PoseLandmarkMatrixType* Bi = it_outer->block;
405  assert(Bi);
406 
407  PoseLandmarkMatrixType BDinv = (*Bi)*(Dinv);
408  assert(_HplCCS->rowBaseOfBlock(i1) < _sizePoses && "Index out of bounds");
409  typename PoseVectorType::MapType Bb(&_coefficients[_HplCCS->rowBaseOfBlock(i1)], Bi->rows());
410 # ifdef G2O_OPENMP
411  ScopedOpenMPMutex mutexLock(&_coefficientsMutex[i1]);
412 # endif
413  Bb.noalias() += (*Bi)*db;
414 
415  assert(i1 >= 0 && i1 < static_cast<int>(_HschurTransposedCCS->blockCols().size()) && "Index out of bounds");
416  typename SparseBlockMatrixCCS<PoseMatrixType>::SparseColumn::iterator targetColumnIt = _HschurTransposedCCS->blockCols()[i1].begin();
417 
418  typename SparseBlockMatrixCCS<PoseLandmarkMatrixType>::RowBlock aux(i1, 0);
419  typename SparseBlockMatrixCCS<PoseLandmarkMatrixType>::SparseColumn::const_iterator it_inner = lower_bound(landmarkColumn.begin(), landmarkColumn.end(), aux);
420  for (; it_inner != landmarkColumn.end(); ++it_inner) {
421  int i2 = it_inner->row;
422  const PoseLandmarkMatrixType* Bj = it_inner->block;
423  assert(Bj);
424  while (targetColumnIt->row < i2 /*&& targetColumnIt != _HschurTransposedCCS->blockCols()[i1].end()*/)
425  ++targetColumnIt;
426  assert(targetColumnIt != _HschurTransposedCCS->blockCols()[i1].end() && targetColumnIt->row == i2 && "invalid iterator, something wrong with the matrix structure");
427  PoseMatrixType* Hi1i2 = targetColumnIt->block;//_Hschur->block(i1,i2);
428  assert(Hi1i2);
429  (*Hi1i2).noalias() -= BDinv*Bj->transpose();
430  }
431  }
432  }
433  //cerr << "Solve [marginalize] = " << get_monotonic_time()-t << endl;
434 
435  // _bschur = _b for calling solver, and not touching _b
436  memcpy(_bschur, _b, _sizePoses * sizeof(double));
437  for (int i=0; i<_sizePoses; ++i){
438  _bschur[i]-=_coefficients[i];
439  }
440 
441  G2OBatchStatistics* globalStats = G2OBatchStatistics::globalStats();
442  if (globalStats){
443  globalStats->timeSchurComplement = get_monotonic_time() - t;
444  }
445 
446  t=get_monotonic_time();
447  bool solvedPoses = _linearSolver->solve(*_Hschur, _x, _bschur);
448  if (globalStats) {
449  globalStats->timeLinearSolver = get_monotonic_time() - t;
450  globalStats->hessianPoseDimension = _Hpp->cols();
451  globalStats->hessianLandmarkDimension = _Hll->cols();
452  globalStats->hessianDimension = globalStats->hessianPoseDimension + globalStats->hessianLandmarkDimension;
453  }
454  //cerr << "Solve [decompose and solve] = " << get_monotonic_time()-t << endl;
455 
456  if (! solvedPoses)
457  return false;
458 
459  // _x contains the solution for the poses, now applying it to the landmarks to get the new part of the
460  // solution;
461  double* xp = _x;
462  double* cp = _coefficients;
463 
464  double* xl=_x+_sizePoses;
465  double* cl=_coefficients + _sizePoses;
466  double* bl=_b+_sizePoses;
467 
468  // cp = -xp
469  for (int i=0; i<_sizePoses; ++i)
470  cp[i]=-xp[i];
471 
472  // cl = bl
473  memcpy(cl,bl,_sizeLandmarks*sizeof(double));
474 
475  // cl = bl - Bt * xp
476  //Bt->multiply(cl, cp);
477  _HplCCS->rightMultiply(cl, cp);
478 
479  // xl = Dinv * cl
480  memset(xl,0, _sizeLandmarks*sizeof(double));
481  _DInvSchur->multiply(xl,cl);
482  //_DInvSchur->rightMultiply(xl,cl);
483  //cerr << "Solve [landmark delta] = " << get_monotonic_time()-t << endl;
484 
485  return true;
486 }
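
The per-landmark loop above is a sparse, block-wise implementation of the standard Schur-complement elimination. A minimal dense Eigen sketch of the same algebra, using a hypothetical helper function (illustrative only, not part of g2o; in the real code Hll is block-diagonal, so its inverse is formed one landmark block at a time in _DInvSchur):

  #include <Eigen/Dense>

  // Eliminate the landmark block, solve the reduced pose system, then back-substitute.
  void schurSolveDense(const Eigen::MatrixXd& Hpp, const Eigen::MatrixXd& Hpl,
                       const Eigen::MatrixXd& Hll, const Eigen::VectorXd& bp,
                       const Eigen::VectorXd& bl,
                       Eigen::VectorXd& xp, Eigen::VectorXd& xl)
  {
    const Eigen::MatrixXd HllInv = Hll.inverse();                         // Dinv in the loop above
    const Eigen::MatrixXd Hschur = Hpp - Hpl * HllInv * Hpl.transpose();  // _Hschur
    const Eigen::VectorXd bschur = bp - Hpl * (HllInv * bl);              // _bschur
    xp = Hschur.ldlt().solve(bschur);            // pose increment (the linear solver's job)
    xl = HllInv * (bl - Hpl.transpose() * xp);   // landmark increment (back-substitution)
  }
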
487 
488 
489 template <typename Traits>
490 bool BlockSolver<Traits>::computeMarginals(SparseBlockMatrix<MatrixXd>& spinv, const std::vector<std::pair<int, int> >& blockIndices)
491 {
492  double t = get_monotonic_time();
493  bool ok = _linearSolver->solvePattern(spinv, blockIndices, *_Hpp);
494  G2OBatchStatistics* globalStats = G2OBatchStatistics::globalStats();
495  if (globalStats) {
496  globalStats->timeMarginals = get_monotonic_time() - t;
497  }
498  return ok;
499 }
500 
501 template <typename Traits>
502 bool BlockSolver<Traits>::buildSystem()
503 {
504  // clear b vector
505 # ifdef G2O_OPENMP
506 # pragma omp parallel for default (shared) if (_optimizer->indexMapping().size() > 1000)
507 # endif
508  for (int i = 0; i < static_cast<int>(_optimizer->indexMapping().size()); ++i) {
509  OptimizableGraph::Vertex* v = _optimizer->indexMapping()[i];
510  assert(v);
511  v->clearQuadraticForm();
512  }
513  _Hpp->clear();
514  if (_doSchur) {
515  _Hll->clear();
516  _Hpl->clear();
517  }
518 
519  // resetting the terms for the pairwise constraints
520  // built up the current system by storing the Hessian blocks in the edges and vertices
521 # ifndef G2O_OPENMP
522  // no threading, we do not need to copy the workspace
523  JacobianWorkspace& jacobianWorkspace = _optimizer->jacobianWorkspace();
524 # else
525  // if running with threads need to produce copies of the workspace for each thread
526  JacobianWorkspace jacobianWorkspace = _optimizer->jacobianWorkspace();
527 # pragma omp parallel for default (shared) firstprivate(jacobianWorkspace) if (_optimizer->activeEdges().size() > 100)
528 # endif
529  for (int k = 0; k < static_cast<int>(_optimizer->activeEdges().size()); ++k) {
530  OptimizableGraph::Edge* e = _optimizer->activeEdges()[k];
531  e->linearizeOplus(jacobianWorkspace); // jacobian of the nodes' oplus (manifold)
532  e->constructQuadraticForm();
533 # ifndef NDEBUG
534  for (size_t i = 0; i < e->vertices().size(); ++i) {
535  const OptimizableGraph::Vertex* v = static_cast<const OptimizableGraph::Vertex*>(e->vertex(i));
536  if (! v->fixed()) {
537  bool hasANan = arrayHasNaN(jacobianWorkspace.workspaceForVertex(i), e->dimension() * v->dimension());
538  if (hasANan) {
539  cerr << "buildSystem(): NaN within Jacobian for edge " << e << " for vertex " << i << endl;
540  break;
541  }
542  }
543  }
544 # endif
545  }
546 
547  // flush the current system in a sparse block matrix
548 # ifdef G2O_OPENMP
549 # pragma omp parallel for default (shared) if (_optimizer->indexMapping().size() > 1000)
550 # endif
551  for (int i = 0; i < static_cast<int>(_optimizer->indexMapping().size()); ++i) {
552  OptimizableGraph::Vertex* v = _optimizer->indexMapping()[i];
553  int iBase = v->colInHessian();
554  if (v->marginalized())
555  iBase+=_sizePoses;
556  v->copyB(_b+iBase);
557  }
558 
559  return 0;
560 }
561 
562 
563 template <typename Traits>
564 bool BlockSolver<Traits>::setLambda(double lambda, bool backup)
565 {
566  if (backup) {
567  _diagonalBackupPose.resize(_numPoses);
568  _diagonalBackupLandmark.resize(_numLandmarks);
569  }
570 # ifdef G2O_OPENMP
571 # pragma omp parallel for default (shared) if (_numPoses > 100)
572 # endif
573  for (int i = 0; i < _numPoses; ++i) {
574  PoseMatrixType *b=_Hpp->block(i,i);
575  if (backup)
576  _diagonalBackupPose[i] = b->diagonal();
577  b->diagonal().array() += lambda;
578  }
579 # ifdef G2O_OPENMP
580 # pragma omp parallel for default (shared) if (_numLandmarks > 100)
581 # endif
582  for (int i = 0; i < _numLandmarks; ++i) {
583  LandmarkMatrixType *b=_Hll->block(i,i);
584  if (backup)
585  _diagonalBackupLandmark[i] = b->diagonal();
586  b->diagonal().array() += lambda;
587  }
588  return true;
589 }
590 
591 template <typename Traits>
592 void BlockSolver<Traits>::restoreDiagonal()
593 {
594  assert((int) _diagonalBackupPose.size() == _numPoses && "Mismatch in dimensions");
595  assert((int) _diagonalBackupLandmark.size() == _numLandmarks && "Mismatch in dimensions");
596  for (int i = 0; i < _numPoses; ++i) {
597  PoseMatrixType *b=_Hpp->block(i,i);
598  b->diagonal() = _diagonalBackupPose[i];
599  }
600  for (int i = 0; i < _numLandmarks; ++i) {
601  LandmarkMatrixType *b=_Hll->block(i,i);
602  b->diagonal() = _diagonalBackupLandmark[i];
603  }
604 }
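
setLambda() and restoreDiagonal() together support a damped (Levenberg-Marquardt style) iteration: the diagonals of Hpp and Hll are backed up, lambda is added, the damped system is solved, and the original diagonal is restored so the next iteration can apply a different lambda. A hedged sketch of the calling pattern (the control flow and acceptance test are illustrative, not the exact g2o algorithm code):

  solver.setLambda(lambda, true); // back up the diagonal, then add lambda to it
  bool ok = solver.solve();       // solve the damped system (H + lambda*I) dx = b
  solver.restoreDiagonal();       // undo the damping
  // accept or reject the computed step based on the new error, then adapt lambda
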
605 
606 template <typename Traits>
607 bool BlockSolver<Traits>::init(SparseOptimizer* optimizer, bool online)
608 {
609  _optimizer = optimizer;
610  if (! online) {
611  if (_Hpp)
612  _Hpp->clear();
613  if (_Hpl)
614  _Hpl->clear();
615  if (_Hll)
616  _Hll->clear();
617  }
618  _linearSolver->init();
619  return true;
620 }
621 
622 template <typename Traits>
623 void BlockSolver<Traits>::setWriteDebug(bool writeDebug)
624 {
625  _linearSolver->setWriteDebug(writeDebug);
626 }
627 
628 template <typename Traits>
629 bool BlockSolver<Traits>::saveHessian(const std::string& fileName) const
630 {
631  return _Hpp->writeOctave(fileName.c_str(), true);
632 }
633 
634 } // end namespace
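
For context, a typical instantiation of this template in the raw-pointer g2o API that this file belongs to (as used, e.g., by ORB-SLAM2). The header paths and the choice of LinearSolverEigen are assumptions and may differ between g2o versions:

  #include "g2o/core/sparse_optimizer.h"
  #include "g2o/core/block_solver.h"
  #include "g2o/core/optimization_algorithm_levenberg.h"
  #include "g2o/solvers/eigen/linear_solver_eigen.h"  // path may vary

  // Configure a SparseOptimizer with the 6-dof-pose / 3-dof-landmark block solver.
  void setupOptimizer(g2o::SparseOptimizer& optimizer)
  {
    // Linear solver used for the (Schur-reduced) pose system.
    g2o::BlockSolver_6_3::LinearSolverType* linearSolver =
        new g2o::LinearSolverEigen<g2o::BlockSolver_6_3::PoseMatrixType>();
    // BlockSolver takes ownership of the linear solver (deleted in ~BlockSolver above).
    g2o::BlockSolver_6_3* blockSolver = new g2o::BlockSolver_6_3(linearSolver);
    // The optimization algorithm takes ownership of the block solver.
    g2o::OptimizationAlgorithmLevenberg* algorithm =
        new g2o::OptimizationAlgorithmLevenberg(blockSolver);
    optimizer.setAlgorithm(algorithm);
    // ...add vertices and edges, then initializeOptimization() and optimize(iterations).
  }
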