AmbiVector.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_AMBIVECTOR_H
#define EIGEN_AMBIVECTOR_H

namespace Eigen {

namespace internal {

/** \internal
  * Hybrid sparse/dense vector class designed for intensive read-write operations.
  *
  * See BasicSparseLLT and SparseProduct for usage examples.
  */
template<typename _Scalar, typename _Index>
class AmbiVector
{
  public:
    typedef _Scalar Scalar;
    typedef _Index Index;
    typedef typename NumTraits<Scalar>::Real RealScalar;

    AmbiVector(Index size)
      : m_buffer(0), m_zero(0), m_size(0), m_allocatedSize(0), m_allocatedElements(0), m_mode(-1)
    {
      resize(size);
    }

    void init(double estimatedDensity);
    void init(int mode);

    Index nonZeros() const;

    /** Specifies a sub-vector to work on */
    void setBounds(Index start, Index end) { m_start = start; m_end = end; }

    void setZero();

    void restart();
    Scalar& coeffRef(Index i);
    Scalar& coeff(Index i);

    class Iterator;

    ~AmbiVector() { delete[] m_buffer; }

    void resize(Index size)
    {
      if (m_allocatedSize < size)
        reallocate(size);
      m_size = size;
    }

    Index size() const { return m_size; }

  protected:

    void reallocate(Index size)
    {
      // if the size of the matrix is not too large, let's allocate a bit more than needed so
      // that we can handle a dense vector even in sparse mode.
      delete[] m_buffer;
      if (size<1000)
      {
        // round up so that the buffer can also hold `size` list elements exactly
        Index allocSize = (size * sizeof(ListEl) + sizeof(Scalar) - 1)/sizeof(Scalar);
        m_allocatedElements = (allocSize*sizeof(Scalar))/sizeof(ListEl);
        m_buffer = new Scalar[allocSize];
        // record the capacity so that resize() does not reallocate needlessly
        m_allocatedSize = allocSize;
      }
      else
      {
        m_allocatedElements = (size*sizeof(Scalar))/sizeof(ListEl);
        m_buffer = new Scalar[size];
        m_allocatedSize = size;
      }
      m_size = size;
      m_start = 0;
      m_end = m_size;
    }
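
    // Worked example of the sizing above, assuming Scalar=double and Index=int
    // on a typical 64-bit platform: sizeof(ListEl) is 16 bytes and
    // sizeof(Scalar) is 8 bytes, so for size==100 we get
    // allocSize = (100*16 + 8-1)/8 = 200 Scalars, i.e. room for 100 list
    // elements in sparse mode as well as a dense vector of 100 coefficients.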

    void reallocateSparse()
    {
      Index copyElements = m_allocatedElements;
      // grow the capacity by a factor 1.5, but never beyond the logical size
      m_allocatedElements = (std::min)(Index(m_allocatedElements*1.5),m_size);
      Index allocSize = m_allocatedElements * sizeof(ListEl);
      // convert the byte count into a number of Scalars, rounding up
      allocSize = allocSize/sizeof(Scalar) + (allocSize%sizeof(Scalar)>0?1:0);
      Scalar* newBuffer = new Scalar[allocSize];
      memcpy(newBuffer,  m_buffer,  copyElements * sizeof(ListEl));
      delete[] m_buffer;
      m_buffer = newBuffer;
    }
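
    // For instance, growing from a capacity of 8 list elements yields
    // min(Index(8*1.5), m_size) = min(12, m_size) elements; the memcpy above
    // is valid because ListEl is a trivially copyable struct.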

  protected:
    // element type of the linked list
    struct ListEl
    {
      Index next;
      Index index;
      Scalar value;
    };

    // used to store data in both modes
    Scalar* m_buffer;
    Scalar m_zero;
    Index m_size;
    Index m_start;
    Index m_end;
    Index m_allocatedSize;
    Index m_allocatedElements;
    Index m_mode;

    // linked list mode
    Index m_llStart;
    Index m_llCurrent;
    Index m_llSize;
};
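
// A minimal usage sketch, assuming Scalar=double and Index=int (AmbiVector is
// an internal helper of the sparse module, so this is illustrative only):
//
//   AmbiVector<double,int> vec(100);  // logical size 100
//   vec.init(0.01);                   // estimated density 0.01 <= 0.1 => sparse mode
//   vec.setBounds(0, 100);
//   vec.setZero();
//   vec.restart();
//   vec.coeffRef(3)  = 1.0;           // write in increasing index order...
//   vec.coeffRef(42) = 2.0;           // ...or call restart() before going back
//   for (AmbiVector<double,int>::Iterator it(vec); it; ++it)
//     printf("%d -> %g\n", it.index(), it.value());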

/** \internal Returns the number of stored coefficients: the list length in sparse mode, the working-range length in dense mode. */
template<typename _Scalar,typename _Index>
_Index AmbiVector<_Scalar,_Index>::nonZeros() const
{
  if (m_mode==IsSparse)
    return m_llSize;
  else
    return m_end - m_start;
}

template<typename _Scalar,typename _Index>
void AmbiVector<_Scalar,_Index>::init(double estimatedDensity)
{
  if (estimatedDensity>0.1)
    init(IsDense);
  else
    init(IsSparse);
}
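
// For example, a column expected to receive 40 non-zeros out of 200 entries
// has an estimated density of 40/200 = 0.2 > 0.1, so the dense representation
// is selected; with 10 expected non-zeros (density 0.05) the linked-list
// representation is used instead.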

template<typename _Scalar,typename _Index>
void AmbiVector<_Scalar,_Index>::init(int mode)
{
  m_mode = mode;
  // in sparse (linked-list) mode, start from an empty list
  if (m_mode==IsSparse)
  {
    m_llSize = 0;
    m_llStart = -1;
  }
}

/** Must be called whenever we might perform a write access
  * with an index smaller than the previously written one.
  *
  * Don't worry, this function is extremely cheap.
  */
template<typename _Scalar,typename _Index>
void AmbiVector<_Scalar,_Index>::restart()
{
  m_llCurrent = m_llStart;
}

/** Sets all coefficients of the current sub-vector to zero */
template<typename _Scalar,typename _Index>
void AmbiVector<_Scalar,_Index>::setZero()
{
  if (m_mode==IsDense)
  {
    for (Index i=m_start; i<m_end; ++i)
      m_buffer[i] = Scalar(0);
  }
  else
  {
    eigen_assert(m_mode==IsSparse);
    m_llSize = 0;
    m_llStart = -1;
  }
}

template<typename _Scalar,typename _Index>
_Scalar& AmbiVector<_Scalar,_Index>::coeffRef(_Index i)
{
  if (m_mode==IsDense)
    return m_buffer[i];
  else
  {
    ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_buffer);
    // TODO factorize the following code to reduce code generation
    eigen_assert(m_mode==IsSparse);
    if (m_llSize==0)
    {
      // this is the first element
      m_llStart = 0;
      m_llCurrent = 0;
      ++m_llSize;
      llElements[0].value = Scalar(0);
      llElements[0].index = i;
      llElements[0].next = -1;
      return llElements[0].value;
    }
    else if (i<llElements[m_llStart].index)
    {
      // this is going to be the new first element of the list
      ListEl& el = llElements[m_llSize];
      el.value = Scalar(0);
      el.index = i;
      el.next = m_llStart;
      m_llStart = m_llSize;
      ++m_llSize;
      m_llCurrent = m_llStart;
      return el.value;
    }
    else
    {
      Index nextel = llElements[m_llCurrent].next;
      eigen_assert(i>=llElements[m_llCurrent].index && "you must call restart() before inserting an element with lower or equal index");
      while (nextel >= 0 && llElements[nextel].index<=i)
      {
        m_llCurrent = nextel;
        nextel = llElements[nextel].next;
      }

      if (llElements[m_llCurrent].index==i)
      {
        // the coefficient already exists and we found it!
        return llElements[m_llCurrent].value;
      }
      else
      {
        if (m_llSize>=m_allocatedElements)
        {
          reallocateSparse();
          llElements = reinterpret_cast<ListEl*>(m_buffer);
        }
        eigen_internal_assert(m_llSize<m_allocatedElements && "internal error: overflow in sparse mode");
        // let's insert a new coefficient
        ListEl& el = llElements[m_llSize];
        el.value = Scalar(0);
        el.index = i;
        el.next = llElements[m_llCurrent].next;
        llElements[m_llCurrent].next = m_llSize;
        ++m_llSize;
        return el.value;
      }
    }
  }
}
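
// Access-order sketch for the sparse path above (reusing the hypothetical
// `vec` from the earlier example):
//
//   vec.coeffRef(5) = 1.0;
//   vec.coeffRef(9) = 2.0;   // ok: 9 >= 5, the search continues from the current node
//   vec.restart();           // mandatory before writing to a smaller index again
//   vec.coeffRef(7) = 3.0;   // ok after restart(): the search resumes from the list head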

template<typename _Scalar,typename _Index>
_Scalar& AmbiVector<_Scalar,_Index>::coeff(_Index i)
{
  if (m_mode==IsDense)
    return m_buffer[i];
  else
  {
    ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_buffer);
    eigen_assert(m_mode==IsSparse);
    if ((m_llSize==0) || (i<llElements[m_llStart].index))
    {
      return m_zero;
    }
    else
    {
      Index elid = m_llStart;
      while (elid >= 0 && llElements[elid].index<i)
        elid = llElements[elid].next;

      // guard against falling off the end of the list, and return the element
      // that was actually found (not the unrelated m_llCurrent)
      if (elid>=0 && llElements[elid].index==i)
        return llElements[elid].value;
      else
        return m_zero;
    }
  }
}

/** \internal Iterator over the nonzero coefficients */
template<typename _Scalar,typename _Index>
class AmbiVector<_Scalar,_Index>::Iterator
{
  public:
    typedef _Scalar Scalar;
    typedef typename NumTraits<Scalar>::Real RealScalar;

    /** \param vec the vector on which we iterate
      * \param epsilon the minimal value used to prune zero coefficients:
      * in practice, all coefficients having a magnitude smaller than or
      * equal to \a epsilon are skipped.
      */
    Iterator(const AmbiVector& vec, const RealScalar& epsilon = 0)
      : m_vector(vec)
    {
      using std::abs;
      m_epsilon = epsilon;
      m_isDense = m_vector.m_mode==IsDense;
      if (m_isDense)
      {
        m_currentEl = 0;   // this is to avoid a compilation warning
        m_cachedValue = 0; // this is to avoid a compilation warning
        m_cachedIndex = m_vector.m_start-1;
        ++(*this);
      }
      else
      {
        ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
        m_currentEl = m_vector.m_llStart;
        while (m_currentEl>=0 && abs(llElements[m_currentEl].value)<=m_epsilon)
          m_currentEl = llElements[m_currentEl].next;
        if (m_currentEl<0)
        {
          m_cachedValue = 0; // this is to avoid a compilation warning
          m_cachedIndex = -1;
        }
        else
        {
          m_cachedIndex = llElements[m_currentEl].index;
          m_cachedValue = llElements[m_currentEl].value;
        }
      }
    }

    Index index() const { return m_cachedIndex; }
    Scalar value() const { return m_cachedValue; }

    operator bool() const { return m_cachedIndex>=0; }

    Iterator& operator++()
    {
      using std::abs;
      if (m_isDense)
      {
        // skip pruned values (magnitude <= epsilon, consistently with the constructor)
        do {
          ++m_cachedIndex;
        } while (m_cachedIndex<m_vector.m_end && abs(m_vector.m_buffer[m_cachedIndex])<=m_epsilon);
        if (m_cachedIndex<m_vector.m_end)
          m_cachedValue = m_vector.m_buffer[m_cachedIndex];
        else
          m_cachedIndex=-1;
      }
      else
      {
        ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
        do {
          m_currentEl = llElements[m_currentEl].next;
        } while (m_currentEl>=0 && abs(llElements[m_currentEl].value)<=m_epsilon);
        if (m_currentEl<0)
        {
          m_cachedIndex = -1;
        }
        else
        {
          m_cachedIndex = llElements[m_currentEl].index;
          m_cachedValue = llElements[m_currentEl].value;
        }
      }
      return *this;
    }

  protected:
    const AmbiVector& m_vector; // the target vector
    Index m_currentEl;          // the current element in sparse/linked-list mode
    RealScalar m_epsilon;       // epsilon used to prune zero coefficients
    Index m_cachedIndex;        // current coordinate
    Scalar m_cachedValue;       // current value
    bool m_isDense;             // mode of the vector
};
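
// Pruning sketch: with a non-zero epsilon, coefficients of small magnitude are
// skipped during iteration (again with the hypothetical `vec` from above;
// `process` is a placeholder callback):
//
//   for (AmbiVector<double,int>::Iterator it(vec, 1e-12); it; ++it)
//     process(it.index(), it.value());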

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_AMBIVECTOR_H

