#ifndef EIGEN_SPARSEVECTOR_H
#define EIGEN_SPARSEVECTOR_H

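/** \class SparseVector
  *
  * \brief a sparse vector class
  *
  * \param _Scalar the scalar type, i.e. the type of the coefficients
  */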
template<typename _Scalar, int _Flags>
struct ei_traits<SparseVector<_Scalar, _Flags> >
{
  typedef _Scalar Scalar;
  enum {
    IsColVector = _Flags & RowMajorBit ? 0 : 1,

    RowsAtCompileTime = IsColVector ? Dynamic : 1,
    ColsAtCompileTime = IsColVector ? 1 : Dynamic,
    MaxRowsAtCompileTime = RowsAtCompileTime,
    MaxColsAtCompileTime = ColsAtCompileTime,
    Flags = SparseBit | _Flags,
    CoeffReadCost = NumTraits<Scalar>::ReadCost,
    SupportedAccessPatterns = InnerRandomAccessPattern
  };
};

template<typename _Scalar, int _Flags>
class SparseVector
  : public SparseMatrixBase<SparseVector<_Scalar, _Flags> >
{
  public:
    EIGEN_SPARSE_GENERIC_PUBLIC_INTERFACE(SparseVector)
    EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, +=)
    EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, -=)

  public:

    typedef SparseMatrixBase<SparseVector> SparseBase;
    enum { IsColVector = ei_traits<SparseVector>::IsColVector };

    CompressedStorage<Scalar> m_data;
    int m_size;

    CompressedStorage<Scalar>& _data() { return m_data; }
    const CompressedStorage<Scalar>& _data() const { return m_data; }

  public:

    EIGEN_STRONG_INLINE int rows() const { return IsColVector ? m_size : 1; }
    EIGEN_STRONG_INLINE int cols() const { return IsColVector ? 1 : m_size; }
    EIGEN_STRONG_INLINE int innerSize() const { return m_size; }
    EIGEN_STRONG_INLINE int outerSize() const { return 1; }
    // number of nonzeros stored in the (single) inner vector
    EIGEN_STRONG_INLINE int innerNonZeros(int j) const { ei_assert(j==0); return m_data.size(); }

    EIGEN_STRONG_INLINE const Scalar* _valuePtr() const { return &m_data.value(0); }
    EIGEN_STRONG_INLINE Scalar* _valuePtr() { return &m_data.value(0); }

    EIGEN_STRONG_INLINE const int* _innerIndexPtr() const { return &m_data.index(0); }
    EIGEN_STRONG_INLINE int* _innerIndexPtr() { return &m_data.index(0); }

    inline Scalar coeff(int row, int col) const
    {
      ei_assert((IsColVector ? col : row)==0);
      return coeff(IsColVector ? row : col);
    }
    inline Scalar coeff(int i) const { return m_data.at(i); }

    inline Scalar& coeffRef(int row, int col)
    {
      ei_assert((IsColVector ? col : row)==0);
      return coeffRef(IsColVector ? row : col);
    }

    /** \returns a reference to the coefficient value at given index \a i.
      * If the coefficient does not exist yet, it is inserted into the sorted
      * internal buffer, which can be costly for large vectors. */
    inline Scalar& coeffRef(int i)
    {
      return m_data.atWithInsertion(i);
    }

  public:

    class InnerIterator;

    /** Removes all nonzeros (the size of the vector is kept). */
    inline void setZero() { m_data.clear(); }

    /** \returns the number of nonzero coefficients */
    inline int nonZeros() const { return m_data.size(); }

    /** Preallocates room for \a reserveSize nonzeros. */
    inline void reserve(int reserveSize) { m_data.reserve(reserveSize); }

    /** Clears the vector and reserves room for \a reserve nonzeros. \sa fill(), fillrand(), endFill() */
    inline void startFill(int reserve)
    {
      setZero();
      m_data.reserve(reserve);
    }

    /** Inserts a new zero-valued coefficient at row \a r, column \a c (one of them must
      * be 0) and returns a reference to it. Coefficients must be filled in increasing
      * inner index order. \sa fillrand() */
    inline Scalar& fill(int r, int c)
    {
      ei_assert(r==0 || c==0);
      return fill(IsColVector ? r : c);
    }

    inline Scalar& fill(int i)
    {
      // append() does not sort, so fill() must be called with strictly increasing indices
      m_data.append(0, i);
      return m_data.value(m_data.size()-1);
    }

    /** Like fill(), but the coefficient can be inserted at an arbitrary position. */
    inline Scalar& fillrand(int r, int c)
    {
      ei_assert(r==0 || c==0);
      return fillrand(IsColVector ? r : c);
    }

    inline Scalar& fillrand(int i)
    {
      int startId = 0;
      int id = m_data.size() - 1;
      // make room for one more coefficient
      m_data.resize(id+2,1);

      // shift all entries with a larger inner index one position to the right
      while ( (id >= startId) && (m_data.index(id) > i) )
      {
        m_data.index(id+1) = m_data.index(id);
        m_data.value(id+1) = m_data.value(id);
        --id;
      }
      // insert the new zero-valued coefficient at its sorted position
      m_data.index(id+1) = i;
      m_data.value(id+1) = 0;
      return m_data.value(id+1);
    }

    inline void endFill() {}
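
    /** Illustrative usage sketch of the fill API above (not part of the original
      * documentation; assumes the usual default of 0 for the _Flags argument):
      * \code
      * SparseVector<double> vec(1000);
      * vec.startFill(3);        // clear and reserve room for 3 nonzeros
      * vec.fill(10)  = 1.0;     // fill() requires increasing inner indices
      * vec.fill(500) = 2.0;
      * vec.fillrand(42) = 3.0;  // fillrand() accepts any index (sorted insertion)
      * vec.endFill();
      * \endcode
      */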

    /** Removes all nonzeros that are much smaller than \a reference,
      * using \a epsilon as the relative tolerance. */
    void prune(Scalar reference, RealScalar epsilon = precision<RealScalar>())
    {
      m_data.prune(reference,epsilon);
    }

    void resize(int rows, int cols)
    {
      ei_assert(rows==1 || cols==1);
      resize(IsColVector ? rows : cols);
    }

    /** Resizes the vector to \a newSize. Note that all nonzeros are removed. */
    void resize(int newSize)
    {
      m_size = newSize;
      m_data.clear();
    }

    void resizeNonZeros(int size) { m_data.resize(size); }

    inline SparseVector() : m_size(0) { resize(0); }

    inline SparseVector(int size) : m_size(0) { resize(size); }

    inline SparseVector(int rows, int cols) : m_size(0) { resize(rows,cols); }

    template<typename OtherDerived>
    inline SparseVector(const MatrixBase<OtherDerived>& other)
      : m_size(0)
    {
      *this = other.derived();
    }

    template<typename OtherDerived>
    inline SparseVector(const SparseMatrixBase<OtherDerived>& other)
      : m_size(0)
    {
      *this = other.derived();
    }

    inline SparseVector(const SparseVector& other)
      : m_size(0)
    {
      *this = other.derived();
    }

    inline void swap(SparseVector& other)
    {
      std::swap(m_size, other.m_size);
      m_data.swap(other.m_data);
    }

    inline SparseVector& operator=(const SparseVector& other)
    {
      if (other.isRValue())
      {
        swap(other.const_cast_derived());
      }
      else
      {
        resize(other.size());
        m_data = other.m_data;
      }
      return *this;
    }

    template<typename OtherDerived>
    inline SparseVector& operator=(const SparseMatrixBase<OtherDerived>& other)
    {
      return Base::operator=(other);
    }

    friend std::ostream & operator << (std::ostream & s, const SparseVector& m)
    {
      for (int i=0; i<m.nonZeros(); ++i)
        s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
      s << std::endl;
      return s;
    }

00302
00303
00304
00305
00306
00307
00308
00309
00310
00311
00312
00313
00314
00315
00316
00317
00318
00319
00320
00321
00322
00323
00324
    inline ~SparseVector() {}
};

template<typename Scalar, int _Flags>
class SparseVector<Scalar,_Flags>::InnerIterator
{
  public:
    InnerIterator(const SparseVector& vec, int outer=0)
      : m_data(vec.m_data), m_id(0), m_end(m_data.size())
    {
      ei_assert(outer==0);
    }

    InnerIterator(const CompressedStorage<Scalar>& data)
      : m_data(data), m_id(0), m_end(m_data.size())
    {}

    template<unsigned int Added, unsigned int Removed>
    InnerIterator(const Flagged<SparseVector,Added,Removed>& vec, int outer)
      : m_data(vec._expression().m_data), m_id(0), m_end(m_data.size())
    {}

    inline InnerIterator& operator++() { m_id++; return *this; }

    inline Scalar value() const { return m_data.value(m_id); }
    inline Scalar& valueRef() { return const_cast<Scalar&>(m_data.value(m_id)); }

    inline int index() const { return m_data.index(m_id); }
    inline int row() const { return IsColVector ? index() : 0; }
    inline int col() const { return IsColVector ? 0 : index(); }

    inline operator bool() const { return (m_id < m_end); }

  protected:
    const CompressedStorage<Scalar>& m_data;
    int m_id;
    const int m_end;

  private:
    // copy assignment is intentionally disabled (the iterator holds a reference member)
    InnerIterator& operator=(const InnerIterator&);
};

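/** Illustrative sketch (not part of the original documentation) of iterating over the
  * nonzeros of a SparseVector with the InnerIterator defined above; assumes the usual
  * default of 0 for the _Flags argument:
  * \code
  * SparseVector<double> vec(10);
  * // ... fill vec ...
  * for (SparseVector<double>::InnerIterator it(vec); it; ++it)
  *   std::cout << it.index() << " -> " << it.value() << "\n";
  * \endcode
  */
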
#endif // EIGEN_SPARSEVECTOR_H