AmbiVector.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_AMBIVECTOR_H
#define EIGEN_AMBIVECTOR_H

namespace Eigen {

namespace internal {

/** \internal
  * Hybrid sparse/dense vector class designed for intensive read-write operations.
  *
  * See BasicSparseLLT and SparseProduct for usage examples.
  */
template<typename _Scalar, typename _Index>
class AmbiVector
{
  public:
    typedef _Scalar Scalar;
    typedef _Index Index;
    typedef typename NumTraits<Scalar>::Real RealScalar;

    AmbiVector(Index size)
      : m_buffer(0), m_zero(0), m_size(0), m_allocatedSize(0), m_allocatedElements(0), m_mode(-1)
    {
      resize(size);
    }

    void init(double estimatedDensity);
    void init(int mode);

    Index nonZeros() const;

    /** Specifies a sub-vector to work on */
    void setBounds(Index start, Index end) { m_start = start; m_end = end; }

    void setZero();

    void restart();
    Scalar& coeffRef(Index i);
    Scalar& coeff(Index i);

    class Iterator;

    ~AmbiVector() { delete[] m_buffer; }

    void resize(Index size)
    {
      if (m_allocatedSize < size)
        reallocate(size);
      m_size = size;
    }

    Index size() const { return m_size; }

  protected:

    void reallocate(Index size)
    {
      // if the size of the matrix is not too large, let's allocate a bit more than needed so
      // that the buffer can hold a fully dense vector even in sparse (linked-list) mode
      delete[] m_buffer;
      if (size<1000)
      {
        Index allocSize = (size * sizeof(ListEl))/sizeof(Scalar);
        m_allocatedElements = (allocSize*sizeof(Scalar))/sizeof(ListEl);
        m_buffer = new Scalar[allocSize];
      }
      else
      {
        m_allocatedElements = (size*sizeof(Scalar))/sizeof(ListEl);
        m_buffer = new Scalar[size];
      }
      m_size = size;
      m_start = 0;
      m_end = m_size;
    }
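
    // Illustrative note on the sizing arithmetic above (assuming a typical
    // instantiation with Scalar=double and Index=int, where sizeof(ListEl) is
    // 16 bytes on common 64-bit ABIs): for size<1000 the buffer holds
    // size*16/8 = 2*size doubles, i.e. room for exactly `size` ListEl nodes,
    // so sparse mode can store every coefficient without reallocating; for
    // larger sizes only `size` Scalars are allocated, giving roughly size/2
    // nodes up front and relying on reallocateSparse() below for growth.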

    void reallocateSparse()
    {
      Index copyElements = m_allocatedElements;
      // grow the node capacity by ~50%, but never beyond one node per coefficient
      m_allocatedElements = (std::min)(Index(m_allocatedElements*1.5), m_size);
      Index allocSize = m_allocatedElements * sizeof(ListEl);
      allocSize = allocSize/sizeof(Scalar) + (allocSize%sizeof(Scalar)>0?1:0);
      Scalar* newBuffer = new Scalar[allocSize];
      memcpy(newBuffer, m_buffer, copyElements * sizeof(ListEl));
      delete[] m_buffer;
      m_buffer = newBuffer;
    }

  protected:
    // element type of the linked list
    struct ListEl
    {
      Index next;
      Index index;
      Scalar value;
    };

    // used to store data in both modes
    Scalar* m_buffer;
    Scalar m_zero;
    Index m_size;
    Index m_start;
    Index m_end;
    Index m_allocatedSize;
    Index m_allocatedElements;
    Index m_mode;

    // linked list mode
    Index m_llStart;
    Index m_llCurrent;
    Index m_llSize;
};
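
// How the single buffer is interpreted in the two modes (an illustrative
// sketch, not part of the original source):
//
//   dense mode:  m_buffer = [ v0 | v1 | v2 | ... ]       plain Scalar array,
//                                                        indexed directly
//   sparse mode: m_buffer = [ ListEl0 | ListEl1 | ... ]  nodes reached from
//                                                        m_llStart via each
//                                                        node's `next` field
//
// Nodes are appended in creation order; the index-sorted traversal order is
// encoded purely in the `next` links, so insertion never moves existing nodes.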

/** \returns the number of non zeros in the current sub-vector */
template<typename _Scalar,typename _Index>
_Index AmbiVector<_Scalar,_Index>::nonZeros() const
{
  if (m_mode==IsSparse)
    return m_llSize;
  else
    return m_end - m_start;
}

template<typename _Scalar,typename _Index>
void AmbiVector<_Scalar,_Index>::init(double estimatedDensity)
{
  if (estimatedDensity>0.1)
    init(IsDense);
  else
    init(IsSparse);
}
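
// For example, a vector of size 1000 with ~300 expected nonzeros has an
// estimated density of 0.3, so dense mode is selected; with ~50 expected
// nonzeros (density 0.05) the linked-list mode is used instead.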

template<typename _Scalar,typename _Index>
void AmbiVector<_Scalar,_Index>::init(int mode)
{
  m_mode = mode;
  if (m_mode==IsSparse)
  {
    m_llSize = 0;
    m_llStart = -1;
  }
}

/** Must be called whenever we might perform a write access
  * with an index smaller than the previous one.
  *
  * Don't worry, this function is extremely cheap.
  */
template<typename _Scalar,typename _Index>
void AmbiVector<_Scalar,_Index>::restart()
{
  m_llCurrent = m_llStart;
}

/** Sets all coefficients of the current sub-vector to zero */
template<typename _Scalar,typename _Index>
void AmbiVector<_Scalar,_Index>::setZero()
{
  if (m_mode==IsDense)
  {
    for (Index i=m_start; i<m_end; ++i)
      m_buffer[i] = Scalar(0);
  }
  else
  {
    eigen_assert(m_mode==IsSparse);
    m_llSize = 0;
    m_llStart = -1;
  }
}

template<typename _Scalar,typename _Index>
_Scalar& AmbiVector<_Scalar,_Index>::coeffRef(_Index i)
{
  if (m_mode==IsDense)
    return m_buffer[i];
  else
  {
    ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_buffer);
    // TODO factorize the following code to reduce code generation
    eigen_assert(m_mode==IsSparse);
    if (m_llSize==0)
    {
      // this is the first element
      m_llStart = 0;
      m_llCurrent = 0;
      ++m_llSize;
      llElements[0].value = Scalar(0);
      llElements[0].index = i;
      llElements[0].next = -1;
      return llElements[0].value;
    }
    else if (i<llElements[m_llStart].index)
    {
      // this is going to be the new first element of the list
      ListEl& el = llElements[m_llSize];
      el.value = Scalar(0);
      el.index = i;
      el.next = m_llStart;
      m_llStart = m_llSize;
      ++m_llSize;
      m_llCurrent = m_llStart;
      return el.value;
    }
    else
    {
      Index nextel = llElements[m_llCurrent].next;
      eigen_assert(i>=llElements[m_llCurrent].index && "you must call restart() before inserting an element with lower or equal index");
      while (nextel >= 0 && llElements[nextel].index<=i)
      {
        m_llCurrent = nextel;
        nextel = llElements[nextel].next;
      }

      if (llElements[m_llCurrent].index==i)
      {
        // the coefficient already exists and we found it!
        return llElements[m_llCurrent].value;
      }
      else
      {
        if (m_llSize>=m_allocatedElements)
        {
          reallocateSparse();
          llElements = reinterpret_cast<ListEl*>(m_buffer);
        }
        eigen_internal_assert(m_llSize<m_allocatedElements && "internal error: overflow in sparse mode");
        // let's insert a new coefficient
        ListEl& el = llElements[m_llSize];
        el.value = Scalar(0);
        el.index = i;
        el.next = llElements[m_llCurrent].next;
        llElements[m_llCurrent].next = m_llSize;
        ++m_llSize;
        return el.value;
      }
    }
  }
}
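
// Note on usage: in sparse mode coeffRef() walks forward from m_llCurrent, so a
// sequence of accesses with non-decreasing indices (e.g. coeffRef(2), then
// coeffRef(5), then coeffRef(9)) is cheap; before writing to a smaller index
// again, restart() must be called to reset the search to the head of the list.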

template<typename _Scalar,typename _Index>
_Scalar& AmbiVector<_Scalar,_Index>::coeff(_Index i)
{
  if (m_mode==IsDense)
    return m_buffer[i];
  else
  {
    ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_buffer);
    eigen_assert(m_mode==IsSparse);
    if ((m_llSize==0) || (i<llElements[m_llStart].index))
    {
      return m_zero;
    }
    else
    {
      Index elid = m_llStart;
      while (elid >= 0 && llElements[elid].index<i)
        elid = llElements[elid].next;

      // return the coefficient if its index was found; the elid>=0 guard is
      // needed because the search runs off the end of the list when i exceeds
      // every stored index
      if (elid >= 0 && llElements[elid].index==i)
        return llElements[elid].value;
      else
        return m_zero;
    }
  }
}

/** Iterator over the nonzero coefficients */
template<typename _Scalar,typename _Index>
class AmbiVector<_Scalar,_Index>::Iterator
{
  public:
    typedef _Scalar Scalar;
    typedef typename NumTraits<Scalar>::Real RealScalar;

    /** Default constructor
      * \param vec the vector on which we iterate
      * \param epsilon the minimal value used to prune zero coefficients.
      *                In practice, all coefficients having a magnitude smaller than \a epsilon
      *                are skipped.
      */
    Iterator(const AmbiVector& vec, const RealScalar& epsilon = 0)
      : m_vector(vec)
    {
      using std::abs;
      m_epsilon = epsilon;
      m_isDense = m_vector.m_mode==IsDense;
      if (m_isDense)
      {
        m_currentEl = 0;   // this is to avoid a compilation warning
        m_cachedValue = 0; // this is to avoid a compilation warning
        m_cachedIndex = m_vector.m_start-1;
        ++(*this);
      }
      else
      {
        ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
        m_currentEl = m_vector.m_llStart;
        while (m_currentEl>=0 && abs(llElements[m_currentEl].value)<=m_epsilon)
          m_currentEl = llElements[m_currentEl].next;
        if (m_currentEl<0)
        {
          m_cachedValue = 0; // this is to avoid a compilation warning
          m_cachedIndex = -1;
        }
        else
        {
          m_cachedIndex = llElements[m_currentEl].index;
          m_cachedValue = llElements[m_currentEl].value;
        }
      }
    }

    Index index() const { return m_cachedIndex; }
    Scalar value() const { return m_cachedValue; }

    operator bool() const { return m_cachedIndex>=0; }

    Iterator& operator++()
    {
      using std::abs;
      if (m_isDense)
      {
        do {
          ++m_cachedIndex;
        } while (m_cachedIndex<m_vector.m_end && abs(m_vector.m_buffer[m_cachedIndex])<m_epsilon);
        if (m_cachedIndex<m_vector.m_end)
          m_cachedValue = m_vector.m_buffer[m_cachedIndex];
        else
          m_cachedIndex=-1;
      }
      else
      {
        ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
        do {
          m_currentEl = llElements[m_currentEl].next;
        } while (m_currentEl>=0 && abs(llElements[m_currentEl].value)<m_epsilon);
        if (m_currentEl<0)
        {
          m_cachedIndex = -1;
        }
        else
        {
          m_cachedIndex = llElements[m_currentEl].index;
          m_cachedValue = llElements[m_currentEl].value;
        }
      }
      return *this;
    }

  protected:
    const AmbiVector& m_vector; // the target vector
    Index m_currentEl;          // the current element in sparse/linked-list mode
    RealScalar m_epsilon;       // epsilon used to prune zero coefficients
    Index m_cachedIndex;        // current coordinate
    Scalar m_cachedValue;       // current value
    bool m_isDense;             // mode of the vector
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_AMBIVECTOR_H
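
For context, a minimal usage sketch of the interface above. This is hypothetical driver code, not part of Eigen: AmbiVector is an internal class (normally reached by including a module such as <Eigen/SparseCore> rather than directly), and the instantiation choices and printed output here are illustrative only. It mirrors the access pattern used by Eigen's internal sparse routines: accumulate with non-decreasing indices after restart(), then read back through an Iterator.

  #include <cstdio>
  #include <Eigen/SparseCore> // pulls in the internal AmbiVector.h

  int main()
  {
    Eigen::internal::AmbiVector<double,int> vec(100); // workspace of logical size 100
    vec.init(0.05);      // estimated density below 10% -> linked-list (sparse) mode
    vec.setBounds(0, 100);
    vec.setZero();

    vec.restart();       // reset the insertion cursor to the head of the list
    vec.coeffRef(3)  += 1.0;  // indices must be non-decreasing between restart()s
    vec.coeffRef(42) += 2.5;
    vec.coeffRef(42) += 0.5;  // accumulating into an existing coefficient is fine

    // iterate over the nonzeros, pruning magnitudes below 1e-12
    typedef Eigen::internal::AmbiVector<double,int>::Iterator It;
    for (It it(vec, 1e-12); it; ++it)
      std::printf("(%d, %g)\n", it.index(), it.value());
    return 0;
  }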