TensorFixedSize.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSOR_TENSOR_FIXED_SIZE_H
#define EIGEN_CXX11_TENSOR_TENSOR_FIXED_SIZE_H

namespace Eigen {

/** \class TensorFixedSize
  * \ingroup CXX11_Tensor_Module
  *
  * \brief The fixed sized version of the tensor class.
  */
template<typename Scalar_, typename Dimensions_, int Options_, typename IndexType>
class TensorFixedSize : public TensorBase<TensorFixedSize<Scalar_, Dimensions_, Options_, IndexType> >
{
  public:
    typedef TensorFixedSize<Scalar_, Dimensions_, Options_, IndexType> Self;
    typedef TensorBase<TensorFixedSize<Scalar_, Dimensions_, Options_, IndexType> > Base;
    typedef typename Eigen::internal::nested<Self>::type Nested;
    typedef typename internal::traits<Self>::StorageKind StorageKind;
    typedef typename internal::traits<Self>::Index Index;
    typedef Scalar_ Scalar;
    typedef typename NumTraits<Scalar>::Real RealScalar;
    typedef typename Base::CoeffReturnType CoeffReturnType;

    static const int Options = Options_;

    enum {
      IsAligned = bool(EIGEN_MAX_ALIGN_BYTES>0),
      Layout = Options_ & RowMajor ? RowMajor : ColMajor,
      CoordAccess = true,
      RawAccess = true
    };

    typedef Dimensions_ Dimensions;
    static const std::size_t NumIndices = Dimensions::count;

  protected:
    TensorStorage<Scalar, Dimensions, Options> m_storage;

  public:
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rank() const { return NumIndices; }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index dimension(std::size_t n) const { return m_storage.dimensions()[n]; }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_storage.dimensions(); }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index size() const { return m_storage.size(); }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar *data() { return m_storage.data(); }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar *data() const { return m_storage.data(); }

    // This makes EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
    // work, because that uses base().coeffRef() - and we don't yet
    // implement a similar class hierarchy
    inline Self& base() { return *this; }
    inline const Self& base() const { return *this; }

#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index firstIndex, IndexTypes... otherIndices) const
    {
      // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return coeff(array<Index, NumIndices>{{firstIndex, otherIndices...}});
    }
#endif

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& coeff(const array<Index, NumIndices>& indices) const
    {
      eigen_internal_assert(checkIndexRange(indices));
      return m_storage.data()[linearizedIndex(indices)];
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const
    {
      eigen_internal_assert(index >= 0 && index < size());
      return m_storage.data()[index];
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& coeff() const
    {
      EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return m_storage.data()[0];
    }

#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index firstIndex, IndexTypes... otherIndices)
    {
      // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return coeffRef(array<Index, NumIndices>{{firstIndex, otherIndices...}});
    }
#endif

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& coeffRef(const array<Index, NumIndices>& indices)
    {
      eigen_internal_assert(checkIndexRange(indices));
      return m_storage.data()[linearizedIndex(indices)];
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& coeffRef(Index index)
    {
      eigen_internal_assert(index >= 0 && index < size());
      return m_storage.data()[index];
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& coeffRef()
    {
      EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return m_storage.data()[0];
    }

#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(Index firstIndex, IndexTypes... otherIndices) const
    {
      // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return this->operator()(array<Index, NumIndices>{{firstIndex, otherIndices...}});
    }
#else
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1) const
    {
      if (Options&RowMajor) {
        const Index index = i1 + i0 * m_storage.dimensions()[1];
        return m_storage.data()[index];
      } else {
        const Index index = i0 + i1 * m_storage.dimensions()[0];
        return m_storage.data()[index];
      }
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2) const
    {
      if (Options&RowMajor) {
        const Index index = i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0);
        return m_storage.data()[index];
      } else {
        const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * i2);
        return m_storage.data()[index];
      }
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3) const
    {
      if (Options&RowMajor) {
        const Index index = i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0));
        return m_storage.data()[index];
      } else {
        const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * i3));
        return m_storage.data()[index];
      }
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const
    {
      if (Options&RowMajor) {
        const Index index = i4 + m_storage.dimensions()[4] * (i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0)));
        return m_storage.data()[index];
      } else {
        const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * (i3 + m_storage.dimensions()[3] * i4)));
        return m_storage.data()[index];
      }
    }
#endif


    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(const array<Index, NumIndices>& indices) const
    {
      eigen_assert(checkIndexRange(indices));
      return coeff(indices);
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()(Index index) const
    {
      eigen_internal_assert(index >= 0 && index < size());
      return coeff(index);
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator()() const
    {
      EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return coeff();
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Scalar& operator[](Index index) const
    {
      // The bracket operator is only for vectors, use the parenthesis operator instead.
      EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return coeff(index);
    }

#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(Index firstIndex, IndexTypes... otherIndices)
    {
      // The number of indices used to access a tensor coefficient must be equal to the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return operator()(array<Index, NumIndices>{{firstIndex, otherIndices...}});
    }
#else
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1)
    {
      if (Options&RowMajor) {
        const Index index = i1 + i0 * m_storage.dimensions()[1];
        return m_storage.data()[index];
      } else {
        const Index index = i0 + i1 * m_storage.dimensions()[0];
        return m_storage.data()[index];
      }
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2)
    {
      if (Options&RowMajor) {
        const Index index = i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0);
        return m_storage.data()[index];
      } else {
        const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * i2);
        return m_storage.data()[index];
      }
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3)
    {
      if (Options&RowMajor) {
        const Index index = i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0));
        return m_storage.data()[index];
      } else {
        const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * i3));
        return m_storage.data()[index];
      }
    }
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4)
    {
      if (Options&RowMajor) {
        const Index index = i4 + m_storage.dimensions()[4] * (i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0)));
        return m_storage.data()[index];
      } else {
        const Index index = i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * (i3 + m_storage.dimensions()[3] * i4)));
        return m_storage.data()[index];
      }
    }
#endif

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(const array<Index, NumIndices>& indices)
    {
      eigen_assert(checkIndexRange(indices));
      return coeffRef(indices);
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()(Index index)
    {
      eigen_assert(index >= 0 && index < size());
      return coeffRef(index);
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator()()
    {
      EIGEN_STATIC_ASSERT(NumIndices == 0, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return coeffRef();
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Scalar& operator[](Index index)
    {
      // The bracket operator is only for vectors, use the parenthesis operator instead.
      EIGEN_STATIC_ASSERT(NumIndices == 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return coeffRef(index);
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorFixedSize()
      : m_storage()
    {
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorFixedSize(const Self& other)
      : m_storage(other.m_storage)
    {
    }

#if EIGEN_HAS_RVALUE_REFERENCES
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorFixedSize(Self&& other)
      : m_storage(other.m_storage)
    {
    }
#endif

    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorFixedSize(const TensorBase<OtherDerived, ReadOnlyAccessors>& other)
    {
      typedef TensorAssignOp<TensorFixedSize, const OtherDerived> Assign;
      Assign assign(*this, other.derived());
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
    }
    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorFixedSize(const TensorBase<OtherDerived, WriteAccessors>& other)
    {
      typedef TensorAssignOp<TensorFixedSize, const OtherDerived> Assign;
      Assign assign(*this, other.derived());
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorFixedSize& operator=(const TensorFixedSize& other)
    {
      // FIXME: check that the dimensions of other match the dimensions of *this.
      // Unfortunately this isn't possible yet when the rhs is an expression.
      typedef TensorAssignOp<TensorFixedSize, const TensorFixedSize> Assign;
      Assign assign(*this, other);
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }
    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorFixedSize& operator=(const OtherDerived& other)
    {
      // FIXME: check that the dimensions of other match the dimensions of *this.
      // Unfortunately this isn't possible yet when the rhs is an expression.
      typedef TensorAssignOp<TensorFixedSize, const OtherDerived> Assign;
      Assign assign(*this, other);
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }

  protected:
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE bool checkIndexRange(const array<Index, NumIndices>& /*indices*/) const
    {
      using internal::array_apply_and_reduce;
      using internal::array_zip_and_reduce;
      using internal::greater_equal_zero_op;
      using internal::logical_and_op;
      using internal::lesser_op;

      return true;
        // check whether the indices are all >= 0
        /* array_apply_and_reduce<logical_and_op, greater_equal_zero_op>(indices) &&
        // check whether the indices fit in the dimensions
        array_zip_and_reduce<logical_and_op, lesser_op>(indices, m_storage.dimensions());*/
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE Index linearizedIndex(const array<Index, NumIndices>& indices) const
    {
      if (Options&RowMajor) {
        return m_storage.dimensions().IndexOfRowMajor(indices);
      } else {
        return m_storage.dimensions().IndexOfColMajor(indices);
      }
    }
};


} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_FIXED_SIZE_H
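
For reference, a minimal usage sketch of the class listed above. This example is illustrative and not part of the header: the scalar type, the Sizes<2, 3, 4> shape, and the variable names are arbitrary choices, and it assumes the unsupported Tensor module is available via <unsupported/Eigen/CXX11/Tensor>.

// Illustrative sketch (assumptions: Eigen's unsupported Tensor module is on the
// include path; the element type and dimensions below are arbitrary).
#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  // A rank-3 tensor whose 2x3x4 shape is fixed at compile time, so its
  // TensorStorage needs no dynamic allocation.
  Eigen::TensorFixedSize<float, Eigen::Sizes<2, 3, 4>> t;
  t.setZero();

  // Coefficient access goes through the operator() overloads defined above.
  t(1, 2, 3) = 42.0f;
  std::cout << "rank = " << t.rank()        // 3
            << ", size = " << t.size()      // 2*3*4 = 24
            << ", t(1,2,3) = " << t(1, 2, 3) << "\n";

  // Assigning a tensor expression runs it through the TensorAssignOp /
  // TensorExecutor path used by the converting constructor and operator= above.
  Eigen::TensorFixedSize<float, Eigen::Sizes<2, 3, 4>> u = t + t.constant(1.0f);
  std::cout << "u(1,2,3) = " << u(1, 2, 3) << "\n";  // 43
  return 0;
}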