#ifndef EIGEN_CXX11_TENSOR_TENSOR_FIXED_SIZE_H
#define EIGEN_CXX11_TENSOR_TENSOR_FIXED_SIZE_H

namespace Eigen {

// TensorFixedSize: the fixed sized version of the tensor class.
// The dimensions of the tensor are part of its type (e.g. Sizes<3, 5, 7>).
template<typename Scalar_, typename Dimensions_, int Options_, typename IndexType>
class TensorFixedSize : public TensorBase<TensorFixedSize<Scalar_, Dimensions_, Options_, IndexType> >
{
  public:
    typedef TensorFixedSize<Scalar_, Dimensions_, Options_, IndexType> Self;
    typedef TensorBase<TensorFixedSize<Scalar_, Dimensions_, Options_, IndexType> > Base;
    typedef typename internal::traits<Self>::Index Index;
    typedef Scalar_ Scalar;
    typedef Dimensions_ Dimensions;
    static const int Options = Options_;
    static const std::size_t NumIndices = Dimensions::count;

  protected:
    TensorStorage<Scalar, Dimensions, Options> m_storage;

  public:
    // base() exists so that EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED works:
    // that macro accesses coefficients through base().coeffRef().
    inline Self& base()             { return *this; }
    inline const Self& base() const { return *this; }
#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index firstIndex, IndexTypes... otherIndices) const {
      // The number of indices must equal the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return coeff(array<Index, NumIndices>{{firstIndex, otherIndices...}});
    }
#endif
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(const array<Index, NumIndices>& indices) const
    { return m_storage.data()[linearizedIndex(indices)]; }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index index) const { return m_storage.data()[index]; }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff() const            { return m_storage.data()[0]; }
#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index firstIndex, IndexTypes... otherIndices) {
      // The number of indices must equal the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return coeffRef(array<Index, NumIndices>{{firstIndex, otherIndices...}});
    }
#endif
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(const array<Index, NumIndices>& indices)
    { return m_storage.data()[linearizedIndex(indices)]; }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) { return m_storage.data()[index]; }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef()            { return m_storage.data()[0]; }
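    // Illustration (not part of the original header): with variadic templates enabled,
    // coefficients are accessed with one index per dimension. For a hypothetical
    //   TensorFixedSize<float, Sizes<2, 3> > t;
    // one can write
    //   t.coeffRef(1, 2) = 7.0f;   // writable access; the index count is checked at compile time
    //   float v = t.coeff(1, 2);   // read-only access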
#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(Index firstIndex, IndexTypes... otherIndices) const {
      // The number of indices must equal the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return this->operator()(array<Index, NumIndices>{{firstIndex, otherIndices...}});
    }
#else
    // Fixed-arity overloads: linearize the indices by hand according to the storage layout.
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1) const {
      if (Options&RowMajor)
        return m_storage.data()[i1 + i0 * m_storage.dimensions()[1]];
      else
        return m_storage.data()[i0 + i1 * m_storage.dimensions()[0]];
    }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2) const {
      if (Options&RowMajor)
        return m_storage.data()[i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0)];
      else
        return m_storage.data()[i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * i2)];
    }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3) const {
      if (Options&RowMajor)
        return m_storage.data()[i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0))];
      else
        return m_storage.data()[i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * i3))];
    }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4) const {
      if (Options&RowMajor)
        return m_storage.data()[i4 + m_storage.dimensions()[4] * (i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0)))];
      else
        return m_storage.data()[i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * (i3 + m_storage.dimensions()[3] * i4)))];
    }
#endif

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(const array<Index, NumIndices>& indices) const
    { return coeff(indices); }
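    // Worked example of the linearization above (a sketch, assuming dimensions 3 x 5 x 7):
    // with row-major layout the coefficient (i0, i1, i2) lives at linear offset
    //   i2 + 7 * (i1 + 5 * i0)
    // while with column-major layout it lives at
    //   i0 + 3 * (i1 + 5 * i2)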
#if EIGEN_HAS_VARIADIC_TEMPLATES
    template<typename... IndexTypes>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(Index firstIndex, IndexTypes... otherIndices) {
      // The number of indices must equal the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherIndices) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
      return operator()(array<Index, NumIndices>{{firstIndex, otherIndices...}});
    }
#else
    // Writable fixed-arity overloads, mirroring the const versions above.
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1) {
      if (Options&RowMajor)
        return m_storage.data()[i1 + i0 * m_storage.dimensions()[1]];
      else
        return m_storage.data()[i0 + i1 * m_storage.dimensions()[0]];
    }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2) {
      if (Options&RowMajor)
        return m_storage.data()[i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0)];
      else
        return m_storage.data()[i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * i2)];
    }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3) {
      if (Options&RowMajor)
        return m_storage.data()[i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0))];
      else
        return m_storage.data()[i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * i3))];
    }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(Index i0, Index i1, Index i2, Index i3, Index i4) {
      if (Options&RowMajor)
        return m_storage.data()[i4 + m_storage.dimensions()[4] * (i3 + m_storage.dimensions()[3] * (i2 + m_storage.dimensions()[2] * (i1 + m_storage.dimensions()[1] * i0)))];
      else
        return m_storage.data()[i0 + m_storage.dimensions()[0] * (i1 + m_storage.dimensions()[1] * (i2 + m_storage.dimensions()[2] * (i3 + m_storage.dimensions()[3] * i4)))];
    }
#endif
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorFixedSize(const Self& other) : m_storage(other.m_storage) { }
#if EIGEN_HAS_RVALUE_REFERENCES
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorFixedSize(Self&& other) : m_storage(other.m_storage) { }
#endif
    // Construction and assignment from a tensor expression evaluate the expression into the fixed-size storage.
    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorFixedSize(const TensorBase<OtherDerived, ReadOnlyAccessors>& other) {
      typedef TensorAssignOp<TensorFixedSize, const OtherDerived> Assign;
      Assign assign(*this, other.derived());
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
    }
    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorFixedSize(const TensorBase<OtherDerived, WriteAccessors>& other) {
      typedef TensorAssignOp<TensorFixedSize, const OtherDerived> Assign;
      Assign assign(*this, other.derived());
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
    }
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorFixedSize& operator=(const TensorFixedSize& other) {
      typedef TensorAssignOp<Self, const TensorFixedSize> Assign;
      Assign assign(*this, other);
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }
    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorFixedSize& operator=(const OtherDerived& other) {
      typedef TensorAssignOp<Self, const OtherDerived> Assign;
      Assign assign(*this, other);
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }

  protected:
    // Map a full set of indices to a linear offset, honouring the storage layout.
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index linearizedIndex(const array<Index, NumIndices>& indices) const {
      if (Options&RowMajor)
        return m_storage.dimensions().IndexOfRowMajor(indices);
      else
        return m_storage.dimensions().IndexOfColMajor(indices);
    }
};

} // end namespace Eigen
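// Usage sketch (illustration only, not part of the original header): the fixed-size
// equivalent of the dynamically sized
//   Eigen::Tensor<float, 3> t(3, 5, 7);
// is
//   Eigen::TensorFixedSize<float, Eigen::Sizes<3, 5, 7> > t;
// All dimensions are known at compile time, so the coefficients live in the
// TensorStorage member rather than on the heap. Expressions are evaluated into the
// tensor through the assignment operators above, e.g.
//   Eigen::TensorFixedSize<float, Eigen::Sizes<2, 2> > a, b, c;
//   a.setRandom(); b.setRandom();
//   c = a + b;   // runs the TensorAssignOp / TensorExecutor path shown above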
#endif // EIGEN_CXX11_TENSOR_TENSOR_FIXED_SIZE_H