TensorReverse.h — source listing of Eigen's tensor reverse operation (extracted from the generated Doxygen documentation for this file).
1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 //
4 // Copyright (C) 2014 Navdeep Jaitly <ndjaitly@google.com>
5 // Benoit Steiner <benoit.steiner.goog@gmail.com>
6 //
7 // This Source Code Form is subject to the terms of the Mozilla
8 // Public License v. 2.0. If a copy of the MPL was not distributed
9 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
10 
11 #ifndef EIGEN_CXX11_TENSOR_TENSOR_REVERSE_H
12 #define EIGEN_CXX11_TENSOR_TENSOR_REVERSE_H
13 namespace Eigen {
14 
21 namespace internal {
22 template<typename ReverseDimensions, typename XprType>
23 struct traits<TensorReverseOp<ReverseDimensions,
24  XprType> > : public traits<XprType>
25 {
26  typedef typename XprType::Scalar Scalar;
28  typedef typename XprTraits::StorageKind StorageKind;
29  typedef typename XprTraits::Index Index;
30  typedef typename XprType::Nested Nested;
32  static const int NumDimensions = XprTraits::NumDimensions;
33  static const int Layout = XprTraits::Layout;
34 };
35 
36 template<typename ReverseDimensions, typename XprType>
37 struct eval<TensorReverseOp<ReverseDimensions, XprType>, Eigen::Dense>
38 {
40 };
41 
42 template<typename ReverseDimensions, typename XprType>
43 struct nested<TensorReverseOp<ReverseDimensions, XprType>, 1,
44  typename eval<TensorReverseOp<ReverseDimensions, XprType> >::type>
45 {
47 };
48 
49 } // end namespace internal
50 
51 template<typename ReverseDimensions, typename XprType>
52 class TensorReverseOp : public TensorBase<TensorReverseOp<ReverseDimensions,
53  XprType>, WriteAccessors>
54 {
55  public:
58  typedef typename XprType::CoeffReturnType CoeffReturnType;
63 
65  const XprType& expr, const ReverseDimensions& reverse_dims)
66  : m_xpr(expr), m_reverse_dims(reverse_dims) { }
67 
68  EIGEN_DEVICE_FUNC
69  const ReverseDimensions& reverse() const { return m_reverse_dims; }
70 
71  EIGEN_DEVICE_FUNC
73  expression() const { return m_xpr; }
74 
75  EIGEN_DEVICE_FUNC
77  {
79  Assign assign(*this, other);
81  return *this;
82  }
83 
84  template<typename OtherDerived>
85  EIGEN_DEVICE_FUNC
86  EIGEN_STRONG_INLINE TensorReverseOp& operator = (const OtherDerived& other)
87  {
89  Assign assign(*this, other);
91  return *this;
92  }
93 
94  protected:
95  typename XprType::Nested m_xpr;
96  const ReverseDimensions m_reverse_dims;
97 };
98 
99 // Eval as rvalue
100 template<typename ReverseDimensions, typename ArgType, typename Device>
101 struct TensorEvaluator<const TensorReverseOp<ReverseDimensions, ArgType>, Device>
102 {
104  typedef typename XprType::Index Index;
107  typedef typename XprType::Scalar Scalar;
111 
112  enum {
113  IsAligned = false,
116  CoordAccess = false, // to be implemented
117  RawAccess = false
118  };
119 
120  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op,
121  const Device& device)
122  : m_impl(op.expression(), device), m_reverse(op.reverse())
123  {
124  // Reversing a scalar isn't supported yet. It would be a no-op anyway.
125  EIGEN_STATIC_ASSERT((NumDims > 0), YOU_MADE_A_PROGRAMMING_MISTAKE);
126 
127  // Compute strides
128  m_dimensions = m_impl.dimensions();
129  if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
130  m_strides[0] = 1;
131  for (int i = 1; i < NumDims; ++i) {
132  m_strides[i] = m_strides[i-1] * m_dimensions[i-1];
133  }
134  } else {
135  m_strides[NumDims-1] = 1;
136  for (int i = NumDims - 2; i >= 0; --i) {
137  m_strides[i] = m_strides[i+1] * m_dimensions[i+1];
138  }
139  }
140  }
141 
142  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
143  const Dimensions& dimensions() const { return m_dimensions; }
144 
145  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar*) {
146  m_impl.evalSubExprsIfNeeded(NULL);
147  return true;
148  }
149  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
150  m_impl.cleanup();
151  }
152 
153  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index reverseIndex(
154  Index index) const {
155  eigen_assert(index < dimensions().TotalSize());
156  Index inputIndex = 0;
157  if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
158  for (int i = NumDims - 1; i > 0; --i) {
159  Index idx = index / m_strides[i];
160  index -= idx * m_strides[i];
161  if (m_reverse[i]) {
162  idx = m_dimensions[i] - idx - 1;
163  }
164  inputIndex += idx * m_strides[i] ;
165  }
166  if (m_reverse[0]) {
167  inputIndex += (m_dimensions[0] - index - 1);
168  } else {
169  inputIndex += index;
170  }
171  } else {
172  for (int i = 0; i < NumDims - 1; ++i) {
173  Index idx = index / m_strides[i];
174  index -= idx * m_strides[i];
175  if (m_reverse[i]) {
176  idx = m_dimensions[i] - idx - 1;
177  }
178  inputIndex += idx * m_strides[i] ;
179  }
180  if (m_reverse[NumDims-1]) {
181  inputIndex += (m_dimensions[NumDims-1] - index - 1);
182  } else {
183  inputIndex += index;
184  }
185  }
186  return inputIndex;
187  }
188 
189  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(
190  Index index) const {
191  return m_impl.coeff(reverseIndex(index));
192  }
193 
194  template<int LoadMode>
195  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
196  PacketReturnType packet(Index index) const
197  {
198  EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
199  eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
200 
201  // TODO(ndjaitly): write a better packing routine that uses
202  // local structure.
204  values[PacketSize];
205  for (int i = 0; i < PacketSize; ++i) {
206  values[i] = coeff(index+i);
207  }
208  PacketReturnType rslt = internal::pload<PacketReturnType>(values);
209  return rslt;
210  }
211 
212  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
213  double compute_cost = NumDims * (2 * TensorOpCost::AddCost<Index>() +
214  2 * TensorOpCost::MulCost<Index>() +
215  TensorOpCost::DivCost<Index>());
216  for (int i = 0; i < NumDims; ++i) {
217  if (m_reverse[i]) {
218  compute_cost += 2 * TensorOpCost::AddCost<Index>();
219  }
220  }
221  return m_impl.costPerCoeff(vectorized) +
222  TensorOpCost(0, 0, compute_cost, false /* vectorized */, PacketSize);
223  }
224 
225  EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
226 
227  protected:
228  Dimensions m_dimensions;
231  ReverseDimensions m_reverse;
232 };
233 
234 // Eval as lvalue
235 
236 template <typename ReverseDimensions, typename ArgType, typename Device>
237 struct TensorEvaluator<TensorReverseOp<ReverseDimensions, ArgType>, Device>
238  : public TensorEvaluator<const TensorReverseOp<ReverseDimensions, ArgType>,
239  Device> {
241  Device> Base;
243  typedef typename XprType::Index Index;
246 
247  enum {
248  IsAligned = false,
251  CoordAccess = false, // to be implemented
252  RawAccess = false
253  };
254  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op,
255  const Device& device)
256  : Base(op, device) {}
257 
258  typedef typename XprType::Scalar Scalar;
262 
263  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
264  const Dimensions& dimensions() const { return this->m_dimensions; }
265 
266  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) {
267  return this->m_impl.coeffRef(this->reverseIndex(index));
268  }
269 
270  template <int StoreMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
271  void writePacket(Index index, const PacketReturnType& x) {
272  EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
273  eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
274 
275  // This code is pilfered from TensorMorphing.h
276  EIGEN_ALIGN_MAX CoeffReturnType values[PacketSize];
277  internal::pstore<CoeffReturnType, PacketReturnType>(values, x);
278  for (int i = 0; i < PacketSize; ++i) {
279  this->coeffRef(index+i) = values[i];
280  }
281  }
282 
283 };
284 
285 
286 } // end namespace Eigen
287 
288 #endif // EIGEN_CXX11_TENSOR_TENSOR_REVERSE_H
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType &op, const Device &device)
EIGEN_DEVICE_FUNC const internal::remove_all< typename XprType::Nested >::type & expression() const
Definition: TensorReverse.h:73
SCALAR Scalar
Definition: bench_gemm.cpp:33
#define EIGEN_STRONG_INLINE
Definition: Macros.h:494
XprType::CoeffReturnType CoeffReturnType
Definition: TensorReverse.h:58
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index reverseIndex(Index index) const
EIGEN_DEVICE_FUNC const ReverseDimensions & reverse() const
Definition: TensorReverse.h:69
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions & dimensions() const
leaf::MyValues values
XprType::Nested m_xpr
Definition: TensorReverse.h:95
Namespace containing all symbols from the Eigen library.
Definition: jet.h:637
const ReverseDimensions m_reverse_dims
Definition: TensorReverse.h:96
A cost model used to limit the number of threads used for evaluating tensor expression.
#define EIGEN_STATIC_ASSERT(CONDITION, MSG)
Definition: StaticAssert.h:124
vector< size_t > dimensions(L.begin(), L.end())
Eigen::internal::traits< TensorReverseOp >::StorageKind StorageKind
Definition: TensorReverse.h:61
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar *)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
Eigen::NumTraits< Scalar >::Real RealScalar
Definition: TensorReverse.h:57
EIGEN_DEFAULT_DENSE_INDEX_TYPE Index
The Index type as used for the API.
Definition: Meta.h:33
#define eigen_assert(x)
Definition: Macros.h:579
Eigen::internal::traits< TensorReverseOp >::Index Index
Definition: TensorReverse.h:62
TensorEvaluator< const TensorReverseOp< ReverseDimensions, ArgType >, Device > Base
static EIGEN_DEVICE_FUNC void run(const Expression &expr, const Device &device=Device())
#define NULL
Definition: ccolamd.c:609
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorReverseOp(const XprType &expr, const ReverseDimensions &reverse_dims)
Definition: TensorReverse.h:64
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions & dimensions() const
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacket(Index index, const PacketReturnType &x)
The tensor base class.
Definition: TensorBase.h:829
Eigen::internal::traits< TensorReverseOp >::Scalar Scalar
Definition: TensorReverse.h:56
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar & coeffRef(Index index)
void reverse(const MatrixType &m)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
#define EIGEN_ALIGN_MAX
Definition: Macros.h:757
Eigen::internal::nested< TensorReverseOp >::type Nested
Definition: TensorReverse.h:59
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType &op, const Device &device)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const
set noclip points set clip one set noclip two set bar set border lt lw set xdata set ydata set zdata set x2data set y2data set boxwidth set dummy x


gtsam
Author(s):
autogenerated on Sat May 8 2021 02:45:57