#ifndef EIGEN_CXX11_TENSOR_TENSOR_REVERSE_H
#define EIGEN_CXX11_TENSOR_TENSOR_REVERSE_H
namespace Eigen {

/** \class TensorReverse
  * \ingroup CXX11_Tensor_Module
  *
  * \brief Tensor reverse elements class.
  *
  */
namespace internal {

template<typename ReverseDimensions, typename XprType>
struct traits<TensorReverseOp<ReverseDimensions,
                              XprType> > : public traits<XprType>
{
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
  static const int NumDimensions = XprTraits::NumDimensions;
  static const int Layout = XprTraits::Layout;
  typedef typename XprTraits::PointerType PointerType;
};
template<typename ReverseDimensions, typename XprType>
struct eval<TensorReverseOp<ReverseDimensions, XprType>, Eigen::Dense>
{
  typedef const TensorReverseOp<ReverseDimensions, XprType>& type;
};
template<typename ReverseDimensions, typename XprType>
struct nested<TensorReverseOp<ReverseDimensions, XprType>, 1,
              typename eval<TensorReverseOp<ReverseDimensions, XprType> >::type>
{
  typedef TensorReverseOp<ReverseDimensions, XprType> type;
};

}  // end namespace internal
template<typename ReverseDimensions, typename XprType>
class TensorReverseOp : public TensorBase<TensorReverseOp<ReverseDimensions,
                                          XprType>, WriteAccessors>
{
  public:
    typedef TensorBase<TensorReverseOp<ReverseDimensions, XprType>,
                       WriteAccessors> Base;
    typedef typename Eigen::internal::traits<TensorReverseOp>::Scalar Scalar;
    typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
    typedef typename XprType::CoeffReturnType CoeffReturnType;
    typedef typename Eigen::internal::nested<TensorReverseOp>::type Nested;
    typedef typename Eigen::internal::traits<TensorReverseOp>::StorageKind StorageKind;
    typedef typename Eigen::internal::traits<TensorReverseOp>::Index Index;

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorReverseOp(
        const XprType& expr, const ReverseDimensions& reverse_dims)
        : m_xpr(expr), m_reverse_dims(reverse_dims) {}

    EIGEN_DEVICE_FUNC
    const ReverseDimensions& reverse() const { return m_reverse_dims; }

    EIGEN_DEVICE_FUNC
    const typename internal::remove_all<typename XprType::Nested>::type&
    expression() const { return m_xpr; }

    EIGEN_TENSOR_INHERIT_ASSIGNMENT_OPERATORS(TensorReverseOp)

  protected:
    typename XprType::Nested m_xpr;
    const ReverseDimensions m_reverse_dims;
};
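// A minimal usage sketch (not part of the original header): TensorBase::reverse
// wraps an expression in a TensorReverseOp; the array of bools selects which
// dimensions to flip. Assumes the unsupported Tensor module header.
//
//   #include <unsupported/Eigen/CXX11/Tensor>
//
//   Eigen::Tensor<float, 2> t(2, 3);
//   t.setValues({{0, 1, 2}, {3, 4, 5}});
//   Eigen::array<bool, 2> rev = {{true, false}};  // flip dim 0 only
//   Eigen::Tensor<float, 2> r = t.reverse(rev);   // r(0, j) == t(1, j)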
// Eval as rvalue
template<typename ReverseDimensions, typename ArgType, typename Device>
struct TensorEvaluator<const TensorReverseOp<ReverseDimensions, ArgType>, Device>
{
  typedef TensorReverseOp<ReverseDimensions, ArgType> XprType;
  typedef typename XprType::Index Index;
  static const int NumDims = internal::array_size<ReverseDimensions>::value;
  typedef DSizes<Index, NumDims> Dimensions;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;

  enum {
    IsAligned         = false,
    PacketAccess      = TensorEvaluator<ArgType, Device>::PacketAccess,
    BlockAccess       = NumDims > 0,
    PreferBlockAccess = true,
    Layout            = TensorEvaluator<ArgType, Device>::Layout,
    CoordAccess       = false,  // to be implemented
    RawAccess         = false
  };

  typedef internal::TensorIntDivisor<Index> IndexDivisor;

  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
  typedef internal::TensorBlockDescriptor<NumDims, Index> TensorBlockDesc;
  typedef internal::TensorBlockScratchAllocator<Device> TensorBlockScratch;

  typedef typename TensorEvaluator<const ArgType, Device>::TensorBlock
      ArgTensorBlock;

  typedef typename internal::TensorMaterializedBlock<CoeffReturnType, NumDims,
                                                     Layout, Index>
      TensorBlock;
  //===--------------------------------------------------------------------===//
  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device),
        m_reverse(op.reverse()),
        m_device(device)
  {
    // Reversing a scalar isn't supported yet. It would be a no-op anyway.
    EIGEN_STATIC_ASSERT((NumDims > 0), YOU_MADE_A_PROGRAMMING_MISTAKE);

    // Compute strides
    m_dimensions = m_impl.dimensions();
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_strides[0] = 1;
      for (int i = 1; i < NumDims; ++i) {
        m_strides[i] = m_strides[i-1] * m_dimensions[i-1];
        if (m_strides[i] > 0) m_fastStrides[i] = IndexDivisor(m_strides[i]);
      }
    } else {
      m_strides[NumDims-1] = 1;
      for (int i = NumDims - 2; i >= 0; --i) {
        m_strides[i] = m_strides[i+1] * m_dimensions[i+1];
        if (m_strides[i] > 0) m_fastStrides[i] = IndexDivisor(m_strides[i]);
      }
    }
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  const Dimensions& dimensions() const { return m_dimensions; }
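  // Worked example (illustrative, not in the original source): for a
  // col-major tensor with dimensions {2, 3, 4}, the constructor above yields
  // m_strides = {1, 2, 6}; for row-major it yields {12, 4, 1}. A step of one
  // along dimension i moves m_strides[i] elements in linear memory.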
  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) {
    m_impl.evalSubExprsIfNeeded(NULL);
    return true;
  }
#ifdef EIGEN_USE_THREADS
  template <typename EvalSubExprsCallback>
  EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(
      EvaluatorPointerType, EvalSubExprsCallback done) {
    m_impl.evalSubExprsIfNeededAsync(nullptr, [done](bool) { done(true); });
  }
#endif  // EIGEN_USE_THREADS

  EIGEN_STRONG_INLINE void cleanup() {
    m_impl.cleanup();
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index reverseIndex(Index index) const {
    eigen_assert(index < dimensions().TotalSize());
    Index inputIndex = 0;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      EIGEN_UNROLL_LOOP
      for (int i = NumDims - 1; i > 0; --i) {
        Index idx = index / m_fastStrides[i];
        index -= idx * m_strides[i];
        if (m_reverse[i]) {
          idx = m_dimensions[i] - idx - 1;
        }
        inputIndex += idx * m_strides[i];
      }
      if (m_reverse[0]) {
        inputIndex += (m_dimensions[0] - index - 1);
      } else {
        inputIndex += index;
      }
    } else {
      EIGEN_UNROLL_LOOP
      for (int i = 0; i < NumDims - 1; ++i) {
        Index idx = index / m_fastStrides[i];
        index -= idx * m_strides[i];
        if (m_reverse[i]) {
          idx = m_dimensions[i] - idx - 1;
        }
        inputIndex += idx * m_strides[i];
      }
      if (m_reverse[NumDims-1]) {
        inputIndex += (m_dimensions[NumDims-1] - index - 1);
      } else {
        inputIndex += index;
      }
    }
    return inputIndex;
  }
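  // Worked example (illustrative): col-major dims {2, 3}, strides {1, 2},
  // reversing only dimension 0. Output linear index 4 decomposes as
  // idx1 = 4 / 2 = 2 with remainder 0; dimension 1 is not reversed, so
  // inputIndex = 2 * 2 = 4, and the reversed inner dimension adds
  // (2 - 0 - 1) = 1, giving input index 5: coordinate (0, 2) reads (1, 2).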
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
    return m_impl.coeff(reverseIndex(index));
  }
  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
  {
    EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
    eigen_assert(index+PacketSize-1 < dimensions().TotalSize());

    // Gather coefficients one by one: the reversed layout is not contiguous.
    EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
    EIGEN_UNROLL_LOOP
    for (int i = 0; i < PacketSize; ++i) {
      values[i] = coeff(index+i);
    }
    return internal::pload<PacketReturnType>(values);
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  internal::TensorBlockResourceRequirements getResourceRequirements() const {
    const size_t target_size = m_device.lastLevelCacheSize();
    // Block evaluation reads underlying memory in reverse order, and the
    // default cost model does not properly account for this.
    return internal::TensorBlockResourceRequirements::skewed<Scalar>(target_size)
        .addCostPerCoeff({0, 0, 24});
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock
  block(TensorBlockDesc& desc, TensorBlockScratch& scratch,
        bool /*root_of_expr_ast*/ = false) const {
    static const bool isColMajor =
        static_cast<int>(Layout) == static_cast<int>(ColMajor);

    static const Index inner_dim_idx = isColMajor ? 0 : NumDims - 1;
    const bool inner_dim_reversed = m_reverse[inner_dim_idx];

    // Offset in the output block buffer.
    Index block_offset = 0;

    // Offset in the input tensor.
    Index input_offset = reverseIndex(desc.offset());
    // Initialize the block iterator state. Dimensions in this array are
    // always in inner_most -> outer_most order (col-major layout).
    array<BlockIteratorState, NumDims> it;
    for (int i = 0; i < NumDims; ++i) {
      const int dim = isColMajor ? i : NumDims - 1 - i;
      it[i].size = desc.dimension(dim);
      it[i].count = 0;
      it[i].reverse = m_reverse[dim];

      it[i].block_stride =
          i == 0 ? 1 : (it[i - 1].size * it[i - 1].block_stride);
      it[i].block_span = it[i].block_stride * (it[i].size - 1);

      it[i].input_stride = m_strides[dim];
      it[i].input_span = it[i].input_stride * (it[i].size - 1);

      if (it[i].reverse) {
        it[i].input_stride = -1 * it[i].input_stride;
        it[i].input_span = -1 * it[i].input_span;
      }
    }
    // If multiple inner dimensions have the same reverse flag, check if we
    // can merge them into a single virtual inner dimension.
    int effective_inner_dim = 0;
    for (int i = 1; i < NumDims; ++i) {
      if (it[i].reverse != it[effective_inner_dim].reverse) break;
      if (it[i].block_stride != it[effective_inner_dim].size) break;
      if (it[i].block_stride != numext::abs(it[i].input_stride)) break;

      it[i].size = it[effective_inner_dim].size * it[i].size;

      it[i].block_stride = 1;
      it[i].input_stride = (inner_dim_reversed ? -1 : 1);

      it[i].block_span = it[i].block_stride * (it[i].size - 1);
      it[i].input_span = it[i].input_stride * (it[i].size - 1);

      effective_inner_dim = i;
    }
    eigen_assert(it[effective_inner_dim].block_stride == 1);
    eigen_assert(it[effective_inner_dim].input_stride ==
                 (inner_dim_reversed ? -1 : 1));

    const Index inner_dim_size = it[effective_inner_dim].size;
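    // Illustrative example (not in the original source): for a col-major
    // 4x4 block whose two dimensions are both contiguous in the input and
    // share the same reverse flag, the merge loop above folds them into one
    // virtual inner dimension of size 16, so the copy loop below runs once
    // over 16 coefficients instead of 4 times over 4.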
    // Prepare storage for the materialized reverse result.
    const typename TensorBlock::Storage block_storage =
        TensorBlock::prepareStorage(desc, scratch);
    CoeffReturnType* block_buffer = block_storage.data();

    while (it[NumDims - 1].count < it[NumDims - 1].size) {
      // Copy the inner-most dimension data from its reversed location in
      // the input.
      Index dst = block_offset;
      Index src = input_offset;

      if (inner_dim_reversed) {
        for (Index i = 0; i < inner_dim_size; ++i) {
          block_buffer[dst] = m_impl.coeff(src);
          ++dst;
          --src;
        }
      } else {
        for (Index i = 0; i < inner_dim_size; ++i) {
          block_buffer[dst] = m_impl.coeff(src);
          ++dst;
          ++src;
        }
      }

      // For a 1d tensor we only need to generate the single inner-most
      // dimension.
      if ((NumDims - effective_inner_dim) == 1) break;
      // Update offsets.
      for (Index i = effective_inner_dim + 1; i < NumDims; ++i) {
        if (++it[i].count < it[i].size) {
          block_offset += it[i].block_stride;
          input_offset += it[i].input_stride;
          break;
        }
        if (i != NumDims - 1) it[i].count = 0;
        block_offset -= it[i].block_span;
        input_offset -= it[i].input_span;
      }
    }

    return block_storage.AsTensorMaterializedBlock();
  }
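  // Illustrative walk-through (not in the original source): in block() above,
  // with two outer dimensions of sizes {2, 3}, the offset-update loop behaves
  // like an odometer: it increments the innermost outer dimension until its
  // count reaches 2, rolls that dimension back by its block/input span,
  // resets its count, and carries into the next dimension, covering all
  // 2 * 3 = 6 outer iterations.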
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
  costPerCoeff(bool vectorized) const {
    double compute_cost = NumDims * (2 * TensorOpCost::AddCost<Index>() +
                                     2 * TensorOpCost::MulCost<Index>() +
                                     TensorOpCost::DivCost<Index>());
    for (int i = 0; i < NumDims; ++i) {
      if (m_reverse[i]) {
        compute_cost += 2 * TensorOpCost::AddCost<Index>();
      }
    }
    return m_impl.costPerCoeff(vectorized) +
           TensorOpCost(0, 0, compute_cost, false /* vectorized */, PacketSize);
  }
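  // Worked example (illustrative): with NumDims = 2 and one reversed
  // dimension, each coefficient costs 2 * (2 adds + 2 muls + 1 div) of index
  // arithmetic plus 2 extra adds for the single reversed dimension, on top
  // of the cost of evaluating the underlying expression.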
  EIGEN_DEVICE_FUNC typename Storage::Type data() const { return NULL; }

#ifdef EIGEN_USE_SYCL
  // binds the placeholder accessors to a command group handler for SYCL
  EIGEN_STRONG_INLINE void bind(cl::sycl::handler& cgh) const {
    m_impl.bind(cgh);
  }
#endif

 protected:
  Dimensions m_dimensions;
  array<Index, NumDims> m_strides;
  array<IndexDivisor, NumDims> m_fastStrides;
  TensorEvaluator<ArgType, Device> m_impl;
  ReverseDimensions m_reverse;
  const Device EIGEN_DEVICE_REF m_device;

 private:
  struct BlockIteratorState {
    BlockIteratorState()
        : size(0), count(0), reverse(false), block_stride(0),
          block_span(0), input_stride(0), input_span(0) {}

    Index size;         // block dimension size
    Index count;        // current iteration count along this dimension
    bool reverse;       // whether this dimension is reversed
    Index block_stride;
    Index block_span;
    Index input_stride;
    Index input_span;
  };
};
// Eval as lvalue
template <typename ReverseDimensions, typename ArgType, typename Device>
struct TensorEvaluator<TensorReverseOp<ReverseDimensions, ArgType>, Device>
    : public TensorEvaluator<const TensorReverseOp<ReverseDimensions, ArgType>,
                             Device> {
  typedef TensorEvaluator<const TensorReverseOp<ReverseDimensions, ArgType>,
                          Device> Base;
  typedef TensorReverseOp<ReverseDimensions, ArgType> XprType;
  typedef typename XprType::Index Index;
  static const int NumDims = internal::array_size<ReverseDimensions>::value;
  typedef DSizes<Index, NumDims> Dimensions;

  enum {
    IsAligned = false,
    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
    BlockAccess = false,
    PreferBlockAccess = false,
    Layout = TensorEvaluator<ArgType, Device>::Layout,
    CoordAccess = false,  // to be implemented
    RawAccess = false
  };

  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : Base(op, device) {}

  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static const int PacketSize = PacketType<CoeffReturnType, Device>::size;

  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
  typedef internal::TensorBlockNotImplemented TensorBlock;
  //===--------------------------------------------------------------------===//

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  const Dimensions& dimensions() const { return this->m_dimensions; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) {
    return this->m_impl.coeffRef(this->reverseIndex(index));
  }
  template <int StoreMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  void writePacket(Index index, const PacketReturnType& x) {
    EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
    eigen_assert(index+PacketSize-1 < dimensions().TotalSize());

    // Scatter the packet into the reversed locations one coefficient at a
    // time.
    EIGEN_ALIGN_MAX CoeffReturnType values[PacketSize];
    internal::pstore<CoeffReturnType, PacketReturnType>(values, x);
    EIGEN_UNROLL_LOOP
    for (int i = 0; i < PacketSize; ++i) {
      this->coeffRef(index+i) = values[i];
    }
  }
};
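// A minimal lvalue usage sketch (not from this header): because
// TensorReverseOp has write accessors, a reversed view can be assigned to,
// which writes through to the underlying tensor.
//
//   Eigen::Tensor<float, 2> t(2, 3), s(2, 3);
//   s.setRandom();
//   Eigen::array<bool, 2> rev = {{false, true}};
//   t.reverse(rev) = s;  // t(i, j) == s(i, 2 - j)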
}  // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_REVERSE_H