// Example usage (originally a doxygen \code sample; the comment markers were
// lost in extraction):
//
//   Tensor<float, 2, ColMajor> input(2, 4);
//   Tensor<float, 2, RowMajor> output = input.swap_layout();
//   eigen_assert(output.dimension(0) == 4);
//   eigen_assert(output.dimension(1) == 2);
//
//   array<int, 2> shuffle(1, 0);
//   output = input.swap_layout().shuffle(shuffle);
//   eigen_assert(output.dimension(0) == 2);
//   eigen_assert(output.dimension(1) == 4);
#ifndef EIGEN_CXX11_TENSOR_TENSOR_LAYOUT_SWAP_H
#define EIGEN_CXX11_TENSOR_TENSOR_LAYOUT_SWAP_H
/// Traits for TensorLayoutSwapOp: identical to the wrapped expression's
/// traits except that the storage order is inverted.
template<typename XprType>
struct traits<TensorLayoutSwapOp<XprType> > : public traits<XprType>
{
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprType::Nested Nested;
  static const int NumDimensions = traits<XprType>::NumDimensions;
  // The whole point of this op: advertise the OPPOSITE layout from the
  // nested expression. Without this override, the Layout inherited from
  // traits<XprType> would report the unswapped storage order, defeating
  // swap_layout().
  static const int Layout = (static_cast<int>(traits<XprType>::Layout) == static_cast<int>(ColMajor)) ? RowMajor : ColMajor;
  typedef typename XprTraits::PointerType PointerType;
};
/// Evaluate a TensorLayoutSwapOp by const reference: the expression node is
/// lightweight and owns no data, so no copy is needed.
template<typename XprType>
struct eval<TensorLayoutSwapOp<XprType>, Eigen::Dense>
{
  using type = const TensorLayoutSwapOp<XprType>&;
};
/// Nesting helper: when a TensorLayoutSwapOp appears inside a larger
/// expression, nest it by value (the node itself, not a reference).
template<typename XprType>
struct nested<TensorLayoutSwapOp<XprType>, 1, typename eval<TensorLayoutSwapOp<XprType> >::type>
{
  using type = TensorLayoutSwapOp<XprType>;
};
}
// Expression node that presents the nested expression with its storage order
// (ColMajor <-> RowMajor) swapped. Writable: inherits from TensorBase with
// WriteAccessors so it can appear on the left-hand side of an assignment.
//
// NOTE(review): the constructor's signature line (taking the wrapped
// expression, presumably `const XprType& expr`) and the return type /
// EIGEN_DEVICE_FUNC qualifiers on expression() were lost in extraction —
// restore them from the upstream header before building.
template<typename XprType>
class TensorLayoutSwapOp : public TensorBase<TensorLayoutSwapOp<XprType>, WriteAccessors>
{
public:
// Orphaned constructor initializer list: stores the wrapped expression.
: m_xpr(expr) {}
// Accessor for the wrapped expression (signature line missing above).
expression() const { return m_xpr; }
protected:
// The nested expression, held via its Nested type (reference or value as
// decided by the nested<> helper).
typename XprType::Nested m_xpr;
};
// Evaluator for a (read-only) TensorLayoutSwapOp. Layout swapping is a pure
// metadata operation: the flat data buffer is untouched and every coeff/packet
// access is forwarded verbatim to the nested evaluator; only the dimensions
// are presented in reversed order.
//
// NOTE(review): several member-function signature lines (constructor, bind,
// evalSubExprsIfNeeded, cleanup, coeff, packet, costPerCoeff) were lost in
// extraction — only their bodies remain below. Restore the signatures from
// the upstream header.
template<typename ArgType, typename Device>
struct TensorEvaluator<const TensorLayoutSwapOp<ArgType>, Device>
{
typedef TensorLayoutSwapOp<ArgType>
XprType;
// Rank of the expression, taken from the nested evaluator's Dimensions type.
static const int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>
::value;
typedef DSizes<Index, NumDims> Dimensions;
enum {
BlockAccess = false,
CoordAccess = false,
// Raw buffer access is possible exactly when the nested evaluator allows it,
// since the data itself is not rearranged.
RawAccess = TensorEvaluator<ArgType, Device>::RawAccess
};
typedef internal::TensorBlockNotImplemented TensorBlock;
// Orphaned constructor initializer/body: evaluates the nested expression and
// records its dimensions in reversed order (dimension i becomes
// dimension NumDims-1-i), which is what a ColMajor<->RowMajor swap means.
: m_impl(op.expression(), device)
{
for(
int i = 0;
i < NumDims; ++
i) {
m_dimensions[
i] = m_impl.dimensions()[NumDims-1-
i];
}
}
#ifdef EIGEN_USE_SYCL
// Orphaned bind() body: forwards the SYCL command-group handler to the
// nested evaluator (signature line missing).
m_impl.bind(cgh);
}
#endif
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef StorageMemory<CoeffReturnType, Device> Storage;
// Orphaned evalSubExprsIfNeeded body: delegates directly — the swap adds no
// evaluation work of its own.
return m_impl.evalSubExprsIfNeeded(data);
}
// Orphaned cleanup body: releases the nested evaluator's resources.
m_impl.cleanup();
}
// Orphaned coeff body: linear index is layout-agnostic, so it forwards as-is.
{
return m_impl.coeff(index);
}
template<int LoadMode>
// Orphaned packet body: vectorized loads also forward unchanged.
{
return m_impl.template packet<LoadMode>(index);
}
// Orphaned costPerCoeff body: cost is exactly the nested expression's cost.
return m_impl.costPerCoeff(vectorized);
}
// Stray closing brace — presumably the end of a data()/dimensions() accessor
// whose other lines were lost.
}
const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
protected:
TensorEvaluator<ArgType, Device> m_impl;
// Dimensions of the nested expression, stored in reversed order.
Dimensions m_dimensions;
};
// Evaluator for the writable (non-const) TensorLayoutSwapOp: reuses the
// read-only evaluator above and adds write access (coeffRef/writePacket),
// again forwarding straight through to the nested evaluator.
//
// NOTE(review): the constructor signature, the coeffRef signature, the
// PacketReturnType typedef, and the `template<int StoreMode>` /
// EIGEN_DEVICE_FUNC lines on writePacket were lost in extraction — restore
// them from the upstream header.
template<typename ArgType, typename Device>
struct TensorEvaluator<TensorLayoutSwapOp<ArgType>, Device>
: public TensorEvaluator<const TensorLayoutSwapOp<ArgType>, Device>
{
typedef TensorEvaluator<const TensorLayoutSwapOp<ArgType>, Device>
Base;
typedef TensorLayoutSwapOp<ArgType>
XprType;
enum {
BlockAccess = false,
CoordAccess = false
};
typedef internal::TensorBlockNotImplemented TensorBlock;
// Orphaned constructor initializer/body: all setup lives in the base class.
: Base(op, device)
{ }
typedef typename XprType::CoeffReturnType CoeffReturnType;
// Orphaned coeffRef body: writable access forwards the linear index
// unchanged (layout swap does not permute flat storage).
{
return this->m_impl.coeffRef(index);
}
// writePacket: vectorized store, forwarded to the nested evaluator.
// (Missing its `template<int StoreMode>` header line, so StoreMode below is
// currently unresolved — restore before building.)
void writePacket(Index index,
const PacketReturnType&
x)
{
this->m_impl.template writePacket<StoreMode>(index,
x);
}
};
}
#endif // EIGEN_CXX11_TENSOR_TENSOR_LAYOUT_SWAP_H