#ifndef EIGEN_CXX11_TENSOR_TENSOR_ARG_MAX_H
#define EIGEN_CXX11_TENSOR_TENSOR_ARG_MAX_H

namespace Eigen {
namespace internal {

/** \class TensorIndexTuple
  * \ingroup CXX11_Tensor_Module
  *
  * \brief Tensor + Index Tuple class.
  */
template<typename XprType>
struct traits<TensorIndexTupleOp<XprType> > : public traits<XprType>
{
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef Tuple<Index, typename XprTraits::Scalar> Scalar;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
  static const int NumDimensions = XprTraits::NumDimensions;
  static const int Layout = XprTraits::Layout;
};
template<typename XprType>
struct eval<TensorIndexTupleOp<XprType>, Eigen::Dense>
{
  typedef const TensorIndexTupleOp<XprType>& type;
};
template<typename XprType>
struct nested<TensorIndexTupleOp<XprType>, 1,
              typename eval<TensorIndexTupleOp<XprType> >::type>
{
  typedef TensorIndexTupleOp<XprType> type;
};

}  // end namespace internal
template<typename XprType>
class TensorIndexTupleOp : public TensorBase<TensorIndexTupleOp<XprType>, ReadOnlyAccessors>
{
  public:
    typedef typename Eigen::internal::traits<TensorIndexTupleOp>::Scalar Scalar;
    typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
    typedef typename Eigen::internal::nested<TensorIndexTupleOp>::type Nested;
    typedef typename Eigen::internal::traits<TensorIndexTupleOp>::StorageKind StorageKind;
    typedef typename Eigen::internal::traits<TensorIndexTupleOp>::Index Index;
    typedef Tuple<Index, typename XprType::CoeffReturnType> CoeffReturnType;

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorIndexTupleOp(const XprType& expr)
        : m_xpr(expr) {}

    EIGEN_DEVICE_FUNC
    const typename internal::remove_all<typename XprType::Nested>::type&
    expression() const { return m_xpr; }

  protected:
    typename XprType::Nested m_xpr;
};
// Eval as rvalue
template<typename ArgType, typename Device>
struct TensorEvaluator<const TensorIndexTupleOp<ArgType>, Device>
{
  typedef TensorIndexTupleOp<ArgType> XprType;
  typedef typename XprType::Index Index;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename TensorEvaluator<ArgType, Device>::Dimensions Dimensions;

  enum {
    IsAligned = false,
    PacketAccess = false,
    BlockAccess = false,
    Layout = TensorEvaluator<ArgType, Device>::Layout,
    CoordAccess = false,
    RawAccess = false
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device) { }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const {
    return m_impl.dimensions();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar*) {
    m_impl.evalSubExprsIfNeeded(NULL);
    return true;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
    m_impl.cleanup();
  }

  // Pair each coefficient with its linear index.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
    return CoeffReturnType(index, m_impl.coeff(index));
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, 1);
  }

  EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }

 protected:
  TensorEvaluator<ArgType, Device> m_impl;
};
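// A minimal usage sketch (an illustration, not part of this header): the
// TensorBase::index_tuples() helper, which this evaluator backs, yields a
// tensor of (linear index, value) pairs. This assumes the unsupported Tensor
// module's public API and that the expression is assignable to a Tensor of
// Eigen::Tuple:
//
//   Eigen::Tensor<float, 2> t(2, 3);
//   t.setRandom();
//   Eigen::Tensor<Eigen::Tuple<Eigen::DenseIndex, float>, 2> tt
//       = t.index_tuples();
//   // tt(i, j).first  is the linear index of element (i, j);
//   // tt(i, j).second is the value t(i, j).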
namespace internal {

/** \class TensorTupleReducer
  * \ingroup CXX11_Tensor_Module
  *
  * \brief Converts to Tensor<Tuple<Index, Scalar> > and reduces to Tensor<Index>.
  */
template<typename ReduceOp, typename Dims, typename XprType>
struct traits<TensorTupleReducerOp<ReduceOp, Dims, XprType> > : public traits<XprType>
{
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef Index Scalar;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
  static const int NumDimensions = XprTraits::NumDimensions - array_size<Dims>::value;
  static const int Layout = XprTraits::Layout;
};
template<typename ReduceOp, typename Dims, typename XprType>
struct eval<TensorTupleReducerOp<ReduceOp, Dims, XprType>, Eigen::Dense>
{
  typedef const TensorTupleReducerOp<ReduceOp, Dims, XprType>& type;
};
template<typename ReduceOp, typename Dims, typename XprType>
struct nested<TensorTupleReducerOp<ReduceOp, Dims, XprType>, 1,
              typename eval<TensorTupleReducerOp<ReduceOp, Dims, XprType> >::type>
{
  typedef TensorTupleReducerOp<ReduceOp, Dims, XprType> type;
};

}  // end namespace internal
template<typename ReduceOp, typename Dims, typename XprType>
class TensorTupleReducerOp : public TensorBase<TensorTupleReducerOp<ReduceOp, Dims, XprType>, ReadOnlyAccessors>
{
  public:
    typedef typename Eigen::internal::traits<TensorTupleReducerOp>::Scalar Scalar;
    typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
    typedef typename Eigen::internal::nested<TensorTupleReducerOp>::type Nested;
    typedef typename Eigen::internal::traits<TensorTupleReducerOp>::StorageKind StorageKind;
    typedef typename Eigen::internal::traits<TensorTupleReducerOp>::Index Index;
    typedef Index CoeffReturnType;

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorTupleReducerOp(const XprType& expr,
                                                               const ReduceOp& reduce_op,
                                                               const int return_dim,
                                                               const Dims& reduce_dims)
        : m_xpr(expr), m_reduce_op(reduce_op), m_return_dim(return_dim), m_reduce_dims(reduce_dims) {}

    EIGEN_DEVICE_FUNC
    const typename internal::remove_all<typename XprType::Nested>::type&
    expression() const { return m_xpr; }

    EIGEN_DEVICE_FUNC
    const ReduceOp& reduce_op() const { return m_reduce_op; }

    EIGEN_DEVICE_FUNC
    const Dims& reduce_dims() const { return m_reduce_dims; }

    EIGEN_DEVICE_FUNC
    int return_dim() const { return m_return_dim; }

  protected:
    typename XprType::Nested m_xpr;
    const ReduceOp m_reduce_op;
    const int m_return_dim;
    const Dims m_reduce_dims;
};
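// A minimal usage sketch (an illustration, not part of this header): in
// practice this expression is created through TensorBase::argmax()/argmin(),
// which combine index_tuples() with a tuple-comparing reducer. This assumes
// the unsupported Tensor module's public API:
//
//   Eigen::Tensor<float, 3> t(2, 3, 4);
//   t.setRandom();
//   // Reduce over all dimensions: a rank-0 tensor holding the linear
//   // index of the maximal coefficient (return_dim < 0, see coeff() below).
//   Eigen::Tensor<Eigen::DenseIndex, 0> flat = t.argmax();
//   // Reduce over dimension 1 only: a rank-2 tensor holding, for each
//   // (i, k), the position along dimension 1 of the maximum of t(i, :, k).
//   Eigen::Tensor<Eigen::DenseIndex, 2> along1 = t.argmax(1);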
// Eval as rvalue
template<typename ReduceOp, typename Dims, typename ArgType, typename Device>
struct TensorEvaluator<const TensorTupleReducerOp<ReduceOp, Dims, ArgType>, Device>
{
  typedef TensorTupleReducerOp<ReduceOp, Dims, ArgType> XprType;
  typedef typename XprType::Index Index;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename TensorIndexTupleOp<ArgType>::CoeffReturnType TupleType;
  typedef typename TensorEvaluator<const TensorReductionOp<ReduceOp, Dims, const TensorIndexTupleOp<ArgType> >, Device>::Dimensions Dimensions;
  typedef typename TensorEvaluator<const TensorIndexTupleOp<ArgType>, Device>::Dimensions InputDimensions;
  static const int NumDims = internal::array_size<InputDimensions>::value;
  typedef array<Index, NumDims> StrideDims;

  enum {
    IsAligned = false,
    PacketAccess = false,
    BlockAccess = false,
    Layout = TensorEvaluator<const TensorReductionOp<ReduceOp, Dims, const TensorIndexTupleOp<ArgType> >, Device>::Layout,
    CoordAccess = false,
    RawAccess = false
  };
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_orig_impl(op.expression(), device),
        m_impl(op.expression().index_tuples().reduce(op.reduce_dims(), op.reduce_op()), device),
        m_return_dim(op.return_dim()) {

    gen_strides(m_orig_impl.dimensions(), m_strides);
    if (Layout == static_cast<int>(ColMajor)) {
      const Index total_size = internal::array_prod(m_orig_impl.dimensions());
      m_stride_mod = (m_return_dim < NumDims - 1) ? m_strides[m_return_dim + 1] : total_size;
    } else {
      const Index total_size = internal::array_prod(m_orig_impl.dimensions());
      m_stride_mod = (m_return_dim > 0) ? m_strides[m_return_dim - 1] : total_size;
    }
    m_stride_div = m_strides[m_return_dim];
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const {
    return m_impl.dimensions();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar*) {
    m_impl.evalSubExprsIfNeeded(NULL);
    return true;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
    m_impl.cleanup();
  }
  // The reduced tuple carries the winning coefficient's linear index in
  // v.first; extract the coordinate along m_return_dim from it, or return
  // the linear index itself when no dimension was requested.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
    const TupleType v = m_impl.coeff(index);
    return (m_return_dim < 0) ? v.first : (v.first % m_stride_mod) / m_stride_div;
  }
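  // Worked example of the index arithmetic above (illustrative values): for a
  // ColMajor tensor with dimensions (2, 3, 4), gen_strides() below produces
  // strides (1, 2, 6). With m_return_dim == 1, m_stride_mod == strides[2] == 6
  // and m_stride_div == strides[1] == 2, so a winning linear index
  // v.first == 11, i.e. coordinates (1, 2, 1), maps to (11 % 6) / 2 == 2:
  // the coordinate along dimension 1.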
  EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    const double compute_cost = 1.0 +
        (m_return_dim < 0 ? 0.0 : (TensorOpCost::ModCost<Index>() + TensorOpCost::DivCost<Index>()));
    return m_orig_impl.costPerCoeff(vectorized) +
           m_impl.costPerCoeff(vectorized) +
           TensorOpCost(0, 0, compute_cost);
  }
 private:
  EIGEN_DEVICE_FUNC void gen_strides(const InputDimensions& dims, StrideDims& strides) {
    if (m_return_dim < 0) {
      return;  // Strides are not needed when returning the linear index.
    }
    eigen_assert(m_return_dim < NumDims &&
                 "Asking to convert index to a dimension outside of the rank");

    // Compute the stride of each input dimension for the chosen layout.
    if (Layout == static_cast<int>(ColMajor)) {
      strides[0] = 1;
      for (int i = 1; i < NumDims; ++i) {
        strides[i] = strides[i-1] * dims[i-1];
      }
    } else {
      strides[NumDims-1] = 1;
      for (int i = NumDims - 2; i >= 0; --i) {
        strides[i] = strides[i+1] * dims[i+1];
      }
    }
  }
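  // Worked example for gen_strides() (illustrative values): for dims (2, 3, 4)
  // it yields strides (1, 2, 6) in ColMajor order (the first dimension has
  // stride 1) and strides (12, 4, 1) in RowMajor order (the last dimension
  // has stride 1).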
 protected:
  TensorEvaluator<const TensorIndexTupleOp<ArgType>, Device> m_orig_impl;
  TensorEvaluator<const TensorReductionOp<ReduceOp, Dims, const TensorIndexTupleOp<ArgType> >, Device> m_impl;
  const int m_return_dim;
  StrideDims m_strides;
  Index m_stride_mod;
  Index m_stride_div;
};

} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_ARG_MAX_H