#ifndef EIGEN_CXX11_TENSOR_TENSOR_ARG_MAX_H
#define EIGEN_CXX11_TENSOR_TENSOR_ARG_MAX_H

namespace Eigen {
namespace internal {

// Traits for TensorIndexTupleOp, which pairs each coefficient of the wrapped
// expression with its linear index.
template<typename XprType>
struct traits<TensorIndexTupleOp<XprType> > : public traits<XprType>
{
  typedef traits<XprType> XprTraits;
  typedef typename XprType::Nested Nested;
  static const int NumDimensions = XprTraits::NumDimensions;
  static const int Layout = XprTraits::Layout;
  // ...
};

template<typename XprType>
struct eval<TensorIndexTupleOp<XprType>, Eigen::Dense> { /* ... */ };

template<typename XprType>
struct nested<TensorIndexTupleOp<XprType>, 1,
              typename eval<TensorIndexTupleOp<XprType> >::type> { /* ... */ };

}  // end namespace internal

template<typename XprType>
class TensorIndexTupleOp : public TensorBase<TensorIndexTupleOp<XprType>, ReadOnlyAccessors>
{ /* ... */ };
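// TensorIndexTupleOp is the expression returned by TensorBase::index_tuples()
// (used by the reducer evaluator further below); its coefficients are
// Tuple<Index, Scalar> pairs of (linear index, coefficient value).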
// Evaluator for TensorIndexTupleOp: a thin wrapper around the evaluator of
// the nested expression.
template<typename ArgType, typename Device>
struct TensorEvaluator<const TensorIndexTupleOp<ArgType>, Device>
{
  // ...

  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device) { }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const
  { return m_impl.dimensions(); }

  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) {
    m_impl.evalSubExprsIfNeeded(NULL);
    return true;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, 1);
  }

#ifdef EIGEN_USE_SYCL
  EIGEN_STRONG_INLINE void bind(cl::sycl::handler& cgh) const { m_impl.bind(cgh); }
#endif

 protected:
  TensorEvaluator<ArgType, Device> m_impl;
};
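// Note: the coeff() member elided in the excerpt above returns
// CoeffReturnType(index, m_impl.coeff(index)), i.e. a Tuple whose .first is
// the linear index and whose .second is the coefficient value; the extra
// TensorOpCost(0, 0, 1) accounts for building that tuple.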
namespace internal {

// Traits for TensorTupleReducerOp, the expression that reduces a tensor of
// (index, value) tuples with ReduceOp and exposes only the index part.
template<typename ReduceOp, typename Dims, typename XprType>
struct traits<TensorTupleReducerOp<ReduceOp, Dims, XprType> > : public traits<XprType>
{
  typedef traits<XprType> XprTraits;
  // ...
  static const int Layout = XprTraits::Layout;
};

template<typename ReduceOp, typename Dims, typename XprType>
struct eval<TensorTupleReducerOp<ReduceOp, Dims, XprType>, Eigen::Dense> { /* ... */ };

template<typename ReduceOp, typename Dims, typename XprType>
struct nested<TensorTupleReducerOp<ReduceOp, Dims, XprType>, 1,
              typename eval<TensorTupleReducerOp<ReduceOp, Dims, XprType> >::type> { /* ... */ };

}  // end namespace internal

template<typename ReduceOp, typename Dims, typename XprType>
class TensorTupleReducerOp : public TensorBase<TensorTupleReducerOp<ReduceOp, Dims, XprType>, ReadOnlyAccessors>
{ /* ... */ };
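// TensorTupleReducerOp is what TensorBase::argmax()/argmin() construct; it
// carries the expression, the tuple reducer, the dimension whose coordinate
// should be returned (return_dim, or -1 for a flat index), and the set of
// reduced dimensions (see the constructor arguments expr, reduce_op,
// return_dim, reduce_dims).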
// Evaluator for TensorTupleReducerOp. The nested expression is evaluated
// twice: m_orig_impl wraps it as plain index tuples (only used to recover the
// input dimensions and strides), while m_impl reduces those index tuples with
// ReduceOp to obtain the winning (index, value) pair per output coefficient.
template<typename ReduceOp, typename Dims, typename ArgType, typename Device>
struct TensorEvaluator<const TensorTupleReducerOp<ReduceOp, Dims, ArgType>, Device>
{
  // ...

  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_orig_impl(op.expression(), device),
        m_impl(op.expression().index_tuples().reduce(op.reduce_dims(), op.reduce_op()), device),
        m_return_dim(op.return_dim())
  {
    gen_strides(m_orig_impl.dimensions(), m_strides);
    if (Layout == static_cast<int>(ColMajor)) {
      const Index total_size = internal::array_prod(m_orig_impl.dimensions());
      m_stride_mod = (m_return_dim < NumDims - 1) ? m_strides[m_return_dim + 1] : total_size;
    } else {
      const Index total_size = internal::array_prod(m_orig_impl.dimensions());
      m_stride_mod = (m_return_dim > 0) ? m_strides[m_return_dim - 1] : total_size;
    }
    // If m_return_dim is not a valid dimension index, fall back to a divisor of 1.
    m_stride_div = ((m_return_dim >= 0) &&
                    (m_return_dim < static_cast<Index>(m_strides.size())))
                   ? m_strides[m_return_dim] : 1;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const
  { return m_impl.dimensions(); }

  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) {
    m_impl.evalSubExprsIfNeeded(NULL);
    return true;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
    const TupleType v = m_impl.coeff(index);
    return (m_return_dim < 0) ? v.first : (v.first % m_stride_mod) / m_stride_div;
  }
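  // Worked example (illustrative, not from the header): for a column-major
  // 2x3x4 tensor, gen_strides() below yields strides {1, 2, 6}. With
  // return_dim == 1 this gives m_stride_div == 2 and m_stride_mod == 6, so a
  // winning flat index of 17 = 1 + 2*2 + 6*2 maps to (17 % 6) / 2 == 2, the
  // coordinate of the extremum along dimension 1.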
#ifdef EIGEN_USE_SYCL
  EIGEN_STRONG_INLINE void bind(cl::sycl::handler& cgh) const {
    m_impl.bind(cgh);
    m_orig_impl.bind(cgh);
  }
#endif

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    // One extra cycle to unpack the tuple, plus a mod and a div when a
    // specific dimension's coordinate has to be recovered.
    const double compute_cost = 1.0 +
        (m_return_dim < 0 ? 0.0 : (TensorOpCost::ModCost<Index>() + TensorOpCost::DivCost<Index>()));
    return m_orig_impl.costPerCoeff(vectorized) +
           m_impl.costPerCoeff(vectorized) +
           TensorOpCost(0, 0, compute_cost);
  }

 private:
  EIGEN_DEVICE_FUNC void gen_strides(const InputDimensions& dims, StrideDims& strides) {
    if (m_return_dim < 0) {
      return;  // The strides are not needed for a flat-index result.
    }
    eigen_assert(m_return_dim < NumDims &&
                 "Asking to convert index to a dimension outside of the rank");

    if (Layout == static_cast<int>(ColMajor)) {
      strides[0] = 1;
      for (int i = 1; i < NumDims; ++i) {
        strides[i] = strides[i - 1] * dims[i - 1];
      }
    } else {
      strides[NumDims - 1] = 1;
      for (int i = NumDims - 2; i >= 0; --i) {
        strides[i] = strides[i + 1] * dims[i + 1];
      }
    }
  }

 protected:
  TensorEvaluator<const TensorIndexTupleOp<ArgType>, Device> m_orig_impl;
  TensorEvaluator<const TensorReductionOp<ReduceOp, Dims, const TensorIndexTupleOp<ArgType> >, Device> m_impl;
  const Index m_return_dim;
  StrideDims m_strides;
  Index m_stride_mod, m_stride_div;
};
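// For the same 2x3x4 tensor in row-major layout the second loop above yields
// strides {12, 4, 1}; the mod/div arithmetic in coeff() is unchanged, with
// m_stride_mod taken from the next-slower dimension to the left instead.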
} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_ARG_MAX_H
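For reference, a minimal usage sketch of the argmax/argmin API that this header
implements. This is not part of the header; it assumes the unsupported Tensor
module is available, and the tensor sizes and printed entries are purely
illustrative.

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::Tensor<float, 3> t(2, 3, 4);
  t.setRandom();

  // Flat (linear) index of the overall maximum: a rank-0 tensor of indices.
  Eigen::Tensor<Eigen::DenseIndex, 0> flat = t.argmax();

  // Coordinate of the maximum along dimension 1: that dimension is reduced
  // away, so the result is a 2x4 tensor of indices in [0, 3).
  Eigen::Tensor<Eigen::DenseIndex, 2> along_dim1 = t.argmax(1);

  std::cout << "flat argmax index: " << flat() << "\n"
            << "argmax along dim 1 at (0,0): " << along_dim1(0, 0) << "\n";
  return 0;
}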