// unsupported/Eigen/CXX11/src/Tensor/TensorTrace.h

#ifndef EIGEN_CXX11_TENSOR_TENSOR_TRACE_H
#define EIGEN_CXX11_TENSOR_TENSOR_TRACE_H

namespace Eigen {

/** \class TensorTrace
  * \ingroup CXX11_Tensor_Module
  *
  * \brief Tensor Trace class.
  */

namespace internal {
template<typename Dims, typename XprType>
struct traits<TensorTraceOp<Dims, XprType> > : public traits<XprType>
{
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
  static const int NumDimensions = XprTraits::NumDimensions - array_size<Dims>::value;
  static const int Layout = XprTraits::Layout;
};
template<typename Dims, typename XprType>
struct eval<TensorTraceOp<Dims, XprType>, Eigen::Dense>
{
  typedef const TensorTraceOp<Dims, XprType>& type;
};
template<typename Dims, typename XprType>
struct nested<TensorTraceOp<Dims, XprType>, 1, typename eval<TensorTraceOp<Dims, XprType> >::type>
{
  typedef TensorTraceOp<Dims, XprType> type;
};

} // end namespace internal
template<typename Dims, typename XprType>
class TensorTraceOp : public TensorBase<TensorTraceOp<Dims, XprType> >
{
  public:
    typedef typename Eigen::internal::traits<TensorTraceOp>::Scalar Scalar;
    typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
    typedef typename XprType::CoeffReturnType CoeffReturnType;
    typedef typename Eigen::internal::nested<TensorTraceOp>::type Nested;
    typedef typename Eigen::internal::traits<TensorTraceOp>::StorageKind StorageKind;
    typedef typename Eigen::internal::traits<TensorTraceOp>::Index Index;

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorTraceOp(const XprType& expr, const Dims& dims)
      : m_xpr(expr), m_dims(dims) {}

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    const Dims& dims() const { return m_dims; }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    const typename internal::remove_all<typename XprType::Nested>::type& expression() const { return m_xpr; }

  protected:
    typename XprType::Nested m_xpr;
    const Dims m_dims;
};
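/* Example (illustrative, not part of the original header): a TensorTraceOp is
 * normally created through TensorBase::trace(). A minimal sketch, assuming
 * Eigen's unsupported Tensor module:
 *
 *   #include <unsupported/Eigen/CXX11/Tensor>
 *
 *   Eigen::Tensor<float, 3> t(3, 3, 4);
 *   t.setRandom();
 *   Eigen::array<Eigen::Index, 2> dims{{0, 1}};
 *   // For each k, sums t(i, i, k) over i; the result has dimensions (4).
 *   Eigen::Tensor<float, 1> tr = t.trace(dims);
 */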
// Eval as rvalue
template<typename Dims, typename ArgType, typename Device>
struct TensorEvaluator<const TensorTraceOp<Dims, ArgType>, Device>
{
  typedef TensorTraceOp<Dims, ArgType> XprType;
  static const int NumInputDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
  static const int NumReducedDims = internal::array_size<Dims>::value;
  static const int NumOutputDims = NumInputDims - NumReducedDims;
  typedef typename XprType::Index Index;
  typedef DSizes<Index, NumOutputDims> Dimensions;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;

  enum {
    IsAligned = false,
    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
    BlockAccess = false,
    PreferBlockAccess = TensorEvaluator<ArgType, Device>::PreferBlockAccess,
    Layout = TensorEvaluator<ArgType, Device>::Layout,
    CoordAccess = false,
    RawAccess = false
  };

  // Block evaluation is not implemented for the trace expression.
  typedef internal::TensorBlockNotImplemented TensorBlock;
  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device), m_traceDim(1), m_device(device)
  {
    EIGEN_STATIC_ASSERT((NumReducedDims >= 2) || ((NumReducedDims == 0) && (NumInputDims == 0)), YOU_MADE_A_PROGRAMMING_MISTAKE);
    // Initially mark every input dimension as not traced over.
    for (int i = 0; i < NumInputDims; ++i) {
      m_reduced[i] = false;
    }
    // Mark each dimension listed in op.dims() as traced over.
    const Dims& op_dims = op.dims();
    for (int i = 0; i < NumReducedDims; ++i) {
      m_reduced[op_dims[i]] = true;
    }
    // The dimensions in op.dims() must all be distinct.
    int num_distinct_reduce_dims = 0;
    for (int i = 0; i < NumInputDims; ++i) {
      if (m_reduced[i]) {
        ++num_distinct_reduce_dims;
      }
    }
    eigen_assert(num_distinct_reduce_dims == NumReducedDims);
    // Partition the input dimensions into traced (reduced) and preserved
    // (output) dimensions.
    const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
    int output_index = 0;
    int reduced_index = 0;
    for (int i = 0; i < NumInputDims; ++i) {
      if (m_reduced[i]) {
        m_reducedDims[reduced_index] = input_dims[i];
        if (reduced_index > 0) {
          // All the dimensions the trace is taken over must have the same size.
          eigen_assert(m_reducedDims[0] == m_reducedDims[reduced_index]);
        }
        ++reduced_index;
      }
      else {
        m_dimensions[output_index] = input_dims[i];
        ++output_index;
      }
    }
    if (NumReducedDims != 0) {
      m_traceDim = m_reducedDims[0];
    }
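    // Illustration (added comment, not in the original source): tracing a
    // (3, 3, 4) tensor over dims {0, 1} gives m_reducedDims = {3, 3},
    // m_dimensions = {4} and m_traceDim = 3.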
    // Compute the output strides.
    if (NumOutputDims > 0) {
      if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
        m_outputStrides[0] = 1;
        for (int i = 1; i < NumOutputDims; ++i) {
          m_outputStrides[i] = m_outputStrides[i - 1] * m_dimensions[i - 1];
        }
      }
      else {
        m_outputStrides.back() = 1;
        for (int i = NumOutputDims - 2; i >= 0; --i) {
          m_outputStrides[i] = m_outputStrides[i + 1] * m_dimensions[i + 1];
        }
      }
    }
    // Compute the input strides.
    if (NumInputDims > 0) {
      array<Index, NumInputDims> input_strides;
      if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
        input_strides[0] = 1;
        for (int i = 1; i < NumInputDims; ++i) {
          input_strides[i] = input_strides[i - 1] * input_dims[i - 1];
        }
      }
      else {
        input_strides.back() = 1;
        for (int i = NumInputDims - 2; i >= 0; --i) {
          input_strides[i] = input_strides[i + 1] * input_dims[i + 1];
        }
      }
      // Partition the input strides to match the dimension partition above.
      output_index = 0;
      reduced_index = 0;
      for (int i = 0; i < NumInputDims; ++i) {
        if (m_reduced[i]) {
          m_reducedStrides[reduced_index] = input_strides[i];
          ++reduced_index;
        }
        else {
          m_preservedStrides[output_index] = input_strides[i];
          ++output_index;
        }
      }
    }
  }
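  // Illustration (added comment, not in the original source): for the
  // column-major (3, 3, 4) example above, input_strides = {1, 3, 9}, so the
  // constructor sets m_reducedStrides = {1, 3} and m_preservedStrides = {9}.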
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const {
    return m_dimensions;
  }

  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType /*data*/) {
    m_impl.evalSubExprsIfNeeded(NULL);
    return true;
  }

  EIGEN_STRONG_INLINE void cleanup() {
    m_impl.cleanup();
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    CoeffReturnType result = internal::cast<int, CoeffReturnType>(0);

    // Stepping by index_stride advances every traced dimension by one,
    // i.e. it moves along the generalized diagonal.
    Index index_stride = 0;
    for (int i = 0; i < NumReducedDims; ++i) {
      index_stride += m_reducedStrides[i];
    }

    // If the trace is taken over all dimensions, the starting index is 0.
    Index cur_index = 0;
    if (NumOutputDims != 0)
      cur_index = firstInput(index);
    for (Index i = 0; i < m_traceDim; ++i) {
      result += m_impl.coeff(cur_index);
      cur_index += index_stride;
    }
    return result;
  }
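  // Illustration (added comment, not in the original source): for a
  // column-major 3x3 matrix traced over dims {0, 1}, index_stride is
  // 1 + 3 = 4, so coeff(0) sums the coefficients at linear indices 0, 4
  // and 8, which is exactly the main diagonal.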
  // There is no vectorized gather along the diagonal, so a packet is
  // assembled from PacketSize scalar coeff() evaluations.
  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
  {
    EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE);
    eigen_assert(index + PacketSize - 1 < dimensions().TotalSize());

    EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
    for (int i = 0; i < PacketSize; ++i) {
      values[i] = coeff(index + i);
    }
    PacketReturnType result = internal::ploadt<PacketReturnType, LoadMode>(values);
    return result;
  }
#ifdef EIGEN_USE_SYCL
  // Binds placeholder accessors to a SYCL command group handler.
  EIGEN_STRONG_INLINE void bind(cl::sycl::handler& cgh) const {
    m_impl.bind(cgh);
  }
#endif
 protected:
  // Given an index into the output tensor, returns the first input
  // coefficient that contributes to the corresponding trace.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index firstInput(Index index) const {
    Index startInput = 0;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = NumOutputDims - 1; i > 0; --i) {
        const Index idx = index / m_outputStrides[i];
        startInput += idx * m_preservedStrides[i];
        index -= idx * m_outputStrides[i];
      }
      startInput += index * m_preservedStrides[0];
    }
    else {
      for (int i = 0; i < NumOutputDims - 1; ++i) {
        const Index idx = index / m_outputStrides[i];
        startInput += idx * m_preservedStrides[i];
        index -= idx * m_outputStrides[i];
      }
      startInput += index * m_preservedStrides[NumOutputDims - 1];
    }
    return startInput;
  }

  Dimensions m_dimensions;
  TensorEvaluator<ArgType, Device> m_impl;
  // Size of the dimensions the trace is taken over.
  Index m_traceDim;
  const Device EIGEN_DEVICE_REF m_device;
  array<bool, NumInputDims> m_reduced;
  array<Index, NumReducedDims> m_reducedDims;
  array<Index, NumOutputDims> m_outputStrides;
  array<Index, NumReducedDims> m_reducedStrides;
  array<Index, NumOutputDims> m_preservedStrides;
};
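// Illustration (added comment, not in the original source): with the
// (3, 3, 4) example above, m_outputStrides = {1} and m_preservedStrides = {9},
// so firstInput(k) returns 9 * k, the linear index of the input coefficient
// t(0, 0, k).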
} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_TRACE_H