#ifndef EIGEN_CXX11_TENSOR_TENSOR_MORPHING_H
#define EIGEN_CXX11_TENSOR_TENSOR_MORPHING_H

namespace Eigen {

/** \class TensorReshaping
  * \ingroup CXX11_Tensor_Module
  *
  * \brief Tensor reshaping class.
  */
namespace internal {
template<typename NewDimensions, typename XprType>
struct traits<TensorReshapingOp<NewDimensions, XprType> > : public traits<XprType>
{
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
  static const int NumDimensions = array_size<NewDimensions>::value;
  static const int Layout = XprTraits::Layout;
  typedef typename XprTraits::PointerType PointerType;
};
template<typename NewDimensions, typename XprType>
struct eval<TensorReshapingOp<NewDimensions, XprType>, Eigen::Dense>
{
  typedef const TensorReshapingOp<NewDimensions, XprType> EIGEN_DEVICE_REF type;
};
template<typename NewDimensions, typename XprType>
struct nested<TensorReshapingOp<NewDimensions, XprType>, 1,
              typename eval<TensorReshapingOp<NewDimensions, XprType> >::type>
{
  typedef TensorReshapingOp<NewDimensions, XprType> type;
};

}  // end namespace internal
template<typename NewDimensions, typename XprType>
class TensorReshapingOp : public TensorBase<TensorReshapingOp<NewDimensions, XprType>, WriteAccessors>
{
  public:
    typedef TensorBase<TensorReshapingOp<NewDimensions, XprType>, WriteAccessors> Base;
    typedef typename Eigen::internal::traits<TensorReshapingOp>::Scalar Scalar;
    typedef typename internal::remove_const<typename XprType::CoeffReturnType>::type CoeffReturnType;
    typedef typename Eigen::internal::nested<TensorReshapingOp>::type Nested;
    typedef typename Eigen::internal::traits<TensorReshapingOp>::StorageKind StorageKind;
    typedef typename Eigen::internal::traits<TensorReshapingOp>::Index Index;

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorReshapingOp(const XprType& expr, const NewDimensions& dims)
        : m_xpr(expr), m_dims(dims) {}

    EIGEN_DEVICE_FUNC
    const NewDimensions& dimensions() const { return m_dims; }

    EIGEN_DEVICE_FUNC
    const typename internal::remove_all<typename XprType::Nested>::type&
    expression() const { return m_xpr; }

    EIGEN_TENSOR_INHERIT_ASSIGNMENT_OPERATORS(TensorReshapingOp)

  protected:
    typename XprType::Nested m_xpr;
    const NewDimensions m_dims;
};
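// Editorial usage sketch (not part of the upstream header): TensorReshapingOp
// is normally built through the public TensorBase::reshape() API. A reshape
// keeps the coefficient order and total size and only reinterprets the
// dimensions:
//
//   Eigen::Tensor<float, 2> input(2, 6);
//   input.setRandom();
//   Eigen::array<Eigen::Index, 3> new_dims{{3, 2, 2}};
//   Eigen::Tensor<float, 3> reshaped = input.reshape(new_dims);
//   // 12 coefficients before and after; only the shape changed.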
// Eval as rvalue
template<typename NewDimensions, typename ArgType, typename Device>
struct TensorEvaluator<const TensorReshapingOp<NewDimensions, ArgType>, Device>
{
  typedef TensorReshapingOp<NewDimensions, ArgType> XprType;
  typedef NewDimensions Dimensions;

  typedef typename XprType::Index Index;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;

  static const int NumOutputDims = internal::array_size<Dimensions>::value;
  static const int NumInputDims  = internal::array_size<
      typename TensorEvaluator<ArgType, Device>::Dimensions>::value;

  enum ReshapingKind {
    // We do not use layout information to determine reshaping kind:
    // depending on the layout `N` can be an inner or outer dimension.
    OneByN = 0,   // expr.reshape(1, N)
    NByOne = 1,   // expr.reshape(N, 1)
    Runtime = 2   // Reshape dimensions are dynamic (specified at runtime).
  };

  static const ReshapingKind kind =
#if defined(EIGEN_HAS_INDEX_LIST)
        (NumOutputDims == 2 && internal::index_statically_eq<NewDimensions>(0, 1)) ? OneByN
      : (NumOutputDims == 2 && internal::index_statically_eq<NewDimensions>(1, 1)) ? NByOne
      : Runtime;
#else
        Runtime;
#endif

  enum {
    IsAligned         = TensorEvaluator<ArgType, Device>::IsAligned,
    PacketAccess      = TensorEvaluator<ArgType, Device>::PacketAccess,
    // For trivial reshapes with raw access to the underlying data we provide
    // zero overhead block access.
    BlockAccess       = TensorEvaluator<ArgType, Device>::RawAccess &&
                        NumInputDims > 0 && NumOutputDims > 0,
    PreferBlockAccess = false,
    Layout            = TensorEvaluator<ArgType, Device>::Layout,
    CoordAccess       = false,  // to be implemented
    RawAccess         = TensorEvaluator<ArgType, Device>::RawAccess
  };

  typedef typename internal::remove_const<Scalar>::type ScalarNoConst;

  //===- Tensor block evaluation strategy (see TensorBlock.h) ------------===//
  typedef internal::TensorBlockDescriptor<NumOutputDims, Index> TensorBlockDesc;
  typedef internal::TensorBlockScratchAllocator<Device> TensorBlockScratch;

  typedef typename internal::TensorMaterializedBlock<ScalarNoConst, NumOutputDims,
                                                     Layout, Index>
      TensorBlock;
  //===--------------------------------------------------------------------===//

  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device), m_dimensions(op.dimensions())
  {
    // The total size of the reshaped tensor must be equal to the total size
    // of the input tensor.
    eigen_assert(internal::array_prod(m_dimensions) == internal::array_prod(m_impl.dimensions()));
  }

  EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_dimensions; }
#ifdef EIGEN_USE_THREADS
  template <typename EvalSubExprsCallback>
  EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(
      EvaluatorPointerType data, EvalSubExprsCallback done) {
    m_impl.evalSubExprsIfNeededAsync(data, std::move(done));
  }
#endif  // EIGEN_USE_THREADS

  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) {
    return m_impl.evalSubExprsIfNeeded(data);
  }

  EIGEN_STRONG_INLINE void cleanup() {
    m_impl.cleanup();
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    return m_impl.coeff(index);
  }

  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
  {
    return m_impl.template packet<LoadMode>(index);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    return m_impl.costPerCoeff(vectorized);
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE internal::TensorBlockResourceRequirements
  getResourceRequirements() const {
    return internal::TensorBlockResourceRequirements::any();
  }

  // required in block() below
  struct BlockIteratorState {
    // ...
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock
  block(TensorBlockDesc& desc, TensorBlockScratch& scratch,
        bool /*root_of_expr_ast*/ = false) const {
    eigen_assert((kind == Runtime) ||
                 (kind == OneByN && desc.dimensions()[0] == 1) ||
                 (kind == NByOne && desc.dimensions()[1] == 1));

    if (kind == OneByN || kind == NByOne) {
      // We can guarantee at compile time that the block is just a contiguous
      // slice of the underlying expression memory buffer.
      return TensorBlock(internal::TensorBlockKind::kView,
                         m_impl.data() + desc.offset(), desc.dimensions());
    } else {
      // This will do additional runtime checks, and in the end it might still
      // be a view, or it might be a block materialized in a temporary buffer.
      return TensorBlock::materialize(m_impl.data(), m_dimensions, desc,
                                      scratch);
    }
  }
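// Editorial sketch: with EIGEN_HAS_INDEX_LIST, a reshape whose static first
// (or second) dimension is 1 is classified as OneByN (or NByOne) at compile
// time, so block() above can return a zero-copy view. `t` and `n` below are
// placeholders:
//
//   Eigen::IndexList<Eigen::type2index<1>, Eigen::Index> dims;  // shape (1, n)
//   dims.set(1, n);
//   auto row_view = t.reshape(dims);  // evaluator kind == OneByN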
  EIGEN_DEVICE_FUNC typename Storage::Type data() const {
    return constCast(m_impl.data());
  }

  EIGEN_DEVICE_FUNC const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }

#ifdef EIGEN_USE_SYCL
  // binding placeholder accessors to a command group handler for SYCL
  EIGEN_STRONG_INLINE void bind(cl::sycl::handler& cgh) const {
    m_impl.bind(cgh);
  }
#endif

 protected:
  TensorEvaluator<ArgType, Device> m_impl;
  NewDimensions m_dimensions;
};


// Eval as lvalue
template<typename NewDimensions, typename ArgType, typename Device>
struct TensorEvaluator<TensorReshapingOp<NewDimensions, ArgType>, Device>
  : public TensorEvaluator<const TensorReshapingOp<NewDimensions, ArgType>, Device>
{
  typedef TensorEvaluator<const TensorReshapingOp<NewDimensions, ArgType>, Device> Base;
  typedef TensorReshapingOp<NewDimensions, ArgType> XprType;
  typedef NewDimensions Dimensions;

  enum {
    IsAligned         = TensorEvaluator<ArgType, Device>::IsAligned,
    PacketAccess      = TensorEvaluator<ArgType, Device>::PacketAccess,
    BlockAccess       = TensorEvaluator<ArgType, Device>::RawAccess,
    PreferBlockAccess = false,
    Layout            = TensorEvaluator<ArgType, Device>::Layout,
    CoordAccess       = false,  // to be implemented
    RawAccess         = TensorEvaluator<ArgType, Device>::RawAccess
  };

  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
    : Base(op, device)
  { }

  typedef typename XprType::Index Index;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;

  typedef internal::TensorBlockDescriptor<TensorEvaluator::NumOutputDims, Index>
      TensorBlockDesc;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index)
  {
    return this->m_impl.coeffRef(index);
  }

  template <int StoreMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  void writePacket(Index index, const PacketReturnType& x)
  {
    this->m_impl.template writePacket<StoreMode>(index, x);
  }
  template <typename TensorBlock>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writeBlock(
      const TensorBlockDesc& desc, const TensorBlock& block) {
    assert(this->m_impl.data() != NULL);

    typedef typename TensorBlock::XprType TensorBlockExpr;
    typedef internal::TensorBlockAssignment<
        Scalar, TensorEvaluator::NumOutputDims, TensorBlockExpr, Index>
        TensorBlockAssign;

    TensorBlockAssign::Run(
        TensorBlockAssign::target(
            desc.dimensions(), internal::strides<Layout>(this->dimensions()),
            this->m_impl.data(), desc.offset()),
        block.expr());
  }
};
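// Editorial usage sketch: the lvalue evaluator above is what makes a reshape
// assignable, e.g. writing a matrix into a flat tensor through a reshape view:
//
//   Eigen::Tensor<float, 1> flat(12);
//   Eigen::Tensor<float, 2> mat(3, 4);
//   mat.setRandom();
//   Eigen::array<Eigen::Index, 2> dims{{3, 4}};
//   flat.reshape(dims) = mat;  // coeffRef()/writePacket() forward to `flat`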
/** \class TensorSlicing
  * \ingroup CXX11_Tensor_Module
  *
  * \brief Tensor slicing class.
  */
namespace internal {
template<typename StartIndices, typename Sizes, typename XprType>
struct traits<TensorSlicingOp<StartIndices, Sizes, XprType> > : public traits<XprType>
{
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
  static const int NumDimensions = array_size<StartIndices>::value;
  static const int Layout = XprTraits::Layout;
  typedef typename XprTraits::PointerType PointerType;
};
template<typename StartIndices, typename Sizes, typename XprType>
struct eval<TensorSlicingOp<StartIndices, Sizes, XprType>, Eigen::Dense>
{
  typedef const TensorSlicingOp<StartIndices, Sizes, XprType> EIGEN_DEVICE_REF type;
};
template<typename StartIndices, typename Sizes, typename XprType>
struct nested<TensorSlicingOp<StartIndices, Sizes, XprType>, 1,
              typename eval<TensorSlicingOp<StartIndices, Sizes, XprType> >::type>
{
  typedef TensorSlicingOp<StartIndices, Sizes, XprType> type;
};

}  // end namespace internal
template<typename StartIndices, typename Sizes, typename XprType>
class TensorSlicingOp : public TensorBase<TensorSlicingOp<StartIndices, Sizes, XprType> >
{
  public:
    typedef TensorBase<TensorSlicingOp<StartIndices, Sizes, XprType> > Base;
    typedef typename Eigen::internal::traits<TensorSlicingOp>::Scalar Scalar;
    typedef typename XprType::CoeffReturnType CoeffReturnType;
    typedef typename Eigen::internal::nested<TensorSlicingOp>::type Nested;
    typedef typename Eigen::internal::traits<TensorSlicingOp>::StorageKind StorageKind;
    typedef typename Eigen::internal::traits<TensorSlicingOp>::Index Index;

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorSlicingOp(const XprType& expr, const StartIndices& indices, const Sizes& sizes)
        : m_xpr(expr), m_indices(indices), m_sizes(sizes) {}

    EIGEN_DEVICE_FUNC const StartIndices& startIndices() const { return m_indices; }
    EIGEN_DEVICE_FUNC const Sizes& sizes() const { return m_sizes; }

    EIGEN_DEVICE_FUNC
    const typename internal::remove_all<typename XprType::Nested>::type&
    expression() const { return m_xpr; }

    EIGEN_TENSOR_INHERIT_ASSIGNMENT_OPERATORS(TensorSlicingOp)

  protected:
    typename XprType::Nested m_xpr;
    const StartIndices m_indices;
    const Sizes m_sizes;
};
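// Editorial usage sketch: TensorSlicingOp is normally built through the public
// TensorBase::slice(offsets, extents) API:
//
//   Eigen::Tensor<int, 2> t(4, 5);
//   t.setRandom();
//   Eigen::array<Eigen::Index, 2> offsets{{1, 2}};
//   Eigen::array<Eigen::Index, 2> extents{{2, 3}};
//   Eigen::Tensor<int, 2> s = t.slice(offsets, extents);
//   // s(i, j) == t(1 + i, 2 + j) for 0 <= i < 2, 0 <= j < 3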
namespace internal {

// Fixme: figure out the exact threshold
template <typename Index, typename Device, bool BlockAccess>
struct MemcpyTriggerForSlicing {
  EIGEN_DEVICE_FUNC MemcpyTriggerForSlicing(const Device& device) : threshold_(2 * device.numThreads()) { }
  EIGEN_DEVICE_FUNC bool operator ()(Index total, Index contiguous) const {
    const bool prefer_block_evaluation = BlockAccess && total > 32*1024;
    return !prefer_block_evaluation && contiguous > threshold_;
  }

 private:
  Index threshold_;
};
// It is very expensive to start the memcpy kernel on GPU: we therefore only
// use it for large copies.
#ifdef EIGEN_USE_GPU
template <typename Index, bool BlockAccess>
struct MemcpyTriggerForSlicing<Index, GpuDevice, BlockAccess> {
  EIGEN_DEVICE_FUNC MemcpyTriggerForSlicing(const GpuDevice&) { }
  EIGEN_DEVICE_FUNC bool operator ()(Index, Index contiguous) const {
    return contiguous > 4*1024*1024;
  }
};
#endif
// It is also very expensive to start the memcpy kernel on SYCL devices.
#ifdef EIGEN_USE_SYCL
template <typename Index, bool BlockAccess>
struct MemcpyTriggerForSlicing<Index, Eigen::SyclDevice, BlockAccess> {
  EIGEN_DEVICE_FUNC MemcpyTriggerForSlicing(const SyclDevice&) { }
  EIGEN_DEVICE_FUNC bool operator ()(Index, Index contiguous) const {
    return contiguous > 4*1024*1024;
  }
};
#endif

}  // end namespace internal
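// Editorial illustration of the trigger logic above (values are placeholders):
// on the default single-threaded CPU device threshold_ is 2, so a slice whose
// innermost contiguous run exceeds 2 coefficients takes the memcpy path,
// unless block evaluation is preferred (BlockAccess and total > 32*1024):
//
//   Eigen::DefaultDevice device;
//   internal::MemcpyTriggerForSlicing<Eigen::Index, Eigen::DefaultDevice,
//                                     /*BlockAccess=*/false> trigger(device);
//   trigger(/*total=*/10000, /*contiguous=*/128);  // true -> use memcpy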
// Eval as rvalue
template<typename StartIndices, typename Sizes, typename ArgType, typename Device>
struct TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Device>
{
  typedef TensorSlicingOp<StartIndices, Sizes, ArgType> XprType;
  static const int NumDims = internal::array_size<Sizes>::value;

  typedef typename XprType::Index Index;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  typedef Sizes Dimensions;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;

  enum {
    // Alignment can't be guaranteed at compile time since it depends on the
    // slice offsets and sizes.
    IsAligned         = false,
    PacketAccess      = TensorEvaluator<ArgType, Device>::PacketAccess,
    BlockAccess       = TensorEvaluator<ArgType, Device>::BlockAccess,
    PreferBlockAccess = true,
    Layout            = TensorEvaluator<ArgType, Device>::Layout,
    CoordAccess       = false,
    RawAccess         = false
  };

  typedef typename internal::remove_const<Scalar>::type ScalarNoConst;

  //===- Tensor block evaluation strategy (see TensorBlock.h) ------------===//
  typedef internal::TensorBlockDescriptor<NumDims, Index> TensorBlockDesc;
  typedef internal::TensorBlockScratchAllocator<Device> TensorBlockScratch;

  // Tensor slicing does not change the block type.
  typedef typename TensorEvaluator<const ArgType, Device>::TensorBlock TensorBlock;
  //===--------------------------------------------------------------------===//

  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device), m_device(device),
        m_dimensions(op.sizes()), m_offsets(op.startIndices())
  {
    m_is_identity = true;
    for (int i = 0; i < internal::array_size<Dimensions>::value; ++i) {
      if (m_impl.dimensions()[i] != op.sizes()[i] ||
          op.startIndices()[i] != 0) {
        m_is_identity = false;
      }
    }

    // No strides for scalars.
    if (NumDims == 0) return;
    const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims =
        m_impl.dimensions();
    const Sizes& output_dims = op.sizes();
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_inputStrides[0] = 1;
      for (int i = 1; i < NumDims; ++i) {
        m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1];
      }

      m_outputStrides[0] = 1;
      for (int i = 1; i < NumDims; ++i) {
        m_outputStrides[i] = m_outputStrides[i-1] * output_dims[i-1];
        m_fastOutputStrides[i] = internal::TensorIntDivisor<Index>(
            m_outputStrides[i] > 0 ? m_outputStrides[i] : 1);
      }
    } else {
      m_inputStrides[NumDims-1] = 1;
      for (int i = NumDims - 2; i >= 0; --i) {
        m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1];
      }

      m_outputStrides[NumDims-1] = 1;
      for (int i = NumDims - 2; i >= 0; --i) {
        m_outputStrides[i] = m_outputStrides[i+1] * output_dims[i+1];
        m_fastOutputStrides[i] = internal::TensorIntDivisor<Index>(
            m_outputStrides[i] > 0 ? m_outputStrides[i] : 1);
      }
    }
  }
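  // Worked example (editorial, not part of the upstream header): a col-major
  // input of size (5, 7) sliced to sizes (2, 3) gives
  //   m_inputStrides  = {1, 5}   // strides of the input tensor
  //   m_outputStrides = {1, 2}   // strides of the slice itself
  // so stepping one element along dim 1 of the slice advances 5 elements in
  // the input buffer but only 2 in the slice's linear index space.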
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }

  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) {
    m_impl.evalSubExprsIfNeeded(NULL);
    if (!NumTraits<typename internal::remove_const<Scalar>::type>::RequireInitialization
        && data && m_impl.data()) {
      Index contiguous_values = 1;
      if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
        for (int i = 0; i < NumDims; ++i) {
          contiguous_values *= dimensions()[i];
          if (dimensions()[i] != m_impl.dimensions()[i]) {
            break;
          }
        }
      } else {
        for (int i = NumDims-1; i >= 0; --i) {
          contiguous_values *= dimensions()[i];
          if (dimensions()[i] != m_impl.dimensions()[i]) {
            break;
          }
        }
      }
      // Use memcpy if it's going to be faster than using the regular evaluation.
      const MemcpyTriggerForSlicing<Index, Device, BlockAccess> trigger(m_device);
      if (trigger(internal::array_prod(dimensions()), contiguous_values)) {
        EvaluatorPointerType src = (EvaluatorPointerType)m_impl.data();
        for (Index i = 0; i < internal::array_prod(dimensions()); i += contiguous_values) {
          Index offset = srcCoeff(i);
          m_device.memcpy((void*)(m_device.get(data + i)),
                          m_device.get(src+offset),
                          contiguous_values * sizeof(Scalar));
        }
        return false;
      }
    }
    return true;
  }
#ifdef EIGEN_USE_THREADS
  template <typename EvalSubExprsCallback>
  EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(
      EvaluatorPointerType /*data*/, EvalSubExprsCallback done) {
    m_impl.evalSubExprsIfNeededAsync(nullptr, [done](bool) { done(true); });
  }
#endif  // EIGEN_USE_THREADS

  EIGEN_STRONG_INLINE void cleanup() {
    m_impl.cleanup();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    if (m_is_identity) {
      return m_impl.coeff(index);
    } else {
      return m_impl.coeff(srcCoeff(index));
    }
  }
  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
  {
    const int packetSize = PacketType<CoeffReturnType, Device>::size;
    EIGEN_STATIC_ASSERT((packetSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
    eigen_assert(index+packetSize-1 < internal::array_prod(dimensions()));

    if (m_is_identity) {
      return m_impl.template packet<LoadMode>(index);
    }

    Index inputIndices[] = {0, 0};
    Index indices[] = {index, index + packetSize - 1};
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      EIGEN_UNROLL_LOOP
      for (int i = NumDims - 1; i > 0; --i) {
        const Index idx0 = indices[0] / m_fastOutputStrides[i];
        const Index idx1 = indices[1] / m_fastOutputStrides[i];
        inputIndices[0] += (idx0 + m_offsets[i]) * m_inputStrides[i];
        inputIndices[1] += (idx1 + m_offsets[i]) * m_inputStrides[i];
        indices[0] -= idx0 * m_outputStrides[i];
        indices[1] -= idx1 * m_outputStrides[i];
      }
      inputIndices[0] += (indices[0] + m_offsets[0]);
      inputIndices[1] += (indices[1] + m_offsets[0]);
    } else {
      EIGEN_UNROLL_LOOP
      for (int i = 0; i < NumDims - 1; ++i) {
        const Index idx0 = indices[0] / m_fastOutputStrides[i];
        const Index idx1 = indices[1] / m_fastOutputStrides[i];
        inputIndices[0] += (idx0 + m_offsets[i]) * m_inputStrides[i];
        inputIndices[1] += (idx1 + m_offsets[i]) * m_inputStrides[i];
        indices[0] -= idx0 * m_outputStrides[i];
        indices[1] -= idx1 * m_outputStrides[i];
      }
      inputIndices[0] += (indices[0] + m_offsets[NumDims-1]);
      inputIndices[1] += (indices[1] + m_offsets[NumDims-1]);
    }
    if (inputIndices[1] - inputIndices[0] == packetSize - 1) {
      // The packet maps to a contiguous range in the input: load it directly.
      PacketReturnType rslt = m_impl.template packet<Unaligned>(inputIndices[0]);
      return rslt;
    } else {
      EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[packetSize];
      values[0] = m_impl.coeff(inputIndices[0]);
      values[packetSize-1] = m_impl.coeff(inputIndices[1]);
      EIGEN_UNROLL_LOOP
      for (int i = 1; i < packetSize-1; ++i) {
        values[i] = coeff(index+i);
      }
      PacketReturnType rslt = internal::pload<PacketReturnType>(values);
      return rslt;
    }
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    return m_impl.costPerCoeff(vectorized) +
           TensorOpCost(0, 0, m_is_identity ? 1 : NumDims);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE internal::TensorBlockResourceRequirements
  getResourceRequirements() const {
    const size_t target_size = m_device.lastLevelCacheSize();
    return internal::TensorBlockResourceRequirements::merge(
        internal::TensorBlockResourceRequirements::skewed<Scalar>(target_size),
        m_impl.getResourceRequirements());
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorBlock
  block(TensorBlockDesc& desc, TensorBlockScratch& scratch,
        bool /*root_of_expr_ast*/ = false) const {
    TensorBlockDesc arg_desc = desc.WithOffset(srcCoeff(desc.offset()));
    TensorBlock block = m_impl.block(arg_desc, scratch);
    if (!arg_desc.HasDestinationBuffer()) desc.DropDestinationBuffer();
    return block;
  }
  EIGEN_DEVICE_FUNC typename Storage::Type data() const {
    typename Storage::Type result = constCast(m_impl.data());
    if (result) {
      Index offset = 0;
      if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
        for (int i = 0; i < NumDims; ++i) {
          if (m_dimensions[i] != m_impl.dimensions()[i]) {
            offset += m_offsets[i] * m_inputStrides[i];
            for (int j = i+1; j < NumDims; ++j) {
              if (m_dimensions[j] > 1) {
                return NULL;
              }
              offset += m_offsets[j] * m_inputStrides[j];
            }
            break;
          }
        }
      } else {
        for (int i = NumDims - 1; i >= 0; --i) {
          if (m_dimensions[i] != m_impl.dimensions()[i]) {
            offset += m_offsets[i] * m_inputStrides[i];
            for (int j = i-1; j >= 0; --j) {
              if (m_dimensions[j] > 1) {
                return NULL;
              }
              offset += m_offsets[j] * m_inputStrides[j];
            }
            break;
          }
        }
      }
      return result + offset;
    }
    return NULL;
  }
#ifdef EIGEN_USE_SYCL
  // binding placeholder accessors to a command group handler for SYCL
  EIGEN_STRONG_INLINE void bind(cl::sycl::handler& cgh) const {
    m_impl.bind(cgh);
  }
#endif

 protected:
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const
  {
    Index inputIndex = 0;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      EIGEN_UNROLL_LOOP
      for (int i = NumDims - 1; i > 0; --i) {
        const Index idx = index / m_fastOutputStrides[i];
        inputIndex += (idx + m_offsets[i]) * m_inputStrides[i];
        index -= idx * m_outputStrides[i];
      }
      inputIndex += (index + m_offsets[0]);
    } else {
      EIGEN_UNROLL_LOOP
      for (int i = 0; i < NumDims - 1; ++i) {
        const Index idx = index / m_fastOutputStrides[i];
        inputIndex += (idx + m_offsets[i]) * m_inputStrides[i];
        index -= idx * m_outputStrides[i];
      }
      inputIndex += (index + m_offsets[NumDims-1]);
    }
    return inputIndex;
  }

  array<Index, NumDims> m_outputStrides;
  array<internal::TensorIntDivisor<Index>, NumDims> m_fastOutputStrides;
  array<Index, NumDims> m_inputStrides;
  TensorEvaluator<ArgType, Device> m_impl;
  const Device EIGEN_DEVICE_REF m_device;
  Dimensions m_dimensions;
  bool m_is_identity;
  const StartIndices m_offsets;
};
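// Worked example (editorial): with the (5, 7) -> (2, 3) col-major slice above
// and offsets (1, 2), output index 3 decomposes as idx = 3 / 2 = 1 along
// dim 1, remainder 1 along dim 0, so
//   srcCoeff(3) = (1 + 2) * 5 + (1 + 1) = 17
// i.e. slice coefficient (1, 1) reads input coefficient (2, 3).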
// Eval as lvalue
template<typename StartIndices, typename Sizes, typename ArgType, typename Device>
struct TensorEvaluator<TensorSlicingOp<StartIndices, Sizes, ArgType>, Device>
  : public TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Device>
{
  typedef TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Device> Base;
  typedef TensorSlicingOp<StartIndices, Sizes, ArgType> XprType;
  static const int NumDims = internal::array_size<Sizes>::value;

  typedef typename XprType::Index Index;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  typedef Sizes Dimensions;

  enum {
    IsAligned         = false,
    PacketAccess      = TensorEvaluator<ArgType, Device>::PacketAccess,
    BlockAccess       = TensorEvaluator<ArgType, Device>::BlockAccess,
    PreferBlockAccess = true,
    Layout            = TensorEvaluator<ArgType, Device>::Layout,
    CoordAccess       = false,
    RawAccess         = (NumDims == 1) & TensorEvaluator<ArgType, Device>::RawAccess
  };

  typedef internal::TensorBlockDescriptor<NumDims, Index> TensorBlockDesc;

  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
    : Base(op, device)
  { }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index)
  {
    if (this->m_is_identity) {
      return this->m_impl.coeffRef(index);
    } else {
      return this->m_impl.coeffRef(this->srcCoeff(index));
    }
  }

  template <int StoreMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  void writePacket(Index index, const PacketReturnType& x)
  {
    if (this->m_is_identity) {
      this->m_impl.template writePacket<StoreMode>(index, x);
      return;
    }
    const int packetSize = PacketType<CoeffReturnType, Device>::size;
    Index inputIndices[] = {0, 0};
    Index indices[] = {index, index + packetSize - 1};
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      EIGEN_UNROLL_LOOP
      for (int i = NumDims - 1; i > 0; --i) {
        const Index idx0 = indices[0] / this->m_fastOutputStrides[i];
        const Index idx1 = indices[1] / this->m_fastOutputStrides[i];
        inputIndices[0] += (idx0 + this->m_offsets[i]) * this->m_inputStrides[i];
        inputIndices[1] += (idx1 + this->m_offsets[i]) * this->m_inputStrides[i];
        indices[0] -= idx0 * this->m_outputStrides[i];
        indices[1] -= idx1 * this->m_outputStrides[i];
      }
      inputIndices[0] += (indices[0] + this->m_offsets[0]);
      inputIndices[1] += (indices[1] + this->m_offsets[0]);
    } else {
      EIGEN_UNROLL_LOOP
      for (int i = 0; i < NumDims - 1; ++i) {
        const Index idx0 = indices[0] / this->m_fastOutputStrides[i];
        const Index idx1 = indices[1] / this->m_fastOutputStrides[i];
        inputIndices[0] += (idx0 + this->m_offsets[i]) * this->m_inputStrides[i];
        inputIndices[1] += (idx1 + this->m_offsets[i]) * this->m_inputStrides[i];
        indices[0] -= idx0 * this->m_outputStrides[i];
        indices[1] -= idx1 * this->m_outputStrides[i];
      }
      inputIndices[0] += (indices[0] + this->m_offsets[NumDims-1]);
      inputIndices[1] += (indices[1] + this->m_offsets[NumDims-1]);
    }
    if (inputIndices[1] - inputIndices[0] == packetSize - 1) {
      // The packet maps to a contiguous range in the input: store it directly.
      this->m_impl.template writePacket<StoreMode>(inputIndices[0], x);
    } else {
      EIGEN_ALIGN_MAX CoeffReturnType values[packetSize];
      internal::pstore<CoeffReturnType, PacketReturnType>(values, x);
      this->m_impl.coeffRef(inputIndices[0]) = values[0];
      this->m_impl.coeffRef(inputIndices[1]) = values[packetSize-1];
      EIGEN_UNROLL_LOOP
      for (int i = 1; i < packetSize-1; ++i) {
        this->coeffRef(index+i) = values[i];
      }
    }
  }
  template<typename TensorBlock>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writeBlock(
      const TensorBlockDesc& desc, const TensorBlock& block) {
    TensorBlockDesc arg_desc = desc.WithOffset(this->srcCoeff(desc.offset()));
    this->m_impl.writeBlock(arg_desc, block);
  }
};
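// Editorial usage sketch: the lvalue evaluator above makes a slice assignable,
// e.g. writing a patch into a larger tensor through the slice view:
//
//   Eigen::Tensor<float, 2> t(4, 5);
//   t.setZero();
//   Eigen::Tensor<float, 2> patch(2, 2);
//   patch.setConstant(1.0f);
//   Eigen::array<Eigen::Index, 2> offsets{{0, 1}};
//   Eigen::array<Eigen::Index, 2> extents{{2, 2}};
//   t.slice(offsets, extents) = patch;  // writes through the slice view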
namespace internal {
template<typename StartIndices, typename StopIndices, typename Strides, typename XprType>
struct traits<TensorStridingSlicingOp<StartIndices, StopIndices, Strides, XprType> >
    : public traits<XprType>
{
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
  static const int NumDimensions = array_size<StartIndices>::value;
  static const int Layout = XprTraits::Layout;
  typedef typename XprTraits::PointerType PointerType;
};
template<typename StartIndices, typename StopIndices, typename Strides, typename XprType>
struct eval<TensorStridingSlicingOp<StartIndices, StopIndices, Strides, XprType>, Eigen::Dense>
{
  typedef const TensorStridingSlicingOp<StartIndices, StopIndices, Strides, XprType> EIGEN_DEVICE_REF type;
};
template<typename StartIndices, typename StopIndices, typename Strides, typename XprType>
struct nested<TensorStridingSlicingOp<StartIndices, StopIndices, Strides, XprType>, 1,
              typename eval<TensorStridingSlicingOp<StartIndices, StopIndices, Strides, XprType> >::type>
{
  typedef TensorStridingSlicingOp<StartIndices, StopIndices, Strides, XprType> type;
};

}  // end namespace internal
template<typename StartIndices, typename StopIndices, typename Strides, typename XprType>
class TensorStridingSlicingOp : public TensorBase<TensorStridingSlicingOp<StartIndices, StopIndices, Strides, XprType> >
{
  public:
    typedef TensorBase<TensorStridingSlicingOp<StartIndices, StopIndices, Strides, XprType> > Base;
    typedef typename internal::traits<TensorStridingSlicingOp>::Scalar Scalar;
    typedef typename XprType::CoeffReturnType CoeffReturnType;
    typedef typename internal::nested<TensorStridingSlicingOp>::type Nested;
    typedef typename internal::traits<TensorStridingSlicingOp>::StorageKind StorageKind;
    typedef typename internal::traits<TensorStridingSlicingOp>::Index Index;

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorStridingSlicingOp(
        const XprType& expr, const StartIndices& startIndices,
        const StopIndices& stopIndices, const Strides& strides)
        : m_xpr(expr), m_startIndices(startIndices), m_stopIndices(stopIndices),
          m_strides(strides) {}

    EIGEN_DEVICE_FUNC const StartIndices& startIndices() const { return m_startIndices; }
    EIGEN_DEVICE_FUNC const StopIndices& stopIndices() const { return m_stopIndices; }
    EIGEN_DEVICE_FUNC const Strides& strides() const { return m_strides; }

    EIGEN_DEVICE_FUNC
    const typename internal::remove_all<typename XprType::Nested>::type&
    expression() const { return m_xpr; }

    EIGEN_TENSOR_INHERIT_ASSIGNMENT_OPERATORS(TensorStridingSlicingOp)

  protected:
    typename XprType::Nested m_xpr;
    const StartIndices m_startIndices;
    const StopIndices m_stopIndices;
    const Strides m_strides;
};
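// Editorial usage sketch: TensorStridingSlicingOp is normally built through
// the public TensorBase::stridedSlice(start, stop, stride) API:
//
//   Eigen::Tensor<int, 1> v(8);
//   v.setValues({0, 1, 2, 3, 4, 5, 6, 7});
//   Eigen::array<Eigen::Index, 1> start{{1}}, stop{{7}}, stride{{2}};
//   Eigen::Tensor<int, 1> w = v.stridedSlice(start, stop, stride);
//   // w has ceil((7 - 1) / 2) = 3 elements: {1, 3, 5}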
// Eval as rvalue
template<typename StartIndices, typename StopIndices, typename Strides, typename ArgType, typename Device>
struct TensorEvaluator<const TensorStridingSlicingOp<StartIndices, StopIndices, Strides, ArgType>, Device>
{
  typedef TensorStridingSlicingOp<StartIndices, StopIndices, Strides, ArgType> XprType;
  static const int NumDims = internal::array_size<Strides>::value;

  typedef typename XprType::Index Index;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;
  typedef Strides Dimensions;

  enum {
    // Alignment can't be guaranteed at compile time since it depends on the
    // slice offsets and sizes.
    IsAligned         = false,
    PacketAccess      = false,
    BlockAccess       = false,
    PreferBlockAccess = TensorEvaluator<ArgType, Device>::PreferBlockAccess,
    Layout            = TensorEvaluator<ArgType, Device>::Layout,
    RawAccess         = false
  };

  //===- Tensor block evaluation strategy (see TensorBlock.h) ------------===//
  typedef internal::TensorBlockNotImplemented TensorBlock;
  //===--------------------------------------------------------------------===//
  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device),
        m_device(device),
        m_strides(op.strides())
  {
    // Handle degenerate intervals by gracefully clamping and allowing
    // m_dimensions to be zero.
    DSizes<Index, NumDims> startIndicesClamped, stopIndicesClamped;
    for (ptrdiff_t i = 0; i < internal::array_size<Dimensions>::value; ++i) {
      eigen_assert(m_strides[i] != 0 && "0 stride is invalid");
      if (m_strides[i] > 0) {
        startIndicesClamped[i] =
            clamp(op.startIndices()[i], 0, m_impl.dimensions()[i]);
        stopIndicesClamped[i] =
            clamp(op.stopIndices()[i], 0, m_impl.dimensions()[i]);
      } else {
        /* implies m_strides[i] < 0 by the assert above */
        startIndicesClamped[i] =
            clamp(op.startIndices()[i], -1, m_impl.dimensions()[i] - 1);
        stopIndicesClamped[i] =
            clamp(op.stopIndices()[i], -1, m_impl.dimensions()[i] - 1);
      }
      m_startIndices[i] = startIndicesClamped[i];
    }
    typedef typename TensorEvaluator<ArgType, Device>::Dimensions InputDimensions;
    const InputDimensions& input_dims = m_impl.dimensions();

    // compute output tensor shape
    m_is_identity = true;
    for (int i = 0; i < NumDims; i++) {
      Index interval = stopIndicesClamped[i] - startIndicesClamped[i];
      if (interval == 0 || ((interval < 0) != (m_strides[i] < 0))) {
        m_dimensions[i] = 0;
      } else {
        m_dimensions[i] =
            (interval / m_strides[i]) + (interval % m_strides[i] != 0 ? 1 : 0);
        eigen_assert(m_dimensions[i] >= 0);
      }
      if (m_strides[i] != 1 || interval != m_impl.dimensions()[i]) {
        m_is_identity = false;
      }
    }
    Strides output_dims = m_dimensions;

    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_inputStrides[0] = m_strides[0];
      m_offsets[0] = startIndicesClamped[0];
      Index previousDimProduct = 1;
      for (int i = 1; i < NumDims; ++i) {
        previousDimProduct *= input_dims[i-1];
        m_inputStrides[i] = previousDimProduct * m_strides[i];
        m_offsets[i] = startIndicesClamped[i] * previousDimProduct;
      }

      m_outputStrides[0] = 1;
      for (int i = 1; i < NumDims; ++i) {
        m_outputStrides[i] = m_outputStrides[i-1] * output_dims[i-1];
        m_fastOutputStrides[i] = internal::TensorIntDivisor<Index>(
            m_outputStrides[i] > 0 ? m_outputStrides[i] : 1);
      }
    } else {
      m_inputStrides[NumDims-1] = m_strides[NumDims-1];
      m_offsets[NumDims-1] = startIndicesClamped[NumDims-1];
      Index previousDimProduct = 1;
      for (int i = NumDims - 2; i >= 0; --i) {
        previousDimProduct *= input_dims[i+1];
        m_inputStrides[i] = previousDimProduct * m_strides[i];
        m_offsets[i] = startIndicesClamped[i] * previousDimProduct;
      }

      m_outputStrides[NumDims-1] = 1;
      for (int i = NumDims - 2; i >= 0; --i) {
        m_outputStrides[i] = m_outputStrides[i+1] * output_dims[i+1];
        m_fastOutputStrides[i] = internal::TensorIntDivisor<Index>(
            m_outputStrides[i] > 0 ? m_outputStrides[i] : 1);
      }
    }
  }
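  // Worked example (editorial): start = 6, stop = 0, stride = -2 on a
  // length-8 dimension clamps to [-1, 7], giving interval = 0 - 6 = -6 and
  //   m_dimensions = (-6 / -2) + (-6 % -2 != 0 ? 1 : 0) = 3
  // which selects the source elements at indices 6, 4, 2.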
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }

  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) {
    m_impl.evalSubExprsIfNeeded(NULL);
    return true;
  }

  EIGEN_STRONG_INLINE void cleanup() {
    m_impl.cleanup();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    if (m_is_identity) {
      return m_impl.coeff(index);
    } else {
      return m_impl.coeff(srcCoeff(index));
    }
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    return m_impl.costPerCoeff(vectorized) +
           TensorOpCost(0, 0, m_is_identity ? 1 : NumDims);
  }
  EIGEN_DEVICE_FUNC typename Storage::Type data() const {
    return NULL;
  }

#ifdef EIGEN_USE_SYCL
  // binding placeholder accessors to a command group handler for SYCL
  EIGEN_STRONG_INLINE void bind(cl::sycl::handler& cgh) const {
    m_impl.bind(cgh);
  }
#endif

 protected:
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const
  {
    Index inputIndex = 0;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      EIGEN_UNROLL_LOOP
      for (int i = NumDims - 1; i >= 0; --i) {
        const Index idx = index / m_fastOutputStrides[i];
        inputIndex += idx * m_inputStrides[i] + m_offsets[i];
        index -= idx * m_outputStrides[i];
      }
    } else {
      EIGEN_UNROLL_LOOP
      for (int i = 0; i < NumDims; ++i) {
        const Index idx = index / m_fastOutputStrides[i];
        inputIndex += idx * m_inputStrides[i] + m_offsets[i];
        index -= idx * m_outputStrides[i];
      }
    }
    return inputIndex;
  }
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index clamp(Index value, Index min, Index max) {
#ifndef SYCL_DEVICE_ONLY
    return numext::maxi(min, numext::mini(max, value));
#else
    return cl::sycl::clamp(value, min, max);
#endif
  }

  array<Index, NumDims> m_outputStrides;
  array<internal::TensorIntDivisor<Index>, NumDims> m_fastOutputStrides;
  array<Index, NumDims> m_inputStrides;
  bool m_is_identity;
  TensorEvaluator<ArgType, Device> m_impl;
  const Device EIGEN_DEVICE_REF m_device;
  DSizes<Index, NumDims> m_startIndices;  // clamped startIndices
  DSizes<Index, NumDims> m_dimensions;
  DSizes<Index, NumDims> m_offsets;  // offset in a flattened shape
  const Strides m_strides;
};
// Eval as lvalue
template<typename StartIndices, typename StopIndices, typename Strides, typename ArgType, typename Device>
struct TensorEvaluator<TensorStridingSlicingOp<StartIndices, StopIndices, Strides, ArgType>, Device>
  : public TensorEvaluator<const TensorStridingSlicingOp<StartIndices, StopIndices, Strides, ArgType>, Device>
{
  typedef TensorEvaluator<const TensorStridingSlicingOp<StartIndices, StopIndices, Strides, ArgType>, Device> Base;
  typedef TensorStridingSlicingOp<StartIndices, StopIndices, Strides, ArgType> XprType;
  static const int NumDims = internal::array_size<Strides>::value;

  typedef typename XprType::Index Index;
  typedef typename XprType::CoeffReturnType CoeffReturnType;

  enum {
    IsAligned         = false,
    PacketAccess      = false,
    BlockAccess       = false,
    PreferBlockAccess = TensorEvaluator<ArgType, Device>::PreferBlockAccess,
    Layout            = TensorEvaluator<ArgType, Device>::Layout,
    CoordAccess       = TensorEvaluator<ArgType, Device>::CoordAccess,
    RawAccess         = false
  };

  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
    : Base(op, device)
  { }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index)
  {
    if (this->m_is_identity) {
      return this->m_impl.coeffRef(index);
    } else {
      return this->m_impl.coeffRef(this->srcCoeff(index));
    }
  }
};

} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_MORPHING_H