#ifndef EIGEN_CXX11_TENSOR_TENSOR_MORPHING_H
#define EIGEN_CXX11_TENSOR_TENSOR_MORPHING_H

namespace Eigen {
namespace internal {

// A reshape expression keeps the traits of the wrapped expression: only the
// dimensions change, not the scalar type, storage kind, index type or layout.
template<typename NewDimensions, typename XprType>
struct traits<TensorReshapingOp<NewDimensions, XprType> > : public traits<XprType>
{
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
  static const int NumDimensions = array_size<NewDimensions>::value;
  static const int Layout = XprTraits::Layout;
};
template<typename NewDimensions, typename XprType>
struct eval<TensorReshapingOp<NewDimensions, XprType>, Eigen::Dense>
{
  typedef const TensorReshapingOp<NewDimensions, XprType>& type;
};
template<typename NewDimensions, typename XprType>
struct nested<TensorReshapingOp<NewDimensions, XprType>, 1, typename eval<TensorReshapingOp<NewDimensions, XprType> >::type>
{
  typedef TensorReshapingOp<NewDimensions, XprType> type;
};

}  // end namespace internal
template<typename NewDimensions, typename XprType>
class TensorReshapingOp : public TensorBase<TensorReshapingOp<NewDimensions, XprType>, WriteAccessors>
{
  public:
    typedef typename internal::remove_const<typename XprType::CoeffReturnType>::type CoeffReturnType;

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorReshapingOp(const XprType& expr, const NewDimensions& dims)
        : m_xpr(expr), m_dims(dims) {}

    EIGEN_DEVICE_FUNC
    const NewDimensions& dimensions() const { return m_dims; }

    EIGEN_DEVICE_FUNC
    const typename internal::remove_all<typename XprType::Nested>::type&
    expression() const { return m_xpr; }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorReshapingOp& operator = (const TensorReshapingOp& other)
    {
      typedef TensorAssignOp<TensorReshapingOp, const TensorReshapingOp> Assign;
      Assign assign(*this, other);
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }

    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorReshapingOp& operator = (const OtherDerived& other)
    {
      typedef TensorAssignOp<TensorReshapingOp, const OtherDerived> Assign;
      Assign assign(*this, other);
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }

  protected:
    typename XprType::Nested m_xpr;
    const NewDimensions m_dims;
};
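// Usage sketch (assuming the public API from <unsupported/Eigen/CXX11/Tensor>):
// reshape() reinterprets the same flat data with new dimensions, so the total
// number of coefficients must not change.
//
//   Eigen::Tensor<float, 2> input(4, 6);
//   input.setRandom();
//   Eigen::array<Eigen::DenseIndex, 3> new_dims{{2, 2, 6}};
//   Eigen::Tensor<float, 3> reshaped = input.reshape(new_dims);  // 4*6 == 2*2*6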
// Eval as rvalue
template<typename NewDimensions, typename ArgType, typename Device>
struct TensorEvaluator<const TensorReshapingOp<NewDimensions, ArgType>, Device>
{
  typedef TensorReshapingOp<NewDimensions, ArgType> XprType;
  typedef NewDimensions Dimensions;
  typedef typename XprType::Index Index;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device), m_dimensions(op.dimensions())
  {
    // The total size of the reshaped tensor must be equal to the total size
    // of the input tensor.
    eigen_assert(internal::array_prod(m_impl.dimensions()) == internal::array_prod(op.dimensions()));
  }

  EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_dimensions; }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) {
    // Reshaping is a relabeling of the dimensions: evaluation (and any
    // destination buffer) is forwarded verbatim to the wrapped expression.
    return m_impl.evalSubExprsIfNeeded(data);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
    m_impl.cleanup();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    return m_impl.coeff(index);
  }

  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
  {
    return m_impl.template packet<LoadMode>(index);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    return m_impl.costPerCoeff(vectorized);
  }

  EIGEN_DEVICE_FUNC Scalar* data() const { return const_cast<Scalar*>(m_impl.data()); }

 protected:
  TensorEvaluator<ArgType, Device> m_impl;
  NewDimensions m_dimensions;
};
// Eval as lvalue
template<typename NewDimensions, typename ArgType, typename Device>
struct TensorEvaluator<TensorReshapingOp<NewDimensions, ArgType>, Device>
  : public TensorEvaluator<const TensorReshapingOp<NewDimensions, ArgType>, Device>
{
  typedef TensorEvaluator<const TensorReshapingOp<NewDimensions, ArgType>, Device> Base;
  typedef TensorReshapingOp<NewDimensions, ArgType> XprType;
  typedef typename XprType::Index Index;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : Base(op, device)
  { }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index)
  {
    return this->m_impl.coeffRef(index);
  }

  template <int StoreMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacket(Index index, const PacketReturnType& x)
  {
    this->m_impl.template writePacket<StoreMode>(index, x);
  }
};
namespace internal {

template<typename StartIndices, typename Sizes, typename XprType>
struct traits<TensorSlicingOp<StartIndices, Sizes, XprType> > : public traits<XprType>
{
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
  static const int NumDimensions = array_size<StartIndices>::value;
  static const int Layout = XprTraits::Layout;
};
template<typename StartIndices, typename Sizes, typename XprType>
struct eval<TensorSlicingOp<StartIndices, Sizes, XprType>, Eigen::Dense>
{
  typedef const TensorSlicingOp<StartIndices, Sizes, XprType>& type;
};
template<typename StartIndices, typename Sizes, typename XprType>
struct nested<TensorSlicingOp<StartIndices, Sizes, XprType>, 1, typename eval<TensorSlicingOp<StartIndices, Sizes, XprType> >::type>
{
  typedef TensorSlicingOp<StartIndices, Sizes, XprType> type;
};

}  // end namespace internal
template<typename StartIndices, typename Sizes, typename XprType>
class TensorSlicingOp : public TensorBase<TensorSlicingOp<StartIndices, Sizes, XprType> >
{
  public:
    typedef typename XprType::CoeffReturnType CoeffReturnType;

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorSlicingOp(const XprType& expr, const StartIndices& indices, const Sizes& sizes)
        : m_xpr(expr), m_indices(indices), m_sizes(sizes) {}

    EIGEN_DEVICE_FUNC
    const StartIndices& startIndices() const { return m_indices; }
    EIGEN_DEVICE_FUNC
    const Sizes& sizes() const { return m_sizes; }

    EIGEN_DEVICE_FUNC
    const typename internal::remove_all<typename XprType::Nested>::type&
    expression() const { return m_xpr; }

    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorSlicingOp& operator = (const OtherDerived& other)
    {
      typedef TensorAssignOp<TensorSlicingOp, const OtherDerived> Assign;
      Assign assign(*this, other);
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorSlicingOp& operator = (const TensorSlicingOp& other)
    {
      typedef TensorAssignOp<TensorSlicingOp, const TensorSlicingOp> Assign;
      Assign assign(*this, other);
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }

  protected:
    typename XprType::Nested m_xpr;
    const StartIndices m_indices;
    const Sizes m_sizes;
};
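// Usage sketch (assuming the public API from <unsupported/Eigen/CXX11/Tensor>):
// slice() extracts a rectangular block given per-dimension offsets and extents.
//
//   Eigen::Tensor<int, 2> a(4, 6);
//   a.setRandom();
//   Eigen::array<Eigen::DenseIndex, 2> offsets{{1, 2}};
//   Eigen::array<Eigen::DenseIndex, 2> extents{{2, 3}};
//   Eigen::Tensor<int, 2> b = a.slice(offsets, extents);  // b(i,j) == a(1+i, 2+j)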
namespace internal {

// Decides whether a slice evaluation is big enough to be worth a bulk memcpy
// instead of the generic coefficient-by-coefficient copy.
template <typename Index, typename Device> struct MemcpyTriggerForSlicing {
  EIGEN_DEVICE_FUNC MemcpyTriggerForSlicing(const Device& device) : threshold_(2 * device.numThreads()) { }
  EIGEN_DEVICE_FUNC bool operator ()(Index val) const { return val > threshold_; }

 private:
  Index threshold_;
};
// It is very expensive to start the memcpy kernel on GPU: we therefore only
// use it for large copies.
#ifdef EIGEN_USE_GPU
template <typename Index> struct MemcpyTriggerForSlicing<Index, GpuDevice> {
  EIGEN_DEVICE_FUNC MemcpyTriggerForSlicing(const GpuDevice&) { }
  EIGEN_DEVICE_FUNC bool operator ()(Index val) const { return val > 4*1024*1024; }
};
#endif

}  // end namespace internal
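// For instance, on a CPU device exposing 4 threads the generic trigger fires
// once a contiguous run exceeds 2 * 4 = 8 coefficients, whereas the GpuDevice
// specialization requires more than 4*1024*1024 values before the fixed cost
// of launching a copy kernel pays off.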
// Eval as rvalue
template<typename StartIndices, typename Sizes, typename ArgType, typename Device>
struct TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Device>
{
  typedef TensorSlicingOp<StartIndices, Sizes, ArgType> XprType;
  static const int NumDims = internal::array_size<Sizes>::value;
  typedef typename XprType::Index Index;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  typedef Sizes Dimensions;

  enum {
    // Alignment can't be guaranteed at compile time since it depends on the
    // slice offsets.
    IsAligned = false,
    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
    Layout = TensorEvaluator<ArgType, Device>::Layout,
    RawAccess = false
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device), m_device(device), m_dimensions(op.sizes()), m_offsets(op.startIndices())
  {
    const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
    const Sizes& output_dims = op.sizes();
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_inputStrides[0] = 1;
      for (int i = 1; i < NumDims; ++i) {
        m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1];
      }

      // Don't initialize m_fastOutputStrides[0] since it won't ever be accessed.
      m_outputStrides[0] = 1;
      for (int i = 1; i < NumDims; ++i) {
        m_outputStrides[i] = m_outputStrides[i-1] * output_dims[i-1];
        m_fastOutputStrides[i] = internal::TensorIntDivisor<Index>(m_outputStrides[i]);
      }
    } else {
      m_inputStrides[NumDims-1] = 1;
      for (int i = NumDims - 2; i >= 0; --i) {
        m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1];
      }

      // Don't initialize m_fastOutputStrides[NumDims-1] since it won't ever be accessed.
      m_outputStrides[NumDims-1] = 1;
      for (int i = NumDims - 2; i >= 0; --i) {
        m_outputStrides[i] = m_outputStrides[i+1] * output_dims[i+1];
        m_fastOutputStrides[i] = internal::TensorIntDivisor<Index>(m_outputStrides[i]);
      }
    }
  }
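  // For example, taking a 2x3 col-major slice of a 4x6 tensor yields
  // m_inputStrides = {1, 4} (derived from the input shape) and
  // m_outputStrides = {1, 2} (derived from the slice shape); srcCoeff() below
  // combines them with m_offsets to map output indices to input indices.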
  EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_dimensions; }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) {
    m_impl.evalSubExprsIfNeeded(NULL);
    if (!NumTraits<typename internal::remove_const<Scalar>::type>::RequireInitialization
        && data && m_impl.data()) {
      // Count how many coefficients at a time form one contiguous run: the
      // dimensions accumulate up to and including the first one that is cut.
      Index contiguous_values = 1;
      if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
        for (int i = 0; i < NumDims; ++i) {
          contiguous_values *= dimensions()[i];
          if (dimensions()[i] != m_impl.dimensions()[i]) {
            break;
          }
        }
      } else {
        for (int i = NumDims-1; i >= 0; --i) {
          contiguous_values *= dimensions()[i];
          if (dimensions()[i] != m_impl.dimensions()[i]) {
            break;
          }
        }
      }
      // Use memcpy if it's going to be faster than the regular evaluation.
      const internal::MemcpyTriggerForSlicing<Index, Device> trigger(m_device);
      if (trigger(contiguous_values)) {
        Scalar* src = (Scalar*)m_impl.data();
        for (int i = 0; i < internal::array_prod(dimensions()); i += contiguous_values) {
          Index offset = srcCoeff(i);
          m_device.memcpy((void*)(data+i), src+offset, contiguous_values * sizeof(Scalar));
        }
        return false;
      }
    }
    return true;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
    m_impl.cleanup();
  }
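  // For example, a col-major 4x3 slice at offset (0, 2) of a 4x6 tensor keeps
  // dimension 0 intact, so evalSubExprsIfNeeded() above accumulates
  // contiguous_values = 4 * 3 = 12 and copies the whole slice with a single
  // memcpy starting at srcCoeff(0) = 2 * 4 = 8.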
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    return m_impl.coeff(srcCoeff(index));
  }
  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
  {
    const int packetSize = internal::unpacket_traits<PacketReturnType>::size;
    EIGEN_STATIC_ASSERT(packetSize > 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
    eigen_assert(index+packetSize-1 < internal::array_prod(dimensions()));

    // Map the first and last coefficient of the packet back to the input; if
    // they turn out to be contiguous, the whole packet can be loaded at once.
    Index inputIndices[] = {0, 0};
    Index indices[] = {index, index + packetSize - 1};
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = NumDims - 1; i > 0; --i) {
        const Index idx0 = indices[0] / m_fastOutputStrides[i];
        const Index idx1 = indices[1] / m_fastOutputStrides[i];
        inputIndices[0] += (idx0 + m_offsets[i]) * m_inputStrides[i];
        inputIndices[1] += (idx1 + m_offsets[i]) * m_inputStrides[i];
        indices[0] -= idx0 * m_outputStrides[i];
        indices[1] -= idx1 * m_outputStrides[i];
      }
      inputIndices[0] += (indices[0] + m_offsets[0]);
      inputIndices[1] += (indices[1] + m_offsets[0]);
    } else {
      for (int i = 0; i < NumDims - 1; ++i) {
        const Index idx0 = indices[0] / m_fastOutputStrides[i];
        const Index idx1 = indices[1] / m_fastOutputStrides[i];
        inputIndices[0] += (idx0 + m_offsets[i]) * m_inputStrides[i];
        inputIndices[1] += (idx1 + m_offsets[i]) * m_inputStrides[i];
        indices[0] -= idx0 * m_outputStrides[i];
        indices[1] -= idx1 * m_outputStrides[i];
      }
      inputIndices[0] += (indices[0] + m_offsets[NumDims-1]);
      inputIndices[1] += (indices[1] + m_offsets[NumDims-1]);
    }
    if (inputIndices[1] - inputIndices[0] == packetSize - 1) {
      PacketReturnType rslt = m_impl.template packet<Unaligned>(inputIndices[0]);
      return rslt;
    }
    else {
      EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[packetSize];
      values[0] = m_impl.coeff(inputIndices[0]);
      values[packetSize-1] = m_impl.coeff(inputIndices[1]);
      for (int i = 1; i < packetSize-1; ++i) {
        values[i] = coeff(index+i);
      }
      PacketReturnType rslt = internal::pload<PacketReturnType>(values);
      return rslt;
    }
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, NumDims);
  }
  // Returns a pointer directly into the input buffer when the slice is a
  // contiguous sub-block of the input, and NULL otherwise.
  EIGEN_DEVICE_FUNC Scalar* data() const {
    Scalar* result = m_impl.data();
    if (result) {
      Index offset = 0;
      if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
        for (int i = 0; i < NumDims; ++i) {
          if (m_dimensions[i] != m_impl.dimensions()[i]) {
            offset += m_offsets[i] * m_inputStrides[i];
            // Once a dimension is cut, every remaining dimension must have
            // size 1 for the slice to stay contiguous in memory.
            for (int j = i+1; j < NumDims; ++j) {
              if (m_dimensions[j] > 1) {
                return NULL;
              }
              offset += m_offsets[j] * m_inputStrides[j];
            }
            break;
          }
        }
      } else {
        for (int i = NumDims - 1; i >= 0; --i) {
          if (m_dimensions[i] != m_impl.dimensions()[i]) {
            offset += m_offsets[i] * m_inputStrides[i];
            for (int j = i-1; j >= 0; --j) {
              if (m_dimensions[j] > 1) {
                return NULL;
              }
              offset += m_offsets[j] * m_inputStrides[j];
            }
            break;
          }
        }
      }
      return result + offset;
    }
    return NULL;
  }
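  // Worked example for the index translation in srcCoeff() below (col-major
  // 2x3 slice of a 4x6 tensor at offsets (1, 2), so m_inputStrides = {1, 4}
  // and m_outputStrides = {1, 2}): for output index 3, idx = 3 / 2 = 1, so
  // inputIndex += (1 + 2) * 4 = 12 and the remainder is 3 - 1*2 = 1; then
  // inputIndex += 1 + 1 = 14, i.e. input coordinate (2, 3), which is slice
  // coordinate (1, 1) shifted by the offsets.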
 protected:
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const
  {
    Index inputIndex = 0;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = NumDims - 1; i > 0; --i) {
        const Index idx = index / m_fastOutputStrides[i];
        inputIndex += (idx + m_offsets[i]) * m_inputStrides[i];
        index -= idx * m_outputStrides[i];
      }
      inputIndex += (index + m_offsets[0]);
    } else {
      for (int i = 0; i < NumDims - 1; ++i) {
        const Index idx = index / m_fastOutputStrides[i];
        inputIndex += (idx + m_offsets[i]) * m_inputStrides[i];
        index -= idx * m_outputStrides[i];
      }
      inputIndex += (index + m_offsets[NumDims-1]);
    }
    return inputIndex;
  }

  array<Index, NumDims> m_outputStrides;
  array<internal::TensorIntDivisor<Index>, NumDims> m_fastOutputStrides;
  array<Index, NumDims> m_inputStrides;
  TensorEvaluator<ArgType, Device> m_impl;
  const Device& m_device;
  Dimensions m_dimensions;
  const StartIndices m_offsets;
};
// Eval as lvalue
template<typename StartIndices, typename Sizes, typename ArgType, typename Device>
struct TensorEvaluator<TensorSlicingOp<StartIndices, Sizes, ArgType>, Device>
  : public TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Device>
{
  typedef TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Device> Base;
  typedef TensorSlicingOp<StartIndices, Sizes, ArgType> XprType;
  static const int NumDims = internal::array_size<Sizes>::value;
  typedef typename XprType::Index Index;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : Base(op, device)
  { }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index)
  {
    return this->m_impl.coeffRef(this->srcCoeff(index));
  }
  template <int StoreMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void writePacket(Index index, const PacketReturnType& x)
  {
    const int packetSize = internal::unpacket_traits<PacketReturnType>::size;
    Index inputIndices[] = {0, 0};
    Index indices[] = {index, index + packetSize - 1};
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = NumDims - 1; i > 0; --i) {
        const Index idx0 = indices[0] / this->m_fastOutputStrides[i];
        const Index idx1 = indices[1] / this->m_fastOutputStrides[i];
        inputIndices[0] += (idx0 + this->m_offsets[i]) * this->m_inputStrides[i];
        inputIndices[1] += (idx1 + this->m_offsets[i]) * this->m_inputStrides[i];
        indices[0] -= idx0 * this->m_outputStrides[i];
        indices[1] -= idx1 * this->m_outputStrides[i];
      }
      inputIndices[0] += (indices[0] + this->m_offsets[0]);
      inputIndices[1] += (indices[1] + this->m_offsets[0]);
    } else {
      for (int i = 0; i < NumDims - 1; ++i) {
        const Index idx0 = indices[0] / this->m_fastOutputStrides[i];
        const Index idx1 = indices[1] / this->m_fastOutputStrides[i];
        inputIndices[0] += (idx0 + this->m_offsets[i]) * this->m_inputStrides[i];
        inputIndices[1] += (idx1 + this->m_offsets[i]) * this->m_inputStrides[i];
        indices[0] -= idx0 * this->m_outputStrides[i];
        indices[1] -= idx1 * this->m_outputStrides[i];
      }
      inputIndices[0] += (indices[0] + this->m_offsets[NumDims-1]);
      inputIndices[1] += (indices[1] + this->m_offsets[NumDims-1]);
    }
    if (inputIndices[1] - inputIndices[0] == packetSize - 1) {
      // The packet maps to contiguous input coefficients: store it directly.
      this->m_impl.template writePacket<StoreMode>(inputIndices[0], x);
    }
    else {
      // Otherwise scatter the packet coefficient by coefficient.
      EIGEN_ALIGN_MAX CoeffReturnType values[packetSize];
      internal::pstore<CoeffReturnType, PacketReturnType>(values, x);
      this->m_impl.coeffRef(inputIndices[0]) = values[0];
      this->m_impl.coeffRef(inputIndices[1]) = values[packetSize-1];
      for (int i = 1; i < packetSize-1; ++i) {
        this->coeffRef(index+i) = values[i];
      }
    }
  }
};
namespace internal {

template<typename StartIndices, typename StopIndices, typename Strides, typename XprType>
struct traits<TensorStridingSlicingOp<StartIndices, StopIndices, Strides, XprType> > : public traits<XprType>
{
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
  static const int NumDimensions = array_size<StartIndices>::value;
  static const int Layout = XprTraits::Layout;
};
template<typename StartIndices, typename StopIndices, typename Strides, typename XprType>
struct eval<TensorStridingSlicingOp<StartIndices, StopIndices, Strides, XprType>, Eigen::Dense>
{
  typedef const TensorStridingSlicingOp<StartIndices, StopIndices, Strides, XprType>& type;
};
template<typename StartIndices, typename StopIndices, typename Strides, typename XprType>
struct nested<TensorStridingSlicingOp<StartIndices, StopIndices, Strides, XprType>, 1, typename eval<TensorStridingSlicingOp<StartIndices, StopIndices, Strides, XprType> >::type>
{
  typedef TensorStridingSlicingOp<StartIndices, StopIndices, Strides, XprType> type;
};

}  // end namespace internal
template<typename StartIndices, typename StopIndices, typename Strides, typename XprType>
class TensorStridingSlicingOp : public TensorBase<TensorStridingSlicingOp<StartIndices, StopIndices, Strides, XprType> >
{
  public:
    typedef typename internal::traits<TensorStridingSlicingOp>::Scalar Scalar;
    typedef typename XprType::CoeffReturnType CoeffReturnType;

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorStridingSlicingOp(
        const XprType& expr, const StartIndices& startIndices,
        const StopIndices& stopIndices, const Strides& strides)
        : m_xpr(expr), m_startIndices(startIndices), m_stopIndices(stopIndices),
          m_strides(strides) {}

    EIGEN_DEVICE_FUNC
    const StartIndices& startIndices() const { return m_startIndices; }
    EIGEN_DEVICE_FUNC
    const StopIndices& stopIndices() const { return m_stopIndices; }
    EIGEN_DEVICE_FUNC
    const Strides& strides() const { return m_strides; }

    EIGEN_DEVICE_FUNC
    const typename internal::remove_all<typename XprType::Nested>::type&
    expression() const { return m_xpr; }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorStridingSlicingOp& operator = (const TensorStridingSlicingOp& other)
    {
      typedef TensorAssignOp<TensorStridingSlicingOp, const TensorStridingSlicingOp> Assign;
      Assign assign(*this, other);
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }

    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorStridingSlicingOp& operator = (const OtherDerived& other)
    {
      typedef TensorAssignOp<TensorStridingSlicingOp, const OtherDerived> Assign;
      Assign assign(*this, other);
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }

  protected:
    typename XprType::Nested m_xpr;
    const StartIndices m_startIndices;
    const StopIndices m_stopIndices;
    const Strides m_strides;
};
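// Usage sketch (assuming the public API from <unsupported/Eigen/CXX11/Tensor>):
// stridedSlice() takes start/stop/stride per dimension, numpy-style; negative
// strides walk the dimension backwards, with stop excluded.
//
//   Eigen::Tensor<int, 1> v(10);
//   v.setValues({0, 1, 2, 3, 4, 5, 6, 7, 8, 9});
//   Eigen::array<Eigen::DenseIndex, 1> start{{7}}, stop{{1}}, strides{{-2}};
//   Eigen::Tensor<int, 1> w = v.stridedSlice(start, stop, strides);  // {7, 5, 3}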
// Eval as rvalue
template<typename StartIndices, typename StopIndices, typename Strides, typename ArgType, typename Device>
struct TensorEvaluator<const TensorStridingSlicingOp<StartIndices, StopIndices, Strides, ArgType>, Device>
{
  typedef TensorStridingSlicingOp<StartIndices, StopIndices, Strides, ArgType> XprType;
  static const int NumDims = internal::array_size<Strides>::value;
  typedef typename XprType::Index Index;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  typedef DSizes<Index, NumDims> Dimensions;

  enum {
    // Alignment can't be guaranteed at compile time since it depends on the
    // slice offsets, and packet loads can't cross the non-unit input strides.
    IsAligned = false,
    PacketAccess = false,
    Layout = TensorEvaluator<ArgType, Device>::Layout,
    RawAccess = false
  };
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device), m_device(device), m_strides(op.strides())
  {
    // Handle degenerate intervals by gracefully clamping and allowing
    // m_dimensions to be zero.
    DSizes<Index, NumDims> startIndicesClamped, stopIndicesClamped;
    for (size_t i = 0; i < internal::array_size<Dimensions>::value; ++i) {
      eigen_assert(m_strides[i] != 0 && "0 stride is invalid");
      if (m_strides[i] > 0) {
        startIndicesClamped[i] = clamp(op.startIndices()[i], 0, m_impl.dimensions()[i]);
        stopIndicesClamped[i] = clamp(op.stopIndices()[i], 0, m_impl.dimensions()[i]);
      } else {
        // Negative stride: the dimension is walked backwards, so the valid
        // index range shifts down by one.
        startIndicesClamped[i] = clamp(op.startIndices()[i], -1, m_impl.dimensions()[i] - 1);
        stopIndicesClamped[i] = clamp(op.stopIndices()[i], -1, m_impl.dimensions()[i] - 1);
      }
      m_startIndices[i] = startIndicesClamped[i];
    }

    const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();

    // Check for degenerate intervals and compute the output tensor shape.
    bool degenerate = false;
    for (int i = 0; i < NumDims; i++) {
      Index interval = stopIndicesClamped[i] - startIndicesClamped[i];
      if (interval == 0 || ((interval < 0) != (m_strides[i] < 0))) {
        m_dimensions[i] = 0;
        degenerate = true;
      } else {
        m_dimensions[i] = interval / m_strides[i]
                        + (interval % m_strides[i] != 0 ? 1 : 0);
        eigen_assert(m_dimensions[i] >= 0);
      }
    }
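    // For example, a dimension of size 10 sliced with start = 7, stop = 1 and
    // stride = -2 clamps to the range [-1, 9], gives interval = 1 - 7 = -6 and
    // a size of -6 / -2 + 0 = 3 (elements 7, 5 and 3).  If the interval and
    // the stride disagree in sign, the result is degenerate (size 0).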
    Strides output_dims = m_dimensions;

    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_inputStrides[0] = m_strides[0];
      m_offsets[0] = startIndicesClamped[0];
      Index previousDimProduct = 1;
      for (int i = 1; i < NumDims; ++i) {
        previousDimProduct *= input_dims[i-1];
        m_inputStrides[i] = previousDimProduct * m_strides[i];
        m_offsets[i] = startIndicesClamped[i] * previousDimProduct;
      }

      m_outputStrides[0] = 1;
      // srcCoeff() below divides by every fast output stride, including
      // index 0, so initialize it with the matching divisor of 1.
      m_fastOutputStrides[0] = internal::TensorIntDivisor<Index>(1);
      for (int i = 1; i < NumDims; ++i) {
        m_outputStrides[i] = m_outputStrides[i-1] * output_dims[i-1];
        // NOTE: if the tensor is degenerate, pass 1 to keep the
        // TensorIntDivisor constructor from dividing by zero.
        m_fastOutputStrides[i] = internal::TensorIntDivisor<Index>(degenerate ? 1 : m_outputStrides[i]);
      }
    } else {
      m_inputStrides[NumDims-1] = m_strides[NumDims-1];
      m_offsets[NumDims-1] = startIndicesClamped[NumDims-1];
      Index previousDimProduct = 1;
      for (int i = NumDims - 2; i >= 0; --i) {
        previousDimProduct *= input_dims[i+1];
        m_inputStrides[i] = previousDimProduct * m_strides[i];
        m_offsets[i] = startIndicesClamped[i] * previousDimProduct;
      }

      m_outputStrides[NumDims-1] = 1;
      m_fastOutputStrides[NumDims-1] = internal::TensorIntDivisor<Index>(1);
      for (int i = NumDims - 2; i >= 0; --i) {
        m_outputStrides[i] = m_outputStrides[i+1] * output_dims[i+1];
        m_fastOutputStrides[i] = internal::TensorIntDivisor<Index>(degenerate ? 1 : m_outputStrides[i]);
      }
    }
    m_block_total_size_max = numext::maxi(static_cast<std::size_t>(1),
                                          device.lastLevelCacheSize() /
                                          sizeof(Scalar));
  }

  EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_dimensions; }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType*) {
    m_impl.evalSubExprsIfNeeded(NULL);
    return true;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
    m_impl.cleanup();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    return m_impl.coeff(srcCoeff(index));
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, NumDims);
  }
 protected:
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const
  {
    Index inputIndex = 0;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = NumDims - 1; i >= 0; --i) {
        const Index idx = index / m_fastOutputStrides[i];
        inputIndex += idx * m_inputStrides[i] + m_offsets[i];
        index -= idx * m_outputStrides[i];
      }
    } else {
      for (int i = 0; i < NumDims; ++i) {
        const Index idx = index / m_fastOutputStrides[i];
        inputIndex += idx * m_inputStrides[i] + m_offsets[i];
        index -= idx * m_outputStrides[i];
      }
    }
    return inputIndex;
  }

  static EIGEN_STRONG_INLINE Index clamp(Index value, Index min, Index max) {
    return numext::maxi(min, numext::mini(max, value));
  }

  array<Index, NumDims> m_outputStrides;
  array<internal::TensorIntDivisor<Index>, NumDims> m_fastOutputStrides;
  array<Index, NumDims> m_inputStrides;
  TensorEvaluator<ArgType, Device> m_impl;
  const Device& m_device;
  DSizes<Index, NumDims> m_dimensions;
  DSizes<Index, NumDims> m_offsets;       // offsets into the flattened input
  DSizes<Index, NumDims> m_startIndices;  // clamped start indices
  const Strides m_strides;
  std::size_t m_block_total_size_max;
};
// Eval as lvalue
template<typename StartIndices, typename StopIndices, typename Strides, typename ArgType, typename Device>
struct TensorEvaluator<TensorStridingSlicingOp<StartIndices, StopIndices, Strides, ArgType>, Device>
  : public TensorEvaluator<const TensorStridingSlicingOp<StartIndices, StopIndices, Strides, ArgType>, Device>
{
  typedef TensorEvaluator<const TensorStridingSlicingOp<StartIndices, StopIndices, Strides, ArgType>, Device> Base;
  typedef TensorStridingSlicingOp<StartIndices, StopIndices, Strides, ArgType> XprType;
  typedef typename XprType::Index Index;
  typedef typename XprType::CoeffReturnType CoeffReturnType;

  enum {
    IsAligned = false,
    PacketAccess = false,
    Layout = TensorEvaluator<ArgType, Device>::Layout,
    RawAccess = false
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : Base(op, device)
  { }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index)
  {
    return this->m_impl.coeffRef(this->srcCoeff(index));
  }
};
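// Usage sketch (assuming the public API from <unsupported/Eigen/CXX11/Tensor>):
// the lvalue evaluator above lets a strided slice sit on the left-hand side
// of an assignment.
//
//   Eigen::Tensor<float, 1> t(10);
//   t.setZero();
//   Eigen::array<Eigen::DenseIndex, 1> start{{0}}, stop{{10}}, inc{{2}};
//   Eigen::Tensor<float, 1> ones(5);
//   ones.setConstant(1.f);
//   t.stridedSlice(start, stop, inc) = ones;  // writes t(0), t(2), ..., t(8)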
} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_MORPHING_H