#ifndef EIGEN_CXX11_TENSOR_TENSOR_BLOCK_H
#define EIGEN_CXX11_TENSOR_TENSOR_BLOCK_H
namespace Eigen {
namespace internal {

// Forward declaration.
template <typename Scalar, typename IndexType, int NumDims, int Layout>
class TensorBlockIO;

// Helper function to compute strides for a densely stored buffer of given
// dimensions.
template <int Layout, typename IndexType, int NumDims>
EIGEN_ALWAYS_INLINE DSizes<IndexType, NumDims> strides(
    const DSizes<IndexType, NumDims>& dimensions) {
  DSizes<IndexType, NumDims> strides;
  if (NumDims == 0) return strides;

  if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
    strides[0] = 1;
    for (int i = 1; i < NumDims; ++i) {
      strides[i] = strides[i - 1] * dimensions[i - 1];
    }
  } else {
    strides[NumDims - 1] = 1;
    for (int i = NumDims - 2; i >= 0; --i) {
      strides[i] = strides[i + 1] * dimensions[i + 1];
    }
  }
  return strides;
}

// Overloads of the helper above for Eigen::array and Sizes<...> dimensions.
template <int Layout, typename IndexType, size_t NumDims>
EIGEN_ALWAYS_INLINE DSizes<IndexType, NumDims> strides(
    const Eigen::array<IndexType, NumDims>& dimensions) {
  return strides<Layout>(DSizes<IndexType, NumDims>(dimensions));
}

template <int Layout, std::ptrdiff_t... Indices>
EIGEN_STRONG_INLINE DSizes<std::ptrdiff_t, sizeof...(Indices)> strides(
    const Sizes<Indices...>& sizes) {
  return strides<Layout>(DSizes<std::ptrdiff_t, sizeof...(Indices)>(sizes));
}
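
A minimal usage sketch of the strides() helper above (assuming the unsupported Tensor module header; variable names are illustrative): column-major strides grow along the inner dimensions, row-major strides along the outer ones.

#include <unsupported/Eigen/CXX11/Tensor>

void strides_sketch() {
  Eigen::DSizes<Eigen::Index, 3> dims(4, 5, 6);
  // Dense 4x5x6 buffer: column-major strides are (1, 4, 20) ...
  Eigen::DSizes<Eigen::Index, 3> cm =
      Eigen::internal::strides<Eigen::ColMajor>(dims);
  // ... and row-major strides are (30, 6, 1).
  Eigen::DSizes<Eigen::Index, 3> rm =
      Eigen::internal::strides<Eigen::RowMajor>(dims);
  EIGEN_UNUSED_VARIABLE(cm);
  EIGEN_UNUSED_VARIABLE(rm);
}
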
// Block shape, size and per-coefficient cost requested by an expression;
// requirements of sub-expressions are merged for the whole expression tree.
struct TensorBlockResourceRequirements {
  TensorBlockShapeType shape_type;
  size_t size;
  TensorOpCost cost_per_coeff;

  template <typename Scalar>
  EIGEN_DEVICE_FUNC static TensorBlockResourceRequirements withShapeAndSize(
      TensorBlockShapeType shape_type, size_t size_in_bytes, TensorOpCost cost);

  template <typename Scalar>
  EIGEN_DEVICE_FUNC static TensorBlockResourceRequirements withShapeAndSize(
      TensorBlockShapeType shape_type, size_t size_in_bytes) {
    return withShapeAndSize<Scalar>(shape_type, size_in_bytes,
                                    /* ... default per-coefficient cost ... */);
  }

  template <typename Scalar>
  EIGEN_DEVICE_FUNC static TensorBlockResourceRequirements skewed(size_t size_in_bytes);
  template <typename Scalar>
  EIGEN_DEVICE_FUNC static TensorBlockResourceRequirements uniform(size_t size_in_bytes);

  // ... merge() helpers; merging two per-coefficient costs is just their sum:
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost merge(
      TensorOpCost lhs_cost, TensorOpCost rhs_cost) {
    return lhs_cost + rhs_cost;
  }
};
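
A hedged sketch of how an evaluator might describe its block preference with the constructors above (the byte budget is an arbitrary illustrative value; include as in the first sketch):

void requirements_sketch() {
  // Ask for blocks skewed towards the inner dimensions, roughly 64 KiB each.
  Eigen::internal::TensorBlockResourceRequirements req =
      Eigen::internal::TensorBlockResourceRequirements::skewed<float>(64 * 1024);

  // Requirements of two sub-expressions are combined with merge(); the
  // per-coefficient costs add up:
  // merged = TensorBlockResourceRequirements::merge(req, other_req);
  EIGEN_UNUSED_VARIABLE(req);
}
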
// TensorBlockDescriptor: specifies a tensor block with the offset of its first
// coefficient in the original tensor, its dimensions, and an optional
// destination buffer into which the block may be materialized directly.
template <int NumDims, typename IndexType = Eigen::Index>
class TensorBlockDescriptor {
 public:
  typedef DSizes<IndexType, NumDims> Dimensions;

  // DestinationBuffer describes the optional pre-allocated output memory:
  // kEmpty (no buffer), kContiguous (dense layout) or kStrided.
  class DestinationBuffer {
   public:
    template <typename Scalar>
    Scalar* data() const;  // ...
    const Dimensions& strides() const;  // ...
    const DestinationBufferKind& kind() const;  // ...

   private:
    friend class TensorBlockDescriptor;

    template <typename Scalar>
    DestinationBuffer(Scalar* data, const Dimensions& strides,
                      DestinationBufferKind kind);  // ...

    template <int Layout, typename Scalar>
    static DestinationBuffer make(const TensorBlockDescriptor& desc,
                                  Scalar* data, const Dimensions& strides);

    // Compare the descriptor's dense strides with the buffer strides to decide
    // whether the destination is contiguous or merely strided.
    template <int Layout>
    static DestinationBufferKind kind(const TensorBlockDescriptor& desc,
                                      const Dimensions& strides) {
      const Dimensions& desc_dims = desc.dimensions();
      const Dimensions& desc_strides = internal::strides<Layout>(desc_dims);
      for (int i = 0; i < NumDims; ++i) {
        if (desc_dims[i] == 1) continue;
        // ... a stride mismatch in any non-trivial dimension => kStrided ...
      }
      // ...
    }
  };

  // ... constructors, offset(), dimensions(), size(), destination(),
  //     DropDestinationBuffer(), WithOffset(), etc. ...

  template <int Layout, typename Scalar>
  void AddDestinationBuffer(Scalar* dst_base, const Dimensions& dst_strides) {
    // ...
    m_destination =
        DestinationBuffer::template make<Layout>(*this, dst_base, dst_strides);
  }

  template <int Layout, typename Scalar, typename DstStridesIndexType>
  void AddDestinationBuffer(
      Scalar* dst_base, const DSizes<DstStridesIndexType, NumDims>& dst_strides) {
    // Convert the stride index type and forward to the overload above.
    AddDestinationBuffer<Layout>(dst_base, Dimensions(dst_strides));
  }

 private:
  // ...
  const Dimensions m_dimensions;
  DestinationBuffer m_destination;
};
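
A hedged sketch of constructing a block descriptor directly (in practice descriptors are produced by the TensorBlockMapper below; the offset and dimensions are illustrative, include as in the first sketch):

void descriptor_sketch() {
  typedef Eigen::internal::TensorBlockDescriptor<3, Eigen::Index> BlockDesc;

  BlockDesc::Dimensions block_dims(4, 5, 6);
  // A 4x5x6 block whose first coefficient lives at linear offset 100.
  BlockDesc desc(/*offset=*/100, block_dims);

  // An evaluator that owns an output buffer may attach it as a destination:
  // desc.AddDestinationBuffer<Eigen::ColMajor>(dst_ptr, dst_strides);
  EIGEN_UNUSED_VARIABLE(desc);
}
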
// TensorBlockMapper: splits a tensor of the given dimensions into blocks that
// satisfy the resource requirements, and maps a linear block index to a
// TensorBlockDescriptor (offset plus block dimensions).
template <int NumDims, int Layout, typename IndexType = Eigen::Index>
class TensorBlockMapper {
  typedef TensorBlockDescriptor<NumDims, IndexType> BlockDescriptor;

 public:
  typedef DSizes<IndexType, NumDims> Dimensions;

  TensorBlockMapper() = default;
  TensorBlockMapper(const DSizes<IndexType, NumDims>& dimensions,
                    const TensorBlockResourceRequirements& requirements)
      : m_tensor_dimensions(dimensions), m_requirements(requirements) {
    // Compute block dimensions and the total number of blocks over the tensor.
    InitializeBlockDimensions();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE IndexType blockCount() const;  // ...
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE IndexType blockTotalSize() const;  // ...
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const DSizes<IndexType, NumDims>&
  blockDimensions() const;  // ...

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE BlockDescriptor
  blockDescriptor(IndexType block_index) const {
    static const bool isColMajor = Layout == static_cast<int>(ColMajor);
    // ...
    // Peel the block coordinate off the linear block index one dimension at a
    // time, from the outermost dimension to the innermost one.
    for (int i = NumDims - 1; i >= 0; --i) {
      const int dim = isColMajor ? i : NumDims - i - 1;
      // ...
    }
    // ...
  }

 private:
  void InitializeBlockDimensions() {
    // Requested block shape and block size.
    const TensorBlockShapeType shape_type = m_requirements.shape_type;
    IndexType target_block_size = /* ... requested size in coefficients ... */;

    IndexType tensor_size = m_tensor_dimensions.TotalSize();

    // Corner case: an empty tensor yields zero blocks.
    if (tensor_size == 0) {
      for (int i = 0; i < NumDims; ++i) {
        m_block_dimensions[i] = 1;
      }
      m_total_block_count = 0;
      return;
    }

    // If the whole tensor fits into the target block size, use a single block.
    if (tensor_size <= target_block_size) {
      m_block_dimensions = m_tensor_dimensions;
      m_total_block_count = 1;
      for (int i = 0; i < NumDims; ++i) {
        // ... strides are not needed for a single block ...
      }
      return;
    }

    static const bool isColMajor = Layout == static_cast<int>(ColMajor);

    if (shape_type == TensorBlockShapeType::kSkewedInnerDims) {
      // Skewed shape: give as many coefficients as possible to the inner
      // dimensions, then push the remaining budget outwards.
      IndexType coeff_to_allocate = target_block_size;
      for (int i = 0; i < NumDims; ++i) {
        const int dim = isColMajor ? i : NumDims - i - 1;
        m_block_dimensions[dim] =
            numext::mini(coeff_to_allocate, m_tensor_dimensions[dim]);
        coeff_to_allocate = divup(
            coeff_to_allocate,
            numext::maxi(static_cast<IndexType>(1), m_block_dimensions[dim]));
      }
    } else if (shape_type == TensorBlockShapeType::kUniformAllDims) {
      // Uniform shape: aim for roughly equal ("square") extents per dimension.
      const IndexType dim_size_target = convert_index<IndexType>(
          std::pow(static_cast<float>(target_block_size),
                   1.0f / static_cast<float>(m_block_dimensions.rank())));
      for (int i = 0; i < NumDims; ++i) {
        // ... clamp dim_size_target by the tensor dimension ...
      }
      // Give any unallocated budget back to the inner dimensions.
      for (int i = 0; i < NumDims; ++i) {
        const int dim = isColMajor ? i : NumDims - i - 1;
        // ...
        const IndexType total_size_other_dims = /* ... */;
        const IndexType alloc_avail =
            divup<IndexType>(target_block_size, total_size_other_dims);
        // ...
      }
    }

    eigen_assert(m_block_dimensions.TotalSize() >=
                 numext::mini<IndexType>(target_block_size,
                                         m_tensor_dimensions.TotalSize()));

    // Compute per-dimension block counts, the total block count and strides.
    for (int i = 0; i < NumDims; ++i) {
      // ...
    }
    // ...
  }

  DSizes<IndexType, NumDims> m_tensor_dimensions;
  TensorBlockResourceRequirements m_requirements;
  DSizes<IndexType, NumDims> m_block_dimensions;
  IndexType m_total_block_count;
  DSizes<IndexType, NumDims> m_tensor_strides;
  DSizes<IndexType, NumDims> m_block_strides;
};
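
A hedged sketch of the mapper in use: split a 3-D tensor into skewed blocks and enumerate them by linear block index (sizes are illustrative; include as in the first sketch):

void mapper_sketch() {
  typedef Eigen::internal::TensorBlockMapper<3, Eigen::ColMajor, Eigen::Index>
      BlockMapper;

  Eigen::DSizes<Eigen::Index, 3> tensor_dims(128, 128, 64);
  BlockMapper mapper(
      tensor_dims,
      Eigen::internal::TensorBlockResourceRequirements::skewed<float>(64 * 1024));

  for (Eigen::Index b = 0; b < mapper.blockCount(); ++b) {
    // Each descriptor carries the block's offset and (possibly clipped) dims.
    auto desc = mapper.blockDescriptor(b);
    EIGEN_UNUSED_VARIABLE(desc);
  }
}
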
// Scratch memory used when materializing blocks; buffers are reused across
// blocks and released in the destructor.
template <typename Device>
class TensorBlockScratchAllocator {
  void* allocate(size_t size) {
    const int num_allocations = static_cast<int>(m_allocations.size());
    // ... reuse an existing allocation if one is large enough ...
    if (!has_allocation) {
      // ... otherwise allocate a new buffer from the device ...
    }
  }
  std::vector<Allocation> m_allocations;  // ...
};
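
A hedged sketch of the scratch allocator in isolation (sizes illustrative); memory returned by allocate() stays valid until the allocator is destroyed:

void scratch_sketch() {
  Eigen::DefaultDevice device;
  Eigen::internal::TensorBlockScratchAllocator<Eigen::DefaultDevice> scratch(device);

  void* tmp = scratch.allocate(1024 * sizeof(float));  // space for one block
  // ... materialize a block into `tmp`; it is released together with `scratch`.
  EIGEN_UNUSED_VARIABLE(tmp);
}
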
// XprScalar: extracts the scalar type of a block expression (void when the
// block does not expose an expression).
template <typename XprType>
struct XprScalar;  // ...

// TensorMaterializedBlock: a tensor block backed by a materialized buffer,
// exposed to expressions as a TensorMap over that buffer.
template <typename Scalar, int NumDims, int Layout,
          typename IndexType = Eigen::Index>
class TensorMaterializedBlock {
 public:
  typedef DSizes<IndexType, NumDims> Dimensions;
  typedef TensorMap<const Tensor<Scalar, NumDims, Layout> > XprType;
  typedef internal::TensorBlockDescriptor<NumDims, IndexType> TensorBlockDesc;
  // ...

  // Storage: the buffer (plus dimensions and strides) backing a materialized
  // block, together with flags describing where that buffer lives.
  class Storage {
    // ...
    Storage(Scalar* data, const Dimensions& dimensions, const Dimensions& strides,
            bool materialized_in_output, bool strided_storage);
    // ...
  };

  // prepareStorage: choose the buffer a block will be materialized into,
  // preferring the descriptor's destination buffer over scratch memory.
  template <typename TensorBlockScratch>
  EIGEN_STRONG_INLINE static Storage prepareStorage(
      TensorBlockDesc& desc, TensorBlockScratch& scratch,
      bool allow_strided_storage = false) {
    typedef typename TensorBlockDesc::DestinationBuffer DestinationBuffer;

    if (desc.destination().kind() == DestinationBuffer::kContiguous) {
      // Dense destination buffer: materialize directly into the output.
      desc.DropDestinationBuffer();
      return Storage(/* ... destination pointer ... */, desc.dimensions(),
                     internal::strides<Layout>(desc.dimensions()),
                     /*materialized_in_output=*/true,
                     /*strided_storage=*/false);
    } else if (desc.destination().kind() == DestinationBuffer::kStrided &&
               allow_strided_storage) {
      desc.DropDestinationBuffer();
      // ... return a Storage backed by the strided destination buffer ...
    } else {
      // No usable destination: materialize into scratch memory instead.
      void* mem = scratch.allocate(desc.size() * sizeof(Scalar));
      return Storage(static_cast<Scalar*>(mem), desc.dimensions(),
                     internal::strides<Layout>(desc.dimensions()),
                     /*materialized_in_output=*/false,
                     /*strided_storage=*/false);
    }
  }
  // materialize: construct a block for a tensor that already exists in `data`.
  // If the descriptor covers a contiguous slice of `data`, the block is just a
  // view into it; otherwise the block is copied into its own buffer
  // (destination or scratch) with TensorBlockIO.
  template <typename DataDimensions, typename TensorBlockScratch>
  EIGEN_STRONG_INLINE static TensorMaterializedBlock materialize(
      const Scalar* data, const DataDimensions& data_dims,
      TensorBlockDesc& desc, TensorBlockScratch& scratch) {
    static const bool is_col_major = Layout == ColMajor;

    // Find out how many inner dimensions have a matching size.
    int num_matching_inner_dims = 0;
    for (int i = 0; i < NumDims; ++i) {
      int dim = is_col_major ? i : NumDims - i - 1;
      if (data_dims[dim] != desc.dimensions()[dim]) break;
      ++num_matching_inner_dims;
    }

    // All remaining outer dimensions must be of size `1`, except possibly the
    // first non-matching one; then the block is a contiguous slice of `data`.
    bool can_use_direct_access = true;
    for (int i = num_matching_inner_dims + 1; i < NumDims; ++i) {
      int dim = is_col_major ? i : NumDims - i - 1;
      if (desc.dimension(dim) != 1) {
        can_use_direct_access = false;
        break;
      }
    }

    if (can_use_direct_access) {
      // Build a view starting at `data + desc.offset()`; no copy is needed.
      const Scalar* block_start = data + desc.offset();
      return TensorMaterializedBlock(internal::TensorBlockKind::kView,
                                     block_start, desc.dimensions());
    } else {
      // Materialize the block into a destination or scratch buffer.
      const Storage storage = prepareStorage(desc, scratch);

      typedef internal::TensorBlockIO<Scalar, IndexType, NumDims, Layout>
          TensorBlockIO;
      typedef typename TensorBlockIO::Src TensorBlockIOSrc;
      typedef typename TensorBlockIO::Dst TensorBlockIODst;

      TensorBlockIOSrc src(internal::strides<Layout>(Dimensions(data_dims)),
                           data, desc.offset());
      TensorBlockIODst dst(storage.dimensions(), storage.strides(),
                           storage.data());

      TensorBlockIO::Copy(dst, src);
      return storage.AsTensorMaterializedBlock();
    }
  }

  // ...
};
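
A hedged sketch tying the pieces above together: materialize one block of a tensor that already exists in memory (names and sizes are illustrative; include as in the first sketch, plus <vector>):

#include <vector>

void materialize_sketch() {
  typedef Eigen::internal::TensorMaterializedBlock<float, 3, Eigen::ColMajor>
      MatBlock;

  Eigen::DSizes<Eigen::Index, 3> dims(128, 128, 64);
  std::vector<float> buffer(128 * 128 * 64, 1.0f);  // dense column-major data

  Eigen::internal::TensorBlockMapper<3, Eigen::ColMajor> mapper(
      dims,
      Eigen::internal::TensorBlockResourceRequirements::skewed<float>(64 * 1024));
  Eigen::DefaultDevice device;
  Eigen::internal::TensorBlockScratchAllocator<Eigen::DefaultDevice> scratch(device);

  auto desc = mapper.blockDescriptor(/*block_index=*/0);
  MatBlock block = MatBlock::materialize(buffer.data(), dims, desc, scratch);
  // block.data() points either into `buffer` (a view) or into scratch memory.
  EIGEN_UNUSED_VARIABLE(block);
}
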
// Lazy block wrappers. They do not materialize any data themselves; they wrap
// the blocks of their arguments into a new coefficient-wise block expression.

template <typename UnaryOp, typename ArgTensorBlock>
class TensorCwiseUnaryBlock {
  // ...
  TensorCwiseUnaryBlock(const ArgTensorBlock& arg_block, const UnaryOp& functor);
  // ...
};

template <typename BinaryOp, typename LhsTensorBlock, typename RhsTensorBlock>
class TensorCwiseBinaryBlock {
  // ...
  TensorCwiseBinaryBlock(const LhsTensorBlock& left_block,
                         const RhsTensorBlock& right_block,
                         const BinaryOp& functor);
  // ...
};

// Wrappers that rebuild an arbitrary block expression through a BlockFactory.
template <typename BlockFactory, typename ArgTensorBlock>
class TensorUnaryExprBlock {
  // ...
};

template <typename BlockFactory, typename Arg1TensorBlock,
          typename Arg2TensorBlock, typename Arg3TensorBlock>
class TensorTernaryExprBlock {
  // ...
  TensorTernaryExprBlock(const Arg1TensorBlock& arg1_block,
                         const Arg2TensorBlock& arg2_block,
                         const Arg3TensorBlock& arg3_block,
                         const BlockFactory& factory);
  // ...
};
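
A hedged sketch of wrapping two materialized blocks into a lazy coefficient-wise sum (in practice evaluators build these wrappers from their argument evaluators; names are illustrative):

void binary_block_sketch(
    const Eigen::internal::TensorMaterializedBlock<float, 3, Eigen::ColMajor>& lhs,
    const Eigen::internal::TensorMaterializedBlock<float, 3, Eigen::ColMajor>& rhs) {
  typedef Eigen::internal::TensorMaterializedBlock<float, 3, Eigen::ColMajor> ArgBlock;
  typedef Eigen::internal::scalar_sum_op<float, float> SumOp;
  typedef Eigen::internal::TensorCwiseBinaryBlock<SumOp, ArgBlock, ArgBlock> SumBlock;

  SumBlock sum(lhs, rhs, SumOp());
  // sum.expr() is a TensorCwiseBinaryOp over the argument block expressions;
  // nothing is computed until that expression is assigned to a target.
  (void)sum.expr();
}
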
// StridedLinearBufferCopy: copies `count` coefficients between two linear
// buffers with fixed input/output strides. Specifying the copy kind statically
// lets each case use the cheapest packet instructions.
template <typename Scalar, typename IndexType>
class StridedLinearBufferCopy {
  typedef typename packet_traits<Scalar>::type Packet;
  enum {
    Vectorizable = packet_traits<Scalar>::Vectorizable,
    PacketSize = packet_traits<Scalar>::size
  };

 public:
  enum class Kind {
    Linear = 0,       // src_stride == 1 && dst_stride == 1
    Scatter = 1,      // src_stride == 1 && dst_stride != 1
    FillLinear = 2,   // src_stride == 0 && dst_stride == 1
    FillScatter = 3,  // src_stride == 0 && dst_stride != 1
    Gather = 4,       // dst_stride == 1
    Random = 5        // everything else
  };

  struct Dst {
    Dst(IndexType o, IndexType s, Scalar* d) : offset(o), stride(s), data(d) {}
    IndexType offset;
    IndexType stride;
    Scalar* data;
  };

  struct Src {
    Src(IndexType o, IndexType s, const Scalar* d)
        : offset(o), stride(s), data(d) {}
    IndexType offset;
    IndexType stride;
    const Scalar* data;
  };

  template <typename StridedLinearBufferCopy::Kind kind>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run(const Dst& dst,
                                                        const Src& src,
                                                        const size_t count) {
    Run<kind>(count, dst.offset, dst.stride, dst.data, src.offset, src.stride,
              src.data);
  }

 private:
  template <typename StridedLinearBufferCopy::Kind kind>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run(
      const IndexType count, const IndexType dst_offset,
      const IndexType dst_stride, Scalar* EIGEN_RESTRICT dst_data,
      const IndexType src_offset, const IndexType src_stride,
      const Scalar* EIGEN_RESTRICT src_data) {
    const Scalar* src = &src_data[src_offset];
    Scalar* dst = &dst_data[dst_offset];

    if (!Vectorizable) {
      // Scalar fallback.
      for (Index i = 0; i < count; ++i) {
        dst[i * dst_stride] = src[i * src_stride];
      }
      return;
    }

    const IndexType vectorized_size = count - PacketSize;
    IndexType i = 0;

    if (kind == StridedLinearBufferCopy::Kind::Linear) {
      // Linear copy from `src` to `dst`: unrolled packet loop, then a plain
      // packet loop, then a scalar tail.
      const IndexType unrolled_size = count - 4 * PacketSize;
      for (; i <= unrolled_size; i += 4 * PacketSize) {
        for (int j = 0; j < 4; ++j) {
          Packet p = ploadu<Packet>(src + i + j * PacketSize);
          pstoreu<Scalar, Packet>(dst + i + j * PacketSize, p);
        }
      }
      for (; i <= vectorized_size; i += PacketSize) {
        Packet p = ploadu<Packet>(src + i);
        pstoreu<Scalar, Packet>(dst + i, p);
      }
      for (; i < count; ++i) {
        dst[i] = src[i];
      }
    } else if (kind == StridedLinearBufferCopy::Kind::Scatter) {
      // Scatter from linear `src` to strided `dst`.
      for (; i <= vectorized_size; i += PacketSize) {
        Packet p = ploadu<Packet>(src + i);
        pscatter<Scalar, Packet>(dst + i * dst_stride, p, dst_stride);
      }
      for (; i < count; ++i) {
        dst[i * dst_stride] = src[i];
      }
    } else if (kind == StridedLinearBufferCopy::Kind::FillLinear) {
      // Fill linear `dst` with the single value `*src`.
      const IndexType unrolled_size = count - 4 * PacketSize;
      Packet p = pload1<Packet>(src);
      for (; i <= unrolled_size; i += 4 * PacketSize) {
        for (int j = 0; j < 4; ++j) {
          pstoreu<Scalar, Packet>(dst + i + j * PacketSize, p);
        }
      }
      for (; i <= vectorized_size; i += PacketSize) {
        pstoreu<Scalar, Packet>(dst + i, p);
      }
      for (; i < count; ++i) {
        dst[i] = *src;
      }
    } else if (kind == StridedLinearBufferCopy::Kind::FillScatter) {
      // Scatter the single value `*src` into strided `dst`.
      Packet p = pload1<Packet>(src);
      for (; i <= vectorized_size; i += PacketSize) {
        pscatter<Scalar, Packet>(dst + i * dst_stride, p, dst_stride);
      }
      for (; i < count; ++i) {
        dst[i * dst_stride] = *src;
      }
    } else if (kind == StridedLinearBufferCopy::Kind::Gather) {
      // Gather from strided `src` into linear `dst`.
      for (; i <= vectorized_size; i += PacketSize) {
        Packet p = pgather<Scalar, Packet>(src + i * src_stride, src_stride);
        pstoreu<Scalar, Packet>(dst + i, p);
      }
      for (; i < count; ++i) {
        dst[i] = src[i * src_stride];
      }
    } else if (kind == StridedLinearBufferCopy::Kind::Random) {
      // Random: both strides are arbitrary; copy coefficient by coefficient.
      for (; i < count; ++i) {
        dst[i * dst_stride] = src[i * src_stride];
      }
    } else {
      eigen_assert(false);
    }
  }
};
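
A hedged sketch of a direct call to this helper (normally it is only reached through TensorBlockIO below): copy 1024 contiguous floats with the statically selected Linear kind.

void lincopy_sketch(const float* src, float* dst) {
  typedef Eigen::internal::StridedLinearBufferCopy<float, Eigen::Index> LinCopy;

  LinCopy::Run<LinCopy::Kind::Linear>(
      LinCopy::Dst(/*offset=*/0, /*stride=*/1, dst),
      LinCopy::Src(/*offset=*/0, /*stride=*/1, src),
      /*count=*/1024);
}
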
// TensorBlockIO copies data between two tensor blocks, honouring their
// dimensions, strides and an optional destination-to-source dimension map.
template <typename Scalar, typename IndexType, int NumDims, int Layout>
class TensorBlockIO {
  static const bool IsColMajor = (Layout == ColMajor);
  typedef StridedLinearBufferCopy<Scalar, IndexType> LinCopy;

 public:
  typedef DSizes<IndexType, NumDims> Dimensions;
  typedef DSizes<int, NumDims> DimensionsMap;

  struct Dst {
    Dst(const Dimensions& dst_dims, const Dimensions& dst_strides, Scalar* dst,
        IndexType dst_offset = 0)
        : dims(dst_dims), strides(dst_strides), data(dst), offset(dst_offset) {}

    Dimensions dims;
    Dimensions strides;
    Scalar* data;
    IndexType offset;
  };

  struct Src {
    Src(const Dimensions& src_strides, const Scalar* src,
        IndexType src_offset = 0)
        : strides(src_strides), data(src), offset(src_offset) {}

    Dimensions strides;
    const Scalar* data;
    IndexType offset;
  };

  // Copies data to `dst` from `src`, using the provided mapping between the
  // destination and source dimensions:
  //   src_dimension_index = dst_to_src_dim_map[dst_dimension_index]
  // Returns the number of copied coefficients.
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE IndexType Copy(
      const Dst& dst, const Src& src, const DimensionsMap& dst_to_src_dim_map) {
    // ... rank-0 tensors copy a single scalar ...

    // Both `dst` and `src` must have a contiguous innermost dimension (stride
    // 1, or stride 0, which is used to implement broadcasting).
    int inner_dim = IsColMajor ? 0 : NumDims - 1;
    eigen_assert(dst.strides[inner_dim] == 1 || dst.strides[inner_dim] == 0);
    eigen_assert(src.strides[inner_dim] == 1 || src.strides[inner_dim] == 0);

    const DimensionsMap& dim_map = dst_to_src_dim_map;

    // Do not squeeze reordered inner dimensions.
    int num_squeezable_dims = NumSqueezableInnerDims(dim_map);

    // Find the innermost dimension in the dst whose size is not 1: data is
    // written linearly into that dimension and read (possibly strided) from
    // the src.
    int num_size_one_inner_dims = 0;
    for (int i = 0; i < num_squeezable_dims; ++i) {
      const int dst_dim = IsColMajor ? i : NumDims - i - 1;
      if (dst.dims[dst_dim] != 1) break;
      num_size_one_inner_dims++;
    }

    // If all dimensions are of size 1, just copy a single scalar.
    if (num_size_one_inner_dims == NumDims) {
      *(dst.data + dst.offset) = *(src.data + src.offset);
      return 1;
    }

    // Outermost dimension in the dst with `stride == 1` (modulo the effective
    // inner dimension), and the matching dimension in the src.
    const int dst_stride1_dim = IsColMajor
                                    ? num_size_one_inner_dims
                                    : NumDims - num_size_one_inner_dims - 1;
    const int src_dim_for_dst_stride1_dim =
        NumDims == 0 ? 1 : dim_map[dst_stride1_dim];

    // Size of the innermost dimension (length of contiguous blocks of memory).
    IndexType dst_inner_dim_size = NumDims == 0 ? 1 : dst.dims[dst_stride1_dim];

    // Squeeze multiple inner dims into one if they are contiguous in `dst` and
    // `src` memory, so we can issue fewer (but longer) linear copies.
    for (int i = num_size_one_inner_dims + 1; i < num_squeezable_dims; ++i) {
      const int dst_dim = IsColMajor ? i : NumDims - i - 1;
      const IndexType dst_stride = dst.strides[dst_dim];
      const IndexType src_stride = src.strides[dim_map[dst_dim]];
      if (dst_inner_dim_size == dst_stride && dst_stride == src_stride) {
        dst_inner_dim_size *= dst.dims[dst_dim];
        ++num_size_one_inner_dims;
      } else {
        break;
      }
    }

    // Set up offsets and strides for the innermost loop.
    IndexType input_offset = src.offset;
    IndexType output_offset = dst.offset;
    IndexType input_stride =
        NumDims == 0 ? 1 : src.strides[src_dim_for_dst_stride1_dim];
    IndexType output_stride = NumDims == 0 ? 1 : dst.strides[dst_stride1_dim];

    const int at_least_1_dim = NumDims <= 1 ? 1 : NumDims - 1;
    array<BlockIteratorState, at_least_1_dim> it;

    // Initialize block iterator state, squeezing away dimensions of size 1.
    int idx = 0;  // currently initialized iterator state index
    for (int i = num_size_one_inner_dims; i < NumDims - 1; ++i) {
      const int dst_dim = IsColMajor ? i + 1 : NumDims - i - 2;
      if (dst.dims[dst_dim] == 1) continue;

      it[idx].size = dst.dims[dst_dim];
      it[idx].input_stride = src.strides[dim_map[dst_dim]];
      it[idx].output_stride = dst.strides[dst_dim];

      it[idx].input_span = it[idx].input_stride * (it[idx].size - 1);
      it[idx].output_span = it[idx].output_stride * (it[idx].size - 1);

      idx++;
    }

    // Iterate, copying one (squeezed) inner dimension at a time and advancing
    // the multi-dimensional iterator state after each linear copy.
    const IndexType block_total_size = NumDims == 0 ? 1 : dst.dims.TotalSize();

#define COPY_INNER_DIM(KIND)                                           \
  IndexType num_copied = 0;                                            \
  for (num_copied = 0; num_copied < block_total_size;                  \
       num_copied += dst_inner_dim_size) {                             \
    LinCopy::template Run<KIND>(                                       \
        typename LinCopy::Dst(output_offset, output_stride, dst.data), \
        typename LinCopy::Src(input_offset, input_stride, src.data),   \
        dst_inner_dim_size);                                           \
                                                                       \
    for (int j = 0; j < idx; ++j) {                                    \
      if (++it[j].count < it[j].size) {                                \
        input_offset += it[j].input_stride;                            \
        output_offset += it[j].output_stride;                          \
        break;                                                         \
      }                                                                \
      it[j].count = 0;                                                 \
      input_offset -= it[j].input_span;                                \
      output_offset -= it[j].output_span;                              \
    }                                                                  \
  }                                                                    \
  return num_copied;

    if (input_stride == 1 && output_stride == 1) {
      COPY_INNER_DIM(LinCopy::Kind::Linear);
    } else if (input_stride == 1 && output_stride != 1) {
      COPY_INNER_DIM(LinCopy::Kind::Scatter);
    } else if (input_stride == 0 && output_stride == 1) {
      COPY_INNER_DIM(LinCopy::Kind::FillLinear);
    } else if (input_stride == 0 && output_stride != 1) {
      COPY_INNER_DIM(LinCopy::Kind::FillScatter);
    } else if (output_stride == 1) {
      COPY_INNER_DIM(LinCopy::Kind::Gather);
    } else {
      COPY_INNER_DIM(LinCopy::Kind::Random);
    }

#undef COPY_INNER_DIM
  }

  // Copy from `src` to `dst` with an identity dst-to-src dimension map.
  static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE IndexType Copy(const Dst& dst,
                                                              const Src& src) {
    DimensionsMap dst_to_src_map;
    for (int i = 0; i < NumDims; ++i) dst_to_src_map[i] = i;
    return Copy(dst, src, dst_to_src_map);
  }

 private:
  struct BlockIteratorState {
    // ... size, count, input/output strides and spans, zero-initialized ...
    IndexType size;
    IndexType count;
    IndexType input_stride;
    IndexType output_stride;
    IndexType input_span;
    IndexType output_span;
  };

  // Inner dimensions can only be squeezed as long as they are not reordered by
  // the dimension map.
  static int NumSqueezableInnerDims(const DimensionsMap& dim_map) {
    int num_squeezable_dims = 0;
    for (int i = 0; i < NumDims; ++i) {
      const int dim = IsColMajor ? i : NumDims - i - 1;
      if (dim_map[dim] != dim) break;
      num_squeezable_dims++;
    }
    return num_squeezable_dims;
  }
};
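
A hedged sketch of copying one block out of a larger column-major tensor with TensorBlockIO (shapes and offsets are illustrative; the destination buffer is dense):

void blockio_sketch(const float* tensor, float* block_out) {
  typedef Eigen::internal::TensorBlockIO<float, Eigen::Index, 2, Eigen::ColMajor>
      BlockIO;

  Eigen::DSizes<Eigen::Index, 2> tensor_dims(100, 100);
  Eigen::DSizes<Eigen::Index, 2> block_dims(16, 16);

  // Read a 16x16 block starting at tensor coefficient (0, 0) and write it into
  // a dense 16x16 destination buffer.
  BlockIO::Src src(Eigen::internal::strides<Eigen::ColMajor>(tensor_dims), tensor,
                   /*src_offset=*/0);
  BlockIO::Dst dst(block_dims, Eigen::internal::strides<Eigen::ColMajor>(block_dims),
                   block_out);
  BlockIO::Copy(dst, src);
}
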
// TensorBlockAssignment assigns a block expression to a memory buffer defined
// by its dimensions, strides, data pointer and offset.
template <typename Scalar, int NumDims, typename TensorBlockExpr,
          typename IndexType = Eigen::Index>
class TensorBlockAssignment {
  // Block expressions are evaluated with the coeff/packet API on the default
  // device.
  typedef TensorEvaluator<const TensorBlockExpr, DefaultDevice>
      TensorBlockEvaluator;

  typedef DSizes<IndexType, NumDims> Dimensions;

  enum {
    Vectorizable = packet_traits<Scalar>::Vectorizable,
    PacketSize = packet_traits<Scalar>::size
  };

  // Scalar path: assign the inner dimension one coefficient at a time.
  template <bool Vectorizable, typename Evaluator>
  struct InnerDimAssign {
    EIGEN_ALWAYS_INLINE static void Run(Scalar* target, IndexType count,
                                        const Evaluator& eval,
                                        IndexType eval_offset) {
      for (IndexType i = 0; i < count; ++i) {
        target[i] = eval.coeff(eval_offset + i);
      }
    }
  };

  // Vectorized path: unrolled packet stores, then single packets, then a
  // scalar tail.
  template <typename Evaluator>
  struct InnerDimAssign<true, Evaluator> {
    EIGEN_ALWAYS_INLINE static void Run(Scalar* target, IndexType count,
                                        const Evaluator& eval,
                                        IndexType eval_offset) {
      typedef typename packet_traits<Scalar>::type Packet;

      const IndexType unrolled_size = count - 4 * PacketSize;
      const IndexType vectorized_size = count - PacketSize;
      IndexType i = 0;

      for (; i <= unrolled_size; i += 4 * PacketSize) {
        for (int j = 0; j < 4; ++j) {
          const IndexType idx = eval_offset + i + j * PacketSize;
          Packet p = eval.template packet<Unaligned>(idx);
          pstoreu<Scalar>(target + i + j * PacketSize, p);
        }
      }

      for (; i <= vectorized_size; i += PacketSize) {
        Packet p = eval.template packet<Unaligned>(eval_offset + i);
        pstoreu<Scalar>(target + i, p);
      }

      for (; i < count; ++i) {
        target[i] = eval.coeff(eval_offset + i);
      }
    }
  };
 public:
  // Target: the destination buffer an expression block is assigned to.
  struct Target {
    Target(const Dimensions& target_dims, const Dimensions& target_strides,
           Scalar* target_data, IndexType target_offset = 0)
        : dims(target_dims), strides(target_strides), data(target_data),
          offset(target_offset) {}

    Dimensions dims;
    Dimensions strides;
    Scalar* data;
    IndexType offset;
  };

  static Target target(const Dimensions& target_dims,
                       const Dimensions& target_strides, Scalar* target_data,
                       IndexType target_offset = 0) {
    return Target(target_dims, target_strides, target_data, target_offset);
  }

  template <typename TargetDimsIndexType, typename TargetStridesIndexType>
  static Target target(
      const DSizes<TargetDimsIndexType, NumDims>& target_dims,
      const DSizes<TargetStridesIndexType, NumDims>& target_strides,
      Scalar* target_data, IndexType target_offset = 0) {
    // The Dimensions constructor converts the index types when it is safe.
    return target(Dimensions(target_dims), Dimensions(target_strides),
                  target_data, target_offset);
  }

  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void Run(
      const Target& target, const TensorBlockExpr& expr) {
    // Evaluate the block expression on the default (single-threaded) device.
    DefaultDevice default_device;
    TensorBlockEvaluator eval(expr, default_device);

    // The block expression and the target must have matching dimensions.
    eigen_assert(dimensions_match(target.dims, eval.dimensions()));

    static const int Layout = TensorBlockEvaluator::Layout;
    static const bool is_col_major = Layout == ColMajor;

    // Initialize the output inner dimension size based on the layout.
    const IndexType output_size = NumDims == 0 ? 1 : target.dims.TotalSize();
    const int inner_dim_idx = is_col_major ? 0 : NumDims - 1;
    IndexType output_inner_dim_size = target.dims[inner_dim_idx];

    // The target inner dimension must be contiguous.
    eigen_assert(target.strides[inner_dim_idx] == 1);

    // Squeeze contiguous inner dimensions of the target into one larger inner
    // dimension, so the inner loop assigns longer runs.
    IndexType num_squeezed_dims = 0;
    for (Index i = 1; i < NumDims; ++i) {
      const Index dim = is_col_major ? i : NumDims - i - 1;
      const IndexType target_stride = target.strides[dim];

      if (output_inner_dim_size == target_stride) {
        output_inner_dim_size *= target.dims[dim];
        num_squeezed_dims++;
      } else {
        break;
      }
    }

    // Initialize the output block iterator state for the remaining dimensions.
    array<BlockIteratorState, NumDims> it;

    int idx = 0;  // currently initialized iterator state index
    for (Index i = num_squeezed_dims; i < NumDims - 1; ++i) {
      const Index dim = is_col_major ? i + 1 : NumDims - i - 2;

      it[idx].count = 0;
      it[idx].size = target.dims[dim];
      it[idx].output_stride = target.strides[dim];
      it[idx].output_span = it[idx].output_stride * (it[idx].size - 1);
      idx++;
    }

    // Read the block expression from the beginning and write to `target`
    // starting at the given offset.
    IndexType input_offset = 0;
    IndexType output_offset = target.offset;

    for (IndexType i = 0; i < output_size; i += output_inner_dim_size) {
      // Assign one (squeezed) inner dimension at the current offsets.
      InnerDimAssign<Vectorizable && TensorBlockEvaluator::PacketAccess,
                     TensorBlockEvaluator>::Run(target.data + output_offset,
                                                output_inner_dim_size, eval,
                                                input_offset);

      // Move the input offset forward by the number of assigned coefficients.
      input_offset += output_inner_dim_size;

      // Update the output offset from the multi-dimensional iterator state.
      for (int j = 0; j < idx; ++j) {
        if (++it[j].count < it[j].size) {
          output_offset += it[j].output_stride;
          break;
        }
        it[j].count = 0;
        output_offset -= it[j].output_span;
      }
    }
  }

 private:
  struct BlockIteratorState {
    // ... count, size, output_stride, output_span ...
    IndexType count;
    IndexType size;
    IndexType output_stride;
    IndexType output_span;
  };
};

}  // namespace internal
}  // namespace Eigen
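
A hedged sketch of assigning a small block expression to a raw buffer with the assignment helper above (the expression and sizes are illustrative; include as in the first sketch):

void block_assign_sketch(const float* in, float* out) {
  typedef Eigen::TensorMap<const Eigen::Tensor<float, 2, Eigen::ColMajor> > SrcMap;
  typedef Eigen::internal::TensorBlockAssignment<float, 2, SrcMap> BlockAssign;

  Eigen::DSizes<Eigen::Index, 2> dims(16, 16);
  SrcMap expr(in, 16, 16);  // the "block expression": just a map over `in`

  // Assign the 16x16 expression into a dense 16x16 destination buffer.
  BlockAssign::Run(
      BlockAssign::target(dims, Eigen::internal::strides<Eigen::ColMajor>(dims), out),
      expr);
}
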
#endif  // EIGEN_CXX11_TENSOR_TENSOR_BLOCK_H