#ifndef EIGEN_CXX11_TENSOR_TENSOR_SCAN_H
#define EIGEN_CXX11_TENSOR_TENSOR_SCAN_H

template <typename Op, typename XprType>
struct traits<TensorScanOp<Op, XprType> >
    : public traits<XprType> {
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
  static const int NumDimensions = XprTraits::NumDimensions;
  static const int Layout = XprTraits::Layout;
};
template<typename Op, typename XprType>
struct eval<TensorScanOp<Op, XprType>, Eigen::Dense>
{
  typedef const TensorScanOp<Op, XprType>& type;
};

template<typename Op, typename XprType>
struct nested<TensorScanOp<Op, XprType>, 1,
              typename eval<TensorScanOp<Op, XprType> >::type>
{
  typedef TensorScanOp<Op, XprType> type;
};
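
// Usage sketch (illustrative; assumes the public Tensor API from
// unsupported/Eigen/CXX11/Tensor). TensorScanOp is the expression node behind
// the cumulative-scan methods on TensorBase, e.g. cumsum() and cumprod():
//
//   Eigen::Tensor<float, 2> t(4, 3);
//   t.setRandom();
//   // Inclusive running sum along dimension 0.
//   Eigen::Tensor<float, 2> inclusive = t.cumsum(0);
//   // Exclusive variant: each output omits its own input element.
//   Eigen::Tensor<float, 2> exclusive = t.cumsum(0, /*exclusive=*/true);
//
// Both calls construct a TensorScanOp with a sum accumulator, the chosen
// axis, and the exclusive flag.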
template <typename Op, typename XprType>
class TensorScanOp
    : public TensorBase<TensorScanOp<Op, XprType>, ReadOnlyAccessors> {
template <typename Self>
EIGEN_STRONG_INLINE void ReduceScalar(Self& self, Index offset,
                                      typename Self::CoeffReturnType* data) {
  // Compute the scan along the axis, starting at the given offset.
  typename Self::CoeffReturnType accum = self.accumulator().initialize();
  if (self.stride() == 1) {
    if (self.exclusive()) {
      for (Index curr = offset; curr < offset + self.size(); ++curr) {
        data[curr] = self.accumulator().finalize(accum);
        self.accumulator().reduce(self.inner().coeff(curr), &accum);
      }
    } else {
      for (Index curr = offset; curr < offset + self.size(); ++curr) {
        self.accumulator().reduce(self.inner().coeff(curr), &accum);
        data[curr] = self.accumulator().finalize(accum);
      }
    }
  } else {
    if (self.exclusive()) {
      for (Index idx3 = 0; idx3 < self.size(); idx3++) {
        const Index curr = offset + idx3 * self.stride();
        data[curr] = self.accumulator().finalize(accum);
        self.accumulator().reduce(self.inner().coeff(curr), &accum);
      }
    } else {
      for (Index idx3 = 0; idx3 < self.size(); idx3++) {
        const Index curr = offset + idx3 * self.stride();
        self.accumulator().reduce(self.inner().coeff(curr), &accum);
        data[curr] = self.accumulator().finalize(accum);
      }
    }
  }
}
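
// The only difference between the exclusive and inclusive branches above is
// the order of the store and the reduce. A minimal scalar sketch of the same
// idea, assuming a plain float array and a sum accumulator:
//
//   // inclusive: out[i] = in[0] + ... + in[i]
//   float acc = 0.f;
//   for (int i = 0; i < n; ++i) {
//     acc += in[i];
//     out[i] = acc;
//   }
//
//   // exclusive: out[i] = in[0] + ... + in[i-1], out[0] = identity (0)
//   float acc = 0.f;
//   for (int i = 0; i < n; ++i) {
//     out[i] = acc;   // store before folding in in[i]
//     acc += in[i];
//   }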
template <typename Self>
EIGEN_STRONG_INLINE void ReducePacket(Self& self, Index offset,
                                      typename Self::CoeffReturnType* data) {
  using Scalar = typename Self::CoeffReturnType;
  using Packet = typename Self::PacketReturnType;
  // Compute the scan along the axis, starting at the given offset.
  Packet accum = self.accumulator().template initializePacket<Packet>();
  if (self.stride() == 1) {
    if (self.exclusive()) {
      for (Index curr = offset; curr < offset + self.size(); ++curr) {
        internal::pstoreu<Scalar, Packet>(data + curr, self.accumulator().finalizePacket(accum));
        self.accumulator().reducePacket(self.inner().template packet<Unaligned>(curr), &accum);
      }
    } else {
      for (Index curr = offset; curr < offset + self.size(); ++curr) {
        self.accumulator().reducePacket(self.inner().template packet<Unaligned>(curr), &accum);
        internal::pstoreu<Scalar, Packet>(data + curr, self.accumulator().finalizePacket(accum));
      }
    }
  } else {
    if (self.exclusive()) {
      for (Index idx3 = 0; idx3 < self.size(); idx3++) {
        const Index curr = offset + idx3 * self.stride();
        internal::pstoreu<Scalar, Packet>(data + curr, self.accumulator().finalizePacket(accum));
        self.accumulator().reducePacket(self.inner().template packet<Unaligned>(curr), &accum);
      }
    } else {
      for (Index idx3 = 0; idx3 < self.size(); idx3++) {
        const Index curr = offset + idx3 * self.stride();
        self.accumulator().reducePacket(self.inner().template packet<Unaligned>(curr), &accum);
        internal::pstoreu<Scalar, Packet>(data + curr, self.accumulator().finalizePacket(accum));
      }
    }
  }
}
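
// ReducePacket runs PacketSize independent scans at once: lane k of `accum`
// tracks the scan line starting at `offset + k`. This is only meaningful when
// the scan stride is at least PacketSize, so the PacketSize neighboring scan
// lines are distinct; the callers below arrange for that. A sketch of the
// lane layout for PacketSize == 4 and stride == 4 (illustrative values):
//
//   packet loaded at curr = offset + idx3 * stride:
//     { in[curr+0], in[curr+1], in[curr+2], in[curr+3] }
//   lane 0 accumulates column offset+0, lane 1 column offset+1, and so on.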
template <typename Self, bool Vectorize, bool Parallel>
struct ReduceBlock {
  EIGEN_STRONG_INLINE void operator()(Self& self, Index idx1,
                                      typename Self::CoeffReturnType* data) {
    for (Index idx2 = 0; idx2 < self.stride(); idx2++) {
      // Calculate the starting offset for the scan.
      Index offset = idx1 + idx2;
      ReduceScalar(self, offset, data);
    }
  }
};
// Specialization for vectorized reduction.
template <typename Self>
struct ReduceBlock<Self, true, false> {
  EIGEN_STRONG_INLINE void operator()(Self& self, Index idx1,
                                      typename Self::CoeffReturnType* data) {
    using Packet = typename Self::PacketReturnType;
    const int PacketSize = internal::unpacket_traits<Packet>::size;
    Index idx2 = 0;
    for (; idx2 + PacketSize <= self.stride(); idx2 += PacketSize) {
      // Calculate the starting offset for the packet scan.
      Index offset = idx1 + idx2;
      ReducePacket(self, offset, data);
    }
    for (; idx2 < self.stride(); idx2++) {
      // Calculate the starting offset for the scan.
      Index offset = idx1 + idx2;
      ReduceScalar(self, offset, data);
    }
  }
};
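
// Worked example (illustrative numbers): with stride == 10 and
// PacketSize == 4, the first loop above handles offsets idx1+0..3 and
// idx1+4..7 as two packet scans, and the second loop finishes offsets
// idx1+8 and idx1+9 one scan line at a time.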
// Single threaded CPU implementation of scan.
template <typename Self, typename Reducer, typename Device,
          bool Vectorize =
              (TensorEvaluator<typename Self::ChildTypeNoConst, Device>::PacketAccess &&
               internal::reducer_traits<Reducer, Device>::PacketAccess)>
struct ScanLauncher {
  void operator()(Self& self, typename Self::CoeffReturnType* data) {
    Index total_size = internal::array_prod(self.dimensions());

    // We fix the index along the scan axis to 0 and perform a scan per
    // remaining entry. The iteration is split into two nested loops to
    // avoid an integer division by keeping track of idx1 and idx2.
    for (Index idx1 = 0; idx1 < total_size; idx1 += self.stride() * self.size()) {
      ReduceBlock<Self, Vectorize, false> block_reducer;
      block_reducer(self, idx1, data);
    }
  }
};
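
// Index decomposition in numbers (illustrative): for a column-major tensor of
// dimensions (2, 3, 4) scanned along axis 1, stride() == 2 and size() == 3,
// so every coefficient is reached as idx1 + idx2 + idx3 * stride() with
//   idx1 in {0, 6, 12, 18}  (outer blocks of stride() * size() == 6 elements),
//   idx2 in {0, 1}          (position within the stride),
//   idx3 in {0, 1, 2}       (position along the scan axis).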
#ifdef EIGEN_USE_THREADS

// Adjust block_size to avoid false sharing of cachelines between
// multiple threads.
EIGEN_STRONG_INLINE Index AdjustBlockSize(Index item_size, Index block_size) {
  EIGEN_CONSTEXPR Index kBlockAlignment = 128;
  const Index items_per_cacheline =
      numext::maxi<Index>(1, kBlockAlignment / item_size);
  return items_per_cacheline * divup(block_size, items_per_cacheline);
}
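
// Worked example (illustrative): for item_size == sizeof(float) == 4 and the
// 128-byte kBlockAlignment above, items_per_cacheline == 32, so a requested
// block size of 50 is rounded up to 64. Neighboring shards then start at
// 128-byte boundaries of `data` and do not share cachelines.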
template <typename Self>
struct ReduceBlock<Self, true, true> {
  EIGEN_STRONG_INLINE void operator()(Self& self, Index idx1,
                                      typename Self::CoeffReturnType* data) {
    using Scalar = typename Self::CoeffReturnType;
    using Packet = typename Self::PacketReturnType;
    const int PacketSize = internal::unpacket_traits<Packet>::size;
    Index num_scalars = self.stride();
    Index num_packets = 0;
    if (self.stride() >= PacketSize) {
      num_packets = self.stride() / PacketSize;
      self.device().parallelFor(
          num_packets,
          TensorOpCost(PacketSize * self.size(), PacketSize * self.size(),
                       16 * PacketSize * self.size(), true, PacketSize),
          // Make the shard size large enough that two neighboring threads
          // won't write to the same cacheline of `data`.
          [=](Index blk_size) {
            return AdjustBlockSize(PacketSize * sizeof(Scalar), blk_size);
          },
          [&](Index first, Index last) {
            for (Index packet = first; packet < last; ++packet) {
              const Index idx2 = packet * PacketSize;
              ReducePacket(self, idx1 + idx2, data);
            }
          });
      num_scalars -= num_packets * PacketSize;
    }
    self.device().parallelFor(
        num_scalars,
        TensorOpCost(self.size(), self.size(), 16 * self.size()),
        // Make the shard size large enough that two neighboring threads
        // won't write to the same cacheline of `data`.
        [=](Index blk_size) {
          return AdjustBlockSize(sizeof(Scalar), blk_size);
        },
        [&](Index first, Index last) {
          for (Index scalar = first; scalar < last; ++scalar) {
            const Index idx2 = num_packets * PacketSize + scalar;
            ReduceScalar(self, idx1 + idx2, data);
          }
        });
  }
};
template <typename Self>
struct ReduceBlock<Self, false, true> {
  EIGEN_STRONG_INLINE void operator()(Self& self, Index idx1,
                                      typename Self::CoeffReturnType* data) {
    using Scalar = typename Self::CoeffReturnType;
    self.device().parallelFor(
        self.stride(),
        TensorOpCost(self.size(), self.size(), 16 * self.size()),
        // Make the shard size large enough that two neighboring threads
        // won't write to the same cacheline of `data`.
        [=](Index blk_size) {
          return AdjustBlockSize(sizeof(Scalar), blk_size);
        },
        [&](Index first, Index last) {
          for (Index idx2 = first; idx2 < last; ++idx2) {
            ReduceScalar(self, idx1 + idx2, data);
          }
        });
  }
};
// Specialization of ScanLauncher for multithreaded execution.
template <typename Self, typename Reducer, bool Vectorize>
struct ScanLauncher<Self, Reducer, ThreadPoolDevice, Vectorize> {
  void operator()(Self& self, typename Self::CoeffReturnType* data) {
    using Scalar = typename Self::CoeffReturnType;
    using Packet = typename Self::PacketReturnType;
    const int PacketSize = internal::unpacket_traits<Packet>::size;
    const Index total_size = internal::array_prod(self.dimensions());
    const Index inner_block_size = self.stride() * self.size();
    bool parallelize_by_outer_blocks = (total_size >= (self.stride() * inner_block_size));

    if ((parallelize_by_outer_blocks && total_size <= 4096) ||
        (!parallelize_by_outer_blocks && self.stride() < PacketSize)) {
      ScanLauncher<Self, Reducer, DefaultDevice, Vectorize> launcher;
      launcher(self, data);
      return;
    }

    if (parallelize_by_outer_blocks) {
      // Parallelize over the independent outer blocks.
      const Index num_outer_blocks = total_size / inner_block_size;
      self.device().parallelFor(
          num_outer_blocks,
          TensorOpCost(inner_block_size, inner_block_size,
                       16 * PacketSize * inner_block_size, Vectorize,
                       PacketSize),
          [=](Index blk_size) {
            return AdjustBlockSize(inner_block_size * sizeof(Scalar), blk_size);
          },
          [&](Index first, Index last) {
            for (Index idx1 = first; idx1 < last; ++idx1) {
              ReduceBlock<Self, Vectorize, false> block_reducer;
              block_reducer(self, idx1 * inner_block_size, data);
            }
          });
    } else {
      // Parallelize over the scan lines inside each outer block.
      ReduceBlock<Self, Vectorize, true> block_reducer;
      for (Index idx1 = 0; idx1 < total_size;
           idx1 += self.stride() * self.size()) {
        block_reducer(self, idx1, data);
      }
    }
  }
};
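
// Heuristic in numbers (illustrative): for a 1024 x 1024 float tensor scanned
// along the inner dimension of a column-major layout, stride() == 1 and
// size() == 1024, so inner_block_size == 1024 and total_size == 1048576.
// total_size >= stride() * inner_block_size holds and total_size > 4096, so
// the launcher parallelizes over the 1024 independent outer blocks rather
// than over the (single) scan line inside each block.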
#endif  // EIGEN_USE_THREADS

#if defined(EIGEN_USE_GPU) && (defined(EIGEN_GPUCC))
// GPU implementation of scan: one thread per scan line.
template <typename Self, typename Reducer>
__global__ void ScanKernel(Self self, Index total_size,
                           typename Self::CoeffReturnType* data) {
  // Compute the starting offset of this thread's scan line.
  Index val = threadIdx.x + blockIdx.x * blockDim.x;
  Index offset = (val / self.stride()) * self.stride() * self.size() + val % self.stride();

  if (offset + (self.size() - 1) * self.stride() < total_size) {
    // Compute the scan along the axis, starting at the calculated offset.
    typename Self::CoeffReturnType accum = self.accumulator().initialize();
    for (Index idx = 0; idx < self.size(); idx++) {
      Index curr = offset + idx * self.stride();
      if (self.exclusive()) {
        data[curr] = self.accumulator().finalize(accum);
        self.accumulator().reduce(self.inner().coeff(curr), &accum);
      } else {
        self.accumulator().reduce(self.inner().coeff(curr), &accum);
        data[curr] = self.accumulator().finalize(accum);
      }
    }
  }
}
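
// Worked example of the thread-to-scan-line mapping (illustrative): with
// stride() == 2 and size() == 3, thread val == 5 computes
//   offset = (5 / 2) * 2 * 3 + 5 % 2 = 13,
// and then scans the elements at 13, 15 and 17 (offset + idx * stride()).
// The bounds check skips threads whose last element would fall outside
// total_size.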
template <typename Self, typename Reducer, bool Vectorize>
struct ScanLauncher<Self, Reducer, GpuDevice, Vectorize> {
  void operator()(const Self& self, typename Self::CoeffReturnType* data) {
    Index total_size = internal::array_prod(self.dimensions());
    Index num_blocks = (total_size / self.size() + 63) / 64;
    Index block_size = 64;

    LAUNCH_GPU_KERNEL((ScanKernel<Self, Reducer>), num_blocks, block_size, 0,
                      self.device(), self, total_size, data);
  }
};
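
// Launch sizing in numbers (illustrative): the kernel needs one thread per
// scan line, i.e. total_size / self.size() threads. For a (2, 3, 4) tensor
// scanned along an axis of size 3, that is 24 / 3 = 8 scan lines, so
// num_blocks = (8 + 63) / 64 = 1 block of 64 threads; the surplus threads
// fail the bounds check in ScanKernel and do nothing.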
#endif  // EIGEN_USE_GPU && (EIGEN_GPUCC)
// Eval as rvalue
template <typename Op, typename ArgType, typename Device>
struct TensorEvaluator<const TensorScanOp<Op, ArgType>, Device> {

  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device),
        m_device(device),
        m_exclusive(op.exclusive()),
        m_accumulator(op.accumulator()),
        m_size(m_impl.dimensions()[op.axis()]),
        m_stride(1), m_consume_dim(op.axis()),
        m_output(NULL) {
    // Compute the stride of the scan axis.
    const Dimensions& dims = m_impl.dimensions();
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = 0; i < op.axis(); ++i) {
        m_stride = m_stride * dims[i];
      }
    } else {
      // dims can only be indexed through unsigned integers,
      // so cast the axis to an unsigned type to avoid warnings.
      unsigned int axis = internal::convert_index<unsigned int>(op.axis());
      for (unsigned int i = NumDims - 1; i > axis; --i) {
        m_stride = m_stride * dims[i];
      }
    }
  }
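
  // Stride example (illustrative): for dimensions (2, 3, 4) and a scan along
  // axis 1, the column-major branch multiplies the dimensions before the
  // axis, giving m_stride == 2, while the row-major branch multiplies the
  // dimensions after it, giving m_stride == 4. m_size is the extent of the
  // scan axis itself, here 3.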
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_impl.dimensions(); }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Index& consume_dim() const { return m_consume_dim; }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Op& accumulator() const { return m_accumulator; }
  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType data) {
    m_impl.evalSubExprsIfNeeded(NULL);
    internal::ScanLauncher<Self, Op, Device> launcher;
    if (data) {
      launcher(*this, data);
      return false;
    }

    const Index total_size = internal::array_prod(dimensions());
    m_output = static_cast<EvaluatorPointerType>(
        m_device.get((Scalar*)m_device.allocate_temp(total_size * sizeof(Scalar))));
    launcher(*this, m_output);
    return true;
  }
  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const {
    return internal::ploadt<PacketReturnType, LoadMode>(m_output + index);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
    return m_output[index];
  }
#ifdef EIGEN_USE_SYCL
#endif  // EIGEN_USE_SYCL

#endif  // EIGEN_CXX11_TENSOR_TENSOR_SCAN_H