TensorVolumePatch.h
Go to the documentation of this file.
1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 
4 #ifndef EIGEN_CXX11_TENSOR_TENSOR_VOLUME_PATCH_H
5 #define EIGEN_CXX11_TENSOR_TENSOR_VOLUME_PATCH_H
6 
7 namespace Eigen {
8 
24 namespace internal {
25 
// Type traits for TensorVolumePatchOp: inherit everything from the argument
// expression's traits and add one dimension (the extracted-patch index axis).
// NOTE(review): this is a Doxygen listing — original lines 29-30/34 (the
// Scalar/XprTraits typedefs) are elided from this view.
26 template<DenseIndex Planes, DenseIndex Rows, DenseIndex Cols, typename XprType>
27 struct traits<TensorVolumePatchOp<Planes, Rows, Cols, XprType> > : public traits<XprType>
28 {
31  typedef typename XprTraits::StorageKind StorageKind;
32  typedef typename XprTraits::Index Index;
33  typedef typename XprType::Nested Nested;
35  static const int NumDimensions = XprTraits::NumDimensions + 1;
36  static const int Layout = XprTraits::Layout;
37  typedef typename XprTraits::PointerType PointerType;
38 
39 };
40 
// Evaluation-shape helper specialization for dense evaluation of the op.
// NOTE(review): body line 44 (the `type` typedef) is elided in this listing.
41 template<DenseIndex Planes, DenseIndex Rows, DenseIndex Cols, typename XprType>
42 struct eval<TensorVolumePatchOp<Planes, Rows, Cols, XprType>, Eigen::Dense>
43 {
45 };
46 
// Nesting helper controlling how the op is stored inside larger expressions.
// NOTE(review): body line 50 (the `type` typedef) is elided in this listing.
47 template<DenseIndex Planes, DenseIndex Rows, DenseIndex Cols, typename XprType>
48 struct nested<TensorVolumePatchOp<Planes, Rows, Cols, XprType>, 1, typename eval<TensorVolumePatchOp<Planes, Rows, Cols, XprType> >::type>
49 {
51 };
52 
53 } // end namespace internal
54 
// Read-only expression node describing 3D (plane/row/col) patch extraction,
// with per-axis output strides, input ("atrous") strides, inflation strides,
// and either implicit (PaddingType) or fully explicit padding amounts.
// NOTE(review): Doxygen listing — several lines (constructor signatures,
// accessor macros, most member declarations) are elided from this view.
55 template<DenseIndex Planes, DenseIndex Rows, DenseIndex Cols, typename XprType>
56 class TensorVolumePatchOp : public TensorBase<TensorVolumePatchOp<Planes, Rows, Cols, XprType>, ReadOnlyAccessors>
57 {
58  public:
61  typedef typename XprType::CoeffReturnType CoeffReturnType;
65 
// Constructor #1: padding computed from `padding_type`; m_padding_explicit is
// false and all six explicit padding amounts are zeroed.
67  DenseIndex plane_strides, DenseIndex row_strides, DenseIndex col_strides,
68  DenseIndex in_plane_strides, DenseIndex in_row_strides, DenseIndex in_col_strides,
69  DenseIndex plane_inflate_strides, DenseIndex row_inflate_strides, DenseIndex col_inflate_strides,
70  PaddingType padding_type, Scalar padding_value)
71  : m_xpr(expr), m_patch_planes(patch_planes), m_patch_rows(patch_rows), m_patch_cols(patch_cols),
72  m_plane_strides(plane_strides), m_row_strides(row_strides), m_col_strides(col_strides),
73  m_in_plane_strides(in_plane_strides), m_in_row_strides(in_row_strides), m_in_col_strides(in_col_strides),
74  m_plane_inflate_strides(plane_inflate_strides), m_row_inflate_strides(row_inflate_strides), m_col_inflate_strides(col_inflate_strides),
75  m_padding_explicit(false), m_padding_top_z(0), m_padding_bottom_z(0), m_padding_top(0), m_padding_bottom(0), m_padding_left(0), m_padding_right(0),
76  m_padding_type(padding_type), m_padding_value(padding_value) {}
77 
// Constructor #2: explicit per-side padding; m_padding_explicit is true and
// m_padding_type is set to PADDING_VALID purely as a placeholder (the
// evaluator ignores it when padding_explicit() is true).
79  DenseIndex plane_strides, DenseIndex row_strides, DenseIndex col_strides,
80  DenseIndex in_plane_strides, DenseIndex in_row_strides, DenseIndex in_col_strides,
81  DenseIndex plane_inflate_strides, DenseIndex row_inflate_strides, DenseIndex col_inflate_strides,
82  DenseIndex padding_top_z, DenseIndex padding_bottom_z,
83  DenseIndex padding_top, DenseIndex padding_bottom,
84  DenseIndex padding_left, DenseIndex padding_right,
85  Scalar padding_value)
86  : m_xpr(expr), m_patch_planes(patch_planes), m_patch_rows(patch_rows), m_patch_cols(patch_cols),
87  m_plane_strides(plane_strides), m_row_strides(row_strides), m_col_strides(col_strides),
88  m_in_plane_strides(in_plane_strides), m_in_row_strides(in_row_strides), m_in_col_strides(in_col_strides),
89  m_plane_inflate_strides(plane_inflate_strides), m_row_inflate_strides(row_inflate_strides), m_col_inflate_strides(col_inflate_strides),
90  m_padding_explicit(true), m_padding_top_z(padding_top_z), m_padding_bottom_z(padding_bottom_z), m_padding_top(padding_top), m_padding_bottom(padding_bottom),
91  m_padding_left(padding_left), m_padding_right(padding_right),
92  m_padding_type(PADDING_VALID), m_padding_value(padding_value) {}
93 
// Trivial accessors mirroring each stored constructor parameter.
95  DenseIndex patch_planes() const { return m_patch_planes; }
97  DenseIndex patch_rows() const { return m_patch_rows; }
99  DenseIndex patch_cols() const { return m_patch_cols; }
101  DenseIndex plane_strides() const { return m_plane_strides; }
103  DenseIndex row_strides() const { return m_row_strides; }
105  DenseIndex col_strides() const { return m_col_strides; }
107  DenseIndex in_plane_strides() const { return m_in_plane_strides; }
109  DenseIndex in_row_strides() const { return m_in_row_strides; }
111  DenseIndex in_col_strides() const { return m_in_col_strides; }
113  DenseIndex plane_inflate_strides() const { return m_plane_inflate_strides; }
115  DenseIndex row_inflate_strides() const { return m_row_inflate_strides; }
117  DenseIndex col_inflate_strides() const { return m_col_inflate_strides; }
119  bool padding_explicit() const { return m_padding_explicit; }
121  DenseIndex padding_top_z() const { return m_padding_top_z; }
123  DenseIndex padding_bottom_z() const { return m_padding_bottom_z; }
125  DenseIndex padding_top() const { return m_padding_top; }
127  DenseIndex padding_bottom() const { return m_padding_bottom; }
129  DenseIndex padding_left() const { return m_padding_left; }
131  DenseIndex padding_right() const { return m_padding_right; }
133  PaddingType padding_type() const { return m_padding_type; }
135  Scalar padding_value() const { return m_padding_value; }
136 
// Access to the wrapped (nested) input expression.
139  expression() const { return m_xpr; }
140 
141  protected:
142  typename XprType::Nested m_xpr;
155  const bool m_padding_explicit;
163  const Scalar m_padding_value;
164 };
165 
166 
167 // Eval as rvalue
// Rvalue evaluator for TensorVolumePatchOp. Output has one more dimension
// than the input (NumDims = NumInputDims + 1): the patch-index axis.
// NOTE(review): Doxygen listing — several typedefs / enum entries
// (e.g. PacketAccess, Layout) are elided from this view.
168 template<DenseIndex Planes, DenseIndex Rows, DenseIndex Cols, typename ArgType, typename Device>
169 struct TensorEvaluator<const TensorVolumePatchOp<Planes, Rows, Cols, ArgType>, Device>
170 {
172  typedef typename XprType::Index Index;
174  static const int NumDims = NumInputDims + 1;
179  static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
182 
183  enum {
184  IsAligned = false,
186  BlockAccess = false,
189  CoordAccess = false,
190  RawAccess = false
191  };
192 
193  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
195  //===--------------------------------------------------------------------===//
// Constructor: caches input sizes, computes effective (inflated) input and
// (dilated) patch sizes, derives output extents/padding per the padding mode,
// builds output dimensions for both layouts, and precomputes all strides plus
// their TensorIntDivisor "fast division" counterparts used by coeff()/packet().
197  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) :
198  m_impl(op.expression(), device)
199  {
// Need at least: depth + 3 patch axes + patch index => 5 output dims.
200  EIGEN_STATIC_ASSERT((NumDims >= 5), YOU_MADE_A_PROGRAMMING_MISTAKE);
201 
202  m_paddingValue = op.padding_value();
203 
204  const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
205 
206  // Cache a few variables.
207  if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
208  m_inputDepth = input_dims[0];
209  m_inputPlanes = input_dims[1];
210  m_inputRows = input_dims[2];
211  m_inputCols = input_dims[3];
212  } else {
213  m_inputDepth = input_dims[NumInputDims-1];
214  m_inputPlanes = input_dims[NumInputDims-2];
215  m_inputRows = input_dims[NumInputDims-3];
216  m_inputCols = input_dims[NumInputDims-4];
217  }
218 
219  m_plane_strides = op.plane_strides();
220  m_row_strides = op.row_strides();
221  m_col_strides = op.col_strides();
222 
223  // Input strides and effective input/patch size
224  m_in_plane_strides = op.in_plane_strides();
225  m_in_row_strides = op.in_row_strides();
226  m_in_col_strides = op.in_col_strides();
227  m_plane_inflate_strides = op.plane_inflate_strides();
228  m_row_inflate_strides = op.row_inflate_strides();
229  m_col_inflate_strides = op.col_inflate_strides();
230 
231  // The "effective" spatial size after inflating data with zeros.
232  m_input_planes_eff = (m_inputPlanes - 1) * m_plane_inflate_strides + 1;
233  m_input_rows_eff = (m_inputRows - 1) * m_row_inflate_strides + 1;
234  m_input_cols_eff = (m_inputCols - 1) * m_col_inflate_strides + 1;
// Effective patch extent when sampling with in_*_strides > 1 (dilation).
235  m_patch_planes_eff = op.patch_planes() + (op.patch_planes() - 1) * (m_in_plane_strides - 1);
236  m_patch_rows_eff = op.patch_rows() + (op.patch_rows() - 1) * (m_in_row_strides - 1);
237  m_patch_cols_eff = op.patch_cols() + (op.patch_cols() - 1) * (m_in_col_strides - 1);
238 
239  if (op.padding_explicit()) {
240  m_outputPlanes = numext::ceil((m_input_planes_eff + op.padding_top_z() + op.padding_bottom_z() - m_patch_planes_eff + 1.f) / static_cast<float>(m_plane_strides));
241  m_outputRows = numext::ceil((m_input_rows_eff + op.padding_top() + op.padding_bottom() - m_patch_rows_eff + 1.f) / static_cast<float>(m_row_strides));
242  m_outputCols = numext::ceil((m_input_cols_eff + op.padding_left() + op.padding_right() - m_patch_cols_eff + 1.f) / static_cast<float>(m_col_strides));
243  m_planePaddingTop = op.padding_top_z();
244  m_rowPaddingTop = op.padding_top();
245  m_colPaddingLeft = op.padding_left();
246  } else {
247  // Computing padding from the type
248  switch (op.padding_type()) {
249  case PADDING_VALID:
250  m_outputPlanes = numext::ceil((m_input_planes_eff - m_patch_planes_eff + 1.f) / static_cast<float>(m_plane_strides));
251  m_outputRows = numext::ceil((m_input_rows_eff - m_patch_rows_eff + 1.f) / static_cast<float>(m_row_strides));
252  m_outputCols = numext::ceil((m_input_cols_eff - m_patch_cols_eff + 1.f) / static_cast<float>(m_col_strides));
253  m_planePaddingTop = 0;
254  m_rowPaddingTop = 0;
255  m_colPaddingLeft = 0;
256  break;
257  case PADDING_SAME: {
// SAME: output covers the full effective input; split the total padding
// needed (d*) evenly, rounding the "top/left" half down.
258  m_outputPlanes = numext::ceil(m_input_planes_eff / static_cast<float>(m_plane_strides));
259  m_outputRows = numext::ceil(m_input_rows_eff / static_cast<float>(m_row_strides));
260  m_outputCols = numext::ceil(m_input_cols_eff / static_cast<float>(m_col_strides));
261  const Index dz = (m_outputPlanes - 1) * m_plane_strides + m_patch_planes_eff - m_input_planes_eff;
262  const Index dy = (m_outputRows - 1) * m_row_strides + m_patch_rows_eff - m_input_rows_eff;
263  const Index dx = (m_outputCols - 1) * m_col_strides + m_patch_cols_eff - m_input_cols_eff;
264  m_planePaddingTop = dz / 2;
265  m_rowPaddingTop = dy / 2;
266  m_colPaddingLeft = dx / 2;
267  break;
268  }
269  default:
270  eigen_assert(false && "unexpected padding");
271  }
272  }
273  eigen_assert(m_outputRows > 0);
274  eigen_assert(m_outputCols > 0);
275  eigen_assert(m_outputPlanes > 0);
276 
277  // Dimensions for result of extraction.
278  if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
279  // ColMajor
280  // 0: depth
281  // 1: patch_planes
282  // 2: patch_rows
283  // 3: patch_cols
284  // 4: number of patches
285  // 5 and beyond: anything else (such as batch).
286  m_dimensions[0] = input_dims[0];
287  m_dimensions[1] = op.patch_planes();
288  m_dimensions[2] = op.patch_rows();
289  m_dimensions[3] = op.patch_cols();
290  m_dimensions[4] = m_outputPlanes * m_outputRows * m_outputCols;
291  for (int i = 5; i < NumDims; ++i) {
292  m_dimensions[i] = input_dims[i-1];
293  }
294  } else {
295  // RowMajor
296  // NumDims-1: depth
297  // NumDims-2: patch_planes
298  // NumDims-3: patch_rows
299  // NumDims-4: patch_cols
300  // NumDims-5: number of patches
301  // NumDims-6 and beyond: anything else (such as batch).
302  m_dimensions[NumDims-1] = input_dims[NumInputDims-1];
303  m_dimensions[NumDims-2] = op.patch_planes();
304  m_dimensions[NumDims-3] = op.patch_rows();
305  m_dimensions[NumDims-4] = op.patch_cols();
306  m_dimensions[NumDims-5] = m_outputPlanes * m_outputRows * m_outputCols;
307  for (int i = NumDims-6; i >= 0; --i) {
308  m_dimensions[i] = input_dims[i];
309  }
310  }
311 
312  // Strides for the output tensor.
313  if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
314  m_rowStride = m_dimensions[1];
315  m_colStride = m_dimensions[2] * m_rowStride;
316  m_patchStride = m_colStride * m_dimensions[3] * m_dimensions[0];
317  m_otherStride = m_patchStride * m_dimensions[4];
318  } else {
319  m_rowStride = m_dimensions[NumDims-2];
320  m_colStride = m_dimensions[NumDims-3] * m_rowStride;
321  m_patchStride = m_colStride * m_dimensions[NumDims-4] * m_dimensions[NumDims-1];
322  m_otherStride = m_patchStride * m_dimensions[NumDims-5];
323  }
324 
325  // Strides for navigating through the input tensor.
326  m_planeInputStride = m_inputDepth;
327  m_rowInputStride = m_inputDepth * m_inputPlanes;
328  m_colInputStride = m_inputDepth * m_inputRows * m_inputPlanes;
329  m_otherInputStride = m_inputDepth * m_inputRows * m_inputCols * m_inputPlanes;
330 
331  m_outputPlanesRows = m_outputPlanes * m_outputRows;
332 
// Fast representations of different variables: TensorIntDivisor replaces
// runtime integer division by these constants in the hot coeff()/packet() path.
333  // Fast representations of different variables.
334  m_fastOtherStride = internal::TensorIntDivisor<Index>(m_otherStride);
335 
336  m_fastPatchStride = internal::TensorIntDivisor<Index>(m_patchStride);
337  m_fastColStride = internal::TensorIntDivisor<Index>(m_colStride);
338  m_fastRowStride = internal::TensorIntDivisor<Index>(m_rowStride);
339  m_fastInputRowStride = internal::TensorIntDivisor<Index>(m_row_inflate_strides);
340  m_fastInputColStride = internal::TensorIntDivisor<Index>(m_col_inflate_strides);
341  m_fastInputPlaneStride = internal::TensorIntDivisor<Index>(m_plane_inflate_strides);
342  m_fastInputColsEff = internal::TensorIntDivisor<Index>(m_input_cols_eff);
343  m_fastOutputPlanes = internal::TensorIntDivisor<Index>(m_outputPlanes);
344  m_fastOutputPlanesRows = internal::TensorIntDivisor<Index>(m_outputPlanesRows);
345 
346  if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
347  m_fastOutputDepth = internal::TensorIntDivisor<Index>(m_dimensions[0]);
348  } else {
349  m_fastOutputDepth = internal::TensorIntDivisor<Index>(m_dimensions[NumDims-1]);
350  }
351  }
352 
// Dimensions of the patch-extraction result computed in the constructor.
353  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
354 
// Evaluate the nested expression; always returns true because this evaluator
// never materializes into the caller-provided buffer (it evaluates lazily).
355  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType /*data*/) {
356  m_impl.evalSubExprsIfNeeded(NULL);
357  return true;
358  }
359 
// cleanup(): release resources held by the nested evaluator.
// NOTE(review): the signature line (listing line 360) is elided in this view.
361  m_impl.cleanup();
362  }
363 
// Scalar access: maps a linear output index to (depth, plane/row/col offset
// within the patch, patch index, remaining/batch index), converts those into
// input coordinates, and returns m_paddingValue whenever the coordinate falls
// in padding or between inflated (zero-stuffed) input samples.
364  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
365  {
366  // Patch index corresponding to the passed in index.
367  const Index patchIndex = index / m_fastPatchStride;
368 
369  // Spatial offset within the patch. This has to be translated into 3D
370  // coordinates within the patch.
371  const Index patchOffset = (index - patchIndex * m_patchStride) / m_fastOutputDepth;
372 
373  // Batch, etc.
374  const Index otherIndex = (NumDims == 5) ? 0 : index / m_fastOtherStride;
375  const Index patch3DIndex = (NumDims == 5) ? patchIndex : (index - otherIndex * m_otherStride) / m_fastPatchStride;
376 
377  // Calculate column index in the input original tensor.
378  const Index colIndex = patch3DIndex / m_fastOutputPlanesRows;
379  const Index colOffset = patchOffset / m_fastColStride;
380  const Index inputCol = colIndex * m_col_strides + colOffset * m_in_col_strides - m_colPaddingLeft;
// With inflation, only coordinates that land exactly on an original sample
// (inputCol == origInputCol * inflate_stride) are real data; others are zeros.
381  const Index origInputCol = (m_col_inflate_strides == 1) ? inputCol : ((inputCol >= 0) ? (inputCol / m_fastInputColStride) : 0);
382  if (inputCol < 0 || inputCol >= m_input_cols_eff ||
383  ((m_col_inflate_strides != 1) && (inputCol != origInputCol * m_col_inflate_strides))) {
384  return Scalar(m_paddingValue);
385  }
386 
387  // Calculate row index in the original input tensor.
388  const Index rowIndex = (patch3DIndex - colIndex * m_outputPlanesRows) / m_fastOutputPlanes;
389  const Index rowOffset = (patchOffset - colOffset * m_colStride) / m_fastRowStride;
390  const Index inputRow = rowIndex * m_row_strides + rowOffset * m_in_row_strides - m_rowPaddingTop;
391  const Index origInputRow = (m_row_inflate_strides == 1) ? inputRow : ((inputRow >= 0) ? (inputRow / m_fastInputRowStride) : 0);
392  if (inputRow < 0 || inputRow >= m_input_rows_eff ||
393  ((m_row_inflate_strides != 1) && (inputRow != origInputRow * m_row_inflate_strides))) {
394  return Scalar(m_paddingValue);
395  }
396 
397  // Calculate plane index in the original input tensor.
398  const Index planeIndex = (patch3DIndex - m_outputPlanes * (colIndex * m_outputRows + rowIndex));
399  const Index planeOffset = patchOffset - colOffset * m_colStride - rowOffset * m_rowStride;
400  const Index inputPlane = planeIndex * m_plane_strides + planeOffset * m_in_plane_strides - m_planePaddingTop;
401  const Index origInputPlane = (m_plane_inflate_strides == 1) ? inputPlane : ((inputPlane >= 0) ? (inputPlane / m_fastInputPlaneStride) : 0);
402  if (inputPlane < 0 || inputPlane >= m_input_planes_eff ||
403  ((m_plane_inflate_strides != 1) && (inputPlane != origInputPlane * m_plane_inflate_strides))) {
404  return Scalar(m_paddingValue);
405  }
406 
// Depth (channel) coordinate is index modulo the output depth dimension.
407  const int depth_index = static_cast<int>(Layout) == static_cast<int>(ColMajor) ? 0 : NumDims - 1;
408  const Index depth = index - (index / m_fastOutputDepth) * m_dimensions[depth_index];
409 
410  const Index inputIndex = depth +
411  origInputRow * m_rowInputStride +
412  origInputCol * m_colInputStride +
413  origInputPlane * m_planeInputStride +
414  otherIndex * m_otherInputStride;
415 
416  return m_impl.coeff(inputIndex);
417  }
418 
// Vectorized access: loads PacketSize consecutive coefficients at once when
// the whole packet provably lies in the same patch, same input column/row, and
// entirely inside (or entirely outside) the valid input region; otherwise it
// falls back to the scalar gather packetWithPossibleZero(). Any non-unit
// input/inflation stride disables the fast path up front.
419  template<int LoadMode>
420  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
421  {
422  EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
423  eigen_assert(index+PacketSize-1 < dimensions().TotalSize());
424 
425  if (m_in_row_strides != 1 || m_in_col_strides != 1 || m_row_inflate_strides != 1 || m_col_inflate_strides != 1 ||
426  m_in_plane_strides != 1 || m_plane_inflate_strides != 1) {
427  return packetWithPossibleZero(index);
428  }
429 
// Decompose only the first and last element of the packet; if they agree on
// every coarse coordinate, the elements in between agree as well.
430  const Index indices[2] = {index, index + PacketSize - 1};
431  const Index patchIndex = indices[0] / m_fastPatchStride;
432  if (patchIndex != indices[1] / m_fastPatchStride) {
433  return packetWithPossibleZero(index);
434  }
435  const Index otherIndex = (NumDims == 5) ? 0 : indices[0] / m_fastOtherStride;
436  eigen_assert(otherIndex == indices[1] / m_fastOtherStride);
437 
438  // Find the offset of the element wrt the location of the first element.
439  const Index patchOffsets[2] = {(indices[0] - patchIndex * m_patchStride) / m_fastOutputDepth,
440  (indices[1] - patchIndex * m_patchStride) / m_fastOutputDepth};
441 
442  const Index patch3DIndex = (NumDims == 5) ? patchIndex : (indices[0] - otherIndex * m_otherStride) / m_fastPatchStride;
443  eigen_assert(patch3DIndex == (indices[1] - otherIndex * m_otherStride) / m_fastPatchStride);
444 
445  const Index colIndex = patch3DIndex / m_fastOutputPlanesRows;
446  const Index colOffsets[2] = {
447  patchOffsets[0] / m_fastColStride,
448  patchOffsets[1] / m_fastColStride};
449 
450  // Calculate col indices in the original input tensor.
451  const Index inputCols[2] = {
452  colIndex * m_col_strides + colOffsets[0] - m_colPaddingLeft,
453  colIndex * m_col_strides + colOffsets[1] - m_colPaddingLeft};
// Entire packet in the column padding region: return a packet of padding.
454  if (inputCols[1] < 0 || inputCols[0] >= m_inputCols) {
455  return internal::pset1<PacketReturnType>(Scalar(m_paddingValue));
456  }
457 
458  if (inputCols[0] != inputCols[1]) {
459  return packetWithPossibleZero(index);
460  }
461 
462  const Index rowIndex = (patch3DIndex - colIndex * m_outputPlanesRows) / m_fastOutputPlanes;
463  const Index rowOffsets[2] = {
464  (patchOffsets[0] - colOffsets[0] * m_colStride) / m_fastRowStride,
465  (patchOffsets[1] - colOffsets[1] * m_colStride) / m_fastRowStride};
466  eigen_assert(rowOffsets[0] <= rowOffsets[1]);
467  // Calculate row indices in the original input tensor.
468  const Index inputRows[2] = {
469  rowIndex * m_row_strides + rowOffsets[0] - m_rowPaddingTop,
470  rowIndex * m_row_strides + rowOffsets[1] - m_rowPaddingTop};
471 
472  if (inputRows[1] < 0 || inputRows[0] >= m_inputRows) {
473  return internal::pset1<PacketReturnType>(Scalar(m_paddingValue));
474  }
475 
476  if (inputRows[0] != inputRows[1]) {
477  return packetWithPossibleZero(index);
478  }
479 
480  const Index planeIndex = (patch3DIndex - m_outputPlanes * (colIndex * m_outputRows + rowIndex));
481  const Index planeOffsets[2] = {
482  patchOffsets[0] - colOffsets[0] * m_colStride - rowOffsets[0] * m_rowStride,
483  patchOffsets[1] - colOffsets[1] * m_colStride - rowOffsets[1] * m_rowStride};
484  eigen_assert(planeOffsets[0] <= planeOffsets[1]);
485  const Index inputPlanes[2] = {
486  planeIndex * m_plane_strides + planeOffsets[0] - m_planePaddingTop,
487  planeIndex * m_plane_strides + planeOffsets[1] - m_planePaddingTop};
488 
489  if (inputPlanes[1] < 0 || inputPlanes[0] >= m_inputPlanes) {
490  return internal::pset1<PacketReturnType>(Scalar(m_paddingValue));
491  }
492 
493  if (inputPlanes[0] >= 0 && inputPlanes[1] < m_inputPlanes) {
494  // no padding
495  const int depth_index = static_cast<int>(Layout) == static_cast<int>(ColMajor) ? 0 : NumDims - 1;
496  const Index depth = index - (index / m_fastOutputDepth) * m_dimensions[depth_index];
497  const Index inputIndex = depth +
498  inputRows[0] * m_rowInputStride +
499  inputCols[0] * m_colInputStride +
500  m_planeInputStride * inputPlanes[0] +
501  otherIndex * m_otherInputStride;
502  return m_impl.template packet<Unaligned>(inputIndex);
503  }
504 
// Packet straddles the plane-padding boundary: gather element by element.
505  return packetWithPossibleZero(index);
506  }
507 
// Per-coefficient cost estimate for the scheduler: counts the integer
// divisions, multiplications and additions performed by coeff().
// NOTE(review): the return-type/signature line (listing line 508,
// returning TensorOpCost) is elided in this view.
509  costPerCoeff(bool vectorized) const {
510  const double compute_cost =
511  10 * TensorOpCost::DivCost<Index>() + 21 * TensorOpCost::MulCost<Index>() +
512  8 * TensorOpCost::AddCost<Index>();
513  return TensorOpCost(0, 0, compute_cost, vectorized, PacketSize);
514  }
515 
// No backing buffer: this evaluator is purely lazy, so data() is NULL.
516  EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; }
517 
// Access to the nested (input expression) evaluator.
518  const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
519 
520 
// Trivial accessors exposing the cached padding/stride parameters.
521  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index planePaddingTop() const { return m_planePaddingTop; }
522  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rowPaddingTop() const { return m_rowPaddingTop; }
523  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index colPaddingLeft() const { return m_colPaddingLeft; }
524  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index outputPlanes() const { return m_outputPlanes; }
525  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index outputRows() const { return m_outputRows; }
526  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index outputCols() const { return m_outputCols; }
527  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userPlaneStride() const { return m_plane_strides; }
528  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userRowStride() const { return m_row_strides; }
529  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userColStride() const { return m_col_strides; }
530  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userInPlaneStride() const { return m_in_plane_strides; }
531  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userInRowStride() const { return m_in_row_strides; }
532  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userInColStride() const { return m_in_col_strides; }
533  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index planeInflateStride() const { return m_plane_inflate_strides; }
534  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rowInflateStride() const { return m_row_inflate_strides; }
535  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index colInflateStride() const { return m_col_inflate_strides; }
536 
537 #ifdef EIGEN_USE_SYCL
538  // binding placeholder accessors to a command group handler for SYCL
539  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const {
540  m_impl.bind(cgh);
541  }
542 #endif
543  protected:
// packetWithPossibleZero: slow fallback used by packet() — gathers each of the
// PacketSize coefficients via coeff() (so padding is handled per element) into
// an aligned stack buffer and loads it as a packet.
// NOTE(review): the signature and the EIGEN_ALIGN_MAX `values[PacketSize]`
// declaration (listing lines 544-547) are elided in this view.
545  {
548  for (int i = 0; i < PacketSize; ++i) {
549  values[i] = coeff(index+i);
550  }
551  PacketReturnType rslt = internal::pload<PacketReturnType>(values);
552  return rslt;
553  }
554 
// Member state cached by the constructor and read by coeff()/packet().
// NOTE(review): Doxygen listing — most member declarations (listing lines
// 558-560, 562-564, 566-568, 570-572, 574-576, 579-580, 585, 588-593,
// 596-597, 602-605, 607-617, 619, 621) are elided in this view; only the
// declarations below survived extraction.
555  Dimensions m_dimensions;
556 
557  // Parameters passed to the constructor.
561 
565 
569 
573 
577 
578  // Cached input size.
581  Index m_inputRows;
582  Index m_inputCols;
583 
584  // Other cached variables.
586 
587  // Effective input/patch post-inflation size.
594 
595  // Strides for the output tensor.
598  Index m_rowStride;
599  Index m_colStride;
600 
601  // Strides for the input tensor.
606 
618 
620 
622 
623 
624 };
625 
626 
627 } // end namespace Eigen
628 
629 #endif // EIGEN_CXX11_TENSOR_TENSOR_VOLUME_PATCH_H
EIGEN_DEVICE_FUNC DenseIndex col_strides() const
SCALAR Scalar
Definition: bench_gemm.cpp:46
EIGEN_DEVICE_FUNC DenseIndex patch_cols() const
#define EIGEN_STRONG_INLINE
Definition: Macros.h:917
EIGEN_DEVICE_FUNC DenseIndex plane_inflate_strides() const
static double depth
EIGEN_DEVICE_FUNC Scalar padding_value() const
leaf::MyValues values
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorVolumePatchOp(const XprType &expr, DenseIndex patch_planes, DenseIndex patch_rows, DenseIndex patch_cols, DenseIndex plane_strides, DenseIndex row_strides, DenseIndex col_strides, DenseIndex in_plane_strides, DenseIndex in_row_strides, DenseIndex in_col_strides, DenseIndex plane_inflate_strides, DenseIndex row_inflate_strides, DenseIndex col_inflate_strides, PaddingType padding_type, Scalar padding_value)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions & dimensions() const
Namespace containing all symbols from the Eigen library.
Definition: jet.h:637
A cost model used to limit the number of threads used for evaluating tensor expression.
#define EIGEN_STATIC_ASSERT(CONDITION, MSG)
Definition: StaticAssert.h:127
#define EIGEN_ALIGN_MAX
const DenseIndex m_padding_top_z
EIGEN_STRONG_INLINE TensorEvaluator(const XprType &op, const Device &device)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
EIGEN_DEVICE_FUNC DenseIndex padding_top() const
EIGEN_DEVICE_FUNC DenseIndex row_strides() const
EIGEN_DEVICE_FUNC DenseIndex padding_bottom() const
EIGEN_DEVICE_FUNC DenseIndex in_row_strides() const
EIGEN_DEVICE_FUNC T() ceil(const T &x)
EIGEN_DEVICE_FUNC DenseIndex patch_rows() const
EIGEN_DEVICE_FUNC DenseIndex patch_planes() const
Generic expression where a coefficient-wise binary operator is applied to two expressions.
Definition: CwiseBinaryOp.h:77
EIGEN_DEVICE_FUNC DenseIndex padding_bottom_z() const
const DenseIndex m_padding_bottom_z
Eigen::internal::traits< TensorVolumePatchOp >::StorageKind StorageKind
EIGEN_DEFAULT_DENSE_INDEX_TYPE Index
The Index type as used for the API.
Definition: Meta.h:74
#define eigen_assert(x)
Definition: Macros.h:1037
const DenseIndex m_in_row_strides
Point2(* f)(const Point3 &, OptionalJacobian< 2, 3 >)
EIGEN_DEVICE_FUNC DenseIndex in_plane_strides() const
Eigen::internal::traits< TensorVolumePatchOp >::Index Index
const DenseIndex m_plane_strides
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetWithPossibleZero(Index index) const
#define NULL
Definition: ccolamd.c:609
const DenseIndex m_plane_inflate_strides
EIGEN_DEVICE_FUNC DenseIndex col_inflate_strides() const
EIGEN_DEVICE_FUNC DenseIndex padding_top_z() const
static const int Cols
The tensor base class.
Definition: TensorBase.h:973
const DenseIndex m_padding_right
#define EIGEN_DEVICE_FUNC
Definition: Macros.h:976
EIGEN_DEFAULT_DENSE_INDEX_TYPE DenseIndex
Definition: Meta.h:66
const DenseIndex m_in_plane_strides
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions & dimensions() const
EIGEN_DEVICE_FUNC const internal::remove_all< typename XprType::Nested >::type & expression() const
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
EIGEN_DEVICE_FUNC DenseIndex padding_right() const
EIGEN_DEVICE_FUNC bool padding_explicit() const
const DenseIndex m_padding_bottom
Eigen::NumTraits< Scalar >::Real RealScalar
EIGEN_DEVICE_FUNC DenseIndex in_col_strides() const
const PaddingType m_padding_type
EIGEN_DEVICE_FUNC DenseIndex padding_left() const
XprType::CoeffReturnType CoeffReturnType
const DenseIndex m_row_inflate_strides
EIGEN_DEVICE_FUNC PaddingType padding_type() const
EIGEN_DEVICE_FUNC DenseIndex row_inflate_strides() const
Generic expression where a coefficient-wise unary operator is applied to an expression.
Definition: CwiseUnaryOp.h:55
const DenseIndex m_col_inflate_strides
EIGEN_DEVICE_FUNC DenseIndex plane_strides() const
Eigen::internal::traits< TensorVolumePatchOp >::Scalar Scalar
const std::vector< size_t > dimensions
internal::remove_const< typename XprType::Scalar >::type Scalar
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorVolumePatchOp(const XprType &expr, DenseIndex patch_planes, DenseIndex patch_rows, DenseIndex patch_cols, DenseIndex plane_strides, DenseIndex row_strides, DenseIndex col_strides, DenseIndex in_plane_strides, DenseIndex in_row_strides, DenseIndex in_col_strides, DenseIndex plane_inflate_strides, DenseIndex row_inflate_strides, DenseIndex col_inflate_strides, DenseIndex padding_top_z, DenseIndex padding_bottom_z, DenseIndex padding_top, DenseIndex padding_bottom, DenseIndex padding_left, DenseIndex padding_right, Scalar padding_value)
Eigen::internal::nested< TensorVolumePatchOp >::type Nested
const DenseIndex m_in_col_strides
#define EIGEN_UNROLL_LOOP
Definition: Macros.h:1461


gtsam
Author(s):
autogenerated on Tue Jul 4 2023 02:37:45