TensorImagePatch.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSOR_TENSOR_IMAGE_PATCH_H
#define EIGEN_CXX11_TENSOR_TENSOR_IMAGE_PATCH_H

namespace Eigen {

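// TensorImagePatchOp extracts image patches from an input expression. With a
// ColMajor layout the input is expected to be ordered as
// (depth, rows, cols, others...); for RowMajor the order is reversed. Each
// patch covers patch_rows x patch_cols positions of the row/col dimensions,
// and the result gains one extra dimension holding the number of extracted
// patches, i.e. (depth, patch_rows, patch_cols, num_patches, others...).
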
namespace internal {

template<DenseIndex Rows, DenseIndex Cols, typename XprType>
struct traits<TensorImagePatchOp<Rows, Cols, XprType> > : public traits<XprType>
{
  typedef typename internal::remove_const<typename XprType::Scalar>::type Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
  static const int NumDimensions = XprTraits::NumDimensions + 1;
  static const int Layout = XprTraits::Layout;
};

template<DenseIndex Rows, DenseIndex Cols, typename XprType>
struct eval<TensorImagePatchOp<Rows, Cols, XprType>, Eigen::Dense>
{
  typedef const TensorImagePatchOp<Rows, Cols, XprType>& type;
};

template<DenseIndex Rows, DenseIndex Cols, typename XprType>
struct nested<TensorImagePatchOp<Rows, Cols, XprType>, 1, typename eval<TensorImagePatchOp<Rows, Cols, XprType> >::type>
{
  typedef TensorImagePatchOp<Rows, Cols, XprType> type;
};

}  // end namespace internal

template<DenseIndex Rows, DenseIndex Cols, typename XprType>
class TensorImagePatchOp : public TensorBase<TensorImagePatchOp<Rows, Cols, XprType>, ReadOnlyAccessors>
{
  public:
  typedef typename Eigen::internal::traits<TensorImagePatchOp>::Scalar Scalar;
  typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename Eigen::internal::nested<TensorImagePatchOp>::type Nested;
  typedef typename Eigen::internal::traits<TensorImagePatchOp>::StorageKind StorageKind;
  typedef typename Eigen::internal::traits<TensorImagePatchOp>::Index Index;

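  // This constructor lets the evaluator derive the amount of padding from the
  // given PaddingType (PADDING_VALID or PADDING_SAME).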
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorImagePatchOp(const XprType& expr, DenseIndex patch_rows, DenseIndex patch_cols,
                                                           DenseIndex row_strides, DenseIndex col_strides,
                                                           DenseIndex in_row_strides, DenseIndex in_col_strides,
                                                           DenseIndex row_inflate_strides, DenseIndex col_inflate_strides,
                                                           PaddingType padding_type, Scalar padding_value)
      : m_xpr(expr), m_patch_rows(patch_rows), m_patch_cols(patch_cols),
        m_row_strides(row_strides), m_col_strides(col_strides),
        m_in_row_strides(in_row_strides), m_in_col_strides(in_col_strides),
        m_row_inflate_strides(row_inflate_strides), m_col_inflate_strides(col_inflate_strides),
        m_padding_explicit(false), m_padding_top(0), m_padding_bottom(0), m_padding_left(0), m_padding_right(0),
        m_padding_type(padding_type), m_padding_value(padding_value) {}

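  // This constructor takes explicit top/bottom/left/right padding amounts;
  // the padding type is then fixed to PADDING_VALID.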
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorImagePatchOp(const XprType& expr, DenseIndex patch_rows, DenseIndex patch_cols,
                                                           DenseIndex row_strides, DenseIndex col_strides,
                                                           DenseIndex in_row_strides, DenseIndex in_col_strides,
                                                           DenseIndex row_inflate_strides, DenseIndex col_inflate_strides,
                                                           DenseIndex padding_top, DenseIndex padding_bottom,
                                                           DenseIndex padding_left, DenseIndex padding_right,
                                                           Scalar padding_value)
      : m_xpr(expr), m_patch_rows(patch_rows), m_patch_cols(patch_cols),
        m_row_strides(row_strides), m_col_strides(col_strides),
        m_in_row_strides(in_row_strides), m_in_col_strides(in_col_strides),
        m_row_inflate_strides(row_inflate_strides), m_col_inflate_strides(col_inflate_strides),
        m_padding_explicit(true), m_padding_top(padding_top), m_padding_bottom(padding_bottom),
        m_padding_left(padding_left), m_padding_right(padding_right),
        m_padding_type(PADDING_VALID), m_padding_value(padding_value) {}

  EIGEN_DEVICE_FUNC
  DenseIndex patch_rows() const { return m_patch_rows; }
  EIGEN_DEVICE_FUNC
  DenseIndex patch_cols() const { return m_patch_cols; }
  EIGEN_DEVICE_FUNC
  DenseIndex row_strides() const { return m_row_strides; }
  EIGEN_DEVICE_FUNC
  DenseIndex col_strides() const { return m_col_strides; }
  EIGEN_DEVICE_FUNC
  DenseIndex in_row_strides() const { return m_in_row_strides; }
  EIGEN_DEVICE_FUNC
  DenseIndex in_col_strides() const { return m_in_col_strides; }
  EIGEN_DEVICE_FUNC
  DenseIndex row_inflate_strides() const { return m_row_inflate_strides; }
  EIGEN_DEVICE_FUNC
  DenseIndex col_inflate_strides() const { return m_col_inflate_strides; }
  EIGEN_DEVICE_FUNC
  bool padding_explicit() const { return m_padding_explicit; }
  EIGEN_DEVICE_FUNC
  DenseIndex padding_top() const { return m_padding_top; }
  EIGEN_DEVICE_FUNC
  DenseIndex padding_bottom() const { return m_padding_bottom; }
  EIGEN_DEVICE_FUNC
  DenseIndex padding_left() const { return m_padding_left; }
  EIGEN_DEVICE_FUNC
  DenseIndex padding_right() const { return m_padding_right; }
  EIGEN_DEVICE_FUNC
  PaddingType padding_type() const { return m_padding_type; }
  EIGEN_DEVICE_FUNC
  Scalar padding_value() const { return m_padding_value; }

  EIGEN_DEVICE_FUNC
  const typename internal::remove_all<typename XprType::Nested>::type&
  expression() const { return m_xpr; }

  protected:
    typename XprType::Nested m_xpr;
    const DenseIndex m_patch_rows;
    const DenseIndex m_patch_cols;
    const DenseIndex m_row_strides;
    const DenseIndex m_col_strides;
    const DenseIndex m_in_row_strides;
    const DenseIndex m_in_col_strides;
    const DenseIndex m_row_inflate_strides;
    const DenseIndex m_col_inflate_strides;
    const bool m_padding_explicit;
    const DenseIndex m_padding_top;
    const DenseIndex m_padding_bottom;
    const DenseIndex m_padding_left;
    const DenseIndex m_padding_right;
    const PaddingType m_padding_type;
    const Scalar m_padding_value;
};

// Eval as rvalue
template<DenseIndex Rows, DenseIndex Cols, typename ArgType, typename Device>
struct TensorEvaluator<const TensorImagePatchOp<Rows, Cols, ArgType>, Device>
{
  typedef TensorImagePatchOp<Rows, Cols, ArgType> XprType;
  typedef typename XprType::Index Index;
  static const int NumInputDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
  static const int NumDims = NumInputDims + 1;
  typedef DSizes<Index, NumDims> Dimensions;
  typedef typename internal::remove_const<typename XprType::Scalar>::type Scalar;
  typedef TensorEvaluator<const TensorImagePatchOp<Rows, Cols, ArgType>,
                          Device> Self;
  typedef TensorEvaluator<ArgType, Device> Impl;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;

  enum {
    IsAligned = false,
    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
    Layout = TensorEvaluator<ArgType, Device>::Layout,
    CoordAccess = false,
    RawAccess = false
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device)
  {
    EIGEN_STATIC_ASSERT((NumDims >= 4), YOU_MADE_A_PROGRAMMING_MISTAKE);

    m_paddingValue = op.padding_value();

    const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();

    // Caches a few variables.
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_inputDepth = input_dims[0];
      m_inputRows = input_dims[1];
      m_inputCols = input_dims[2];
    } else {
      m_inputDepth = input_dims[NumInputDims-1];
      m_inputRows = input_dims[NumInputDims-2];
      m_inputCols = input_dims[NumInputDims-3];
    }

    m_row_strides = op.row_strides();
    m_col_strides = op.col_strides();

    // Input strides and effective input/patch size
    m_in_row_strides = op.in_row_strides();
    m_in_col_strides = op.in_col_strides();
    m_row_inflate_strides = op.row_inflate_strides();
    m_col_inflate_strides = op.col_inflate_strides();
    // The "effective" input rows and input cols are the input rows and cols
    // after inflating them with zeros.
    // For example, a 2x3 matrix with row_inflate_strides and
    // col_inflate_strides of 2:
    //   A B C
    //   D E F
    //
    // is inflated into the 3x5 matrix:
    //   A . B . C
    //   . . . . .
    //   D . E . F

    m_input_rows_eff = (m_inputRows - 1) * m_row_inflate_strides + 1;
    m_input_cols_eff = (m_inputCols - 1) * m_col_inflate_strides + 1;
    m_patch_rows_eff = op.patch_rows() + (op.patch_rows() - 1) * (m_in_row_strides - 1);
    m_patch_cols_eff = op.patch_cols() + (op.patch_cols() - 1) * (m_in_col_strides - 1);

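    // The number of patch positions follows the usual convolution arithmetic:
    //   outputRows = ceil((input_rows_eff + pad_top + pad_bottom - patch_rows_eff + 1) / row_stride)
    // and likewise for columns. For example, 5 effective input rows, an
    // effective patch height of 3, a row stride of 2 and no padding give
    // ceil((5 - 3 + 1) / 2) = 2 output row positions.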
    if (op.padding_explicit()) {
      m_outputRows = numext::ceil((m_input_rows_eff + op.padding_top() + op.padding_bottom() - m_patch_rows_eff + 1.f) / static_cast<float>(m_row_strides));
      m_outputCols = numext::ceil((m_input_cols_eff + op.padding_left() + op.padding_right() - m_patch_cols_eff + 1.f) / static_cast<float>(m_col_strides));
      m_rowPaddingTop = op.padding_top();
      m_colPaddingLeft = op.padding_left();
    } else {
      // Computing padding from the type
      switch (op.padding_type()) {
        case PADDING_VALID:
          m_outputRows = numext::ceil((m_input_rows_eff - m_patch_rows_eff + 1.f) / static_cast<float>(m_row_strides));
          m_outputCols = numext::ceil((m_input_cols_eff - m_patch_cols_eff + 1.f) / static_cast<float>(m_col_strides));
          // Calculate the padding
          m_rowPaddingTop = numext::maxi<Index>(0, ((m_outputRows - 1) * m_row_strides + m_patch_rows_eff - m_input_rows_eff) / 2);
          m_colPaddingLeft = numext::maxi<Index>(0, ((m_outputCols - 1) * m_col_strides + m_patch_cols_eff - m_input_cols_eff) / 2);
          break;
        case PADDING_SAME:
          m_outputRows = numext::ceil(m_input_rows_eff / static_cast<float>(m_row_strides));
          m_outputCols = numext::ceil(m_input_cols_eff / static_cast<float>(m_col_strides));
          // Calculate the padding
          m_rowPaddingTop = ((m_outputRows - 1) * m_row_strides + m_patch_rows_eff - m_input_rows_eff) / 2;
          m_colPaddingLeft = ((m_outputCols - 1) * m_col_strides + m_patch_cols_eff - m_input_cols_eff) / 2;
          break;
        default:
          eigen_assert(false && "unexpected padding");
      }
    }
    eigen_assert(m_outputRows > 0);
    eigen_assert(m_outputCols > 0);

    // Dimensions for result of extraction.
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      // ColMajor
      // 0: depth
      // 1: patch_rows
      // 2: patch_cols
      // 3: number of patches
      // 4 and beyond: anything else (such as batch).
      m_dimensions[0] = input_dims[0];
      m_dimensions[1] = op.patch_rows();
      m_dimensions[2] = op.patch_cols();
      m_dimensions[3] = m_outputRows * m_outputCols;
      for (int i = 4; i < NumDims; ++i) {
        m_dimensions[i] = input_dims[i-1];
      }
    } else {
      // RowMajor
      // NumDims-1: depth
      // NumDims-2: patch_rows
      // NumDims-3: patch_cols
      // NumDims-4: number of patches
      // NumDims-5 and beyond: anything else (such as batch).
      m_dimensions[NumDims-1] = input_dims[NumInputDims-1];
      m_dimensions[NumDims-2] = op.patch_rows();
      m_dimensions[NumDims-3] = op.patch_cols();
      m_dimensions[NumDims-4] = m_outputRows * m_outputCols;
      for (int i = NumDims-5; i >= 0; --i) {
        m_dimensions[i] = input_dims[i];
      }
    }

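    // For instance, a ColMajor input of shape (3, 32, 32, 10) with 5x5
    // patches, unit strides and PADDING_SAME yields 32*32 = 1024 patch
    // positions, i.e. output dimensions (3, 5, 5, 1024, 10).
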
    // Strides for moving the patch in various dimensions.
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_colStride = m_dimensions[1];
      m_patchStride = m_colStride * m_dimensions[2] * m_dimensions[0];
      m_otherStride = m_patchStride * m_dimensions[3];
    } else {
      m_colStride = m_dimensions[NumDims-2];
      m_patchStride = m_colStride * m_dimensions[NumDims-3] * m_dimensions[NumDims-1];
      m_otherStride = m_patchStride * m_dimensions[NumDims-4];
    }
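    // m_colStride is the patch height (used to split an intra-patch offset
    // into a row and a column offset), m_patchStride is the number of
    // coefficients in one patch, and m_otherStride is the number of
    // coefficients covering all patches of one batch entry.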

    // Strides for navigating through the input tensor.
    m_rowInputStride = m_inputDepth;
    m_colInputStride = m_inputDepth * m_inputRows;
    m_patchInputStride = m_inputDepth * m_inputRows * m_inputCols;

    // Fast representations of different variables.
    m_fastOtherStride = internal::TensorIntDivisor<Index>(m_otherStride);
    m_fastPatchStride = internal::TensorIntDivisor<Index>(m_patchStride);
    m_fastColStride = internal::TensorIntDivisor<Index>(m_colStride);
    m_fastInflateRowStride = internal::TensorIntDivisor<Index>(m_row_inflate_strides);
    m_fastInflateColStride = internal::TensorIntDivisor<Index>(m_col_inflate_strides);
    m_fastInputColsEff = internal::TensorIntDivisor<Index>(m_input_cols_eff);

    // Number of patches in the width dimension.
    m_fastOutputRows = internal::TensorIntDivisor<Index>(m_outputRows);
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_fastOutputDepth = internal::TensorIntDivisor<Index>(m_dimensions[0]);
    } else {
      m_fastOutputDepth = internal::TensorIntDivisor<Index>(m_dimensions[NumDims-1]);
    }
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/) {
    m_impl.evalSubExprsIfNeeded(NULL);
    return true;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
    m_impl.cleanup();
  }

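  // coeff() maps a linear output index back to an input coefficient: it first
  // recovers the patch index and the offset within the patch, then converts
  // them to a row/column position in the (possibly inflated and padded) input.
  // Positions that fall into the zero-padding or between inflated elements
  // return m_paddingValue instead of reading the input.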
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    // Patch index corresponding to the passed in index.
    const Index patchIndex = index / m_fastPatchStride;
    // Find the offset of the element wrt the location of the first element.
    const Index patchOffset = (index - patchIndex * m_patchStride) / m_fastOutputDepth;
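    // patchOffset is the position within the patch with the depth dimension
    // divided out; it is split below into a row offset and a column offset.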

    // Other ways to index this element.
    const Index otherIndex = (NumDims == 4) ? 0 : index / m_fastOtherStride;
    const Index patch2DIndex = (NumDims == 4) ? patchIndex : (index - otherIndex * m_otherStride) / m_fastPatchStride;

    // Calculate the column index in the original input tensor.
    const Index colIndex = patch2DIndex / m_fastOutputRows;
    const Index colOffset = patchOffset / m_fastColStride;
    const Index inputCol = colIndex * m_col_strides + colOffset * m_in_col_strides - m_colPaddingLeft;
    const Index origInputCol = (m_col_inflate_strides == 1) ? inputCol : ((inputCol >= 0) ? (inputCol / m_fastInflateColStride) : 0);
    if (inputCol < 0 || inputCol >= m_input_cols_eff ||
        ((m_col_inflate_strides != 1) && (inputCol != origInputCol * m_col_inflate_strides))) {
      return Scalar(m_paddingValue);
    }

    // Calculate the row index in the original input tensor.
    const Index rowIndex = patch2DIndex - colIndex * m_outputRows;
    const Index rowOffset = patchOffset - colOffset * m_colStride;
    const Index inputRow = rowIndex * m_row_strides + rowOffset * m_in_row_strides - m_rowPaddingTop;
    const Index origInputRow = (m_row_inflate_strides == 1) ? inputRow : ((inputRow >= 0) ? (inputRow / m_fastInflateRowStride) : 0);
    if (inputRow < 0 || inputRow >= m_input_rows_eff ||
        ((m_row_inflate_strides != 1) && (inputRow != origInputRow * m_row_inflate_strides))) {
      return Scalar(m_paddingValue);
    }

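    // The depth (channel) coordinate is the remainder of the output index
    // modulo the depth dimension, computed with the fast integer divisor.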
    const int depth_index = static_cast<int>(Layout) == static_cast<int>(ColMajor) ? 0 : NumDims - 1;
    const Index depth = index - (index / m_fastOutputDepth) * m_dimensions[depth_index];

    const Index inputIndex = depth + origInputRow * m_rowInputStride + origInputCol * m_colInputStride + otherIndex * m_patchInputStride;
    return m_impl.coeff(inputIndex);
  }

  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
  {
    EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
    eigen_assert(index+PacketSize-1 < dimensions().TotalSize());

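    // With non-unit input or inflation strides, consecutive output
    // coefficients do not map to contiguous input locations, so fall back to
    // the coefficient-by-coefficient path.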
    if (m_in_row_strides != 1 || m_in_col_strides != 1 || m_row_inflate_strides != 1 || m_col_inflate_strides != 1) {
      return packetWithPossibleZero(index);
    }

    const Index indices[2] = {index, index + PacketSize - 1};
    const Index patchIndex = indices[0] / m_fastPatchStride;
    if (patchIndex != indices[1] / m_fastPatchStride) {
      return packetWithPossibleZero(index);
    }
    const Index otherIndex = (NumDims == 4) ? 0 : indices[0] / m_fastOtherStride;
    eigen_assert(otherIndex == indices[1] / m_fastOtherStride);

    // Find the offset of the element wrt the location of the first element.
    const Index patchOffsets[2] = {(indices[0] - patchIndex * m_patchStride) / m_fastOutputDepth,
                                   (indices[1] - patchIndex * m_patchStride) / m_fastOutputDepth};

    const Index patch2DIndex = (NumDims == 4) ? patchIndex : (indices[0] - otherIndex * m_otherStride) / m_fastPatchStride;
    eigen_assert(patch2DIndex == (indices[1] - otherIndex * m_otherStride) / m_fastPatchStride);

    const Index colIndex = patch2DIndex / m_fastOutputRows;
    const Index colOffsets[2] = {patchOffsets[0] / m_fastColStride, patchOffsets[1] / m_fastColStride};

    // Calculate col indices in the original input tensor.
    const Index inputCols[2] = {colIndex * m_col_strides + colOffsets[0] - m_colPaddingLeft,
                                colIndex * m_col_strides + colOffsets[1] - m_colPaddingLeft};
    if (inputCols[1] < 0 || inputCols[0] >= m_inputCols) {
      return internal::pset1<PacketReturnType>(Scalar(m_paddingValue));
    }

    if (inputCols[0] == inputCols[1]) {
      const Index rowIndex = patch2DIndex - colIndex * m_outputRows;
      const Index rowOffsets[2] = {patchOffsets[0] - colOffsets[0]*m_colStride, patchOffsets[1] - colOffsets[1]*m_colStride};
      eigen_assert(rowOffsets[0] <= rowOffsets[1]);
      // Calculate row indices in the original input tensor.
      const Index inputRows[2] = {rowIndex * m_row_strides + rowOffsets[0] - m_rowPaddingTop,
                                  rowIndex * m_row_strides + rowOffsets[1] - m_rowPaddingTop};

      if (inputRows[1] < 0 || inputRows[0] >= m_inputRows) {
        return internal::pset1<PacketReturnType>(Scalar(m_paddingValue));
      }

      if (inputRows[0] >= 0 && inputRows[1] < m_inputRows) {
        // no padding
        const int depth_index = static_cast<int>(Layout) == static_cast<int>(ColMajor) ? 0 : NumDims - 1;
        const Index depth = index - (index / m_fastOutputDepth) * m_dimensions[depth_index];
        const Index inputIndex = depth + inputRows[0] * m_rowInputStride + inputCols[0] * m_colInputStride + otherIndex * m_patchInputStride;
        return m_impl.template packet<Unaligned>(inputIndex);
      }
    }

    return packetWithPossibleZero(index);
  }

  EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }

  const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }

  Index rowPaddingTop() const { return m_rowPaddingTop; }
  Index colPaddingLeft() const { return m_colPaddingLeft; }
  Index outputRows() const { return m_outputRows; }
  Index outputCols() const { return m_outputCols; }
  Index userRowStride() const { return m_row_strides; }
  Index userColStride() const { return m_col_strides; }
  Index userInRowStride() const { return m_in_row_strides; }
  Index userInColStride() const { return m_in_col_strides; }
  Index rowInflateStride() const { return m_row_inflate_strides; }
  Index colInflateStride() const { return m_col_inflate_strides; }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
  costPerCoeff(bool vectorized) const {
    // We conservatively estimate the cost for the code path where the computed
    // index is inside the original image and
    // TensorEvaluator<ArgType, Device>::CoordAccess is false.
    const double compute_cost = 3 * TensorOpCost::DivCost<Index>() +
                                6 * TensorOpCost::MulCost<Index>() +
                                8 * TensorOpCost::MulCost<Index>();
    return m_impl.costPerCoeff(vectorized) +
           TensorOpCost(0, 0, compute_cost, vectorized, PacketSize);
  }

  protected:
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetWithPossibleZero(Index index) const
  {
    EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
    for (int i = 0; i < PacketSize; ++i) {
      values[i] = coeff(index+i);
    }
    PacketReturnType rslt = internal::pload<PacketReturnType>(values);
    return rslt;
  }

  Dimensions m_dimensions;

  Index m_otherStride;
  Index m_patchStride;
  Index m_colStride;
  Index m_row_strides;
  Index m_col_strides;

  Index m_in_row_strides;
  Index m_in_col_strides;
  Index m_row_inflate_strides;
  Index m_col_inflate_strides;

  Index m_input_rows_eff;
  Index m_input_cols_eff;
  Index m_patch_rows_eff;
  Index m_patch_cols_eff;

  internal::TensorIntDivisor<Index> m_fastOtherStride;
  internal::TensorIntDivisor<Index> m_fastPatchStride;
  internal::TensorIntDivisor<Index> m_fastColStride;
  internal::TensorIntDivisor<Index> m_fastInflateRowStride;
  internal::TensorIntDivisor<Index> m_fastInflateColStride;
  internal::TensorIntDivisor<Index> m_fastInputColsEff;

  Index m_rowInputStride;
  Index m_colInputStride;
  Index m_patchInputStride;

  Index m_inputDepth;
  Index m_inputRows;
  Index m_inputCols;

  Index m_outputRows;
  Index m_outputCols;

  Index m_rowPaddingTop;
  Index m_colPaddingLeft;

  internal::TensorIntDivisor<Index> m_fastOutputRows;
  internal::TensorIntDivisor<Index> m_fastOutputDepth;

  Scalar m_paddingValue;

  TensorEvaluator<ArgType, Device> m_impl;
};


} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_IMAGE_PATCH_H
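
The sketch below shows how this op is typically reached through the public tensor API rather than constructed directly. It is a minimal example, assuming the extract_image_patches() convenience method on Tensor expressions with its default strides and padding; the shape noted in the comments follows the ColMajor dimension layout documented above.

// usage_sketch.cpp -- illustrative only, not part of TensorImagePatch.h.
#include <iostream>
#include <unsupported/Eigen/CXX11/Tensor>

int main() {
  // ColMajor input ordered as (depth, rows, cols, batch).
  Eigen::Tensor<float, 4> input(3, 32, 32, 10);
  input.setRandom();

  // Extract 5x5 patches with the default unit strides. If the default
  // padding is SAME (as in current Eigen versions), every row/col position
  // yields a patch, so the result should be (3, 5, 5, 32*32, 10).
  Eigen::Tensor<float, 5> patches = input.extract_image_patches(5, 5);

  for (int i = 0; i < 5; ++i) {
    std::cout << patches.dimension(i) << (i + 1 < 5 ? " x " : "\n");
  }
  return 0;
}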