cxx11_tensor_block_access.cpp
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2018 Andy Davis <andydavis@google.com>
// Copyright (C) 2018 Eugene Zhulenev <ezhulenev@google.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#include "main.h"

#include <algorithm>
#include <set>

#include <Eigen/CXX11/Tensor>

using Eigen::Tensor;
using Eigen::Index;
using Eigen::RowMajor;
using Eigen::ColMajor;
using Eigen::internal::TensorBlockShapeType;

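// A TensorOpCost of all zeros: with no per-coefficient cost estimate, the
// block mappers below are driven purely by the requested target size.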
static TensorOpCost zeroCost() { return {0, 0, 0}; }

template <typename T>
static const T& choose(int layout, const T& col, const T& row) {
  return layout == ColMajor ? col : row;
}

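// Randomly picks one of the two block shape strategies under test.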
static TensorBlockShapeType RandomShape() {
  return internal::random<bool>()
             ? TensorBlockShapeType::kUniformAllDims
             : TensorBlockShapeType::kSkewedInnerDims;
}

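// A random block target size between a single coefficient and the whole
// tensor.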
template <int NumDims>
static size_t RandomTargetSize(const DSizes<Index, NumDims>& dims) {
  return internal::random<size_t>(1, dims.TotalSize());
}

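// Random tensor dimensions, with every extent in [1, 20].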
template <int NumDims>
static DSizes<Index, NumDims> RandomDims() {
  array<Index, NumDims> dims;
  for (int i = 0; i < NumDims; ++i) {
    dims[i] = internal::random<int>(1, 20);
  }
  return DSizes<Index, NumDims>(dims);
}

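// Allocates a buffer of `size` random values; the caller owns (and must
// delete[]) the returned memory.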
template <typename T>
static T* GenerateRandomData(const Index& size) {
  T* data = new T[size];
  for (int i = 0; i < size; ++i) {
    data[i] = internal::random<T>();
  }
  return data;
}

template <int NumDims>
static void Debug(DSizes<Index, NumDims> dims) {
  for (int i = 0; i < NumDims; ++i) {
    std::cout << dims[i] << "; ";
  }
  std::cout << std::endl;
}

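// Sanity-checks block count and block shapes produced by both block shape
// strategies for a 100x100 tensor with a target of 100 coefficients.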
template <int Layout>
static void test_block_mapper_sanity()
{
  typedef internal::TensorBlockMapper<2, Layout> TensorBlockMapper;

  DSizes<Index, 2> tensor_dims(100, 100);

  // Test uniform blocks.
  TensorBlockMapper uniform_block_mapper(
      tensor_dims, {TensorBlockShapeType::kUniformAllDims, 100, zeroCost()});

  VERIFY_IS_EQUAL(uniform_block_mapper.blockCount(), 100);
  VERIFY_IS_EQUAL(uniform_block_mapper.blockTotalSize(), 100);

  // 10x10 blocks
  auto uniform_b0 = uniform_block_mapper.blockDescriptor(0);
  VERIFY_IS_EQUAL(uniform_b0.dimensions().at(0), 10);
  VERIFY_IS_EQUAL(uniform_b0.dimensions().at(1), 10);

  // Test blocks skewed to the inner dims.
  TensorBlockMapper skewed_block_mapper(
      tensor_dims, {TensorBlockShapeType::kSkewedInnerDims, 100, zeroCost()});

  VERIFY_IS_EQUAL(skewed_block_mapper.blockCount(), 100);
  VERIFY_IS_EQUAL(skewed_block_mapper.blockTotalSize(), 100);

  // 1x100 (100x1) rows/cols depending on the tensor layout.
  auto skewed_b0 = skewed_block_mapper.blockDescriptor(0);
  VERIFY_IS_EQUAL(skewed_b0.dimensions().at(0), choose(Layout, 100, 1));
  VERIFY_IS_EQUAL(skewed_b0.dimensions().at(1), choose(Layout, 1, 100));
}

// Given a TensorBlock, "visit" every element accessible through it, and keep
// an index of visited elements in a set. Verify that every coeff is accessed
// only once.
template <int NumDims, int Layout>
static void UpdateCoeffSet(
    const DSizes<Index, NumDims>& tensor_strides,
    const internal::TensorBlockDescriptor<NumDims>& block,
    Index first_coeff_index, int dim_index, std::set<Index>* visited_coeffs) {
  const DSizes<Index, NumDims>& block_sizes = block.dimensions();

  for (int i = 0; i < block_sizes[dim_index]; ++i) {
    if (tensor_strides[dim_index] == 1) {
      typedef std::pair<std::set<Index>::iterator, bool> ReturnType;
      ReturnType inserted = visited_coeffs->insert(first_coeff_index + i);
      VERIFY_IS_EQUAL(inserted.second, true);
    } else {
      int next_dim_index = dim_index + choose(Layout, -1, 1);
      UpdateCoeffSet<NumDims, Layout>(tensor_strides, block, first_coeff_index,
                                      next_dim_index, visited_coeffs);
      first_coeff_index += tensor_strides[dim_index];
    }
  }
}

template <typename T, int NumDims, int Layout>
static void test_block_mapper_maps_every_element() {
  typedef internal::TensorBlockMapper<NumDims, Layout> TensorBlockMapper;

  DSizes<Index, NumDims> dims = RandomDims<NumDims>();
  DSizes<Index, NumDims> strides = internal::strides<Layout>(dims);

  // Keep track of element indices available via block access.
  std::set<Index> coeff_set;

  // Try different combinations of block types and sizes.
  TensorBlockMapper block_mapper(
      dims, {RandomShape(), RandomTargetSize(dims), zeroCost()});

  for (int i = 0; i < block_mapper.blockCount(); ++i) {
    auto block = block_mapper.blockDescriptor(i);
    UpdateCoeffSet<NumDims, Layout>(strides, block, block.offset(),
                                    choose(Layout, NumDims - 1, 0),
                                    &coeff_set);
  }

  // Verify that every coefficient in the original Tensor is accessible
  // through a TensorBlock exactly once.
  Index total_coeffs = dims.TotalSize();
  VERIFY_IS_EQUAL(Index(coeff_set.size()), total_coeffs);
  VERIFY_IS_EQUAL(*coeff_set.begin(), 0);
  VERIFY_IS_EQUAL(*coeff_set.rbegin(), total_coeffs - 1);
}

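// Maps a linear index into the output tensor back to a linear index into the
// input tensor, given a dimension permutation and both stride arrays.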
template <int Layout, int NumDims>
static Index GetInputIndex(Index output_index,
                           const array<Index, NumDims>& output_to_input_dim_map,
                           const array<Index, NumDims>& input_strides,
                           const array<Index, NumDims>& output_strides) {
  int input_index = 0;
  if (Layout == ColMajor) {
    for (int i = NumDims - 1; i > 0; --i) {
      const Index idx = output_index / output_strides[i];
      input_index += idx * input_strides[output_to_input_dim_map[i]];
      output_index -= idx * output_strides[i];
    }
    return input_index +
           output_index * input_strides[output_to_input_dim_map[0]];
  } else {
    for (int i = 0; i < NumDims - 1; ++i) {
      const Index idx = output_index / output_strides[i];
      input_index += idx * input_strides[output_to_input_dim_map[i]];
      output_index -= idx * output_strides[i];
    }
    return input_index +
           output_index * input_strides[output_to_input_dim_map[NumDims - 1]];
  }
}

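// Computes strides for the given sizes: stride 1 in the first dimension for
// ColMajor, in the last dimension for RowMajor.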
template <int Layout, int NumDims>
static array<Index, NumDims> ComputeStrides(
    const array<Index, NumDims>& sizes) {
  array<Index, NumDims> strides;
  if (Layout == ColMajor) {
    strides[0] = 1;
    for (int i = 1; i < NumDims; ++i) {
      strides[i] = strides[i - 1] * sizes[i - 1];
    }
  } else {
    strides[NumDims - 1] = 1;
    for (int i = NumDims - 2; i >= 0; --i) {
      strides[i] = strides[i + 1] * sizes[i + 1];
    }
  }
  return strides;
}

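// Recursively walks all output dimensions and verifies every output
// coefficient against the corresponding input coefficient; input indices wrap
// modulo the input extents (broadcast-style access).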
template <typename Scalar, typename StorageIndex, int Dim>
class EqualityChecker
{
  const Scalar* input_data;
  const DSizes<StorageIndex, Dim> &input_dims, &input_strides, &output_dims,
      &output_strides;

  void check_recursive(const Scalar* input, const Scalar* output,
                       int depth = 0) const {
    if (depth == Dim) {
      VERIFY_IS_EQUAL(*input, *output);
      return;
    }

    for (int i = 0; i < output_dims[depth]; ++i) {
      check_recursive(input + (i % input_dims[depth]) * input_strides[depth],
                      output + i * output_strides[depth], depth + 1);
    }
  }

 public:
  EqualityChecker(const Scalar* input_data_,
                  const DSizes<StorageIndex, Dim>& input_dims_,
                  const DSizes<StorageIndex, Dim>& input_strides_,
                  const DSizes<StorageIndex, Dim>& output_dims_,
                  const DSizes<StorageIndex, Dim>& output_strides_)
      : input_data(input_data_),
        input_dims(input_dims_),
        input_strides(input_strides_),
        output_dims(output_dims_),
        output_strides(output_strides_) {}

  void operator()(const Scalar* output_data) const {
    check_recursive(input_data, output_data);
  }
};

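// Checks the block dimensions chosen by the 'UniformAllDims' strategy for
// increasing 'max_coeff_count' budgets, in both layouts.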
template <int Layout>
static void test_uniform_block_shape()
{
  typedef internal::TensorBlockDescriptor<5> TensorBlock;
  typedef internal::TensorBlockMapper<5, Layout> TensorBlockMapper;

  {
    // Test shape 'UniformAllDims' with a uniform 'max_coeff_count'.
    DSizes<Index, 5> dims(11, 5, 6, 17, 7);
    const Index max_coeff_count = 5 * 5 * 5 * 5 * 5;
    TensorBlockMapper block_mapper(dims, {TensorBlockShapeType::kUniformAllDims,
                                          max_coeff_count, zeroCost()});
    TensorBlock block = block_mapper.blockDescriptor(0);
    for (int i = 0; i < 5; ++i) {
      VERIFY_IS_EQUAL(5, block.dimensions()[i]);
    }
    VERIFY(block.dimensions().TotalSize() <= max_coeff_count);
  }

  // Test shape 'UniformAllDims' with a larger 'max_coeff_count' which spills
  // partially into the inner-most dimension.
  if (Layout == ColMajor) {
    DSizes<Index, 5> dims(11, 5, 6, 17, 7);
    const Index max_coeff_count = 7 * 5 * 5 * 5 * 5;
    TensorBlockMapper block_mapper(dims, {TensorBlockShapeType::kUniformAllDims,
                                          max_coeff_count, zeroCost()});
    TensorBlock block = block_mapper.blockDescriptor(0);
    VERIFY_IS_EQUAL(7, block.dimensions()[0]);
    for (int i = 1; i < 5; ++i) {
      VERIFY_IS_EQUAL(5, block.dimensions()[i]);
    }
    VERIFY(block.dimensions().TotalSize() <= max_coeff_count);
  } else {
    DSizes<Index, 5> dims(11, 5, 6, 17, 7);
    const Index max_coeff_count = 5 * 5 * 5 * 5 * 6;
    TensorBlockMapper block_mapper(dims, {TensorBlockShapeType::kUniformAllDims,
                                          max_coeff_count, zeroCost()});
    TensorBlock block = block_mapper.blockDescriptor(0);
    VERIFY_IS_EQUAL(6, block.dimensions()[4]);
    for (int i = 3; i >= 0; --i) {
      VERIFY_IS_EQUAL(5, block.dimensions()[i]);
    }
    VERIFY(block.dimensions().TotalSize() <= max_coeff_count);
  }

  // Test shape 'UniformAllDims' with a larger 'max_coeff_count' which spills
  // fully into the inner-most dimension.
  if (Layout == ColMajor) {
    DSizes<Index, 5> dims(11, 5, 6, 17, 7);
    const Index max_coeff_count = 11 * 5 * 5 * 5 * 5;
    TensorBlockMapper block_mapper(dims, {TensorBlockShapeType::kUniformAllDims,
                                          max_coeff_count, zeroCost()});
    TensorBlock block = block_mapper.blockDescriptor(0);
    VERIFY_IS_EQUAL(11, block.dimensions()[0]);
    for (int i = 1; i < 5; ++i) {
      VERIFY_IS_EQUAL(5, block.dimensions()[i]);
    }
    VERIFY(block.dimensions().TotalSize() <= max_coeff_count);
  } else {
    DSizes<Index, 5> dims(11, 5, 6, 17, 7);
    const Index max_coeff_count = 5 * 5 * 5 * 5 * 7;
    TensorBlockMapper block_mapper(dims, {TensorBlockShapeType::kUniformAllDims,
                                          max_coeff_count, zeroCost()});
    TensorBlock block = block_mapper.blockDescriptor(0);
    VERIFY_IS_EQUAL(7, block.dimensions()[4]);
    for (int i = 3; i >= 0; --i) {
      VERIFY_IS_EQUAL(5, block.dimensions()[i]);
    }
    VERIFY(block.dimensions().TotalSize() <= max_coeff_count);
  }

  // Test shape 'UniformAllDims' with a larger 'max_coeff_count' which spills
  // fully into the first few inner-most dimensions.
  if (Layout == ColMajor) {
    DSizes<Index, 5> dims(7, 5, 6, 17, 7);
    const Index max_coeff_count = 7 * 5 * 6 * 7 * 5;
    TensorBlockMapper block_mapper(dims, {TensorBlockShapeType::kUniformAllDims,
                                          max_coeff_count, zeroCost()});
    TensorBlock block = block_mapper.blockDescriptor(0);
    VERIFY_IS_EQUAL(7, block.dimensions()[0]);
    VERIFY_IS_EQUAL(5, block.dimensions()[1]);
    VERIFY_IS_EQUAL(6, block.dimensions()[2]);
    VERIFY_IS_EQUAL(7, block.dimensions()[3]);
    VERIFY_IS_EQUAL(5, block.dimensions()[4]);
    VERIFY(block.dimensions().TotalSize() <= max_coeff_count);
  } else {
    DSizes<Index, 5> dims(7, 5, 6, 9, 7);
    const Index max_coeff_count = 5 * 5 * 5 * 6 * 7;
    TensorBlockMapper block_mapper(dims, {TensorBlockShapeType::kUniformAllDims,
                                          max_coeff_count, zeroCost()});
    TensorBlock block = block_mapper.blockDescriptor(0);
    VERIFY_IS_EQUAL(7, block.dimensions()[4]);
    VERIFY_IS_EQUAL(6, block.dimensions()[3]);
    VERIFY_IS_EQUAL(5, block.dimensions()[2]);
    VERIFY_IS_EQUAL(5, block.dimensions()[1]);
    VERIFY_IS_EQUAL(5, block.dimensions()[0]);
    VERIFY(block.dimensions().TotalSize() <= max_coeff_count);
  }

  // Test shape 'UniformAllDims' with full allocation to all dims.
  if (Layout == ColMajor) {
    DSizes<Index, 5> dims(7, 5, 6, 17, 7);
    const Index max_coeff_count = 7 * 5 * 6 * 17 * 7;
    TensorBlockMapper block_mapper(dims, {TensorBlockShapeType::kUniformAllDims,
                                          max_coeff_count, zeroCost()});
    TensorBlock block = block_mapper.blockDescriptor(0);
    VERIFY_IS_EQUAL(7, block.dimensions()[0]);
    VERIFY_IS_EQUAL(5, block.dimensions()[1]);
    VERIFY_IS_EQUAL(6, block.dimensions()[2]);
    VERIFY_IS_EQUAL(17, block.dimensions()[3]);
    VERIFY_IS_EQUAL(7, block.dimensions()[4]);
    VERIFY(block.dimensions().TotalSize() <= max_coeff_count);
  } else {
    DSizes<Index, 5> dims(7, 5, 6, 9, 7);
    const Index max_coeff_count = 7 * 5 * 6 * 9 * 7;
    TensorBlockMapper block_mapper(dims, {TensorBlockShapeType::kUniformAllDims,
                                          max_coeff_count, zeroCost()});
    TensorBlock block = block_mapper.blockDescriptor(0);
    VERIFY_IS_EQUAL(7, block.dimensions()[4]);
    VERIFY_IS_EQUAL(9, block.dimensions()[3]);
    VERIFY_IS_EQUAL(6, block.dimensions()[2]);
    VERIFY_IS_EQUAL(5, block.dimensions()[1]);
    VERIFY_IS_EQUAL(7, block.dimensions()[0]);
    VERIFY(block.dimensions().TotalSize() <= max_coeff_count);
  }
}

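// Checks the block dimensions chosen by the 'SkewedInnerDims' strategy, which
// spends the coefficient budget on inner dimensions first.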
template <int Layout>
static void test_skewed_inner_dim_block_shape()
{
  typedef internal::TensorBlockDescriptor<5> TensorBlock;
  typedef internal::TensorBlockMapper<5, Layout> TensorBlockMapper;

  // Test shape 'SkewedInnerDims' with partial allocation to the inner-most
  // dim.
  if (Layout == ColMajor) {
    DSizes<Index, 5> dims(11, 5, 6, 17, 7);
    const Index max_coeff_count = 10 * 1 * 1 * 1 * 1;
    TensorBlockMapper block_mapper(
        dims,
        {TensorBlockShapeType::kSkewedInnerDims, max_coeff_count, zeroCost()});
    TensorBlock block = block_mapper.blockDescriptor(0);
    VERIFY_IS_EQUAL(10, block.dimensions()[0]);
    for (int i = 1; i < 5; ++i) {
      VERIFY_IS_EQUAL(1, block.dimensions()[i]);
    }
    VERIFY(block.dimensions().TotalSize() <= max_coeff_count);
  } else {
    DSizes<Index, 5> dims(11, 5, 6, 17, 7);
    const Index max_coeff_count = 1 * 1 * 1 * 1 * 6;
    TensorBlockMapper block_mapper(
        dims,
        {TensorBlockShapeType::kSkewedInnerDims, max_coeff_count, zeroCost()});
    TensorBlock block = block_mapper.blockDescriptor(0);
    VERIFY_IS_EQUAL(6, block.dimensions()[4]);
    for (int i = 3; i >= 0; --i) {
      VERIFY_IS_EQUAL(1, block.dimensions()[i]);
    }
    VERIFY(block.dimensions().TotalSize() <= max_coeff_count);
  }

  // Test shape 'SkewedInnerDims' with full allocation to the inner-most dim.
  if (Layout == ColMajor) {
    DSizes<Index, 5> dims(11, 5, 6, 17, 7);
    const Index max_coeff_count = 11 * 1 * 1 * 1 * 1;
    TensorBlockMapper block_mapper(
        dims,
        {TensorBlockShapeType::kSkewedInnerDims, max_coeff_count, zeroCost()});
    TensorBlock block = block_mapper.blockDescriptor(0);
    VERIFY_IS_EQUAL(11, block.dimensions()[0]);
    for (int i = 1; i < 5; ++i) {
      VERIFY_IS_EQUAL(1, block.dimensions()[i]);
    }
    VERIFY(block.dimensions().TotalSize() <= max_coeff_count);
  } else {
    DSizes<Index, 5> dims(11, 5, 6, 17, 7);
    const Index max_coeff_count = 1 * 1 * 1 * 1 * 7;
    TensorBlockMapper block_mapper(
        dims,
        {TensorBlockShapeType::kSkewedInnerDims, max_coeff_count, zeroCost()});
    TensorBlock block = block_mapper.blockDescriptor(0);
    VERIFY_IS_EQUAL(7, block.dimensions()[4]);
    for (int i = 3; i >= 0; --i) {
      VERIFY_IS_EQUAL(1, block.dimensions()[i]);
    }
    VERIFY(block.dimensions().TotalSize() <= max_coeff_count);
  }

  // Test shape 'SkewedInnerDims' with full allocation to the inner-most dim,
  // and partial allocation to the second inner dim.
  if (Layout == ColMajor) {
    DSizes<Index, 5> dims(11, 5, 6, 17, 7);
    const Index max_coeff_count = 11 * 3 * 1 * 1 * 1;
    TensorBlockMapper block_mapper(
        dims,
        {TensorBlockShapeType::kSkewedInnerDims, max_coeff_count, zeroCost()});
    TensorBlock block = block_mapper.blockDescriptor(0);
    VERIFY_IS_EQUAL(11, block.dimensions()[0]);
    VERIFY_IS_EQUAL(3, block.dimensions()[1]);
    for (int i = 2; i < 5; ++i) {
      VERIFY_IS_EQUAL(1, block.dimensions()[i]);
    }
    VERIFY(block.dimensions().TotalSize() <= max_coeff_count);
  } else {
    DSizes<Index, 5> dims(11, 5, 6, 17, 7);
    const Index max_coeff_count = 1 * 1 * 1 * 15 * 7;
    TensorBlockMapper block_mapper(
        dims,
        {TensorBlockShapeType::kSkewedInnerDims, max_coeff_count, zeroCost()});
    TensorBlock block = block_mapper.blockDescriptor(0);
    VERIFY_IS_EQUAL(7, block.dimensions()[4]);
    VERIFY_IS_EQUAL(15, block.dimensions()[3]);
    for (int i = 2; i >= 0; --i) {
      VERIFY_IS_EQUAL(1, block.dimensions()[i]);
    }
    VERIFY(block.dimensions().TotalSize() <= max_coeff_count);
  }

  // Test shape 'SkewedInnerDims' with full allocation to the inner-most dim,
  // and partial allocation to the third inner dim.
  if (Layout == ColMajor) {
    DSizes<Index, 5> dims(11, 5, 6, 17, 7);
    const Index max_coeff_count = 11 * 5 * 5 * 1 * 1;
    TensorBlockMapper block_mapper(
        dims,
        {TensorBlockShapeType::kSkewedInnerDims, max_coeff_count, zeroCost()});
    TensorBlock block = block_mapper.blockDescriptor(0);
    VERIFY_IS_EQUAL(11, block.dimensions()[0]);
    VERIFY_IS_EQUAL(5, block.dimensions()[1]);
    VERIFY_IS_EQUAL(5, block.dimensions()[2]);
    for (int i = 3; i < 5; ++i) {
      VERIFY_IS_EQUAL(1, block.dimensions()[i]);
    }
    VERIFY(block.dimensions().TotalSize() <= max_coeff_count);
  } else {
    DSizes<Index, 5> dims(11, 5, 6, 17, 7);
    const Index max_coeff_count = 1 * 1 * 5 * 17 * 7;
    TensorBlockMapper block_mapper(
        dims,
        {TensorBlockShapeType::kSkewedInnerDims, max_coeff_count, zeroCost()});
    TensorBlock block = block_mapper.blockDescriptor(0);
    VERIFY_IS_EQUAL(7, block.dimensions()[4]);
    VERIFY_IS_EQUAL(17, block.dimensions()[3]);
    VERIFY_IS_EQUAL(5, block.dimensions()[2]);
    for (int i = 1; i >= 0; --i) {
      VERIFY_IS_EQUAL(1, block.dimensions()[i]);
    }
    VERIFY(block.dimensions().TotalSize() <= max_coeff_count);
  }

  // Test shape 'SkewedInnerDims' with full allocation to all dims.
  if (Layout == ColMajor) {
    DSizes<Index, 5> dims(11, 5, 6, 17, 7);
    const Index max_coeff_count = 11 * 5 * 6 * 17 * 7;
    TensorBlockMapper block_mapper(
        dims,
        {TensorBlockShapeType::kSkewedInnerDims, max_coeff_count, zeroCost()});
    TensorBlock block = block_mapper.blockDescriptor(0);
    VERIFY_IS_EQUAL(11, block.dimensions()[0]);
    VERIFY_IS_EQUAL(5, block.dimensions()[1]);
    VERIFY_IS_EQUAL(6, block.dimensions()[2]);
    VERIFY_IS_EQUAL(17, block.dimensions()[3]);
    VERIFY_IS_EQUAL(7, block.dimensions()[4]);
    VERIFY(block.dimensions().TotalSize() <= max_coeff_count);
  } else {
    DSizes<Index, 5> dims(11, 5, 6, 17, 7);
    const Index max_coeff_count = 11 * 5 * 6 * 17 * 7;
    TensorBlockMapper block_mapper(
        dims,
        {TensorBlockShapeType::kSkewedInnerDims, max_coeff_count, zeroCost()});
    TensorBlock block = block_mapper.blockDescriptor(0);
    VERIFY_IS_EQUAL(7, block.dimensions()[4]);
    VERIFY_IS_EQUAL(17, block.dimensions()[3]);
    VERIFY_IS_EQUAL(6, block.dimensions()[2]);
    VERIFY_IS_EQUAL(5, block.dimensions()[1]);
    VERIFY_IS_EQUAL(11, block.dimensions()[0]);
    VERIFY(block.dimensions().TotalSize() <= max_coeff_count);
  }
}

template <int Layout>
static void test_empty_dims(const internal::TensorBlockShapeType block_shape)
{
  // Test blocking of tensors with zero dimensions:
  //  - we must not crash on asserts and divisions by zero
  //  - we must not return a block with zero dimensions
  //    (a recipe for overflows/underflows, divisions by zero and NaNs later)
  //  - the total block count must be zero
  {
    typedef internal::TensorBlockMapper<1, Layout> TensorBlockMapper;

    DSizes<Index, 1> dims(0);
    for (size_t max_coeff_count = 0; max_coeff_count < 2; ++max_coeff_count) {
      TensorBlockMapper block_mapper(
          dims, {block_shape, max_coeff_count, zeroCost()});
      VERIFY_IS_EQUAL(block_mapper.blockCount(), 0);
      VERIFY(block_mapper.blockTotalSize() >= 1);
    }
  }

  {
    typedef internal::TensorBlockMapper<2, Layout> TensorBlockMapper;

    for (int dim1 = 0; dim1 < 3; ++dim1) {
      for (int dim2 = 0; dim2 < 3; ++dim2) {
        DSizes<Index, 2> dims(dim1, dim2);
        for (size_t max_coeff_count = 0; max_coeff_count < 2;
             ++max_coeff_count) {
          TensorBlockMapper block_mapper(
              dims, {block_shape, max_coeff_count, zeroCost()});
          if (dim1 * dim2 == 0) {
            VERIFY_IS_EQUAL(block_mapper.blockCount(), 0);
          }
          VERIFY(block_mapper.blockTotalSize() >= 1);
        }
      }
    }
  }
}

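// Instantiates a test for both layouts (and, for TEST_LAYOUTS_AND_DIMS, for
// ranks 1 through 5).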
#define TEST_LAYOUTS(NAME)        \
  CALL_SUBTEST(NAME<ColMajor>()); \
  CALL_SUBTEST(NAME<RowMajor>())

#define TEST_LAYOUTS_AND_DIMS(TYPE, NAME)    \
  CALL_SUBTEST((NAME<TYPE, 1, ColMajor>())); \
  CALL_SUBTEST((NAME<TYPE, 1, RowMajor>())); \
  CALL_SUBTEST((NAME<TYPE, 2, ColMajor>())); \
  CALL_SUBTEST((NAME<TYPE, 2, RowMajor>())); \
  CALL_SUBTEST((NAME<TYPE, 3, ColMajor>())); \
  CALL_SUBTEST((NAME<TYPE, 3, RowMajor>())); \
  CALL_SUBTEST((NAME<TYPE, 4, ColMajor>())); \
  CALL_SUBTEST((NAME<TYPE, 4, RowMajor>())); \
  CALL_SUBTEST((NAME<TYPE, 5, ColMajor>())); \
  CALL_SUBTEST((NAME<TYPE, 5, RowMajor>()))

#define TEST_LAYOUTS_WITH_ARG(NAME, ARG) \
  CALL_SUBTEST(NAME<ColMajor>(ARG));     \
  CALL_SUBTEST(NAME<RowMajor>(ARG))

EIGEN_DECLARE_TEST(cxx11_tensor_block_access) {
  TEST_LAYOUTS(test_block_mapper_sanity);
  TEST_LAYOUTS_AND_DIMS(float, test_block_mapper_maps_every_element);
  TEST_LAYOUTS(test_uniform_block_shape);
  TEST_LAYOUTS(test_skewed_inner_dim_block_shape);
  TEST_LAYOUTS_WITH_ARG(test_empty_dims, TensorBlockShapeType::kUniformAllDims);
  TEST_LAYOUTS_WITH_ARG(test_empty_dims, TensorBlockShapeType::kSkewedInnerDims);
}

#undef TEST_LAYOUTS
#undef TEST_LAYOUTS_WITH_ARG