// cxx11_tensor_reduction.cpp — Eigen unsupported-module test for tensor
// reductions (recovered from a Doxygen source-browser dump).
1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 //
4 // Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
5 //
6 // This Source Code Form is subject to the terms of the Mozilla
7 // Public License v. 2.0. If a copy of the MPL was not distributed
8 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9 
10 #include "main.h"
11 #include <limits>
12 #include <numeric>
13 #include <Eigen/CXX11/Tensor>
14 
15 using Eigen::Tensor;
16 
17 template <int DataLayout>
18 static void test_trivial_reductions() {
19  {
21  tensor.setRandom();
22  array<ptrdiff_t, 0> reduction_axis;
23 
24  Tensor<float, 0, DataLayout> result = tensor.sum(reduction_axis);
25  VERIFY_IS_EQUAL(result(), tensor());
26  }
27 
28  {
30  tensor.setRandom();
31  array<ptrdiff_t, 0> reduction_axis;
32 
33  Tensor<float, 1, DataLayout> result = tensor.sum(reduction_axis);
34  VERIFY_IS_EQUAL(result.dimension(0), 7);
35  for (int i = 0; i < 7; ++i) {
36  VERIFY_IS_EQUAL(result(i), tensor(i));
37  }
38  }
39 
40  {
41  Tensor<float, 2, DataLayout> tensor(2, 3);
42  tensor.setRandom();
43  array<ptrdiff_t, 0> reduction_axis;
44 
45  Tensor<float, 2, DataLayout> result = tensor.sum(reduction_axis);
46  VERIFY_IS_EQUAL(result.dimension(0), 2);
47  VERIFY_IS_EQUAL(result.dimension(1), 3);
48  for (int i = 0; i < 2; ++i) {
49  for (int j = 0; j < 3; ++j) {
50  VERIFY_IS_EQUAL(result(i, j), tensor(i, j));
51  }
52  }
53  }
54 }
55 
56 template <int DataLayout>
57 static void test_simple_reductions() {
58  Tensor<float, 4, DataLayout> tensor(2, 3, 5, 7);
59  tensor.setRandom();
60  array<ptrdiff_t, 2> reduction_axis2;
61  reduction_axis2[0] = 1;
62  reduction_axis2[1] = 3;
63 
64  Tensor<float, 2, DataLayout> result = tensor.sum(reduction_axis2);
65  VERIFY_IS_EQUAL(result.dimension(0), 2);
66  VERIFY_IS_EQUAL(result.dimension(1), 5);
67  for (int i = 0; i < 2; ++i) {
68  for (int j = 0; j < 5; ++j) {
69  float sum = 0.0f;
70  for (int k = 0; k < 3; ++k) {
71  for (int l = 0; l < 7; ++l) {
72  sum += tensor(i, k, j, l);
73  }
74  }
75  VERIFY_IS_APPROX(result(i, j), sum);
76  }
77  }
78 
79  {
80  Tensor<float, 0, DataLayout> sum1 = tensor.sum();
81  VERIFY_IS_EQUAL(sum1.rank(), 0);
82 
83  array<ptrdiff_t, 4> reduction_axis4;
84  reduction_axis4[0] = 0;
85  reduction_axis4[1] = 1;
86  reduction_axis4[2] = 2;
87  reduction_axis4[3] = 3;
88  Tensor<float, 0, DataLayout> sum2 = tensor.sum(reduction_axis4);
89  VERIFY_IS_EQUAL(sum2.rank(), 0);
90 
91  VERIFY_IS_APPROX(sum1(), sum2());
92  }
93 
94  reduction_axis2[0] = 0;
95  reduction_axis2[1] = 2;
96  result = tensor.prod(reduction_axis2);
97  VERIFY_IS_EQUAL(result.dimension(0), 3);
98  VERIFY_IS_EQUAL(result.dimension(1), 7);
99  for (int i = 0; i < 3; ++i) {
100  for (int j = 0; j < 7; ++j) {
101  float prod = 1.0f;
102  for (int k = 0; k < 2; ++k) {
103  for (int l = 0; l < 5; ++l) {
104  prod *= tensor(k, i, l, j);
105  }
106  }
107  VERIFY_IS_APPROX(result(i, j), prod);
108  }
109  }
110 
111  {
112  Tensor<float, 0, DataLayout> prod1 = tensor.prod();
113  VERIFY_IS_EQUAL(prod1.rank(), 0);
114 
115  array<ptrdiff_t, 4> reduction_axis4;
116  reduction_axis4[0] = 0;
117  reduction_axis4[1] = 1;
118  reduction_axis4[2] = 2;
119  reduction_axis4[3] = 3;
120  Tensor<float, 0, DataLayout> prod2 = tensor.prod(reduction_axis4);
121  VERIFY_IS_EQUAL(prod2.rank(), 0);
122 
123  VERIFY_IS_APPROX(prod1(), prod2());
124  }
125 
126  reduction_axis2[0] = 0;
127  reduction_axis2[1] = 2;
128  result = tensor.maximum(reduction_axis2);
129  VERIFY_IS_EQUAL(result.dimension(0), 3);
130  VERIFY_IS_EQUAL(result.dimension(1), 7);
131  for (int i = 0; i < 3; ++i) {
132  for (int j = 0; j < 7; ++j) {
133  float max_val = std::numeric_limits<float>::lowest();
134  for (int k = 0; k < 2; ++k) {
135  for (int l = 0; l < 5; ++l) {
136  max_val = (std::max)(max_val, tensor(k, i, l, j));
137  }
138  }
139  VERIFY_IS_APPROX(result(i, j), max_val);
140  }
141  }
142 
143  {
144  Tensor<float, 0, DataLayout> max1 = tensor.maximum();
145  VERIFY_IS_EQUAL(max1.rank(), 0);
146 
147  array<ptrdiff_t, 4> reduction_axis4;
148  reduction_axis4[0] = 0;
149  reduction_axis4[1] = 1;
150  reduction_axis4[2] = 2;
151  reduction_axis4[3] = 3;
152  Tensor<float, 0, DataLayout> max2 = tensor.maximum(reduction_axis4);
153  VERIFY_IS_EQUAL(max2.rank(), 0);
154 
155  VERIFY_IS_APPROX(max1(), max2());
156  }
157 
158  reduction_axis2[0] = 0;
159  reduction_axis2[1] = 1;
160  result = tensor.minimum(reduction_axis2);
161  VERIFY_IS_EQUAL(result.dimension(0), 5);
162  VERIFY_IS_EQUAL(result.dimension(1), 7);
163  for (int i = 0; i < 5; ++i) {
164  for (int j = 0; j < 7; ++j) {
165  float min_val = (std::numeric_limits<float>::max)();
166  for (int k = 0; k < 2; ++k) {
167  for (int l = 0; l < 3; ++l) {
168  min_val = (std::min)(min_val, tensor(k, l, i, j));
169  }
170  }
171  VERIFY_IS_APPROX(result(i, j), min_val);
172  }
173  }
174 
175  {
176  Tensor<float, 0, DataLayout> min1 = tensor.minimum();
177  VERIFY_IS_EQUAL(min1.rank(), 0);
178 
179  array<ptrdiff_t, 4> reduction_axis4;
180  reduction_axis4[0] = 0;
181  reduction_axis4[1] = 1;
182  reduction_axis4[2] = 2;
183  reduction_axis4[3] = 3;
184  Tensor<float, 0, DataLayout> min2 = tensor.minimum(reduction_axis4);
185  VERIFY_IS_EQUAL(min2.rank(), 0);
186 
187  VERIFY_IS_APPROX(min1(), min2());
188  }
189 
190  reduction_axis2[0] = 0;
191  reduction_axis2[1] = 1;
192  result = tensor.mean(reduction_axis2);
193  VERIFY_IS_EQUAL(result.dimension(0), 5);
194  VERIFY_IS_EQUAL(result.dimension(1), 7);
195  for (int i = 0; i < 5; ++i) {
196  for (int j = 0; j < 7; ++j) {
197  float sum = 0.0f;
198  int count = 0;
199  for (int k = 0; k < 2; ++k) {
200  for (int l = 0; l < 3; ++l) {
201  sum += tensor(k, l, i, j);
202  ++count;
203  }
204  }
205  VERIFY_IS_APPROX(result(i, j), sum / count);
206  }
207  }
208 
209  {
210  Tensor<float, 0, DataLayout> mean1 = tensor.mean();
211  VERIFY_IS_EQUAL(mean1.rank(), 0);
212 
213  array<ptrdiff_t, 4> reduction_axis4;
214  reduction_axis4[0] = 0;
215  reduction_axis4[1] = 1;
216  reduction_axis4[2] = 2;
217  reduction_axis4[3] = 3;
218  Tensor<float, 0, DataLayout> mean2 = tensor.mean(reduction_axis4);
219  VERIFY_IS_EQUAL(mean2.rank(), 0);
220 
221  VERIFY_IS_APPROX(mean1(), mean2());
222  }
223 
224  {
225  Tensor<int, 1> ints(10);
226  std::iota(ints.data(), ints.data() + ints.dimension(0), 0);
227 
229  all = ints.all();
230  VERIFY(!all());
231  all = (ints >= ints.constant(0)).all();
232  VERIFY(all());
233 
235  any = (ints > ints.constant(10)).any();
236  VERIFY(!any());
237  any = (ints < ints.constant(1)).any();
238  VERIFY(any());
239  }
240 }
241 
242 
243 template <int DataLayout>
244 static void test_reductions_in_expr() {
245  Tensor<float, 4, DataLayout> tensor(2, 3, 5, 7);
246  tensor.setRandom();
247  array<ptrdiff_t, 2> reduction_axis2;
248  reduction_axis2[0] = 1;
249  reduction_axis2[1] = 3;
250 
252  result = result.constant(1.0f) - tensor.sum(reduction_axis2);
253  VERIFY_IS_EQUAL(result.dimension(0), 2);
254  VERIFY_IS_EQUAL(result.dimension(1), 5);
255  for (int i = 0; i < 2; ++i) {
256  for (int j = 0; j < 5; ++j) {
257  float sum = 0.0f;
258  for (int k = 0; k < 3; ++k) {
259  for (int l = 0; l < 7; ++l) {
260  sum += tensor(i, k, j, l);
261  }
262  }
263  VERIFY_IS_APPROX(result(i, j), 1.0f - sum);
264  }
265  }
266 }
267 
268 
269 template <int DataLayout>
270 static void test_full_reductions() {
271  Tensor<float, 2, DataLayout> tensor(2, 3);
272  tensor.setRandom();
273  array<ptrdiff_t, 2> reduction_axis;
274  reduction_axis[0] = 0;
275  reduction_axis[1] = 1;
276 
277  Tensor<float, 0, DataLayout> result = tensor.sum(reduction_axis);
278  VERIFY_IS_EQUAL(result.rank(), 0);
279 
280  float sum = 0.0f;
281  for (int i = 0; i < 2; ++i) {
282  for (int j = 0; j < 3; ++j) {
283  sum += tensor(i, j);
284  }
285  }
286  VERIFY_IS_APPROX(result(0), sum);
287 
288  result = tensor.square().sum(reduction_axis).sqrt();
289  VERIFY_IS_EQUAL(result.rank(), 0);
290 
291  sum = 0.0f;
292  for (int i = 0; i < 2; ++i) {
293  for (int j = 0; j < 3; ++j) {
294  sum += tensor(i, j) * tensor(i, j);
295  }
296  }
297  VERIFY_IS_APPROX(result(), sqrtf(sum));
298 }
299 
// A custom reducer for Tensor::reduce(): accumulates the sum of squares and
// finalizes to 1 / (sum_of_squares + offset). PacketAccess is false, so the
// reduction framework only calls the scalar reduce() path.
struct UserReducer {
  static const bool PacketAccess = false;  // no vectorized reduce() provided
  UserReducer(float offset) : offset_(offset) {}
  // Folds one input value into the accumulator (sum of squares).
  void reduce(const float val, float* accum) { *accum += val * val; }
  // Identity element for the accumulation.
  float initialize() const { return 0; }
  // Maps the raw accumulator to the final reduced value.
  float finalize(const float accum) const { return 1.0f / (accum + offset_); }

 private:
  const float offset_;
};
310 
311 template <int DataLayout>
313  Tensor<float, 2, DataLayout> tensor(5, 7);
314  tensor.setRandom();
315  array<ptrdiff_t, 1> reduction_axis;
316  reduction_axis[0] = 1;
317 
318  UserReducer reducer(10.0f);
319  Tensor<float, 1, DataLayout> result = tensor.reduce(reduction_axis, reducer);
320  VERIFY_IS_EQUAL(result.dimension(0), 5);
321  for (int i = 0; i < 5; ++i) {
322  float expected = 10.0f;
323  for (int j = 0; j < 7; ++j) {
324  expected += tensor(i, j) * tensor(i, j);
325  }
326  expected = 1.0f / expected;
327  VERIFY_IS_APPROX(result(i), expected);
328  }
329 }
330 
331 template <int DataLayout>
332 static void test_tensor_maps() {
333  int inputs[2 * 3 * 5 * 7];
334  TensorMap<Tensor<int, 4, DataLayout> > tensor_map(inputs, 2, 3, 5, 7);
335  TensorMap<Tensor<const int, 4, DataLayout> > tensor_map_const(inputs, 2, 3, 5,
336  7);
337  const TensorMap<Tensor<const int, 4, DataLayout> > tensor_map_const_const(
338  inputs, 2, 3, 5, 7);
339 
340  tensor_map.setRandom();
341  array<ptrdiff_t, 2> reduction_axis;
342  reduction_axis[0] = 1;
343  reduction_axis[1] = 3;
344 
345  Tensor<int, 2, DataLayout> result = tensor_map.sum(reduction_axis);
346  Tensor<int, 2, DataLayout> result2 = tensor_map_const.sum(reduction_axis);
348  tensor_map_const_const.sum(reduction_axis);
349 
350  for (int i = 0; i < 2; ++i) {
351  for (int j = 0; j < 5; ++j) {
352  int sum = 0;
353  for (int k = 0; k < 3; ++k) {
354  for (int l = 0; l < 7; ++l) {
355  sum += tensor_map(i, k, j, l);
356  }
357  }
358  VERIFY_IS_EQUAL(result(i, j), sum);
359  VERIFY_IS_EQUAL(result2(i, j), sum);
360  VERIFY_IS_EQUAL(result3(i, j), sum);
361  }
362  }
363 }
364 
365 template <int DataLayout>
366 static void test_static_dims() {
367  Tensor<float, 4, DataLayout> in(72, 53, 97, 113);
368  Tensor<float, 2, DataLayout> out(72, 97);
369  in.setRandom();
370 
371 #if !EIGEN_HAS_CONSTEXPR
372  array<int, 2> reduction_axis;
373  reduction_axis[0] = 1;
374  reduction_axis[1] = 3;
375 #else
376  Eigen::IndexList<Eigen::type2index<1>, Eigen::type2index<3> > reduction_axis;
377 #endif
378 
379  out = in.maximum(reduction_axis);
380 
381  for (int i = 0; i < 72; ++i) {
382  for (int j = 0; j < 97; ++j) {
383  float expected = -1e10f;
384  for (int k = 0; k < 53; ++k) {
385  for (int l = 0; l < 113; ++l) {
386  expected = (std::max)(expected, in(i, k, j, l));
387  }
388  }
389  VERIFY_IS_APPROX(out(i, j), expected);
390  }
391  }
392 }
393 
394 template <int DataLayout>
396  Tensor<float, 4, DataLayout> in(72, 53, 97, 113);
397  Tensor<float, 2, DataLayout> out(97, 113);
398  in.setRandom();
399 
400 // Reduce on the innermost dimensions.
401 #if !EIGEN_HAS_CONSTEXPR
402  array<int, 2> reduction_axis;
403  reduction_axis[0] = 0;
404  reduction_axis[1] = 1;
405 #else
406  // This triggers the use of packets for ColMajor.
407  Eigen::IndexList<Eigen::type2index<0>, Eigen::type2index<1> > reduction_axis;
408 #endif
409 
410  out = in.maximum(reduction_axis);
411 
412  for (int i = 0; i < 97; ++i) {
413  for (int j = 0; j < 113; ++j) {
414  float expected = -1e10f;
415  for (int k = 0; k < 53; ++k) {
416  for (int l = 0; l < 72; ++l) {
417  expected = (std::max)(expected, in(l, k, i, j));
418  }
419  }
420  VERIFY_IS_APPROX(out(i, j), expected);
421  }
422  }
423 }
424 
425 template <int DataLayout>
427  Tensor<float, 4, DataLayout> in(72, 53, 97, 113);
428  Tensor<float, 2, DataLayout> out(72, 53);
429  in.setRandom();
430 
431 // Reduce on the innermost dimensions.
432 #if !EIGEN_HAS_CONSTEXPR
433  array<int, 2> reduction_axis;
434  reduction_axis[0] = 2;
435  reduction_axis[1] = 3;
436 #else
437  // This triggers the use of packets for RowMajor.
438  Eigen::IndexList<Eigen::type2index<2>, Eigen::type2index<3>> reduction_axis;
439 #endif
440 
441  out = in.maximum(reduction_axis);
442 
443  for (int i = 0; i < 72; ++i) {
444  for (int j = 0; j < 53; ++j) {
445  float expected = -1e10f;
446  for (int k = 0; k < 97; ++k) {
447  for (int l = 0; l < 113; ++l) {
448  expected = (std::max)(expected, in(i, j, k, l));
449  }
450  }
451  VERIFY_IS_APPROX(out(i, j), expected);
452  }
453  }
454 }
455 
456 template <int DataLayout>
457 static void test_reduce_middle_dims() {
458  Tensor<float, 4, DataLayout> in(72, 53, 97, 113);
459  Tensor<float, 2, DataLayout> out(72, 53);
460  in.setRandom();
461 
462 // Reduce on the innermost dimensions.
463 #if !EIGEN_HAS_CONSTEXPR
464  array<int, 2> reduction_axis;
465  reduction_axis[0] = 1;
466  reduction_axis[1] = 2;
467 #else
468  // This triggers the use of packets for RowMajor.
469  Eigen::IndexList<Eigen::type2index<1>, Eigen::type2index<2>> reduction_axis;
470 #endif
471 
472  out = in.maximum(reduction_axis);
473 
474  for (int i = 0; i < 72; ++i) {
475  for (int j = 0; j < 113; ++j) {
476  float expected = -1e10f;
477  for (int k = 0; k < 53; ++k) {
478  for (int l = 0; l < 97; ++l) {
479  expected = (std::max)(expected, in(i, k, l, j));
480  }
481  }
482  VERIFY_IS_APPROX(out(i, j), expected);
483  }
484  }
485 }
486 
488  CALL_SUBTEST(test_trivial_reductions<ColMajor>());
489  CALL_SUBTEST(test_trivial_reductions<RowMajor>());
490  CALL_SUBTEST(test_simple_reductions<ColMajor>());
491  CALL_SUBTEST(test_simple_reductions<RowMajor>());
492  CALL_SUBTEST(test_reductions_in_expr<ColMajor>());
493  CALL_SUBTEST(test_reductions_in_expr<RowMajor>());
494  CALL_SUBTEST(test_full_reductions<ColMajor>());
495  CALL_SUBTEST(test_full_reductions<RowMajor>());
496  CALL_SUBTEST(test_user_defined_reductions<ColMajor>());
497  CALL_SUBTEST(test_user_defined_reductions<RowMajor>());
498  CALL_SUBTEST(test_tensor_maps<ColMajor>());
499  CALL_SUBTEST(test_tensor_maps<RowMajor>());
500  CALL_SUBTEST(test_static_dims<ColMajor>());
501  CALL_SUBTEST(test_static_dims<RowMajor>());
502  CALL_SUBTEST(test_innermost_last_dims<ColMajor>());
503  CALL_SUBTEST(test_innermost_last_dims<RowMajor>());
504  CALL_SUBTEST(test_innermost_first_dims<ColMajor>());
505  CALL_SUBTEST(test_innermost_first_dims<RowMajor>());
506  CALL_SUBTEST(test_reduce_middle_dims<ColMajor>());
507  CALL_SUBTEST(test_reduce_middle_dims<RowMajor>());
508 }
/* --------------------------------------------------------------------------
 * NOTE(review): everything below is Doxygen cross-reference residue from the
 * source-browser extraction (symbol index entries, unrelated gnuplot/gtsam
 * snippets), not part of the original test file. Preserved verbatim inside
 * this comment block so the translation unit stays well-formed.
 *
 * static void test_trivial_reductions()
 * #define max(a, b)                          (datatypes.h:20)
 * static void test_reductions_in_expr()
 * Index dimension(std::size_t n) const       (Tensor.h:101)
 * static void test_tensor_maps()
 * #define min(a, b)                          (datatypes.h:19)
 * Matrix expected                            (testMatrix.cpp:974)
 * void test_cxx11_tensor_reduction()
 * Tensor& setRandom()                        (TensorBase.h:850)
 * Index rank() const                         (Tensor.h:100)
 * #define VERIFY_IS_APPROX(a, b)             (main.h:331)
 * #define VERIFY_IS_EQUAL(a, b)
 * Values result
 * A tensor expression mapping an existing array of data. (TensorMap)
 * float finalize(const float accum) const
 * static void test_innermost_last_dims()
 * static void test_full_reductions()
 * EIGEN_DONT_INLINE void prod(const Lhs& a, const Rhs& b, Res& c)
 * const mpreal sum(...)                      (mpreal.h:2381)
 * The fixed sized version of the tensor class. (TensorFixedSize)
 * Scalar* data()                             (Tensor.h:104)
 * void reduce(const float val, float* accum)
 * static const bool PacketAccess
 * #define CALL_SUBTEST(FUNC)                 (main.h:342)
 * #define VERIFY(a)                          (main.h:325)
 * UserReducer(float offset)
 * float initialize() const
 * static void test_innermost_first_dims()
 * static void test_static_dims()
 * static void test_reduce_middle_dims()
 * static void test_user_defined_reductions()
 * static void test_simple_reductions()
 * The tensor class.                          (Tensor.h:63)
 *
 * gtsam — autogenerated on Sat May 8 2021 02:41:56
 * ------------------------------------------------------------------------ */