#include "main.h"
#include <limits>
#include <numeric>
#include <Eigen/CXX11/Tensor>

using Eigen::Tensor;

// Reductions over an empty list of axes must leave the tensor unchanged.
template <int DataLayout>
static void test_trivial_reductions() {
  {
    Tensor<float, 1, DataLayout> tensor(7);
    tensor.setRandom();
    array<ptrdiff_t, 0> reduction_axis;
    Tensor<float, 1, DataLayout> result = tensor.sum(reduction_axis);
    for (int i = 0; i < 7; ++i) VERIFY_IS_EQUAL(result(i), tensor(i));
  }
  {
    Tensor<float, 2, DataLayout> tensor(2, 3);
    tensor.setRandom();
    array<ptrdiff_t, 0> reduction_axis;
    Tensor<float, 2, DataLayout> result = tensor.sum(reduction_axis);
    for (int i = 0; i < 2; ++i)
      for (int j = 0; j < 3; ++j) VERIFY_IS_EQUAL(result(i, j), tensor(i, j));
  }
}
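For reference, a minimal standalone sketch of the API these checks exercise (not part of the test file; the variable names are illustrative, and it assumes the tensor module is used directly via <unsupported/Eigen/CXX11/Tensor>):

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::Tensor<float, 2> t(2, 3);
  t.setRandom();

  // An empty axis list reduces nothing: the result is an element-wise copy.
  Eigen::array<Eigen::Index, 0> no_axes;
  Eigen::Tensor<float, 2> copy = t.sum(no_axes);

  // Omitting the axis list reduces everything: the result is a rank-0 tensor.
  Eigen::Tensor<float, 0> total = t.sum();
  std::cout << copy(1, 2) << " " << total() << "\n";
  return 0;
}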
// Axis-wise and full reductions on a 2x3x5x7 tensor: sum, prod, maximum,
// minimum and mean, each checked against a hand-written loop.
template <int DataLayout>
static void test_simple_reductions() {
  Tensor<float, 4, DataLayout> tensor(2, 3, 5, 7);
  tensor.setRandom();

  // Sum over axes 1 and 3: the surviving axes 0 and 2 give a 2 x 5 result.
  array<ptrdiff_t, 2> reduction_axis2;
  reduction_axis2[0] = 1;
  reduction_axis2[1] = 3;
  Tensor<float, 2, DataLayout> result = tensor.sum(reduction_axis2);
  for (int i = 0; i < 2; ++i) {
    for (int j = 0; j < 5; ++j) {
      float sum = 0.0f;
      for (int k = 0; k < 3; ++k) {
        for (int l = 0; l < 7; ++l) {
          sum += tensor(i, k, j, l);
        }
      }
      VERIFY_IS_APPROX(result(i, j), sum);
    }
  }

  // A reduction over all four axes yields a rank-0 (scalar) tensor and must
  // agree with the implicit full reduction tensor.sum().
  array<ptrdiff_t, 4> reduction_axis4;
  reduction_axis4[0] = 0;
  reduction_axis4[1] = 1;
  reduction_axis4[2] = 2;
  reduction_axis4[3] = 3;
  Tensor<float, 0, DataLayout> full_sum1 = tensor.sum();
  Tensor<float, 0, DataLayout> full_sum2 = tensor.sum(reduction_axis4);
  VERIFY_IS_APPROX(full_sum1(), full_sum2());

  // Product over axes 0 and 2: the surviving axes 1 and 3 give a 3 x 7 result.
  reduction_axis2[0] = 0;
  reduction_axis2[1] = 2;
  result = tensor.prod(reduction_axis2);
  for (int i = 0; i < 3; ++i) {
    for (int j = 0; j < 7; ++j) {
      float prod = 1.0f;
      for (int k = 0; k < 2; ++k) {
        for (int l = 0; l < 5; ++l) {
          prod *= tensor(k, i, l, j);
        }
      }
      VERIFY_IS_APPROX(result(i, j), prod);
    }
  }
  Tensor<float, 0, DataLayout> full_prod1 = tensor.prod();
  Tensor<float, 0, DataLayout> full_prod2 = tensor.prod(reduction_axis4);
  VERIFY_IS_APPROX(full_prod1(), full_prod2());

  // Maximum over axes 0 and 2.
  reduction_axis2[0] = 0;
  reduction_axis2[1] = 2;
  result = tensor.maximum(reduction_axis2);
  for (int i = 0; i < 3; ++i) {
    for (int j = 0; j < 7; ++j) {
      float max_val = std::numeric_limits<float>::lowest();
      for (int k = 0; k < 2; ++k) {
        for (int l = 0; l < 5; ++l) {
          max_val = (std::max)(max_val, tensor(k, i, l, j));
        }
      }
      VERIFY_IS_APPROX(result(i, j), max_val);
    }
  }
  Tensor<float, 0, DataLayout> full_max1 = tensor.maximum();
  Tensor<float, 0, DataLayout> full_max2 = tensor.maximum(reduction_axis4);
  VERIFY_IS_APPROX(full_max1(), full_max2());

  // Minimum over axes 0 and 1: the surviving axes 2 and 3 give a 5 x 7 result.
  reduction_axis2[0] = 0;
  reduction_axis2[1] = 1;
  result = tensor.minimum(reduction_axis2);
  for (int i = 0; i < 5; ++i) {
    for (int j = 0; j < 7; ++j) {
      float min_val = (std::numeric_limits<float>::max)();
      for (int k = 0; k < 2; ++k) {
        for (int l = 0; l < 3; ++l) {
          min_val = (std::min)(min_val, tensor(k, l, i, j));
        }
      }
      VERIFY_IS_APPROX(result(i, j), min_val);
    }
  }
  Tensor<float, 0, DataLayout> full_min1 = tensor.minimum();
  Tensor<float, 0, DataLayout> full_min2 = tensor.minimum(reduction_axis4);
  VERIFY_IS_APPROX(full_min1(), full_min2());

  // Mean over axes 0 and 1.
  reduction_axis2[0] = 0;
  reduction_axis2[1] = 1;
  result = tensor.mean(reduction_axis2);
  for (int i = 0; i < 5; ++i) {
    for (int j = 0; j < 7; ++j) {
      float sum = 0.0f;
      int count = 0;
      for (int k = 0; k < 2; ++k) {
        for (int l = 0; l < 3; ++l) {
          sum += tensor(k, l, i, j);
          ++count;
        }
      }
      VERIFY_IS_APPROX(result(i, j), sum / count);
    }
  }
  Tensor<float, 0, DataLayout> full_mean1 = tensor.mean();
  Tensor<float, 0, DataLayout> full_mean2 = tensor.mean(reduction_axis4);
  VERIFY_IS_APPROX(full_mean1(), full_mean2());

  // Boolean reductions over a tensor holding 0, 1, ..., 9.
  Tensor<int, 1> ints(10);
  std::iota(ints.data(), ints.data() + ints.dimension(0), 0);

  TensorFixedSize<bool, Sizes<> > all;
  all = (ints >= ints.constant(0)).all();
  VERIFY(all());

  TensorFixedSize<bool, Sizes<> > any;
  any = (ints > ints.constant(10)).any();
  VERIFY(!any());
  any = (ints < ints.constant(1)).any();
  VERIFY(any());
}
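Outside the test harness the same axis-wise reductions look like the sketch below; the names and printed values are illustrative, under the same standalone-include assumption as above:

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::Tensor<float, 4> t(2, 3, 5, 7);
  t.setRandom();

  // Reduce axes 1 and 3; the surviving axes 0 and 2 give a 2 x 5 result.
  Eigen::array<Eigen::Index, 2> axes;
  axes[0] = 1;
  axes[1] = 3;
  Eigen::Tensor<float, 2> sums = t.sum(axes);
  Eigen::Tensor<float, 2> maxima = t.maximum(axes);
  Eigen::Tensor<float, 2> means = t.mean(axes);

  std::cout << sums.dimension(0) << " x " << sums.dimension(1) << "\n"  // 2 x 5
            << maxima(0, 0) << " " << means(1, 4) << "\n";
  return 0;
}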
// Reductions can appear as sub-expressions of a larger tensor expression.
template <int DataLayout>
static void test_reductions_in_expr() {
  Tensor<float, 4, DataLayout> tensor(2, 3, 5, 7);
  tensor.setRandom();
  array<ptrdiff_t, 2> reduction_axis2;
  reduction_axis2[0] = 1;
  reduction_axis2[1] = 3;

  Tensor<float, 2, DataLayout> result(2, 5);
  result = result.constant(1.0f) - tensor.sum(reduction_axis2);
  for (int i = 0; i < 2; ++i) {
    for (int j = 0; j < 5; ++j) {
      float sum = -1.0f;
      for (int k = 0; k < 3; ++k) {
        for (int l = 0; l < 7; ++l) {
          sum += tensor(i, k, j, l);
        }
      }
      VERIFY_IS_APPROX(result(i, j), -sum);
    }
  }
}
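Because reductions are lazy expressions, they compose with elementwise operations before assignment; a small illustrative sketch (the names are not from the test):

#include <unsupported/Eigen/CXX11/Tensor>

int main() {
  Eigen::Tensor<float, 4> t(2, 3, 5, 7);
  t.setRandom();
  Eigen::array<Eigen::Index, 2> axes;
  axes[0] = 1;
  axes[1] = 3;

  // A reduction fused into a larger expression, as in the check above:
  // every entry of the 2 x 5 result is 1 minus the corresponding partial sum.
  Eigen::Tensor<float, 2> result(2, 5);
  result = result.constant(1.0f) - t.sum(axes);

  // Reductions of derived expressions also compose without explicit temporaries.
  Eigen::Tensor<float, 0> sum_of_squares = (t * t).sum();
  return 0;
}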
// Reducing over every axis of a rank-2 tensor yields a rank-0 (scalar) result.
template <int DataLayout>
static void test_full_reductions() {
  Tensor<float, 2, DataLayout> tensor(2, 3);
  tensor.setRandom();
  array<ptrdiff_t, 2> reduction_axis;
  reduction_axis[0] = 0;
  reduction_axis[1] = 1;

  Tensor<float, 0, DataLayout> result = tensor.sum(reduction_axis);
  float sum = 0.0f;
  for (int i = 0; i < 2; ++i) {
    for (int j = 0; j < 3; ++j) {
      sum += tensor(i, j);
    }
  }
  VERIFY_IS_APPROX(result(), sum);

  // Reductions compose with elementwise ops: square, sum, then sqrt.
  result = tensor.square().sum(reduction_axis).sqrt();
  sum = 0.0f;
  for (int i = 0; i < 2; ++i) {
    for (int j = 0; j < 3; ++j) {
      sum += tensor(i, j) * tensor(i, j);
    }
  }
  VERIFY_IS_APPROX(result(), sqrtf(sum));
}
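The composed reduction above is effectively a Frobenius norm; a standalone illustrative sketch of the same chain:

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::Tensor<float, 2> t(2, 3);
  t.setRandom();

  // Element-wise square, full sum, then element-wise sqrt of the rank-0 result.
  Eigen::Tensor<float, 0> norm = t.square().sum().sqrt();
  std::cout << "Frobenius norm: " << norm() << "\n";
  return 0;
}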
// A user-supplied reducer: accumulates the sum of squares and returns its
// inverse, shifted by a fixed offset.
struct UserReducer {
  static const bool PacketAccess = false;
  UserReducer(float offset) : offset_(offset) {}
  float initialize() const { return 0; }
  void reduce(const float val, float* accum) { *accum += val * val; }
  float finalize(const float accum) const { return 1.0f / (accum + offset_); }

 private:
  const float offset_;
};

template <int DataLayout>
static void test_user_defined_reductions() {
  Tensor<float, 2, DataLayout> tensor(5, 7);
  tensor.setRandom();
  array<ptrdiff_t, 1> reduction_axis;
  reduction_axis[0] = 1;

  UserReducer reducer(10.0f);
  Tensor<float, 1, DataLayout> result = tensor.reduce(reduction_axis, reducer);
  for (int i = 0; i < 5; ++i) {
    float expected = 10.0f;
    for (int j = 0; j < 7; ++j) {
      expected += tensor(i, j) * tensor(i, j);
    }
    expected = 1.0f / expected;
    VERIFY_IS_APPROX(result(i), expected);
  }
}
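A self-contained sketch of the user-defined reduction mechanism exercised above; the reducer mirrors UserReducer (offset plus sum of squares, inverted in finalize), while the program structure and the name SquaredSumReducer are illustrative:

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

// A reducer supplies initialize(), reduce() and finalize(); PacketAccess=false
// keeps the reduction on the scalar code path, so no packet variants are needed.
struct SquaredSumReducer {
  static const bool PacketAccess = false;
  SquaredSumReducer(float offset) : offset_(offset) {}
  float initialize() const { return 0.0f; }
  void reduce(const float val, float* accum) { *accum += val * val; }
  float finalize(const float accum) const { return 1.0f / (accum + offset_); }

 private:
  const float offset_;
};

int main() {
  Eigen::Tensor<float, 2> t(5, 7);
  t.setRandom();

  Eigen::array<Eigen::Index, 1> axes;
  axes[0] = 1;  // reduce away the second dimension
  SquaredSumReducer reducer(10.0f);
  Eigen::Tensor<float, 1> result = t.reduce(axes, reducer);  // one value per row
  std::cout << result.dimension(0) << " reduced values\n";   // 5
  return 0;
}

Leaving PacketAccess false is the simplest choice for a custom reducer; enabling it would additionally require packet-wise variants of the reduce/finalize methods.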
// TensorMaps over raw memory (mutable, const, and a const-qualified map of
// const data) must all support the same reductions as an owning Tensor.
template <int DataLayout>
static void test_tensor_maps() {
  int inputs[2 * 3 * 5 * 7];
  TensorMap<Tensor<int, 4, DataLayout> > tensor_map(inputs, 2, 3, 5, 7);
  TensorMap<Tensor<const int, 4, DataLayout> > tensor_map_const(inputs, 2, 3, 5, 7);
  const TensorMap<Tensor<const int, 4, DataLayout> > tensor_map_const_const(inputs, 2, 3, 5, 7);
  tensor_map.setRandom();

  array<ptrdiff_t, 2> reduction_axis;
  reduction_axis[0] = 1;
  reduction_axis[1] = 3;

  Tensor<int, 2, DataLayout> result = tensor_map.sum(reduction_axis);
  Tensor<int, 2, DataLayout> result2 = tensor_map_const.sum(reduction_axis);
  Tensor<int, 2, DataLayout> result3 = tensor_map_const_const.sum(reduction_axis);

  for (int i = 0; i < 2; ++i) {
    for (int j = 0; j < 5; ++j) {
      int sum = 0;
      for (int k = 0; k < 3; ++k) {
        for (int l = 0; l < 7; ++l) {
          sum += tensor_map(i, k, j, l);
        }
      }
      VERIFY_IS_EQUAL(result(i, j), sum);
      VERIFY_IS_EQUAL(result2(i, j), sum);
      VERIFY_IS_EQUAL(result3(i, j), sum);
    }
  }
}
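TensorMap views existing memory as a tensor without copying and supports the same reduction interface; a standalone illustrative sketch:

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  // Flat storage owned elsewhere, e.g. filled by non-Eigen code.
  int raw[2 * 3 * 5 * 7];
  for (int n = 0; n < 2 * 3 * 5 * 7; ++n) raw[n] = n % 11;

  // View it as a 2x3x5x7 tensor without copying, then reduce axes 1 and 3.
  Eigen::TensorMap<Eigen::Tensor<int, 4> > view(raw, 2, 3, 5, 7);
  Eigen::array<Eigen::Index, 2> axes;
  axes[0] = 1;
  axes[1] = 3;
  Eigen::Tensor<int, 2> sums = view.sum(axes);
  std::cout << sums(0, 0) << "\n";
  return 0;
}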
// Reduction axes known at compile time (Eigen::IndexList) when constexpr is
// available; falls back to a runtime array otherwise.
template <int DataLayout>
static void test_static_dims() {
  Tensor<float, 4, DataLayout> in(72, 53, 97, 113);
  Tensor<float, 2, DataLayout> out(72, 97);
  in.setRandom();

#if !EIGEN_HAS_CONSTEXPR
  array<int, 2> reduction_axis;
  reduction_axis[0] = 1;
  reduction_axis[1] = 3;
#else
  Eigen::IndexList<Eigen::type2index<1>, Eigen::type2index<3> > reduction_axis;
#endif

  out = in.maximum(reduction_axis);

  for (int i = 0; i < 72; ++i) {
    for (int j = 0; j < 97; ++j) {
      float expected = std::numeric_limits<float>::lowest();
      for (int k = 0; k < 53; ++k) {
        for (int l = 0; l < 113; ++l) {
          expected = (std::max)(expected, in(i, k, j, l));
        }
      }
      VERIFY_IS_APPROX(out(i, j), expected);
    }
  }
}
// Reduce the two innermost (ColMajor) dimensions, 0 and 1.
template <int DataLayout>
static void test_innermost_last_dims() {
  Tensor<float, 4, DataLayout> in(72, 53, 97, 113);
  Tensor<float, 2, DataLayout> out(97, 113);
  in.setRandom();

#if !EIGEN_HAS_CONSTEXPR
  array<int, 2> reduction_axis;
  reduction_axis[0] = 0;
  reduction_axis[1] = 1;
#else
  // Compile-time axes trigger the packet (vectorized) path for ColMajor.
  Eigen::IndexList<Eigen::type2index<0>, Eigen::type2index<1> > reduction_axis;
#endif

  out = in.maximum(reduction_axis);

  for (int i = 0; i < 97; ++i) {
    for (int j = 0; j < 113; ++j) {
      float expected = std::numeric_limits<float>::lowest();
      for (int k = 0; k < 53; ++k) {
        for (int l = 0; l < 72; ++l) {
          expected = (std::max)(expected, in(l, k, i, j));
        }
      }
      VERIFY_IS_APPROX(out(i, j), expected);
    }
  }
}
// Reduce the two innermost (RowMajor) dimensions, 2 and 3.
template <int DataLayout>
static void test_innermost_first_dims() {
  Tensor<float, 4, DataLayout> in(72, 53, 97, 113);
  Tensor<float, 2, DataLayout> out(72, 53);
  in.setRandom();

#if !EIGEN_HAS_CONSTEXPR
  array<int, 2> reduction_axis;
  reduction_axis[0] = 2;
  reduction_axis[1] = 3;
#else
  // Compile-time axes trigger the packet (vectorized) path for RowMajor.
  Eigen::IndexList<Eigen::type2index<2>, Eigen::type2index<3> > reduction_axis;
#endif

  out = in.maximum(reduction_axis);

  for (int i = 0; i < 72; ++i) {
    for (int j = 0; j < 53; ++j) {
      float expected = std::numeric_limits<float>::lowest();
      for (int k = 0; k < 97; ++k) {
        for (int l = 0; l < 113; ++l) {
          expected = (std::max)(expected, in(i, j, k, l));
        }
      }
      VERIFY_IS_APPROX(out(i, j), expected);
    }
  }
}
// Reduce the two middle dimensions, 1 and 2.
template <int DataLayout>
static void test_reduce_middle_dims() {
  Tensor<float, 4, DataLayout> in(72, 53, 97, 113);
  Tensor<float, 2, DataLayout> out(72, 113);
  in.setRandom();

#if !EIGEN_HAS_CONSTEXPR
  array<int, 2> reduction_axis;
  reduction_axis[0] = 1;
  reduction_axis[1] = 2;
#else
  Eigen::IndexList<Eigen::type2index<1>, Eigen::type2index<2> > reduction_axis;
#endif

  out = in.maximum(reduction_axis);

  for (int i = 0; i < 72; ++i) {
    for (int j = 0; j < 113; ++j) {
      float expected = std::numeric_limits<float>::lowest();
      for (int k = 0; k < 53; ++k) {
        for (int l = 0; l < 97; ++l) {
          expected = (std::max)(expected, in(i, k, l, j));
        }
      }
      VERIFY_IS_APPROX(out(i, j), expected);
    }
  }
}
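When the reduction axes are compile-time constants, Eigen::IndexList with type2index (the EIGEN_HAS_CONSTEXPR branch of the four checks above) encodes them in the type; a standalone illustrative sketch with smaller, made-up sizes:

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::Tensor<float, 4> in(8, 5, 9, 11);
  in.setRandom();

  // Axes 1 and 3 are encoded in the type, so no runtime index array is built;
  // the checks above rely on this to exercise the vectorized reduction paths
  // when the innermost dimensions are the ones being reduced.
  Eigen::IndexList<Eigen::type2index<1>, Eigen::type2index<3> > axes;
  Eigen::Tensor<float, 2> out = in.maximum(axes);

  std::cout << out.dimension(0) << " x " << out.dimension(1) << "\n";  // 8 x 9
  return 0;
}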
// Test entry point: every subtest above is instantiated for both memory layouts.
void test_cxx11_tensor_reduction() {
  CALL_SUBTEST(test_trivial_reductions<ColMajor>());
  CALL_SUBTEST(test_trivial_reductions<RowMajor>());
  CALL_SUBTEST(test_simple_reductions<ColMajor>());
  CALL_SUBTEST(test_simple_reductions<RowMajor>());
  CALL_SUBTEST(test_reductions_in_expr<ColMajor>());
  CALL_SUBTEST(test_reductions_in_expr<RowMajor>());
  CALL_SUBTEST(test_full_reductions<ColMajor>());
  CALL_SUBTEST(test_full_reductions<RowMajor>());
  CALL_SUBTEST(test_user_defined_reductions<ColMajor>());
  CALL_SUBTEST(test_user_defined_reductions<RowMajor>());
  CALL_SUBTEST(test_tensor_maps<ColMajor>());
  CALL_SUBTEST(test_tensor_maps<RowMajor>());
  CALL_SUBTEST(test_static_dims<ColMajor>());
  CALL_SUBTEST(test_static_dims<RowMajor>());
  CALL_SUBTEST(test_innermost_last_dims<ColMajor>());
  CALL_SUBTEST(test_innermost_last_dims<RowMajor>());
  CALL_SUBTEST(test_innermost_first_dims<ColMajor>());
  CALL_SUBTEST(test_innermost_first_dims<RowMajor>());
  CALL_SUBTEST(test_reduce_middle_dims<ColMajor>());
  CALL_SUBTEST(test_reduce_middle_dims<RowMajor>());
}