Parallelizer.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_PARALLELIZER_H
#define EIGEN_PARALLELIZER_H

namespace Eigen {

namespace internal {

/** \internal */
inline void manage_multi_threading(Action action, int* v)
{
  static EIGEN_UNUSED int m_maxThreads = -1;

  if(action==SetAction)
  {
    eigen_internal_assert(v!=0);
    m_maxThreads = *v;
  }
  else if(action==GetAction)
  {
    eigen_internal_assert(v!=0);
    #ifdef EIGEN_HAS_OPENMP
    if(m_maxThreads>0)
      *v = m_maxThreads;
    else
      *v = omp_get_max_threads();
    #else
    *v = 1;
    #endif
  }
  else
  {
    eigen_internal_assert(false);
  }
}

} // end namespace internal

/** Must be called first when calling Eigen from multiple threads. */
inline void initParallel()
{
  int nbt;
  internal::manage_multi_threading(GetAction, &nbt);
  std::ptrdiff_t l1, l2, l3;
  internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
}

/** \returns the max number of threads reserved for Eigen
  * \sa setNbThreads */
inline int nbThreads()
{
  int ret;
  internal::manage_multi_threading(GetAction, &ret);
  return ret;
}

/** Sets the max number of threads reserved for Eigen
  * \sa nbThreads */
inline void setNbThreads(int v)
{
  internal::manage_multi_threading(SetAction, &v);
}

namespace internal {

template<typename Index> struct GemmParallelInfo
{
  GemmParallelInfo() : sync(-1), users(0), lhs_start(0), lhs_length(0) {}

  int volatile sync;
  int volatile users;

  Index lhs_start;
  Index lhs_length;
};

template<bool Condition, typename Functor, typename Index>
void parallelize_gemm(const Functor& func, Index rows, Index cols, Index depth, bool transpose)
{
  // TODO when EIGEN_USE_BLAS is defined,
  // we should still enable OMP for other scalar types
#if !(defined (EIGEN_HAS_OPENMP)) || defined (EIGEN_USE_BLAS)
  // FIXME the transpose variable is only needed to properly split
  // the matrix product when multithreading is enabled. This is a temporary
  // fix to support row-major destination matrices. This whole
  // parallelizer mechanism has to be redesigned anyway.
  EIGEN_UNUSED_VARIABLE(depth);
  EIGEN_UNUSED_VARIABLE(transpose);
  func(0,rows, 0,cols);
#else

  // Dynamically check whether we should enable or disable OpenMP.
  // The conditions are:
  // - the max number of threads we can create is greater than 1
  // - we are not already inside a parallel region
  // - the sizes are large enough

  // compute the maximal number of threads from the size of the product:
  // FIXME this has to be fine tuned
  Index size = transpose ? rows : cols;
  Index pb_max_threads = std::max<Index>(1,size / 32);
  // compute the maximal number of threads from the total amount of work:
  double work = static_cast<double>(rows) * static_cast<double>(cols) *
                static_cast<double>(depth);
  double kMinTaskSize = 50000; // Heuristic.
  pb_max_threads = std::max<Index>(1, std::min<Index>(pb_max_threads, work / kMinTaskSize));

  // compute the number of threads we are going to use
  Index threads = std::min<Index>(nbThreads(), pb_max_threads);
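
  // Illustrative walk-through of the heuristic above (added for exposition, not part of the
  // original source): for a 1024x1024x1024 product with transpose==false, size = cols = 1024,
  // so pb_max_threads = 1024/32 = 32; work = 1024^3 ~ 1.07e9 and work/kMinTaskSize ~ 21475,
  // which is far above 32, so the work bound does not lower it and threads = min(nbThreads(), 32).
  // For a tiny 16x16x16 product, size/32 == 0, hence pb_max_threads == 1 and the check below
  // falls back to a plain sequential call of func.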

  // if multi-threading is explicitly disabled, not useful, or if we are already in a parallel session,
  // then abort multi-threading
  // FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp?
  if((!Condition) || (threads==1) || (omp_get_num_threads()>1))
    return func(0,rows, 0,cols);

  Eigen::initParallel();
  func.initParallelSession(threads);

  if(transpose)
    std::swap(rows,cols);

  ei_declare_aligned_stack_constructed_variable(GemmParallelInfo<Index>,info,threads,0);

  #pragma omp parallel num_threads(threads)
  {
    Index i = omp_get_thread_num();
    // Note that the actual number of threads might be lower than the number of requested ones.
    Index actual_threads = omp_get_num_threads();

    Index blockCols = (cols / actual_threads) & ~Index(0x3);
    Index blockRows = (rows / actual_threads);
    blockRows = (blockRows/Functor::Traits::mr)*Functor::Traits::mr;

    Index r0 = i*blockRows;
    Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows;

    Index c0 = i*blockCols;
    Index actualBlockCols = (i+1==actual_threads) ? cols-c0 : blockCols;
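
    // Illustrative example of the split above (added for exposition, not part of the original
    // source), assuming rows == cols == 1000, actual_threads == 4 and Functor::Traits::mr == 8:
    // blockCols = (1000/4) & ~3 = 248 and blockRows = (250/8)*8 = 248, so threads 0..2 each
    // handle 248 rows/columns while the last thread picks up the remaining 256.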

    info[i].lhs_start = r0;
    info[i].lhs_length = actualBlockRows;

    if(transpose) func(c0, actualBlockCols, 0, rows, info);
    else          func(0, rows, c0, actualBlockCols, info);
  }
#endif
}

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_PARALLELIZER_H
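
For reference, here is a minimal usage sketch of the public entry points declared above (initParallel, setNbThreads, nbThreads). It is not part of the header; the thread cap and matrix sizes are arbitrary illustrative values, and the product is only split across threads when Eigen is built with OpenMP support.

#include <Eigen/Dense>
#include <iostream>

int main()
{
  // Recommended before using Eigen from multiple threads.
  Eigen::initParallel();

  // Cap the number of threads Eigen may use for large products (illustrative value).
  Eigen::setNbThreads(4);
  std::cout << "Eigen will use up to " << Eigen::nbThreads() << " threads\n";

  // A product large enough for parallelize_gemm's heuristic to engage.
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(1024, 1024);
  Eigen::MatrixXd B = Eigen::MatrixXd::Random(1024, 1024);
  Eigen::MatrixXd C = A * B;

  std::cout << "C(0,0) = " << C(0, 0) << std::endl;
  return 0;
}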