// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_PARALLELIZER_H
#define EIGEN_PARALLELIZER_H

namespace Eigen {

namespace internal {

/** \internal */
inline void manage_multi_threading(Action action, int* v)
{
  static EIGEN_UNUSED int m_maxThreads = -1;

  if(action==SetAction)
  {
    eigen_internal_assert(v!=0);
    m_maxThreads = *v;
  }
  else if(action==GetAction)
  {
    eigen_internal_assert(v!=0);
#ifdef EIGEN_HAS_OPENMP
    if(m_maxThreads>0)
      *v = m_maxThreads;
    else
      *v = omp_get_max_threads();
#else
    *v = 1;
#endif
  }
  else
  {
    eigen_internal_assert(false);
  }
}

} // end namespace internal

/** Must be called first when calling Eigen from multiple threads. */
inline void initParallel()
{
  // Trigger the one-time initialization of the thread-count and cache-size
  // statics from a single thread, so it cannot race once worker threads run.
  int nbt;
  internal::manage_multi_threading(GetAction, &nbt);
  std::ptrdiff_t l1, l2, l3;
  internal::manage_caching_sizes(GetAction, &l1, &l2, &l3);
}
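
// A minimal usage sketch (illustrative, not part of this file): call
// initParallel() once from the main thread before spawning threads that
// use Eigen.
//
//   int main()
//   {
//     Eigen::initParallel();   // one-time setup from the main thread
//     // ... launch threads that call into Eigen ...
//   }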

/** \returns the max number of threads reserved for Eigen
  * \sa setNbThreads */
inline int nbThreads()
{
  int ret;
  internal::manage_multi_threading(GetAction, &ret);
  return ret;
}

/** Sets the max number of threads reserved for Eigen
  * \sa nbThreads */
inline void setNbThreads(int v)
{
  internal::manage_multi_threading(SetAction, &v);
}
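
// Illustrative usage (assumes an OpenMP build; without EIGEN_HAS_OPENMP,
// nbThreads() always returns 1):
//
//   Eigen::setNbThreads(4);       // cap Eigen at 4 threads
//   int n = Eigen::nbThreads();   // n == 4
//   Eigen::setNbThreads(0);       // a non-positive value restores the
//                                 // OpenMP default (omp_get_max_threads())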

namespace internal {

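// Per-thread bookkeeping for the parallel GEMM kernel below: each thread packs
// the lhs rows [lhs_start, lhs_start+lhs_length), and the volatile sync/users
// counters let the other threads wait for, then release, that packed block.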
template<typename Index> struct GemmParallelInfo
{
  GemmParallelInfo() : sync(-1), users(0), lhs_start(0), lhs_length(0) {}

  Index volatile sync;
  int volatile users;

  Index lhs_start;
  Index lhs_length;
};

template<bool Condition, typename Functor, typename Index>
void parallelize_gemm(const Functor& func, Index rows, Index cols, Index depth, bool transpose)
{
  // TODO when EIGEN_USE_BLAS is defined,
  // we should still enable OMP for other scalar types
#if !(defined (EIGEN_HAS_OPENMP)) || defined (EIGEN_USE_BLAS)
  // FIXME the transpose variable is only needed to properly split
  // the matrix product when multithreading is enabled. This is a temporary
  // fix to support row-major destination matrices. This whole
  // parallelizer mechanism has to be redesigned anyway.
  EIGEN_UNUSED_VARIABLE(depth);
  EIGEN_UNUSED_VARIABLE(transpose);
  func(0,rows, 0,cols);
#else

  // Dynamically check whether we should enable or disable OpenMP.
  // The conditions are:
  // - the max number of threads we can create is greater than 1
  // - we are not already inside a parallel region
  // - the sizes are large enough

  // compute the maximal number of threads from the size of the product:
  // This first heuristic takes into account that the product kernel is fully optimized when working with nr columns at once.
  Index size = transpose ? rows : cols;
  Index pb_max_threads = std::max<Index>(1, size / Functor::Traits::nr);

  // compute the maximal number of threads from the total amount of work:
  double work = static_cast<double>(rows) * static_cast<double>(cols) *
                static_cast<double>(depth);
  double kMinTaskSize = 50000; // FIXME improve this heuristic.
  pb_max_threads = std::max<Index>(1, std::min<Index>(pb_max_threads, work / kMinTaskSize));
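
  // Illustrative arithmetic (assuming nr==4): for a 1024x1024x1024 product,
  // size/nr = 256 while work/kMinTaskSize = 1024^3/50000 ~= 21475, so the
  // first bound dominates and pb_max_threads == 256.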

  // compute the number of threads we are going to use
  Index threads = std::min<Index>(nbThreads(), pb_max_threads);

  // if multi-threading is explicitly disabled, not useful, or if we are already inside a parallel session,
  // then abort multi-threading
  // FIXME omp_get_num_threads()>1 only works for OpenMP; what if the user does not use OpenMP?
  if((!Condition) || (threads==1) || (omp_get_num_threads()>1))
    return func(0,rows, 0,cols);

  Eigen::initParallel();
  func.initParallelSession(threads);

  if(transpose)
    std::swap(rows,cols);

  ei_declare_aligned_stack_constructed_variable(GemmParallelInfo<Index>,info,threads,0);

  #pragma omp parallel num_threads(threads)
  {
    Index i = omp_get_thread_num();
    // Note that the actual number of threads might be lower than the requested number.
    Index actual_threads = omp_get_num_threads();

    // Split the columns into even blocks rounded down to a multiple of 4, and
    // the rows into even blocks rounded down to a multiple of the kernel's
    // register-blocking height mr; the last thread takes the remainders.
    Index blockCols = (cols / actual_threads) & ~Index(0x3);
    Index blockRows = (rows / actual_threads);
    blockRows = (blockRows/Functor::Traits::mr)*Functor::Traits::mr;

    Index r0 = i*blockRows;
    Index actualBlockRows = (i+1==actual_threads) ? rows-r0 : blockRows;

    Index c0 = i*blockCols;
    Index actualBlockCols = (i+1==actual_threads) ? cols-c0 : blockCols;
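
    // Illustrative numbers (assuming mr==8): with rows==cols==1000 and
    // actual_threads==4, blockCols = (1000/4) & ~3 = 248 and
    // blockRows = (250/8)*8 = 248, so threads 0..2 each get a 248-wide
    // slice and thread 3 gets the remaining 256 rows/columns.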

    info[i].lhs_start = r0;
    info[i].lhs_length = actualBlockRows;

    if(transpose) func(c0, actualBlockCols, 0, rows, info);
    else          func(0, rows, c0, actualBlockCols, info);
  }
#endif
}

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_PARALLELIZER_H