// parallel_for.h
  1. // This file is part of libigl, a simple c++ geometry processing library.
  2. //
  3. // Copyright (C) 2016 Alec Jacobson <alecjacobson@gmail.com>
  4. //
  5. // This Source Code Form is subject to the terms of the Mozilla Public License
  6. // v. 2.0. If a copy of the MPL was not distributed with this file, You can
  7. // obtain one at http://mozilla.org/MPL/2.0/.
  8. #ifndef IGL_PARALLEL_FOR_H
  9. #define IGL_PARALLEL_FOR_H
  10. #include "igl_inline.h"
  11. #include <functional>
  12. namespace igl
  13. {
// PARALLEL_FOR Functional implementation of a basic, open-mp style, parallel
// for loop. If the inner block of a for-loop can be rewritten/encapsulated in
// a single (anonymous/lambda) function call `func` so that the serial code
// looks like:
//
// for(int i = 0;i<loop_size;i++)
// {
//   func(i);
// }
//
// then `parallel_for(loop_size,func,min_parallel)` will use as many threads as
// available on the current hardware to parallelize this for loop so long as
// loop_size>=min_parallel, otherwise it will just use a serial for loop.
//
// Inputs:
//   loop_size  number of iterations. I.e. for(int i = 0;i<loop_size;i++) ...
//   func  function handle taking iteration index as only argument to compute
//     inner block of for loop I.e. for(int i ...){ func(i); }
//   min_parallel  min size of loop_size such that parallel (non-serial)
//     thread pooling should be attempted {0}
// Returns true iff thread pool was invoked
template<typename Index, typename FunctionType >
inline bool parallel_for(
  const Index loop_size,
  const FunctionType & func,
  const size_t min_parallel=0);
// PARALLEL_FOR Functional implementation of an open-mp style, parallel for
// loop with accumulation. For example, serial code separated into n chunks
// (each to be parallelized with a thread) might look like:
//
// Eigen::VectorXd S;
// double sum = 0;
// const auto & prep_func = [&S](int n){ S = Eigen::VectorXd::Zero(n); };
// const auto & func = [&X,&S](int i, int t){ S(t) += X(i); };
// const auto & accum_func = [&S,&sum](int t){ sum += S(t); };
// prep_func(n);
// for(int i = 0;i<loop_size;i++)
// {
//   func(i,i%n);
// }
// for(int t = 0;t<n;t++)
// {
//   accum_func(t);
// }
//
// Inputs:
//   loop_size  number of iterations. I.e. for(int i = 0;i<loop_size;i++) ...
//   prep_func  function handle taking n >= number of threads as only
//     argument
//   func  function handle taking iteration index i and thread id t as only
//     arguments to compute inner block of for loop I.e.
//     for(int i ...){ func(i,t); }
//   accum_func  function handle taking thread index as only argument, to be
//     called after all calls of func, e.g., for serial accumulation across
//     all n (potential) threads, see n in description of prep_func.
//   min_parallel  min size of loop_size such that parallel (non-serial)
//     thread pooling should be attempted {0}
// Returns true iff thread pool was invoked
template<
  typename Index,
  typename PrepFunctionType,
  typename FunctionType,
  typename AccumFunctionType
  >
inline bool parallel_for(
  const Index loop_size,
  const PrepFunctionType & prep_func,
  const FunctionType & func,
  const AccumFunctionType & accum_func,
  const size_t min_parallel=0);
  84. }
  85. // Implementation
  86. #include <cmath>
  87. #include <cassert>
  88. #include <thread>
  89. #include <vector>
  90. #include <algorithm>
  91. template<typename Index, typename FunctionType >
  92. inline bool igl::parallel_for(
  93. const Index loop_size,
  94. const FunctionType & func,
  95. const size_t min_parallel)
  96. {
  97. using namespace std;
  98. // no op preparation/accumulation
  99. const auto & no_op = [](const size_t /*n/t*/){};
  100. // two-parameter wrapper ignoring thread id
  101. const auto & wrapper = [&func](Index i,size_t /*t*/){ func(i); };
  102. return parallel_for(loop_size,no_op,wrapper,no_op,min_parallel);
  103. }
  104. template<
  105. typename Index,
  106. typename PreFunctionType,
  107. typename FunctionType,
  108. typename AccumFunctionType>
  109. inline bool igl::parallel_for(
  110. const Index loop_size,
  111. const PreFunctionType & prep_func,
  112. const FunctionType & func,
  113. const AccumFunctionType & accum_func,
  114. const size_t min_parallel)
  115. {
  116. assert(loop_size>=0);
  117. if(loop_size==0) return false;
  118. // Estimate number of threads in the pool
  119. // http://ideone.com/Z7zldb
  120. const static size_t sthc = std::thread::hardware_concurrency();
  121. const size_t nthreads = loop_size<min_parallel?0:(sthc==0?8:sthc);
  122. if(nthreads==0)
  123. {
  124. // serial
  125. prep_func(1);
  126. for(Index i = 0;i<loop_size;i++) func(i,0);
  127. accum_func(0);
  128. return false;
  129. }else
  130. {
  131. // Size of a slice for the range functions
  132. Index slice =
  133. std::max(
  134. (Index)std::round((loop_size+1)/static_cast<double>(nthreads)),(Index)1);
  135. // [Helper] Inner loop
  136. const auto & range = [&func](const Index k1, const Index k2, const size_t t)
  137. {
  138. for(Index k = k1; k < k2; k++) func(k,t);
  139. };
  140. prep_func(nthreads);
  141. // Create pool and launch jobs
  142. std::vector<std::thread> pool;
  143. pool.reserve(nthreads);
  144. // Inner range extents
  145. Index i1 = 0;
  146. Index i2 = std::min(0 + slice, loop_size);
  147. {
  148. size_t t = 0;
  149. for (; t+1 < nthreads && i1 < loop_size; ++t)
  150. {
  151. pool.emplace_back(range, i1, i2, t);
  152. i1 = i2;
  153. i2 = std::min(i2 + slice, loop_size);
  154. }
  155. if (i1 < loop_size)
  156. {
  157. pool.emplace_back(range, i1, loop_size, t);
  158. }
  159. }
  160. // Wait for jobs to finish
  161. for (std::thread &t : pool) if (t.joinable()) t.join();
  162. // Accumulate across threads
  163. for(size_t t = 0;t<nthreads;t++)
  164. {
  165. accum_func(t);
  166. }
  167. return true;
  168. }
  169. }
  170. //#ifndef IGL_STATIC_LIBRARY
  171. //#include "parallel_for.cpp"
  172. //#endif
  173. #endif