Skip to content
Snippets Groups Projects
Commit cc0b8b19 authored by Danilo Piparo's avatar Danilo Piparo
Browse files

[IMT] Isolate work steered by ThreadedExecutor in a task arena

We enclose the parallel_for and parallel_reduce invocations in a
task_arena::isolate call because we want to prevent work stealing.
parent edf59c2d
Branches
Tags
No related merge requests found
...@@ -83,10 +83,20 @@ template<typename T> ...@@ -83,10 +83,20 @@ template<typename T>
static T ReduceHelper(const std::vector<T> &objs, const std::function<T(T a, T b)> &redfunc) static T ReduceHelper(const std::vector<T> &objs, const std::function<T(T a, T b)> &redfunc)
{ {
using BRange_t = tbb::blocked_range<decltype(objs.begin())>; using BRange_t = tbb::blocked_range<decltype(objs.begin())>;
return tbb::parallel_reduce(BRange_t(objs.begin(), objs.end()), T{},
[redfunc](BRange_t const & range, T init) { auto pred = [redfunc](BRange_t const & range, T init) {
return std::accumulate(range.begin(), range.end(), init, redfunc); return std::accumulate(range.begin(), range.end(), init, redfunc);
}, redfunc); };
T result;
BRange_t objRange(objs.begin(), objs.end());
tbb::this_task_arena::isolate([&]{
result = tbb::parallel_reduce(objRange, T{}, pred, redfunc);
});
return result;
} }
} // End NS Internal } // End NS Internal
...@@ -110,7 +120,9 @@ namespace ROOT { ...@@ -110,7 +120,9 @@ namespace ROOT {
void TThreadExecutor::ParallelFor(unsigned int start, unsigned int end, unsigned step, const std::function<void(unsigned int i)> &f) void TThreadExecutor::ParallelFor(unsigned int start, unsigned int end, unsigned step, const std::function<void(unsigned int i)> &f)
{ {
tbb::parallel_for(start, end, step, f); tbb::this_task_arena::isolate([&]{
tbb::parallel_for(start, end, step, f);
});
} }
double TThreadExecutor::ParallelReduce(const std::vector<double> &objs, const std::function<double(double a, double b)> &redfunc) double TThreadExecutor::ParallelReduce(const std::vector<double> &objs, const std::function<double(double a, double b)> &redfunc)
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment