/*
 * Copyright (c) 2016-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/CPP/CPPScheduler.h"

#include "arm_compute/core/CPP/ICPPKernel.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/Log.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/utils/misc/Utility.h"
#include "support/Mutex.h"

#include <atomic>
#include <condition_variable>
#include <iostream>
#include <list>
#include <memory>
#include <mutex>
#include <system_error>
#include <thread>
#include <vector>

namespace arm_compute
{
namespace
{
class ThreadFeeder
{
public:
    /** Constructor
     *
     * @param[in] start First value that will be returned by the feeder
     * @param[in] end   End condition (The last value returned by get_next() will be end - 1)
     */
    explicit ThreadFeeder(unsigned int start = 0, unsigned int end = 0)
        : _atomic_counter(start), _end(end)
    {
    }
    /** Return the next element in the range if there is one.
     *
     * @param[out] next Will contain the next element if there is one.
     *
     * @return False if the end of the range has been reached and next wasn't set.
     */
    bool get_next(unsigned int &next)
    {
        next = atomic_fetch_add_explicit(&_atomic_counter, 1u, std::memory_order_relaxed);
        return next < _end;
    }

private:
    std::atomic_uint   _atomic_counter;
    const unsigned int _end;
};

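// A minimal usage sketch of ThreadFeeder (illustration only, not library code;
// the counts below are hypothetical): each of N threads runs one pre-assigned
// index first (its thread_id), then atomically claims further indices from the
// shared counter until the range is exhausted.
//
//     ThreadFeeder feeder(4, 10);   // 4 threads pre-assigned indices 0..3, 10 workloads in total
//     unsigned int idx = 2;         // e.g. this thread starts on workloads[2]
//     do
//     {
//         // ... run workloads[idx] ...
//     }
//     while(feeder.get_next(idx));  // claims 4, 5, ... shared with the other threads, stops at 10
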
/** Execute workloads[info.thread_id] first, then call the feeder to get the index of the next workload to run.
 *
 * Will run workloads until the feeder reaches the end of its range.
 *
 * @param[in]     workloads The array of workloads
 * @param[in,out] feeder    The feeder indicating which workload to execute next.
 * @param[in]     info      Threading and CPU info.
 */
void process_workloads(std::vector<IScheduler::Workload> &workloads, ThreadFeeder &feeder, const ThreadInfo &info)
{
    unsigned int workload_index = info.thread_id;
    do
    {
        ARM_COMPUTE_ERROR_ON(workload_index >= workloads.size());
        workloads[workload_index](info);
    }
    while(feeder.get_next(workload_index));
}

/** Set thread affinity. Pin current thread to a particular core
 *
 * @param[in] core_id ID of the core to which the current thread is pinned
 */
void set_thread_affinity(int core_id)
{
    if(core_id < 0)
    {
        return;
    }

#if !defined(_WIN64) && !defined(__APPLE__) && !defined(__OpenBSD__)
    cpu_set_t set;
    CPU_ZERO(&set);
    CPU_SET(core_id, &set);
    ARM_COMPUTE_EXIT_ON_MSG(sched_setaffinity(0, sizeof(set), &set), "Error setting thread affinity");
#endif /* !defined(_WIN64) && !defined(__APPLE__) && !defined(__OpenBSD__) */
}

/** There are currently 2 scheduling modes supported by CPPScheduler
 *
 * Linear:
 *  The default mode, where all the scheduling is carried out by the main thread linearly (in a loop).
 *  E.g. if there are 8 threads in total, there will be 1 main thread + 7 threads in the thread pool, and it is the main
 *  thread's responsibility to start all the other threads in the thread pool.
 *
 * Fanout:
 *  In fanout mode, the scheduling (starting other threads) task is distributed across many threads instead of just
 *  the main thread.
 *
 *  The scheduler has a fixed parameter: wake_fanout, and the scheduling sequence goes like this:
 *  1. Main thread wakes the first wake_fanout - 1 FanoutThreads from the thread pool
 *      From thread: 0
 *      To thread (non-inclusive): wake_fanout - 1
 *  2. Each FanoutThread then wakes wake_fanout FanoutThreads from the thread pool:
 *      From thread: (i + 1) * wake_fanout - 1
 *      To thread (non-inclusive): (i + 2) * wake_fanout - 1
 *      where i is the current thread's thread id
 *      The end is clamped at the size of the thread pool / the number of threads in use - 1
 *
 *  E.g. for a total number of 8 threads (1 main thread, 7 FanoutThreads in the thread pool) with a fanout of 3:
 *  1. Main thread wakes FanoutThreads 0, 1
 *  2. FanoutThread 0 wakes FanoutThreads 2, 3, 4
 *  3. FanoutThread 1 wakes FanoutThreads 5, 6
 */

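// A worked sketch of the wake ranges described above (hypothetical helper, for
// illustration only; the library computes these inline in Impl::set_fanout_mode):
//
//     std::pair<unsigned int, unsigned int> wake_range(unsigned int id, unsigned int fanout, unsigned int in_use)
//     {
//         const unsigned int beg = (id + 1) * fanout - 1;
//         const unsigned int end = std::min((id + 2) * fanout - 1, in_use - 1);
//         return { beg, end }; // FanoutThread id wakes pool threads [beg, end)
//     }
//
//     // With 8 threads in use and a fanout of 3:
//     // wake_range(0, 3, 8) -> [2, 5): FanoutThread 0 wakes 2, 3, 4
//     // wake_range(1, 3, 8) -> [5, 7): FanoutThread 1 wakes 5, 6
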
class Thread final
{
public:
    /** Start a new thread
     *
     * Thread will be pinned to a given core id if value is non-negative
     *
     * @param[in] core_pin Core id to pin the thread on. If negative no thread pinning will take place
     */
    explicit Thread(int core_pin = -1);

    Thread(const Thread &) = delete;
    Thread &operator=(const Thread &) = delete;
    Thread(Thread &&)                 = delete;
    Thread &operator=(Thread &&) = delete;

    /** Destructor. Make the thread join. */
    ~Thread();

    /** Set workloads */
    void set_workload(std::vector<IScheduler::Workload> *workloads, ThreadFeeder &feeder, const ThreadInfo &info);

    /** Request the worker thread to start executing workloads.
     *
     * The thread will start by executing workloads[info.thread_id] and will then call the feeder to
     * get the index of the following workload to run.
     *
     * @note This function will return as soon as the workloads have been sent to the worker thread.
     * wait() needs to be called to ensure the execution is complete.
     */
    void start();

    /** Wait for the current kernel execution to complete. */
    void wait();

    /** Function run by the worker thread. */
    void worker_thread();

    /** Set the scheduling strategy to be linear */
    void set_linear_mode()
    {
        _thread_pool = nullptr;
        _wake_beg    = 0;
        _wake_end    = 0;
    }

    /** Set the scheduling strategy to be fanout */
    void set_fanout_mode(std::list<Thread> *thread_pool, unsigned int wake_beg, unsigned int wake_end)
    {
        _thread_pool = thread_pool;
        _wake_beg    = wake_beg;
        _wake_end    = wake_end;
    }

private:
    std::thread                        _thread{};
    ThreadInfo                         _info{};
    std::vector<IScheduler::Workload> *_workloads{ nullptr };
    ThreadFeeder                      *_feeder{ nullptr };
    std::mutex                         _m{};
    std::condition_variable            _cv{};
    bool                               _wait_for_work{ false };
    bool                               _job_complete{ true };
    std::exception_ptr                 _current_exception{ nullptr };
    int                                _core_pin{ -1 };
    std::list<Thread>                 *_thread_pool{ nullptr };
    unsigned int                       _wake_beg{ 0 };
    unsigned int                       _wake_end{ 0 };
};

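// A minimal sketch of the Thread handshake (illustration only; `workloads` is
// a hypothetical std::vector<IScheduler::Workload>): start() wakes the parked
// worker and returns immediately, wait() blocks until the job completes and
// rethrows any exception caught on the worker thread.
//
//     ThreadFeeder feeder(2, workloads.size()); // 2 threads: ids 0 (worker) and 1 (main)
//     ThreadInfo   info;
//     info.num_threads = 2;
//     info.thread_id   = 0;
//     Thread worker;                            // spawns the std::thread, which parks on the condition variable
//     worker.set_workload(&workloads, feeder, info);
//     worker.start();                           // hand over the job
//     // ... the main thread processes its own share of workloads here ...
//     worker.wait();                            // join point for this job only, not for the thread
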
Thread::Thread(int core_pin)
    : _core_pin(core_pin)
{
    _thread = std::thread(&Thread::worker_thread, this);
}

Thread::~Thread()
{
    // Make sure worker thread has ended
    if(_thread.joinable())
    {
        ThreadFeeder feeder;
        set_workload(nullptr, feeder, ThreadInfo());
        start();
        _thread.join();
    }
}

void Thread::set_workload(std::vector<IScheduler::Workload> *workloads, ThreadFeeder &feeder, const ThreadInfo &info)
{
    _workloads = workloads;
    _feeder    = &feeder;
    _info      = info;
}

void Thread::start()
{
    {
        std::lock_guard<std::mutex> lock(_m);
        _wait_for_work = true;
        _job_complete  = false;
    }
    _cv.notify_one();
}

void Thread::wait()
{
    {
        std::unique_lock<std::mutex> lock(_m);
        _cv.wait(lock, [&] { return _job_complete; });
    }

    if(_current_exception)
    {
        std::rethrow_exception(_current_exception);
    }
}

void Thread::worker_thread()
{
    set_thread_affinity(_core_pin);

    while(true)
    {
        std::unique_lock<std::mutex> lock(_m);
        _cv.wait(lock, [&] { return _wait_for_work; });
        _wait_for_work = false;

        _current_exception = nullptr;

        // Exit if the worker thread has not been fed with workloads
        if(_workloads == nullptr || _feeder == nullptr)
        {
            return;
        }

        // Wake up more peer threads from the thread pool if this job has been delegated to the current thread
        if(_thread_pool != nullptr)
        {
            auto thread_it = _thread_pool->begin();
            std::advance(thread_it, std::min(static_cast<unsigned int>(_thread_pool->size()), _wake_beg));
            auto wake_end = std::min(_wake_end, static_cast<unsigned int>(_info.num_threads - 1));
            for(unsigned int t = _wake_beg; t < wake_end; ++t, ++thread_it)
            {
                thread_it->start();
            }
        }

#ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED
        try
        {
#endif /* ARM_COMPUTE_EXCEPTIONS_DISABLED */
            process_workloads(*_workloads, *_feeder, _info);

#ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED
        }
        catch(...)
        {
            _current_exception = std::current_exception();
        }
#endif /* ARM_COMPUTE_EXCEPTIONS_DISABLED */
        _workloads    = nullptr;
        _job_complete = true;
        lock.unlock();
        _cv.notify_one();
    }
}
} // namespace

struct CPPScheduler::Impl final
{
    constexpr static unsigned int m_default_wake_fanout = 4;
    enum class Mode
    {
        Linear,
        Fanout
    };
    enum class ModeToggle
    {
        None,
        Linear,
        Fanout
    };
    explicit Impl(unsigned int thread_hint)
        : _num_threads(thread_hint), _threads(_num_threads - 1), _mode(Mode::Linear), _wake_fanout(0U)
    {
        const auto mode_env_v = utility::tolower(utility::getenv("ARM_COMPUTE_CPP_SCHEDULER_MODE"));
        if(mode_env_v == "linear")
        {
            _forced_mode = ModeToggle::Linear;
        }
        else if(mode_env_v == "fanout")
        {
            _forced_mode = ModeToggle::Fanout;
        }
        else
        {
            _forced_mode = ModeToggle::None;
        }
    }
    void set_num_threads(unsigned int num_threads, unsigned int thread_hint)
    {
        _num_threads = num_threads == 0 ? thread_hint : num_threads;
        _threads.resize(_num_threads - 1);
        auto_switch_mode(_num_threads);
    }
    void set_num_threads_with_affinity(unsigned int num_threads, unsigned int thread_hint, BindFunc func)
    {
        _num_threads = num_threads == 0 ? thread_hint : num_threads;

        // Set affinity on the main thread
        set_thread_affinity(func(0, thread_hint));

        // Set affinity on the worker threads
        _threads.clear();
        for(auto i = 1U; i < _num_threads; ++i)
        {
            _threads.emplace_back(func(i, thread_hint));
        }
        auto_switch_mode(_num_threads);
    }
    void auto_switch_mode(unsigned int num_threads_to_use)
    {
        // If the environment variable is set to either mode, it overrides the mode selected based on num_threads_to_use
        if(_forced_mode == ModeToggle::Fanout || (_forced_mode == ModeToggle::None && num_threads_to_use > 8))
        {
            set_fanout_mode(m_default_wake_fanout, num_threads_to_use);
            ARM_COMPUTE_LOG_INFO_MSG_WITH_FORMAT_CORE("Set CPPScheduler to Fanout mode, with wake up fanout : %d and %d threads to use\n", this->wake_fanout(), num_threads_to_use);
        }
        else // Equivalent to (_forced_mode == ModeToggle::Linear || (_forced_mode == ModeToggle::None && num_threads_to_use <= 8))
        {
            set_linear_mode();
            ARM_COMPUTE_LOG_INFO_MSG_WITH_FORMAT_CORE("Set CPPScheduler to Linear mode, with %d threads to use\n", num_threads_to_use);
        }
    }
    void set_linear_mode()
    {
        for(auto &thread : _threads)
        {
            thread.set_linear_mode();
        }
        _mode        = Mode::Linear;
        _wake_fanout = 0U;
    }
    void set_fanout_mode(unsigned int wake_fanout, unsigned int num_threads_to_use)
    {
        ARM_COMPUTE_ERROR_ON(num_threads_to_use > _threads.size() + 1);
        const auto actual_wake_fanout = std::max(2U, std::min(wake_fanout, num_threads_to_use - 1));
        auto       thread_it          = _threads.begin();
        for(auto i = 1U; i < num_threads_to_use; ++i, ++thread_it)
        {
            const auto wake_begin = i * actual_wake_fanout - 1;
            const auto wake_end   = std::min((i + 1) * actual_wake_fanout - 1, num_threads_to_use - 1);
            thread_it->set_fanout_mode(&_threads, wake_begin, wake_end);
        }
        // Reset the remaining threads' wake up schedule
        while(thread_it != _threads.end())
        {
            thread_it->set_fanout_mode(&_threads, 0U, 0U);
            ++thread_it;
        }
        _mode        = Mode::Fanout;
        _wake_fanout = actual_wake_fanout;
    }
    unsigned int num_threads() const
    {
        return _num_threads;
    }
    unsigned int wake_fanout() const
    {
        return _wake_fanout;
    }
    Mode mode() const
    {
        return _mode;
    }

    void run_workloads(std::vector<IScheduler::Workload> &workloads);

    unsigned int       _num_threads;
    std::list<Thread>  _threads;
    arm_compute::Mutex _run_workloads_mutex{};
    Mode               _mode{ Mode::Linear };
    ModeToggle         _forced_mode{ ModeToggle::None };
    unsigned int       _wake_fanout{ 0 };
};

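// Note: the scheduling mode can also be forced at runtime through the
// environment variable read in Impl's constructor above, e.g. (shell syntax):
//
//     ARM_COMPUTE_CPP_SCHEDULER_MODE=fanout ./my_app
//     ARM_COMPUTE_CPP_SCHEDULER_MODE=linear ./my_app
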
/*
 * This singleton has been deprecated and will be removed in future releases
 */
CPPScheduler &CPPScheduler::get()
{
    static CPPScheduler scheduler;
    return scheduler;
}

CPPScheduler::CPPScheduler()
    : _impl(std::make_unique<Impl>(num_threads_hint()))
{
}

CPPScheduler::~CPPScheduler() = default;

void CPPScheduler::set_num_threads(unsigned int num_threads)
{
    // No changes in the number of threads while current workloads are running
    arm_compute::lock_guard<std::mutex> lock(_impl->_run_workloads_mutex);
    _impl->set_num_threads(num_threads, num_threads_hint());
}

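// Typical client-side usage (a sketch; passing 0 falls back to the thread hint):
//
//     CPPScheduler::get().set_num_threads(4); // 1 main thread + 3 pool threads
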
void CPPScheduler::set_num_threads_with_affinity(unsigned int num_threads, BindFunc func)
{
    // No changes in the number of threads while current workloads are running
    arm_compute::lock_guard<std::mutex> lock(_impl->_run_workloads_mutex);
    _impl->set_num_threads_with_affinity(num_threads, num_threads_hint(), func);
}

unsigned int CPPScheduler::num_threads() const
{
    return _impl->num_threads();
}

#ifndef DOXYGEN_SKIP_THIS
void CPPScheduler::run_workloads(std::vector<IScheduler::Workload> &workloads)
{
    // Mutex to ensure other threads won't interfere with the setup of the current thread's workloads
    // Other threads' workloads will be scheduled after the current thread's workloads have finished
    // This is not great because different threads' workloads won't run in parallel, but at least they
    // won't interfere with each other and deadlock.
    arm_compute::lock_guard<std::mutex> lock(_impl->_run_workloads_mutex);
    const unsigned int num_threads_to_use = std::min(_impl->num_threads(), static_cast<unsigned int>(workloads.size()));
    if(num_threads_to_use < 1)
    {
        return;
    }
    // Re-adjust the mode if the actual number of threads to use is different from the number of threads created
    _impl->auto_switch_mode(num_threads_to_use);
    int num_threads_to_start = 0;
    switch(_impl->mode())
    {
        case CPPScheduler::Impl::Mode::Fanout:
        {
            num_threads_to_start = static_cast<int>(_impl->wake_fanout()) - 1;
            break;
        }
        case CPPScheduler::Impl::Mode::Linear:
        default:
        {
            num_threads_to_start = static_cast<int>(num_threads_to_use) - 1;
            break;
        }
    }
    ThreadFeeder feeder(num_threads_to_use, workloads.size());
    ThreadInfo   info;
    info.cpu_info    = &cpu_info();
    info.num_threads = num_threads_to_use;
    unsigned int t         = 0;
    auto         thread_it = _impl->_threads.begin();
    // Set num_threads_to_use - 1 workloads to the threads as the remaining 1 is left to the main thread
    for(; t < num_threads_to_use - 1; ++t, ++thread_it)
    {
        info.thread_id = t;
        thread_it->set_workload(&workloads, feeder, info);
    }
    thread_it = _impl->_threads.begin();
    for(int i = 0; i < num_threads_to_start; ++i, ++thread_it)
    {
        thread_it->start();
    }
    info.thread_id = t;                         // Set main thread's thread_id
    process_workloads(workloads, feeder, info); // Main thread processes workloads
#ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED
    try
    {
#endif /* ARM_COMPUTE_EXCEPTIONS_DISABLED */
        thread_it = _impl->_threads.begin();
        for(unsigned int i = 0; i < num_threads_to_use - 1; ++i, ++thread_it)
        {
            thread_it->wait();
        }
#ifndef ARM_COMPUTE_EXCEPTIONS_DISABLED
    }
    catch(const std::system_error &e)
    {
        std::cerr << "Caught system_error with code " << e.code() << " meaning " << e.what() << '\n';
    }
#endif /* ARM_COMPUTE_EXCEPTIONS_DISABLED */
}
#endif /* DOXYGEN_SKIP_THIS */

void CPPScheduler::schedule_op(ICPPKernel *kernel, const Hints &hints, const Window &window, ITensorPack &tensors)
{
    schedule_common(kernel, hints, window, tensors);
}

void CPPScheduler::schedule(ICPPKernel *kernel, const Hints &hints)
{
    ITensorPack tensors;
    schedule_common(kernel, hints, kernel->window(), tensors);
}
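
// Usage sketch (hypothetical kernel object, for illustration): split the
// kernel's window along Window::DimY and run the resulting workloads across
// the configured number of threads.
//
//     CPPScheduler::get().schedule(&kernel, IScheduler::Hints(Window::DimY));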
} // namespace arm_compute