// aten/src/ATen/ParallelOpenMP.h
#pragma once

#include <algorithm>
#include <atomic>
#include <cstddef>
#include <exception>

#ifdef _OPENMP
#define INTRA_OP_PARALLEL

#include <omp.h>
#endif
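// INTRA_OP_PARALLEL advertises that a real intra-op parallel backend is
// available; ATen code checks this macro before taking parallel code paths.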

#ifdef _OPENMP
namespace at::internal {
template <typename F>
inline void invoke_parallel(
    int64_t begin,
    int64_t end,
    int64_t grain_size,
    const F& f) {
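  // The first worker thread to throw wins the race on err_flag and stashes
  // its exception in eptr; it is rethrown on the calling thread once the
  // parallel region joins.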
  std::atomic_flag err_flag = ATOMIC_FLAG_INIT;
  std::exception_ptr eptr;

#pragma omp parallel
  {
    // choose number of tasks based on grain size and number of threads
    // can't use num_threads clause due to bugs in GOMP's thread pool (See
    // #32008)
    int64_t num_threads = omp_get_num_threads();
    if (grain_size > 0) {
      num_threads = std::min(num_threads, divup((end - begin), grain_size));
    }

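    // divup is ceiling division, so [begin, end) is split into num_threads
    // contiguous chunks; thread tid takes
    // [begin + tid * chunk_size, begin + (tid + 1) * chunk_size), clamped to
    // end. Threads whose chunk starts at or past end simply do nothing.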
    int64_t tid = omp_get_thread_num();
    int64_t chunk_size = divup((end - begin), num_threads);
    int64_t begin_tid = begin + tid * chunk_size;
    if (begin_tid < end) {
      try {
        internal::ThreadIdGuard tid_guard(tid);
        f(begin_tid, std::min(end, chunk_size + begin_tid));
      } catch (...) {
        if (!err_flag.test_and_set()) {
          eptr = std::current_exception();
        }
      }
    }
  }
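  // Propagate the captured exception across the OpenMP fork/join boundary.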
  if (eptr) {
    std::rethrow_exception(eptr);
  }
}
} // namespace at::internal
#endif // _OPENMP
55