xref: /aosp_15_r20/external/ComputeLibrary/src/runtime/IScheduler.cpp (revision c217d954acce2dbc11938adb493fc0abd69584f3)
/*
 * Copyright (c) 2016-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/IScheduler.h"

#include "arm_compute/core/CPP/ICPPKernel.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Log.h"
#include "arm_compute/core/Window.h"
#include "src/common/cpuinfo/CpuInfo.h"
#include "src/runtime/SchedulerUtils.h"

namespace arm_compute
{
IScheduler::IScheduler()
{
    // Work out the best possible number of execution threads
    _num_threads_hint = cpuinfo::num_threads_hint();
}

CPUInfo &IScheduler::cpu_info()
{
    return CPUInfo::get();
}

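// Concrete schedulers that support pinning worker threads to cores are
// expected to override set_num_threads_with_affinity(); this base
// implementation only reports that affinity control is unavailable.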
void IScheduler::set_num_threads_with_affinity(unsigned int num_threads, BindFunc func)
{
    ARM_COMPUTE_UNUSED(num_threads, func);
    ARM_COMPUTE_ERROR("Affinity setting is not implemented");
}

unsigned int IScheduler::num_threads_hint() const
{
    return _num_threads_hint;
}

void IScheduler::schedule_common(ICPPKernel *kernel, const Hints &hints, const Window &window, ITensorPack &tensors)
{
    ARM_COMPUTE_ERROR_ON_MSG(!kernel, "The child class didn't set the kernel");
#ifndef BARE_METAL
    const Window &max_window = window;
    if(hints.split_dimension() == IScheduler::split_dimensions_all)
    {
        /*
         * If the split dimension is size_t max, this signals that we should
         * parallelise over all dimensions.
         */
        const std::size_t m = max_window.num_iterations(Window::DimX);
        const std::size_t n = max_window.num_iterations(Window::DimY);

        // In C++17 this can be swapped for: auto [m_threads, n_threads] = split_2d(...
        unsigned m_threads, n_threads;
        std::tie(m_threads, n_threads) = scheduler_utils::split_2d(this->num_threads(), m, n);

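        // Illustrative note (assumed behaviour of the helper, not defined in
        // this file): split_2d factorises the thread count into an
        // m_threads x n_threads grid that roughly matches the shape of the
        // m x n iteration space, e.g. 4 threads over a roughly square window
        // could come back as a 2 x 2 grid, one workload per (mi, ni) tile below.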
        std::vector<IScheduler::Workload> workloads;
        for(unsigned int ni = 0; ni != n_threads; ++ni)
        {
            for(unsigned int mi = 0; mi != m_threads; ++mi)
            {
                workloads.push_back(
                    [ni, mi, m_threads, n_threads, &max_window, &kernel](const ThreadInfo & info)
                {
                    // Narrow the window to our (mi, ni) workload
                    Window win = max_window.split_window(Window::DimX, mi, m_threads)
                                 .split_window(Window::DimY, ni, n_threads);

                    win.validate();

                    Window thread_locator;
                    thread_locator.set(Window::DimX, Window::Dimension(mi, m_threads));
                    thread_locator.set(Window::DimY, Window::Dimension(ni, n_threads));

                    thread_locator.validate();

                    kernel->run_nd(win, info, thread_locator);
                });
            }
        }
        run_workloads(workloads);
    }
    else
    {
        const unsigned int num_iterations = max_window.num_iterations(hints.split_dimension());
        const unsigned int num_threads    = std::min(num_iterations, this->num_threads());

        if(num_iterations == 0)
        {
            return;
        }

        if(!kernel->is_parallelisable() || num_threads == 1)
        {
            ThreadInfo info;
            info.cpu_info = &cpu_info();
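            // An empty tensor pack indicates a kernel configured with its own
            // tensors, so the plain run() entry point is used; otherwise the
            // caller's pack is forwarded to the operator-style run_op().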
            if(tensors.empty())
            {
                kernel->run(max_window, info);
            }
            else
            {
                kernel->run_op(tensors, max_window, info);
            }
        }
        else
        {
            unsigned int num_windows = 0;
            switch(hints.strategy())
            {
                case StrategyHint::STATIC:
                    num_windows = num_threads;
                    break;
                case StrategyHint::DYNAMIC:
                {
                    const unsigned int granule_threshold = (hints.threshold() <= 0) ? num_threads : static_cast<unsigned int>(hints.threshold());
                    // Make sure we don't use windows that are too small, as this might create contention on the ThreadFeeder
                    num_windows = num_iterations > granule_threshold ? granule_threshold : num_iterations;
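                    // For example (illustrative numbers): a threshold of 8 with
                    // 20 iterations yields 8 windows, while only 5 iterations
                    // would yield 5 windows, one per iteration.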
                    break;
                }
                default:
                    ARM_COMPUTE_ERROR("Unknown strategy");
            }
            // Make sure the smallest window is larger than the minimum workload size
            num_windows = adjust_num_of_windows(max_window, hints.split_dimension(), num_windows, *kernel, cpu_info());

            std::vector<IScheduler::Workload> workloads(num_windows);
            for(unsigned int t = 0; t < num_windows; ++t)
            {
                // Capture 't' by copy; all the other variables by reference:
                workloads[t] = [t, &hints, &max_window, &num_windows, &kernel, &tensors](const ThreadInfo & info)
                {
                    Window win = max_window.split_window(hints.split_dimension(), t, num_windows);
                    win.validate();

                    if(tensors.empty())
                    {
                        kernel->run(win, info);
                    }
                    else
                    {
                        kernel->run_op(tensors, win, info);
                    }
                };
            }
            run_workloads(workloads);
        }
    }
#else  /* !BARE_METAL */
    ARM_COMPUTE_UNUSED(kernel, hints, window, tensors);
#endif /* !BARE_METAL */
}
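
// A minimal usage sketch (an assumption based on this interface, not code
// from this file): a concrete scheduler's schedule() overload would
// typically forward to schedule_common() with an empty tensor pack:
//
//   void SomeScheduler::schedule(ICPPKernel *kernel, const Hints &hints)
//   {
//       ITensorPack tensors; // empty: the kernel->run() path is taken
//       schedule_common(kernel, hints, kernel->window(), tensors);
//   }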

void IScheduler::run_tagged_workloads(std::vector<Workload> &workloads, const char *tag)
{
    ARM_COMPUTE_UNUSED(tag);
    run_workloads(workloads);
}

std::size_t IScheduler::adjust_num_of_windows(const Window &window, std::size_t split_dimension, std::size_t init_num_windows, const ICPPKernel &kernel, const CPUInfo &cpu_info)
{
    // Mitigation of the narrow split issue, which occurs when the split dimension is too small to split (hence "narrow").
    if(window.num_iterations(split_dimension) < init_num_windows)
    {
        auto recommended_split_dim = Window::DimX;
        for(std::size_t dims = Window::DimY; dims <= Window::DimW; ++dims)
        {
            if(window.num_iterations(recommended_split_dim) < window.num_iterations(dims))
            {
                recommended_split_dim = dims;
            }
        }
        ARM_COMPUTE_LOG_INFO_MSG_WITH_FORMAT_CORE("Dimension %zu is not suitable to split the workload. Recommended dimension: %zu", split_dimension,
                                                  recommended_split_dim);
    }

    for(auto t = init_num_windows; t > 0; --t) // Try the highest number of windows, init_num_windows, first
    {
        // Try splitting the workload into t windows, subject to each sub-workload receiving at least mws iterations.
        if((window.num_iterations(split_dimension) / kernel.get_mws(cpu_info, t)) >= t)
        {
            if(t != init_num_windows)
            {
                ARM_COMPUTE_LOG_INFO_MSG_CORE("The scheduler is using a different thread count than the one assigned by the user.");
            }
            return t;
        }
    }
    ARM_COMPUTE_LOG_INFO_MSG_CORE("The scheduler is using a single thread instead of the thread count assigned by the user.");
    return 1; // If the workload is so small that it can't be split, run on a single thread
}
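
// Worked example (illustrative numbers): with init_num_windows == 8,
// 16 iterations on the split dimension and get_mws() returning a constant 4,
// the loop above rejects t = 8..5 (16 / 4 = 4 < t) and returns t == 4
// (4 >= 4), so every window still receives at least the minimum workload size.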

} // namespace arm_compute