// xref: /aosp_15_r20/external/ComputeLibrary/src/runtime/IScheduler.cpp (revision c217d954acce2dbc11938adb493fc0abd69584f3)
/*
 * Copyright (c) 2016-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/runtime/IScheduler.h"

#include "arm_compute/core/CPP/ICPPKernel.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/core/Log.h"
#include "arm_compute/core/Window.h"
#include "src/common/cpuinfo/CpuInfo.h"
#include "src/runtime/SchedulerUtils.h"

namespace arm_compute
{
// Construct the scheduler and cache a recommended thread count queried from
// the cpuinfo helper; subclasses read it via num_threads_hint().
IScheduler::IScheduler()
{
    // Work out the best possible number of execution threads
    _num_threads_hint = cpuinfo::num_threads_hint();
}

// Accessor for the shared CPUInfo instance exposed by CPUInfo::get();
// all scheduler implementations use this same object.
CPUInfo &IScheduler::cpu_info()
{
    return CPUInfo::get();
}

set_num_threads_with_affinity(unsigned int num_threads,BindFunc func)46*c217d954SCole Faust void IScheduler::set_num_threads_with_affinity(unsigned int num_threads, BindFunc func)
47*c217d954SCole Faust {
48*c217d954SCole Faust     ARM_COMPUTE_UNUSED(num_threads, func);
49*c217d954SCole Faust     ARM_COMPUTE_ERROR("Feature for affinity setting is not implemented");
50*c217d954SCole Faust }
51*c217d954SCole Faust 
// Return the execution-thread-count hint computed once in the constructor.
unsigned int IScheduler::num_threads_hint() const
{
    return _num_threads_hint;
}

schedule_common(ICPPKernel * kernel,const Hints & hints,const Window & window,ITensorPack & tensors)57*c217d954SCole Faust void IScheduler::schedule_common(ICPPKernel *kernel, const Hints &hints, const Window &window, ITensorPack &tensors)
58*c217d954SCole Faust {
59*c217d954SCole Faust     ARM_COMPUTE_ERROR_ON_MSG(!kernel, "The child class didn't set the kernel");
60*c217d954SCole Faust #ifndef BARE_METAL
61*c217d954SCole Faust     const Window &max_window = window;
62*c217d954SCole Faust     if(hints.split_dimension() == IScheduler::split_dimensions_all)
63*c217d954SCole Faust     {
64*c217d954SCole Faust         /*
65*c217d954SCole Faust          * if the split dim is size_t max then this signals we should parallelise over
66*c217d954SCole Faust          * all dimensions
67*c217d954SCole Faust          */
68*c217d954SCole Faust         const std::size_t m = max_window.num_iterations(Window::DimX);
69*c217d954SCole Faust         const std::size_t n = max_window.num_iterations(Window::DimY);
70*c217d954SCole Faust 
71*c217d954SCole Faust         //in c++17 this can be swapped for   auto [ m_threads, n_threads ] = split_2d(...
72*c217d954SCole Faust         unsigned m_threads, n_threads;
73*c217d954SCole Faust         std::tie(m_threads, n_threads) = scheduler_utils::split_2d(this->num_threads(), m, n);
74*c217d954SCole Faust 
75*c217d954SCole Faust         std::vector<IScheduler::Workload> workloads;
76*c217d954SCole Faust         for(unsigned int ni = 0; ni != n_threads; ++ni)
77*c217d954SCole Faust         {
78*c217d954SCole Faust             for(unsigned int mi = 0; mi != m_threads; ++mi)
79*c217d954SCole Faust             {
80*c217d954SCole Faust                 workloads.push_back(
81*c217d954SCole Faust                     [ni, mi, m_threads, n_threads, &max_window, &kernel](const ThreadInfo & info)
82*c217d954SCole Faust                 {
83*c217d954SCole Faust                     //narrow the window to our mi-ni workload
84*c217d954SCole Faust                     Window win = max_window.split_window(Window::DimX, mi, m_threads)
85*c217d954SCole Faust                                  .split_window(Window::DimY, ni, n_threads);
86*c217d954SCole Faust 
87*c217d954SCole Faust                     win.validate();
88*c217d954SCole Faust 
89*c217d954SCole Faust                     Window thread_locator;
90*c217d954SCole Faust                     thread_locator.set(Window::DimX, Window::Dimension(mi, m_threads));
91*c217d954SCole Faust                     thread_locator.set(Window::DimY, Window::Dimension(ni, n_threads));
92*c217d954SCole Faust 
93*c217d954SCole Faust                     thread_locator.validate();
94*c217d954SCole Faust 
95*c217d954SCole Faust                     kernel->run_nd(win, info, thread_locator);
96*c217d954SCole Faust                 });
97*c217d954SCole Faust             }
98*c217d954SCole Faust         }
99*c217d954SCole Faust         run_workloads(workloads);
100*c217d954SCole Faust     }
101*c217d954SCole Faust     else
102*c217d954SCole Faust     {
103*c217d954SCole Faust         const unsigned int num_iterations = max_window.num_iterations(hints.split_dimension());
104*c217d954SCole Faust         const unsigned int num_threads    = std::min(num_iterations, this->num_threads());
105*c217d954SCole Faust 
106*c217d954SCole Faust         if(num_iterations == 0)
107*c217d954SCole Faust         {
108*c217d954SCole Faust             return;
109*c217d954SCole Faust         }
110*c217d954SCole Faust 
111*c217d954SCole Faust         if(!kernel->is_parallelisable() || num_threads == 1)
112*c217d954SCole Faust         {
113*c217d954SCole Faust             ThreadInfo info;
114*c217d954SCole Faust             info.cpu_info = &cpu_info();
115*c217d954SCole Faust             if(tensors.empty())
116*c217d954SCole Faust             {
117*c217d954SCole Faust                 kernel->run(max_window, info);
118*c217d954SCole Faust             }
119*c217d954SCole Faust             else
120*c217d954SCole Faust             {
121*c217d954SCole Faust                 kernel->run_op(tensors, max_window, info);
122*c217d954SCole Faust             }
123*c217d954SCole Faust         }
124*c217d954SCole Faust         else
125*c217d954SCole Faust         {
126*c217d954SCole Faust             unsigned int num_windows = 0;
127*c217d954SCole Faust             switch(hints.strategy())
128*c217d954SCole Faust             {
129*c217d954SCole Faust                 case StrategyHint::STATIC:
130*c217d954SCole Faust                     num_windows = num_threads;
131*c217d954SCole Faust                     break;
132*c217d954SCole Faust                 case StrategyHint::DYNAMIC:
133*c217d954SCole Faust                 {
134*c217d954SCole Faust                     const unsigned int granule_threshold = (hints.threshold() <= 0) ? num_threads : static_cast<unsigned int>(hints.threshold());
135*c217d954SCole Faust                     // Make sure we don't use some windows which are too small as this might create some contention on the ThreadFeeder
136*c217d954SCole Faust                     num_windows = num_iterations > granule_threshold ? granule_threshold : num_iterations;
137*c217d954SCole Faust                     break;
138*c217d954SCole Faust                 }
139*c217d954SCole Faust                 default:
140*c217d954SCole Faust                     ARM_COMPUTE_ERROR("Unknown strategy");
141*c217d954SCole Faust             }
142*c217d954SCole Faust             // Make sure the smallest window is larger than minimim workload size
143*c217d954SCole Faust             num_windows = adjust_num_of_windows(max_window, hints.split_dimension(), num_windows, *kernel, cpu_info());
144*c217d954SCole Faust 
145*c217d954SCole Faust             std::vector<IScheduler::Workload> workloads(num_windows);
146*c217d954SCole Faust             for(unsigned int t = 0; t < num_windows; ++t)
147*c217d954SCole Faust             {
148*c217d954SCole Faust                 //Capture 't' by copy, all the other variables by reference:
149*c217d954SCole Faust                 workloads[t] = [t, &hints, &max_window, &num_windows, &kernel, &tensors](const ThreadInfo & info)
150*c217d954SCole Faust                 {
151*c217d954SCole Faust                     Window win = max_window.split_window(hints.split_dimension(), t, num_windows);
152*c217d954SCole Faust                     win.validate();
153*c217d954SCole Faust 
154*c217d954SCole Faust                     if(tensors.empty())
155*c217d954SCole Faust                     {
156*c217d954SCole Faust                         kernel->run(win, info);
157*c217d954SCole Faust                     }
158*c217d954SCole Faust                     else
159*c217d954SCole Faust                     {
160*c217d954SCole Faust                         kernel->run_op(tensors, win, info);
161*c217d954SCole Faust                     }
162*c217d954SCole Faust                 };
163*c217d954SCole Faust             }
164*c217d954SCole Faust             run_workloads(workloads);
165*c217d954SCole Faust         }
166*c217d954SCole Faust     }
167*c217d954SCole Faust #else  /* !BARE_METAL */
168*c217d954SCole Faust     ARM_COMPUTE_UNUSED(kernel, hints, window, tensors);
169*c217d954SCole Faust #endif /* !BARE_METAL */
170*c217d954SCole Faust }
171*c217d954SCole Faust 
run_tagged_workloads(std::vector<Workload> & workloads,const char * tag)172*c217d954SCole Faust void IScheduler::run_tagged_workloads(std::vector<Workload> &workloads, const char *tag)
173*c217d954SCole Faust {
174*c217d954SCole Faust     ARM_COMPUTE_UNUSED(tag);
175*c217d954SCole Faust     run_workloads(workloads);
176*c217d954SCole Faust }
177*c217d954SCole Faust 
adjust_num_of_windows(const Window & window,std::size_t split_dimension,std::size_t init_num_windows,const ICPPKernel & kernel,const CPUInfo & cpu_info)178*c217d954SCole Faust std::size_t IScheduler::adjust_num_of_windows(const Window &window, std::size_t split_dimension, std::size_t init_num_windows, const ICPPKernel &kernel, const CPUInfo &cpu_info)
179*c217d954SCole Faust {
180*c217d954SCole Faust     // Mitigation of the narrow split issue, which occurs when the split dimension is too small to split (hence "narrow").
181*c217d954SCole Faust     if(window.num_iterations(split_dimension) < init_num_windows )
182*c217d954SCole Faust     {
183*c217d954SCole Faust         auto recommended_split_dim = Window::DimX;
184*c217d954SCole Faust         for(std::size_t dims = Window::DimY; dims <= Window::DimW; ++dims)
185*c217d954SCole Faust         {
186*c217d954SCole Faust             if(window.num_iterations(recommended_split_dim) < window.num_iterations(dims))
187*c217d954SCole Faust             {
188*c217d954SCole Faust                 recommended_split_dim = dims;
189*c217d954SCole Faust             }
190*c217d954SCole Faust         }
191*c217d954SCole Faust         ARM_COMPUTE_LOG_INFO_MSG_WITH_FORMAT_CORE("%zu dimension is not a suitable dimension to split the workload. Recommended: %zu recommended_split_dim", split_dimension,
192*c217d954SCole Faust                                                   recommended_split_dim);
193*c217d954SCole Faust     }
194*c217d954SCole Faust 
195*c217d954SCole Faust     for(auto t = init_num_windows; t > 0; --t) // Trying the highest number of windows ,init_num_windows, first
196*c217d954SCole Faust     {
197*c217d954SCole Faust         // Try splitting the workload into t, subject to each subworkload size <= mws.
198*c217d954SCole Faust         if((window.num_iterations(split_dimension) / kernel.get_mws(cpu_info, t)) >= t)
199*c217d954SCole Faust         {
200*c217d954SCole Faust             if(t != init_num_windows)
201*c217d954SCole Faust             {
202*c217d954SCole Faust                 ARM_COMPUTE_LOG_INFO_MSG_CORE("The scheduler is using a different thread count than the one assigned by the user.");
203*c217d954SCole Faust             }
204*c217d954SCole Faust             return t;
205*c217d954SCole Faust         }
206*c217d954SCole Faust     }
207*c217d954SCole Faust     ARM_COMPUTE_LOG_INFO_MSG_CORE("The scheduler is using single thread instead of the thread count assigned by the user.");
208*c217d954SCole Faust     return 1; //  If the workload is so small that it can't be split, we should run a single thread
209*c217d954SCole Faust }
} // namespace arm_compute