1 /*
2  * Copyright (c) 2017-2022 Arm Limited.
3  *
4  * SPDX-License-Identifier: MIT
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in all
14  * copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22  * SOFTWARE.
23  */
24 #pragma once
25 
26 #if !defined(_WIN64) && !defined(__OpenBSD__)
27 #include <alloca.h>
28 #endif /* !defined(_WIN64) && !defined(__OpenBSD__) */
29 
30 #include <algorithm>
31 #include <cassert>
#include <cstdint>
#include <cstring>
#include <memory>
#include <tuple>
#include <type_traits>
#include <vector>
32 
33 #include "arm_gemm.hpp"
34 #include "bias_adder.hpp"
35 #include "convolver.hpp"
36 #include "kernel_weight_format.hpp"
37 #include "ndrange.hpp"
38 #include "performance_parameters.hpp"
39 #include "transform.hpp"
40 #include "utils.hpp"
41 
42 #ifdef CYCLE_PROFILING
43 #include "profiler.hpp"
44 #endif
45 
46 #ifndef UNUSED
47 #define __I_DEFINED_UNUSED
48 #define UNUSED(x)  ((void)(x))
49 #endif
50 
51 namespace arm_gemm {
52 
53 namespace {
54 
55 // We need to invoke the kernel differently for quantizing and non-quantizing cases, so here is a shim class to do
56 // that.
57 
58 template<typename OutputStage, bool SeparateQuantize, bool FixedFormat>
59 class run_hybrid_kernel {
60 public:
61     template<typename strategy, typename Tlo, typename Tro, typename Tr>
62     static inline void run (
63 #ifdef CYCLE_PROFILING
64         profiler &prof,
65 #endif
66         const strategy &strat, unsigned int num_strings, const unsigned int *string_ptr, IndirectInputArg<Tlo> A_arg, unsigned int M, unsigned int N,
67         unsigned int kern_k, const Tro *b_ptr, size_t b_stride, IndirectOutputArg<Tr> output_arg, const Tr *bias_ptr, Activation act, bool accumulate,
68         const OutputStage &os, const int32_t *col_bias, unsigned int n_0 );
69 };
70 
71 template<>
72 template<typename strategy, typename Tlo, typename Tro, typename Tr>
73 inline void run_hybrid_kernel<Nothing, false, false>::run(
74 #ifdef CYCLE_PROFILING
75         profiler &prof,
76 #endif
77         const strategy &strat, unsigned int num_strings, const unsigned int *string_ptr, IndirectInputArg<Tlo> A_arg, unsigned int M, unsigned int N,
78         unsigned int kern_k, const Tro *b_ptr, size_t, IndirectOutputArg<Tr> output_arg, const Tr *bias_ptr, Activation act, bool accumulate,
79         const Nothing &, const int32_t *, unsigned int) {
80 #ifdef CYCLE_PROFILING
81     auto p = prof.ScopedProfiler(PROFILE_KERNEL, (unsigned long)M * kern_k * roundup(N, strategy::out_width()));
82 #endif
83     UNUSED(kern_k);
84 
85     /* Indirect hybrid kernels read the full width of the bias.  So we need to detect the case where we are writing
86      * a partial block and pad the bias for that block. */
87     if (bias_ptr && !accumulate && (N % strategy::out_width() != 0)) {
88         /* Break N into "N_bulk" (a multiple of output width) and "N_remainder" */
89         unsigned int N_remainder = N % strategy::out_width();
90         unsigned int N_bulk = N - N_remainder;
91 
92         /* Output argument to be used for the tail */
93         IndirectOutputArg<Tr> offset_output = output_arg;
94 
95         /* If there is a "bulk" to be processed, handle that and update "offset_output" appropriately. */
96         if (N_bulk > 0) {
97             strat.kernel(num_strings, string_ptr, A_arg, M, N_bulk, b_ptr, output_arg, bias_ptr, act, accumulate);
98 
99             if (output_arg.is_indirect) {
100                 offset_output = IndirectOutputArg<Tr>(output_arg.indirect.ptr, output_arg.indirect.offset + N_bulk);
101             } else {
102                 offset_output = IndirectOutputArg<Tr>(output_arg.direct.base + N_bulk, output_arg.direct.stride);
103             }
104         }
105 
106         /* Pad the bias buffer for the remainder */
107         Tr *bias_pad_buffer = reinterpret_cast<Tr *>(alloca(strategy::out_width() * sizeof(Tr)));
108         memcpy(bias_pad_buffer, bias_ptr + N_bulk, N_remainder * sizeof(Tr));
109 
110         /* Process the remainder, offsetting the B pointer as needed. */
111         strat.kernel(num_strings, string_ptr, A_arg, M, N_remainder, b_ptr + (N_bulk * kern_k), offset_output, bias_pad_buffer, act, accumulate);
112     } else {
113         strat.kernel(num_strings, string_ptr, A_arg, M, N, b_ptr, output_arg, bias_ptr, act, accumulate);
114     }
115 }
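
// Illustrative sketch of the bias-padding split above (values assumed; not part of the build).
// Suppose strategy::out_width() == 16 and N == 100:
//
//   N_remainder = 100 % 16 = 4
//   N_bulk      = 100 - 4  = 96
//
//   // Columns [0, 96) use the caller's bias directly:
//   strat.kernel(num_strings, string_ptr, A_arg, M, /*N=*/96, b_ptr, output_arg, bias_ptr, act, accumulate);
//   // Columns [96, 100) use a stack copy of the last 4 bias values, so the kernel's full-width
//   // bias read stays inside valid memory (the unused lanes are never written to the output):
//   strat.kernel(num_strings, string_ptr, A_arg, M, /*N=*/4, b_ptr + (96 * kern_k), offset_output, bias_pad_buffer, act, accumulate);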
116 
117 template<>
118 template<typename strategy, typename Tlo, typename Tro, typename Tr>
119 inline void run_hybrid_kernel<Nothing, false, true>::run(
120 #ifdef CYCLE_PROFILING
121         profiler &prof,
122 #endif
123         const strategy &strat, unsigned int num_strings, const unsigned int *string_ptr, IndirectInputArg<Tlo> A_arg, unsigned int M, unsigned int N,
124         unsigned int kern_k, const Tro *b_ptr, size_t b_stride, IndirectOutputArg<Tr> output_arg, const Tr *bias_ptr, Activation act, bool accumulate,
125         const Nothing &, const int32_t *, unsigned int) {
126 #ifdef CYCLE_PROFILING
127     auto p = prof.ScopedProfiler(PROFILE_KERNEL, (unsigned long)M * kern_k * roundup(N, strategy::out_width()));
128 #endif
129     UNUSED(kern_k);
130 
131     /* Indirect hybrid kernels read the full width of the bias.  So we need to detect the case where we are writing
132      * a partial block and pad the bias for that block. */
133     if (bias_ptr && !accumulate && (N % strategy::out_width() != 0)) {
134         /* Break N into "N_bulk" (a multiple of output width) and "N_remainder" */
135         unsigned int N_remainder = N % strategy::out_width();
136         unsigned int N_bulk = N - N_remainder;
137 
138         /* Output argument to be used for the tail */
139         IndirectOutputArg<Tr> offset_output = output_arg;
140 
141         /* If there is a "bulk" to be processed, handle that and update "offset_output" appropriately. */
142         if (N_bulk > 0) {
143             strat.kernel(num_strings, string_ptr, A_arg, M, N_bulk, b_ptr, b_stride, output_arg, bias_ptr, act, accumulate);
144 
145             if (output_arg.is_indirect) {
146                 offset_output = IndirectOutputArg<Tr>(output_arg.indirect.ptr, output_arg.indirect.offset + N_bulk);
147             } else {
148                 offset_output = IndirectOutputArg<Tr>(output_arg.direct.base + N_bulk, output_arg.direct.stride);
149             }
150         }
151 
152         /* Pad the bias buffer for the remainder */
153         Tr *bias_pad_buffer = reinterpret_cast<Tr *>(alloca(strategy::out_width() * sizeof(Tr)));
154         memcpy(bias_pad_buffer, bias_ptr + N_bulk, N_remainder * sizeof(Tr));
155 
156         /* Process the remainder, offsetting the B pointer as needed. */
157         strat.kernel(num_strings, string_ptr, A_arg, M, N_remainder,
158                      b_ptr + (N_bulk / strategy::stripe_width()) * b_stride, b_stride, offset_output,
159                      bias_pad_buffer, act, accumulate);
160     } else {
161         strat.kernel(num_strings, string_ptr, A_arg, M, N, b_ptr, b_stride, output_arg, bias_ptr, act, accumulate);
162     }
163 }
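
// The fixed-format variant above differs only in how it addresses B for the tail: B is stored in
// stripes of stripe_width() columns, each b_stride elements apart.  As a sketch (assumed
// stripe_width() == 4, same N == 100 split as before):
//
//   N_bulk = 96  =>  the remainder panel starts at b_ptr + (96 / 4) * b_stride = b_ptr + 24 * b_stride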
164 
165 template<>
166 template<typename strategy, typename Tlo, typename Tro, typename Tr>
167 inline void run_hybrid_kernel<Requantize32, false, false>::run(
168 #ifdef CYCLE_PROFILING
169         profiler &prof,
170 #endif
171         const strategy &strat, unsigned int num_strings, const unsigned int *string_ptr, IndirectInputArg<Tlo> A_arg, unsigned int M, unsigned int N,
172         unsigned int kern_k, const Tro *b_ptr, size_t, IndirectOutputArg<Tr> output_arg, const Tr *, Activation, bool,
173         const Requantize32 &os, const int32_t *col_bias, unsigned int n_0 ) {
174 #ifdef CYCLE_PROFILING
175     auto p = prof.ScopedProfiler(PROFILE_KERNEL, (unsigned long)M * kern_k * roundup(N, strategy::out_width()));
176 #endif
177     UNUSED(kern_k);
178 
179     strat.kernel(num_strings, string_ptr, A_arg, M, N, b_ptr, output_arg, &os, col_bias + n_0, n_0);
180 }
181 
182 template<>
183 template<typename strategy, typename Tlo, typename Tro, typename Tr>
184 inline void run_hybrid_kernel<Requantize32, true, false>::run(
185 #ifdef CYCLE_PROFILING
186         profiler &prof,
187 #endif
188         const strategy &strat, unsigned int num_strings, const unsigned int *string_ptr, IndirectInputArg<Tlo> A_arg, unsigned int M, unsigned int N,
189         unsigned int kern_k, const Tro *b_ptr, size_t, IndirectOutputArg<Tr> output_arg, const Tr *, Activation, bool,
190         const Requantize32 &os, const int32_t *col_bias, unsigned int n_0 ) {
191     UNUSED(kern_k);
192     // On this route we will only process one kernel height at a time and will make sure this happens in the driver loop.
193     assert(M <= strategy::out_height());
194     // We don't yet support indirect output (as the quantizer can't do it).
195     assert(output_arg.is_indirect == false);
196 
197     // We need a row sum buffer and intermediate output buffer.
198     // These go on the stack as they are not too large, using an automatic array and alloca() respectively.
199     int32_t row_sums[strategy::out_height()];
200     typename strategy::result_type *result_buffer;
201 
202     unsigned int output_width = roundup(N, strategy::out_width());
203 
204     result_buffer = reinterpret_cast<typename strategy::result_type *>(alloca(output_width * strategy::out_height() * sizeof(typename strategy::result_type)));
205 
206     {
207 #ifdef CYCLE_PROFILING
208         auto p = prof.ScopedProfiler(PROFILE_KERNEL, (unsigned long)M * kern_k * roundup(N, strategy::out_width()));
209 #endif
210         // Perform the GEMM, into the output buffer.
211         strat.kernel(num_strings, string_ptr, A_arg, M, N, b_ptr, IndirectOutputArg<typename strategy::result_type>(result_buffer, output_width), nullptr, Activation(), false);
212     }
213 
214     if (os.b_offset != 0) {
215 #ifdef CYCLE_PROFILING
216         auto p = prof.ScopedProfiler(PROFILE_ROWSUMS, (unsigned long)M * kern_k);
217 #endif
218         row_sums_indirect(num_strings, string_ptr, A_arg, M, row_sums, &os);
219     } else {
220         memset(row_sums, 0, sizeof(int32_t) * strategy::out_height());
221     }
222 
223     {
224 #ifdef CYCLE_PROFILING
225         auto p = prof.ScopedProfiler(PROFILE_QUANTIZE, (unsigned long)M * N);
226 #endif
227         // Quantize
228         requantize_block_32(os, N, M, result_buffer, output_width, output_arg.direct.base, output_arg.direct.stride, row_sums, col_bias + n_0, n_0);
229     }
230 }
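
// Sketch of the arithmetic the separate-quantize path above implements, assuming the usual
// asymmetric quantization scheme (a_offset/b_offset are the zero points held in Requantize32):
//
//   sum_k (A[i,k] - a_offset) * (B[k,j] - b_offset)
//     =   sum_k A[i,k] * B[k,j]        // raw int32 GEMM into result_buffer (strat.kernel above)
//       - b_offset * row_sum_A[i]      // row_sums_indirect(); skipped entirely when b_offset == 0
//       - a_offset * col_sum_B[j]      // precomputed column sums passed in via col_bias
//       + K * a_offset * b_offset      // constant term
//
// requantize_block_32() combines these corrections with the requantization scale(s) and output
// zero point to produce the final quantized output; exactly where the constant term is folded in
// is an implementation detail of the column-sum/requantize helpers.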
231 
232 template<typename strategy, bool FixedFormat>
233 struct stripe_width {
234     static unsigned int get() {
235         return strategy::stripe_width();
236     }
237 };
238 
239 template<typename strategy>
240 struct stripe_width<strategy, false> {
241     static unsigned int get() {
242         return 0;
243     }
244 };
245 
246 template<typename strategy, bool FixedFormat>
247 struct kernel_weight_format {
248     static KernelWeightFormat get() {
249         return strategy::kernel_weight_format();
250     }
251 };
252 
253 template<typename strategy>
254 struct kernel_weight_format<strategy, false> {
255     static KernelWeightFormat get() {
256         return KernelWeightFormat::NON_FIXED;
257     }
258 };
259 
260 } // anonymous namespace
261 
262 // Implementation of the GemmCommon abstract class.
263 template<typename strategy, typename To, typename Tr, typename OutputStage=Nothing, bool SeparateQuantize=false, bool FixedFormat=false>
264 class GemmHybridIndirect : public GemmCommon<To, Tr> {
265     typedef typename strategy::lhs_operand_type Tloi;
266     typedef typename strategy::rhs_operand_type Troi;
267     typedef typename strategy::result_type Tri;
268 
269     GemmArgs           _args;
270     OutputStage        _os = {};
271 
272     /* Quantized support (in addition to 'output stage' above) */
273     int32_t *_col_bias = nullptr;
274 
275     const unsigned int _Ktotal;
276     const unsigned int _rounded_Ksize;
277 
278     /* Blocking info */
279     const unsigned int _k_block;
280     const unsigned int _n_block;
281     const unsigned int _Mround;
282 
283     /* Pretransposed buffer. */
284     const Troi *_B_transposed=nullptr;
285 
286     /* Indirect parameters.  _indirect_buf doubles as a flag to indicate that "indirect" transform should be used. */
287     const To * const * const * _indirect_buf = nullptr;
288 
289     /* Convolver - only set up for convolution problems, so also doubles as a flag. */
290     std::unique_ptr<convolver<To>>  _convolver = nullptr;
291 
292     // Array of pointers to output rows
293 //    Tr * const *        _output_ptrs;
294 
295     const NDRange<4> _window_range;
296 
297     unsigned int get_col_sum_size() const {
298         if (std::is_same<OutputStage, Requantize32>::value) {
299             return _args._Nsize * _args._nmulti * sizeof(int32_t);
300         } else {
301             return 0;
302         }
303     }
304 
305     static unsigned int get_ktotal(const GemmArgs &args) {
306         return args._Ksections * roundup(args._Ksize, strategy::k_unroll());
307     }
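
    // Example (assumed values): Ksections == 3, Ksize == 10, strategy::k_unroll() == 4
    //   => each section is padded to roundup(10, 4) == 12, so get_ktotal() returns 3 * 12 == 36.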
308 
309     static unsigned int compute_k_block(const GemmArgs &args) {
310         // Some kernels don't support accumulate mode - these can't do K blocking at all.
311         if (!strategy::supports_accumulate() || std::is_same<OutputStage, Requantize32>::value) {
312             return get_ktotal(args);
313         }
314 
315         if (args._cfg && args._cfg->inner_block_size) {
316             return roundup(args._cfg->inner_block_size, strategy::k_unroll());
317         }
318 
319         // Experimental data suggests an optimal block size of 512 for FP32 (scaling accordingly for other
320         // datatypes); but don't divide into blocks until we hit 1.5X this size.
321         unsigned int target_block_size = 2048 / sizeof(To);
322         auto ktotal = get_ktotal(args);
323 
324         if (ktotal > ((target_block_size*3)/2)) {
325             unsigned int target_blocks = iceildiv(ktotal, target_block_size);
326 
327             unsigned int block_size = iceildiv(ktotal, target_blocks);
328 
329             block_size = roundup(block_size, strategy::k_unroll());
330 
331             return block_size;
332         }
333 
334         return ktotal;
335     }
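
    // Illustrative walk-through of the K-blocking heuristic above (assumed values):
    //   To == float            => target_block_size = 2048 / 4 = 512
    //   get_ktotal(args) = 2048 > (512 * 3) / 2 = 768, so we split:
    //     target_blocks = iceildiv(2048, 512) = 4
    //     block_size    = iceildiv(2048, 4)   = 512, rounded up to a multiple of k_unroll()
    //   A ktotal of 700 would be below the 768 threshold and stay as one block.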
336 
337     // New N blocking strategy: if it's narrow, or much taller than it is wide, do the full width.  Otherwise use a
338     // single kernel-width block.
339     static unsigned int compute_n_block(const GemmArgs &args, const OutputStage os = {}) {
340         if (args._cfg && args._cfg->outer_block_size) {
341             return args._cfg->outer_block_size;
342         }
343 
344         if (args._Nsize <= 64) {
345             return args._Nsize;
346         }
347 
348         if ((args._Msize / args._Nsize) > 155) {
349             return args._Nsize;
350         }
351 
352         // "Asymmetric" quantizing GEMMs require a different approach - the tall skinny blocks we would otherwise
353         // use imply a great deal of repeated work performing the row sums.  If row sums are involved, work out how
354         // much "column" parallelism is going to be required and set the block size accordingly.
355         if (std::is_same<OutputStage, Requantize32>::value) {
356             const Requantize32 *qp = reinterpret_cast<const Requantize32 *>(&os);
357 
358             // Row sums only needed if b_offset isn't 0
359             if (qp->b_offset != 0) {
360                 // We can already parallelize across batches, multis and rows (in units of 'out_height')
361                 int multi_row_parallelism = args._nmulti * args._nbatches * iceildiv(args._Msize, strategy::out_height());
362 
363                 // If this isn't enough, we will need to split up the columns too.
364                 if (multi_row_parallelism < args._maxthreads) {
365                     unsigned int columns_needed = iceildiv(args._maxthreads, multi_row_parallelism);
366 
367                     unsigned int n_block = iceildiv(args._Nsize, columns_needed);
368 
369                     return roundup(n_block, strategy::out_width());
370                 }
371 
372                 // Multi/Batch/Row parallelism is enough - don't split up the columns.
373                 return args._Nsize;
374             }
375         }
376 
377         if (args._Ksize <= 128 && args._maxthreads <= 16) {
378             return strategy::out_width() * 3;
379         }
380 
381         return strategy::out_width();
382     }
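
    // Example of the Requantize32 branch above (assumed: out_height() == 8, out_width() == 12):
    //   Msize=64, nbatches=1, nmulti=1 => multi_row_parallelism = 1 * 1 * iceildiv(64, 8) = 8
    //   maxthreads=32                  => columns_needed = iceildiv(32, 8) = 4
    //   Nsize=1000                     => n_block = roundup(iceildiv(1000, 4), 12) = roundup(250, 12) = 252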
383 
384 public:
385     GemmHybridIndirect(GemmHybridIndirect &) = delete;
386     GemmHybridIndirect & operator= (GemmHybridIndirect &) = delete;
387 
388     /* Constructor */
389     GemmHybridIndirect(const GemmArgs &args, const OutputStage &os)
390               : _args(args), _os(os), _Ktotal(get_ktotal(args)),
391                 _rounded_Ksize(roundup(args._Ksize, strategy::k_unroll())),
392                 _k_block(compute_k_block(args)), _n_block(compute_n_block(args, os)),
393                 _Mround(roundup(args._Msize, strategy::out_height())),
394                 _window_range(iceildiv(args._Msize, strategy::out_height()), args._nbatches,
395                               iceildiv(args._Nsize, _n_block), args._nmulti)
396     {
397         // We take a copy of the arguments (not a pointer or reference), but there is no lifetime requirement on the
398         // GemmConfig.  Clear out the pointer to avoid accidents.
399         _args._cfg = nullptr;
400     }
401 
402     /* Constructor without OutputStage */
403     GemmHybridIndirect(const GemmArgs &args)
404               : _args(args), _Ktotal(get_ktotal(args)),
405                 _rounded_Ksize(roundup(args._Ksize, strategy::k_unroll())),
406                 _k_block(compute_k_block(args)), _n_block(compute_n_block(args)),
407                 _Mround(roundup(args._Msize, strategy::out_height())),
408                 _window_range(iceildiv(args._Msize, strategy::out_height()), args._nbatches,
409                               iceildiv(args._Nsize, _n_block), args._nmulti)
410     {
411         // We take a copy of the arguments (not a pointer or reference), but there is no lifetime requirement on the
412         // GemmConfig.  Clear out the pointer to avoid accidents.
413         _args._cfg = nullptr;
414     }
415 
416     // Interface implementation - Compulsory functions
417     ndrange_t get_window_size() const override {
418         return { _window_range.total_size() };
419     }
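
    // The window is the 4D iteration space set up in the constructors:
    //   { iceildiv(Msize, out_height()), nbatches, iceildiv(Nsize, _n_block), nmulti }
    // e.g. (assumed values) Msize=100, out_height()=8, nbatches=2, Nsize=1000, _n_block=250, nmulti=1
    // gives 13 * 2 * 4 * 1 = 104 schedulable work items.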
420 
421     // This kernel can always be dynamically scheduled.
422     bool supports_dynamic_scheduling() const override {
423         return true;
424     }
425 
426     // Execute
427     void execute(const ndcoord_t &work_range, const ndcoord_t &, int) override {
428 #ifdef CYCLE_PROFILING
429         profiler prof;
430 #endif
431         strategy strat(_args._ci);
432 
433         std::vector<const To *>         in_row_ptrs;
434         std::vector<const To * const *> in_row_strings;
435         std::vector<unsigned int>       string_lengths;
436 
437         // In convolution mode, we need input pointers.
438         if (_convolver) {
439             in_row_ptrs = std::vector<const To *>(strategy::out_height() * _args._Ksections, nullptr);
440             in_row_strings = std::vector<const To * const *>(_args._Ksections, nullptr);
441 
442             for (unsigned int i=0; i<_args._Ksections; i++) {
443                 in_row_strings[i] = &(in_row_ptrs[i * strategy::out_height()]);
444             }
445         }
446 
447         // In any indirect mode, we need the string lengths.
448         if (_args._indirect_input) {
449             string_lengths = std::vector<unsigned int>(_args._Ksections, 0);
450         }
451 
452         /* Make sure we've been set up correctly. */
453         assert(FixedFormat || _B_transposed);
454         static_assert(std::is_same<To, Tloi>::value, "gemm_native: Operand types must be the same.");
455 //        static_assert(std::is_same<Tr, Tri>::value, "gemm_native: Result types must be the same.");
456 
457         /* For now, each work item implies all the K for a given output
458          * pixel (so we don't need to synchronize access to the output
459          * array).  So separate the loop over K blocks here.  */
460         for (unsigned int k0=0; k0<_Ktotal; k0+=_k_block) {
461             unsigned int kmax   = std::min(k0 + _k_block, _Ktotal);
462             unsigned int kern_k = roundup(kmax-k0, strategy::k_unroll());
463 
464             const bool first_pass = (k0 == 0);
465             const bool last_pass = (kmax == _Ktotal);
466 
467             unsigned int first_section = (k0 / _rounded_Ksize);
468             unsigned int first_offset  = (k0 % _rounded_Ksize);
469             unsigned int kleft = kern_k;
470             unsigned int sections=0;
471             unsigned int offset = first_offset;
472 
473             if (_args._indirect_input) {
474                 while (kleft) {
475                     // When chopping into sections: the amount that goes into 'string_lengths' is the amount to be
476                     // processed (excluding padding).  But the amount we subtract from 'kleft' takes account of any
477                     // padding applied.
478                     string_lengths[sections] = std::min(kleft, _args._Ksize - offset);
479                     kleft -= std::min(kleft, _rounded_Ksize - offset);
480                     sections++;
481                     offset=0;
482                 }
483             }
484 
485             auto p = _window_range.iterator(work_range.get_position(0), work_range.get_position_end(0));
486 
487             if (p.done()) {
488                 return;
489             }
490 
491             // Process rows either 'out_height' rows at a time, or do all valid rows at once with a single kernel call.
492             // The separate quantizer path only handles one block of rows at a time (as it has to store sums and intermediate results).
493             // The convolution path only generates the pointers for one block of rows at a time.
494             const bool process_all_rows = (!SeparateQuantize && !_convolver);
495 
496             do {
497                 const unsigned int m_start = p.dim(0) * strategy::out_height();
498                 const unsigned int m_end   = process_all_rows ? std::min(p.dim0_max() * strategy::out_height(), _args._Msize) : std::min(m_start + strategy::out_height(), _args._Msize);
499 //                const unsigned int m_end   = std::min(m_start + strategy::out_height(), _args._Msize);
500                 const unsigned int batch   = p.dim(1);
501                 const unsigned int n0      = p.dim(2) * _n_block;
502                 const unsigned int nmax    = std::min(n0 + _n_block, _args._Nsize);
503                 const unsigned int multi   = p.dim(3);
504 
505                 const Troi *b_panel;
506                 if (FixedFormat) {
507                     b_panel = reinterpret_cast<const Troi *>(this->_Bptr) +
508                                (multi * this->_B_multi_stride) +
509                                ((n0 / stripe_width<strategy, FixedFormat>::get()) * this->_ldb) +
510                                (k0 * stripe_width<strategy, FixedFormat>::get());
511                 } else {
512                     b_panel = _B_transposed +
513                                (multi * roundup(_args._Nsize, strategy::out_width()) * _Ktotal) +
514                                (k0 * roundup(_args._Nsize, strategy::out_width())) +
515                                (n0 * kern_k);
516                 }
517 
518                 IndirectOutputArg<Tr> out_arg(this->_Cptr + (multi * this->_C_multi_stride) + (batch * this->_C_batch_stride) + (m_start * this->_ldc) + n0, this->_ldc);
519 
520 #ifdef CYCLE_PROFILING
521                 auto p = prof.ScopedProfiler(PROFILE_KERNEL, (unsigned long)(m_end - m_start) * kern_k * roundup(nmax-n0, strategy::out_width()));
522 #endif
523                 if (_indirect_buf) {
524                     run_hybrid_kernel<OutputStage, SeparateQuantize, FixedFormat>::run(
525 #ifdef CYCLE_PROFILING
526                                  prof,
527 #endif
528                                  strat, sections, string_lengths.data(),
529                                  IndirectInputArg<To>(_indirect_buf + (multi * _args._nbatches * _args._Ksections) + (batch * _args._Ksections) + first_section, m_start, first_offset),
530                                  (m_end - m_start), (nmax - n0), kern_k, b_panel, this->_ldb, out_arg,
531                                  (this->_bias && first_pass) ? this->_bias + (multi * this->_bias_multi_stride) + n0 : nullptr,
532                                  last_pass ? _args._act : Activation(),
533                                  !first_pass,
534                                  // Quantization parameters
535                                  _os, _col_bias+(multi * _args._Nsize), n0);
536                 } else if (_convolver) {
537                     auto conv_cols = _convolver->process_columns(this->_Aptr + (multi * this->_A_multi_stride) + (batch * this->_A_batch_stride), this->_lda, k0, kmax, _rounded_Ksize);
538 
539                     unsigned int pos=0;
540                     auto conv_rows = conv_cols.process_rows(m_start, m_end - m_start);
541 
542                     while (!conv_rows.finished()) {
543                         unsigned int width, conv_offset;
544 
545                         assert(pos < sections);
546 
547                         std::tie(width, conv_offset) = conv_rows.next_block(&(in_row_ptrs[pos * strategy::out_height()]));
548 
549                         if (pos==0) {
550                             assert(conv_offset == first_offset);
551                         }
552                         assert(width == string_lengths[pos]);
553                         pos++;
554                     }
555                     assert(pos == sections);
556 
557                     run_hybrid_kernel<OutputStage, SeparateQuantize, FixedFormat>::run(
558 #ifdef CYCLE_PROFILING
559                                  prof,
560 #endif
561                                  strat, sections, string_lengths.data(),
562                                  IndirectInputArg<To>(in_row_strings.data(), 0, first_offset),
563                                  (m_end - m_start), (nmax - n0), kern_k, b_panel, this->_ldb, out_arg,
564                                  (this->_bias && first_pass) ? this->_bias + (multi * this->_bias_multi_stride) + n0 : nullptr,
565                                  last_pass ? _args._act : Activation(),
566                                  !first_pass,
567                                  // Quantization parameters
568                                  _os, _col_bias+(multi * _args._Nsize), n0);
569                 } else {
570                     // Length to process.  This needs to exclude padding, but 'kmax' potentially includes it.
571                     const unsigned int len = (std::min(_args._Ksize, kmax) - k0);
572 
573                     run_hybrid_kernel<OutputStage, SeparateQuantize, FixedFormat>::run(
574 #ifdef CYCLE_PROFILING
575                                  prof,
576 #endif
577                                  strat, 1, &len,
578                                  IndirectInputArg<To>(this->_Aptr + (multi * this->_A_multi_stride) + (batch * this->_A_batch_stride) + m_start * this->_lda + k0, this->_lda),
579                                  (m_end - m_start), (nmax - n0), kern_k, b_panel, this->_ldb, out_arg,
580                                  (this->_bias && first_pass) ? this->_bias + (multi * this->_bias_multi_stride) + n0 : nullptr,
581                                  last_pass ? _args._act : Activation(),
582                                  !first_pass,
583                                  // Quantization parameters
584                                  _os, _col_bias+(multi * _args._Nsize), n0);
585                 }
586             } while (process_all_rows ? p.next_dim1() : p.next_dim0());
587         }
588     }
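
    // Summary of the three dispatch paths inside execute() above:
    //   * _indirect_buf set : A is described by per-(multi, batch, K-section) arrays of row pointers.
    //   * _convolver set    : row pointers are generated on the fly, one block of rows at a time.
    //   * otherwise         : A is a dense matrix; a single "string" of length min(Ksize, kmax) - k0 is used.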
589 
590     // Interface implementation - pretransposed
591     bool B_is_pretransposed() const override {
592         return (FixedFormat == false);
593     }
594 
595     bool B_pretranspose_required() const override {
596         return (FixedFormat == false) && (_B_transposed==nullptr);
597     }
598 
599     size_t get_B_pretransposed_array_size() const override {
600         if (FixedFormat) {
601             return 0;
602         }
603 
604         // Start with actual pretransposed buffer...
605         size_t size = roundup(_args._Nsize, strategy::out_width()) * _Ktotal * _args._nmulti * sizeof(Troi);
606 
607         // Space for result row pointers (not strictly needed any more but retained for indirect output testing)
608         size += _args._Msize * _args._nbatches * _args._nmulti * sizeof(const Tr *);
609 
610         if (std::is_same<OutputStage, Requantize32>::value) {
611             size += get_col_sum_size();
612         }
613 
614         return size;
615     }
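
    // Example sizing (assumed values): Nsize=1000, out_width()=12, _Ktotal=36, nmulti=1, sizeof(Troi)=4:
    //   pretransposed B : roundup(1000, 12) * 36 * 1 * 4 = 1008 * 36 * 4 = 145,152 bytes
    //   row pointers    : Msize * nbatches * nmulti * sizeof(const Tr *)
    //   column sums     : Nsize * nmulti * sizeof(int32_t), only when OutputStage is Requantize32.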
616 
617     void requantize_bias(void *in_buffer, const To *B, const int ldb, const int B_multi_stride) override {
618         if (std::is_same<OutputStage, Requantize32>::value) {
619             _col_bias = reinterpret_cast<int32_t *>(in_buffer);
620 
621             Requantize32 *qp_ptr = reinterpret_cast<Requantize32 *>(&_os);
622 
623             for (unsigned int i=0; i<_args._nmulti; i++) {
624                 // The input is assumed not to have any padding between sections, so straightforward Ksize * Ksections computation gets the total size.
625                 compute_col_sums(*qp_ptr, _args._Nsize, _args._Ksize * _args._Ksections, B + (i * B_multi_stride), ldb, _col_bias + (i * _args._Nsize), _args._Ksize * _args._Ksections, i, 0);
626             }
627         }
628     }
629 
630     void pretranspose_B_array(void *in_buffer, const To *B, const int ldb, const int B_multi_stride) override {
631         requantize_bias(in_buffer, B, ldb, B_multi_stride);
632 
633         // Put the transposed data after the column sums - in non-transposing cases get_col_sum_size() == 0
634         uintptr_t buffer_int = reinterpret_cast<uintptr_t>(in_buffer);
635         Troi *buffer = reinterpret_cast<Troi *>(buffer_int + get_col_sum_size());
636         _B_transposed = buffer;
637 
638         strategy strat(_args._ci);
639 
640         for (unsigned int multi=0; multi<_args._nmulti; multi++) {
641             for (unsigned int k0=0; k0<_Ktotal; k0+=_k_block) {
642                 const unsigned int kmax=std::min(k0 + _k_block, _Ktotal);
643 
644                 /* Figure out the size of each block. */
645                 unsigned int k_size = kmax - k0;
646 
647                 if (_args._Ksections > 1) {
648                     // We need to insert padding at the end of each K section.
649                     // The computation needed is a little delicate - the coordinates from the block walker are expressed in
650                     // terms of the full, padded, _Ktotal.
651                     // But we need to transform each section with reference to the original, unpadded, input, letting the
652                     // transform pad each section as needed.
653 
654                     // This is needed for computations below.
655                     const unsigned int rounded_section_size = roundup(_args._Ksize, strategy::k_unroll());
656 
657                     // The expected output format is also a full block of <out_width> columns interleaved, then the next set of
658                     // columns, and so on.  This means, as we are breaking it up vertically, we have to do it one column at
659                     // a time.
660                     for (unsigned int x0=0; x0 < _args._Nsize; x0 += strategy::out_width() ){
661                         unsigned int xmax = std::min(x0 + strategy::out_width(), _args._Nsize);
662 
663                         // Track where we are and how much work is left.
664                         unsigned int kpos  = k0;
665                         unsigned int kleft = k_size;
666 
667                         while (kleft) {
668                             // Which section are we in?  Based on the rounded-up section size.
669                             unsigned int k_section_base = kpos / rounded_section_size;
670                             // How far into the section are we?
671                             unsigned int k_offset = kpos - (k_section_base * rounded_section_size);
672 
673                             // We will either copy the rest of this section, or to the end of the requested length.
674                             unsigned int k_length = std::min(_args._Ksize - k_offset, kleft);
675 
676                             strat.transforms.PrepareB(buffer, B + (multi * B_multi_stride), ldb,
677                                                       x0, xmax,
678                                                       (k_section_base * _args._Ksize) + k_offset,               // K starting point - compute row to read based on our section and the true section length.
679                                                       (k_section_base * _args._Ksize) + k_offset + k_length);   // K end point - starting point plus length computed above.
680 
681                             // We need to modify our position based on the ROUNDED version of what we just did.
682                             unsigned int padded_length = roundup(k_length, strategy::k_unroll());
683 
684                             buffer += strategy::out_width() * padded_length;
685 
686                             kpos  += padded_length;
687                             kleft -= padded_length;
688                         }
689                     }
690                 } else {
691                     // In the single K section case, can process the whole lot in one go.
692                     strat.transforms.PrepareB(buffer, B + (multi * B_multi_stride), ldb,
693                                               0, _args._Nsize, k0, std::min(kmax, _args._Ksize));
694                     buffer += roundup(_args._Nsize, strategy::out_width()) * roundup(kmax-k0, strategy::k_unroll());
695                 }
696             }
697         }
698     }
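
    // Worked example of the sectioned PrepareB walk above (assumed: Ksections=3, Ksize=10,
    // k_unroll()=4, so rounded_section_size = 12 and _Ktotal = 36).  For a block with k0=0 and
    // k_size=24, each column block [x0, xmax) is transformed as:
    //   kpos=0  -> section 0, k_offset=0, k_length=min(10, 24)=10, padded_length=12
    //   kpos=12 -> section 1, k_offset=0, k_length=min(10, 12)=10, padded_length=12
    //   kleft is now 0; the remaining section is picked up by the next k0 block.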
699 
700     void set_pretransposed_B_data(void *in_buffer) override {
701         // Put the transposed data after the column sums - in non-transposing cases get_col_sum_size() == 0
702         uintptr_t buffer_int = reinterpret_cast<uintptr_t>(in_buffer);
703         _B_transposed = reinterpret_cast<Troi *>(buffer_int + get_col_sum_size());
704         _col_bias = reinterpret_cast<int32_t *>(in_buffer);
705     }
706 
707     // Estimate cycles for given problem given provided parameters.
708     // "perf_type" is a type to pass along to get_performance_parameters to get the right set of performance
709     // parameters - it's arbitrary but usually either the input or output type.
710     template <typename perf_type>
711     static uint64_t estimate_cycles(const GemmArgs &args, const OutputStage &os = {}) {
712         const PerformanceParameters params = strategy::template get_performance_parameters<perf_type>(args._ci);
713 
714         // Note: Current hybrid kernels don't actually round up height (they
715         // have paths for each possible height).  Might need to make this
716         // configurable in future.
717         uint64_t total_macs = static_cast<uint64_t>(args._nbatches) * args._nmulti * args._Msize * roundup(args._Nsize, strategy::out_width()) * get_ktotal(args);
718 
719         float mac_cycles = static_cast<float>(total_macs) / params.kernel_macs_cycle;
720 
721         // TODO: A bit of a kludge here: current hybrid kernels incur extra
722         // overhead where the width is not a multiple of kernel width.  It's
723         // most noticeable where the overall width is quite low, so add 15%
724         // penalty for such widths.
725         if ((args._Nsize < strategy::out_width()) || (args._Nsize > strategy::out_width() && args._Nsize < 2*strategy::out_width())) {
726             mac_cycles *= 1.15f;
727         }
728 
729         uint64_t total_cycles = mac_cycles;
730 
731         // Quantizing kernels with separate quantize need to add in the extra stages.
732         if (std::is_same<OutputStage, Requantize32>::value && SeparateQuantize) {
733             const Requantize32 *qp = reinterpret_cast<const Requantize32 *>(&os);
734 
735             // Row sums: need to consider each value in A (batch * multi * M * K)...
736             uint64_t rowsum_bytes = static_cast<uint64_t>(args._nbatches) * args._nmulti * args._Msize * get_ktotal(args);
737 
738             // ... but row sums are skipped if B offset==0.
739             if (qp->b_offset == 0) {
740                 rowsum_bytes = 0;
741             }
742 
743             // Use "prepare bytes per cycle" to store "row sum values per cycle".
744             float rowsum_cycles = static_cast<float>(rowsum_bytes) / params.prepare_bytes_cycle;
745 
746             // Requantize: need to consider each value in C (batch * multi * M * N)
747             uint64_t requantize_bytes = static_cast<uint64_t>(args._nbatches) * args._nmulti * args._Msize * args._Nsize;
748 
749             // Use "merge bytes per cycle" to store "requantize values per cycle".
750             float requantize_cycles = static_cast<float>(requantize_bytes) / params.merge_bytes_cycle;
751 
752             // Recalculate total_cycles with the extra components.
753             total_cycles = mac_cycles + rowsum_cycles + requantize_cycles;
754         }
755 
756         return total_cycles;
757     }
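
    // Illustrative estimate (assumed values): nbatches=1, nmulti=1, Msize=64, Nsize=96,
    // out_width()=12, get_ktotal()=256, kernel_macs_cycle=16.0:
    //   total_macs = 64 * roundup(96, 12) * 256 = 64 * 96 * 256 = 1,572,864
    //   mac_cycles = 1,572,864 / 16.0 = 98,304 (no 15% penalty, since Nsize >= 2 * out_width())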
758 
759     void set_quantized_bias(const int32_t *bias, size_t bias_multi_stride) override {
760         if (std::is_same<OutputStage, Requantize32>::value) {
761             Requantize32 *qp = reinterpret_cast<Requantize32 *>(&_os);
762 
763             qp->bias = bias;
764             qp->bias_multi_stride = bias_multi_stride;
765         }
766     }
767 
768     void set_indirect_parameters(size_t string_len, const To * const * const *ptr) override {
769         assert(string_len == _args._Ksize);
770         _indirect_buf = ptr;
771     }
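
    // Layout assumed by execute() for the pointer table registered here:
    //   _indirect_buf[(multi * nbatches * Ksections) + (batch * Ksections) + section]
    // yields a 'const To * const *' array of row pointers for that (multi, batch, K-section).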
772 
773     void set_convolution_parameters(ConvolutionParameters parms) override {
774         assert(parms.input_channels == _args._Ksize);
775         _convolver = std::unique_ptr<convolver<To>>(new convolver<To>(parms));
776     }
777 
778     GemmConfig get_config() override {
779         GemmConfig c;
780 
781         c.method = GemmMethod::GEMM_HYBRID;
782         c.inner_block_size = _k_block;
783         c.outer_block_size = _n_block;
784         c.filter = get_type_name<strategy>();
785         c.weight_format = get_weight_format(kernel_weight_format<strategy, FixedFormat>::get(), sizeof(To));
786 
787         return c;
788     }
789 };
790 
791 template<typename strategy, typename To, typename Tr, typename OutputStage=Nothing>
792 using GemmHybridIndirectFixedFormat = GemmHybridIndirect<strategy, To, Tr, OutputStage, false, true>;
793 
794 } // namespace arm_gemm
795 
796 #ifdef __I_DEFINED_UNUSED
797 #undef UNUSED
798 #endif
799