/*
 * Copyright (c) 2017-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#pragma once

#include <algorithm>
#include <cassert>

#include "arm_gemm.hpp"
#include "bfloat.hpp"
#include "convolver.hpp"
#include "kernel_weight_format.hpp"
#include "kernel_traits.hpp"
#include "mergeresults.hpp"
#include "performance_parameters.hpp"
#include "quantized.hpp"
#include "transform.hpp"
#include "utils.hpp"

#ifdef CYCLE_PROFILING
#include "profiler.hpp"
#endif

// Some macros used to decide how much working space to allocate.
// Round allocations up to the next cache line.
#define ALLOC_ROUND	64
#define ROUND_UP(x)	((((x) + ALLOC_ROUND-1) / ALLOC_ROUND) * ALLOC_ROUND)
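
// For example, with the 64-byte ALLOC_ROUND above, ROUND_UP(1) == 64 and
// ROUND_UP(100) == ((100 + 63) / 64) * 64 == 128, so every allocation below
// starts on its own cache line boundary.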

// Implementation of the GemmCommon abstract class.
//
// This implementation interleaves the source matrices in blocks - good for
// larger matrices.

namespace arm_gemm {

namespace {

// Some kernels output to a linear buffer and require a separate merge step.
// Others output directly to the matrix result.  This helper class calls the
// appropriate functions, using templating to avoid calling non-existent
// functions.
template<bool MergeStep, bool FixedFormat, typename OutputStage>
class kernel_and_merge {
public:
    template<typename strategy, typename To, typename Tr, typename Tri, typename Tab>
    static void run (
#ifdef CYCLE_PROFILING
        profiler &prof,
#endif
        strategy &strat, const To *a_ptr, const To *b_panel, size_t b_stride, Tri *c_panel,
        Tr *c_ptr, int ldc, int kern_k, unsigned int m_0,
        unsigned int m_max, unsigned int n_0, unsigned int n_max, const Tr *biasptr,
        const Activation &act, bool accumulate, const OutputStage &os, const int32_t *col_bias,
        Tab *acc_buff);
};

// Run a kernel and call the separate merge step
template<>
template<typename strategy, typename To, typename Tr, typename Tri, typename Tab>
void kernel_and_merge<true, false, Nothing>::run(
#ifdef CYCLE_PROFILING
        profiler &prof,
#endif
        strategy &strat, const To *a_ptr, const To *b_panel, size_t, Tri *c_panel,
        Tr *c_ptr, int ldc, int kern_k, unsigned int m_0,
        unsigned int m_max, unsigned int n_0, unsigned int n_max, const Tr *biasptr,
        const Activation &act, bool accumulate, const Nothing &, const int32_t *, Tab *)
{
    const int bblocks = iceildiv(n_max - n_0, strategy::out_width());

    {
#ifdef CYCLE_PROFILING
        auto p=prof.ScopedProfiler(PROFILE_KERNEL, (strategy::out_height() * bblocks * strategy::out_width() * kern_k));
#endif

        strat.kernel(a_ptr, b_panel, c_panel, 1, bblocks, kern_k);
    }

    {
#ifdef CYCLE_PROFILING
        auto p=prof.ScopedProfiler(PROFILE_MERGE, (strategy::out_height() * bblocks * strategy::out_width() * sizeof(Tr)));
#endif
        strat.transforms.Merge(c_ptr, c_panel, ldc, m_0, m_max, n_0, n_max, biasptr, act, accumulate);
    }
}

// Run a fixed-format kernel and call the separate merge step
template<>
template<typename strategy, typename To, typename Tr, typename Tri, typename Tab>
void kernel_and_merge<true, true, Nothing>::run(
#ifdef CYCLE_PROFILING
        profiler &prof,
#endif
        strategy &strat, const To *a_ptr, const To *b_panel, size_t b_stride, Tri *c_panel,
        Tr *c_ptr, int ldc, int kern_k, unsigned int m_0,
        unsigned int m_max, unsigned int n_0, unsigned int n_max, const Tr *biasptr,
        const Activation &act, bool accumulate, const Nothing &, const int32_t *, Tab *)
{
    {
#ifdef CYCLE_PROFILING
        const int bblocks = iceildiv(n_max - n_0, strategy::out_width());
        auto p=prof.ScopedProfiler(PROFILE_KERNEL, (strategy::out_height() * bblocks * strategy::out_width() * kern_k));
#endif

        strat.kernel(a_ptr, b_panel, b_stride, c_panel, 1, (n_max - n_0), kern_k);
    }

    {
#ifdef CYCLE_PROFILING
        const int bblocks = iceildiv(n_max - n_0, strategy::out_width());
        auto p=prof.ScopedProfiler(PROFILE_MERGE, (strategy::out_height() * bblocks * strategy::out_width() * sizeof(Tr)));
#endif
        strat.transforms.Merge(c_ptr, c_panel, ldc, m_0, m_max, n_0, n_max, biasptr, act, accumulate);
    }
}

// Run a kernel with integrated merge
template<>
template<typename strategy, typename To, typename Tr, typename Tri, typename Tab>
void kernel_and_merge<false, false, Nothing>::run(
#ifdef CYCLE_PROFILING
        profiler &prof,
#endif
        strategy &strat, const To *a_ptr, const To *b_panel, size_t, Tri *,
        Tr *c_ptr, int ldc, int kern_k, unsigned int m_0, unsigned int m_max,
        unsigned int n_0, unsigned int n_max, const Tr *biasptr,
        const Activation &act, bool accumulate, const Nothing &, const int32_t *,
        Tab *acc_buff)
{
#ifdef CYCLE_PROFILING
    auto p=prof.ScopedProfiler(PROFILE_KERNEL, (m_max - m_0) * (n_max - n_0) * kern_k);
#endif

    // We need to offset the C pointer, but as it might be NULL (requesting output to the accumulation buffer)
    // we need to be careful not to offset a null pointer.
    Tri *offset_c_ptr;

    if (c_ptr == nullptr) {
        offset_c_ptr = nullptr;
    } else {
        offset_c_ptr = c_ptr + m_0 * ldc + n_0;
    }

    strat.kernel(// A and B pointers are just the packed panels.
                 a_ptr, b_panel,
                 // Provide relevant part of output array and row stride.
                 offset_c_ptr, ldc,
                 // M, N, K sizes
                 m_max-m_0, n_max - n_0, kern_k,
                 // Bias, activation, accumulation.  Need to offset the bias as needed.
                 biasptr ? biasptr + n_0 : nullptr, act, accumulate,
                 // Accumulation buffer.
                 acc_buff );
}

// Run a kernel with integrated merge, quantizing
template<>
template<typename strategy, typename To, typename Tr, typename Tri, typename Tab>
void kernel_and_merge<false, false, Requantize32>::run(
#ifdef CYCLE_PROFILING
        profiler &prof,
#endif
        strategy &strat, const To *a_ptr, const To *b_panel, size_t, Tri *,
        Tr *c_ptr, int ldc, int kern_k, unsigned int m_0, unsigned int m_max,
        unsigned int n_0, unsigned int n_max, const Tr *,
        const Activation &, bool accumulate, const Requantize32 &qp, const int32_t *col_bias,
        Tab *acc_buff)
{
#ifdef CYCLE_PROFILING
    auto p=prof.ScopedProfiler(PROFILE_KERNEL, (m_max - m_0) * (n_max - n_0) * kern_k);
#endif

    strat.kernel(// A and B pointers are just the packed panels.
                 a_ptr, b_panel,
                 // Provide relevant part of output array and row stride.
                 c_ptr + m_0 * ldc + n_0, ldc,
                 // M, N, K sizes
                 m_max-m_0, n_max - n_0, kern_k,
                 // Bias, activation, accumulation.  Need to offset the bias as needed.
                 col_bias + n_0, qp, n_0, accumulate, acc_buff);
}

// Run a kernel and call the separate quantize step
template<>
template<typename strategy, typename To, typename Tr, typename Tri, typename Tab>
void kernel_and_merge<true, false, Requantize32>::run(
#ifdef CYCLE_PROFILING
        profiler &prof,
#endif
        strategy &strat, const To *a_ptr, const To *b_panel, size_t, Tri *c_panel,
        Tr *c_ptr, int ldc, int kern_k, unsigned int m_0,
        unsigned int m_max, unsigned int n_0, unsigned int n_max, const Tr *,
        const Activation &, bool, const Requantize32 &qp, const int32_t *col_bias,
        Tab *)
{
    const int bblocks = iceildiv(n_max - n_0, strategy::out_width());

    {
#ifdef CYCLE_PROFILING
        auto p=prof.ScopedProfiler(PROFILE_KERNEL, (strategy::out_height() * bblocks * strategy::out_width() * kern_k));
#endif

        strat.kernel(a_ptr, b_panel, c_panel, 1, bblocks, kern_k);
    }

    {
#ifdef CYCLE_PROFILING
        auto p=prof.ScopedProfiler(PROFILE_QUANTIZE, ((m_max-m_0) * bblocks * strategy::out_width() * sizeof(Tr)));
#endif
        // The interleaved kernel outputs in blocks - each block is a
        // row-major matrix of size out_width * out_height.  The merge
        // kernels are designed to deal with this but the requantizer is
        // not, so we need to requantize one block at a time.
        for (int i=0; i<bblocks; i++) {
            unsigned int n_start = n_0 + (strategy::out_width() * i);
            unsigned int n_end = std::min(n_start + strategy::out_width(), n_max);

            // The row bias is interleaved with the transposed A data, get a pointer to it here.
            const int32_t *row_bias = reinterpret_cast<const int32_t *>(a_ptr + strategy::out_height() * kern_k);

            requantize_block_32(qp, (n_end - n_start), (m_max-m_0),
                                c_panel + (i * strategy::out_width() * strategy::out_height()), strategy::out_width(),
                                c_ptr + m_0 * ldc + n_start, ldc,
                                row_bias, col_bias + n_start, n_start);
        }
    }
}

// Integer GEMMs can be used in two contexts - "normal" where the full 32-bit output is required, or in
// "requantizing" context where the output will be requantized.
//
// These require different input transforms, as if we are requantizing we want to sum the rows of the A input, and
// if we are not we don't.
//
// This helper class allows the appropriate transforms to be found, without requiring kernels that don't support
// quantization to define useless "quantized" transforms.
template<typename strategy, bool quantized>
class transform_type {
public:
    typedef decltype(strategy::transforms) type;
};

template<typename strategy>
class transform_type<strategy, true> {
public:
    typedef decltype(strategy::transforms_quantized) type;
};

// We need a similar trick here to figure out what type the accumulator buffer should be.
template<typename strategy, typename OutputStage>
class accumulate_buffer_type {
public:
    typedef typename strategy::result_type type;
};

template<typename strategy>
class accumulate_buffer_type<strategy, Requantize32> {
public:
    typedef int32_t type;
};

// Stripe width is a concept only needed for FixedFormat kernels.  Use an accessor to avoid issues in other scenarios.
template<typename strategy, bool FixedFormat>
struct get_stripe_width {
    static unsigned int get() {
        return 0;
    }
};

template<typename strategy>
struct get_stripe_width<strategy, true> {
    static unsigned int get() {
        return strategy::stripe_width();
    }
};

// KernelWeightFormat is a similar story.
template<typename strategy, bool FixedFormat, typename To>
struct get_kernel_weight_format {
    static KernelWeightFormat get() {
        return KernelWeightFormat::NON_FIXED;
    }
};

template<typename strategy, typename To>
struct get_kernel_weight_format<strategy, true, To> {
    static KernelWeightFormat get() {
        KernelWeightFormat kwf = strategy::kernel_weight_format();

        // If we are using a BF16 kernel to do an FP32 problem (fast mode) then we need to set the BF16 flag on the
        // weight format.
        if (std::is_same<To, float>::value && std::is_same<typename strategy::operand_type, bfloat16>::value) {
            uint32_t kwf_i = static_cast<uint32_t>(kwf);
            kwf_i |= 0x10;
            kwf = static_cast<KernelWeightFormat>(kwf_i);
        }

        return kwf;
    }
};

} // anonymous namespace

template<typename strategy, typename To, typename Tr, typename OutputStage=Nothing, bool MergeStep=true, bool FixedFormat=false, bool ForceThreadColumns=false>
class GemmInterleaved : public GemmCommon<To, Tr> {
    typedef typename strategy::operand_type Toi;
    typedef typename strategy::result_type Tri;
    typedef typename accumulate_buffer_type<strategy, OutputStage>::type Tab;

    /* const properties set by constructor */
    const CPUInfo * const _ci;

    const unsigned int _Msize;
    const unsigned int _Nsize;
    const unsigned int _Ksize;
    const unsigned int _Ksections;
    const unsigned int _Ktotal;
    const unsigned int _rounded_Ksize;

    const unsigned int _nbatches;
    const unsigned int _nmulti;

    const bool _thread_columns;

    const Activation _act;

    const int _maxthreads;
    int _nthreads;

    /* Blocking info */
    unsigned int _k_block=0;
    unsigned int _x_block=0;
    unsigned int _Mround=0;

    /* Working space, pretransposed buffer, buffer manager */
    const Toi *_B_transposed=nullptr;
    void *_working_space=nullptr;

    Tab *_accumulation_buffer=nullptr;

    /* Output stage */
    OutputStage  _os;

    /* Quantized support (in addition to 'output stage' above) */
    int32_t *col_bias = nullptr;

    /* Indirect parameters.  _indirect_buf doubles as a flag to indicate that "indirect" transform should be used. */
    const To * const * const * _indirect_buf = nullptr;

    /* Convolver - only set up for convolution problems, so also doubles as a flag. */
    std::unique_ptr<convolver<To>>  _convolver = nullptr;

    unsigned int get_col_sum_size() const {
        if (std::is_same<OutputStage, Requantize32>::value) {
            return _Nsize * _nmulti * sizeof(int32_t);
        } else {
            return 0;
        }
    }

    /* We will need to walk through the blocks of B in a few contexts, so
     * factor that out.  */
    class blockwalker {
    private:
        /* Size loops, etc. based on our parent's configuration */
        const GemmInterleaved<strategy, To, Tr, OutputStage, MergeStep, FixedFormat, ForceThreadColumns> &_parent;

        /* K, X and multi parameters for current iteration. */
        unsigned int _k0=0, _x0=0, _multi=0;

        /* Range of X to iterate over - used in "ForceThreadColumns" cases */
        unsigned int _x_start=0;
        unsigned int _x_end=_parent._Nsize;

        unsigned int _index=0;
        bool _done=false;
        bool _newkblock=true;
        bool _newmulti=true;

    public:
        blockwalker(const GemmInterleaved<strategy, To, Tr, OutputStage, MergeStep, FixedFormat, ForceThreadColumns> &parent) : _parent(parent) { }

        blockwalker(const GemmInterleaved<strategy, To, Tr, OutputStage, MergeStep, FixedFormat, ForceThreadColumns> &parent,
                    unsigned int x_start, unsigned int x_end) : _parent(parent), _x0(x_start), _x_start(x_start), _x_end(x_end) { }

        unsigned int xmax() {
            return std::min(_x0 + _parent._x_block, _x_end);
        }

        unsigned int kmax() {
            return std::min(_k0 + _parent._k_block, _parent._Ktotal);
        }

        /* Advance to the next block, return false at the end. */
        bool advance(void) {
            if (_done) {
                return false;
            }

            _newkblock=false;
            _x0 += _parent._x_block;
            if (_x0 >= _x_end) {
                _x0=_x_start;
                _k0 += _parent._k_block;
                if (_k0 >= _parent._Ktotal) {
                    _k0=0;
                    _multi++;
                    if (_multi >= _parent._nmulti) {
                        _done=true;
                        return false;
                    }
                    _newmulti=true;
                }
                _newkblock=true;
            }
            _index++;

            return true;
        }

        unsigned int k0(void) { return _k0; }
        unsigned int x0(void) { return _x0; }
        unsigned int multi(void) { return _multi; }
        unsigned int index(void) { return _index; }
        bool done(void) { return _done; }
        bool newkblock(void) { return _newkblock; }
    };
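
    // A minimal usage sketch (hypothetical sizes): with _x_block=256,
    // _k_block=512, _Nsize=512, _Ktotal=1024 and _nmulti=1, a blockwalker
    // visits (k0,x0) in the order (0,0) (0,256) (512,0) (512,256) - i.e. X
    // varies fastest, then K, then multi - with newkblock() true on the
    // first block of each new K range.
    //
    //   blockwalker bw(*this);
    //   do {
    //       process(bw.k0(), bw.kmax(), bw.x0(), bw.xmax(), bw.multi());
    //   } while (bw.advance());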

    // "k block" has two distinct uses: figuring out which iterations of K
    // to actually process, but also various size/pointer computations.  The
    // latter needs to take account of the extra space needed for the row
    // sums, if appropriate.
    unsigned int get_total_k_depth() const {
        unsigned int k_depth = _k_block;

        if (std::is_same<OutputStage, Requantize32>::value) {
            k_depth += sizeof(int32_t) / sizeof(Toi);
        }

        return k_depth;
    }

    // A working size.
    size_t get_a_working_size() const {
        if (_thread_columns) {
            // For 2D threading: allocate a buffer of one block of rows per thread
            return ROUND_UP(sizeof(Toi) * get_total_k_depth() * strategy::out_height() * _maxthreads);
        } else {
            // For 1D threaded: one of these needed, regardless of thread count.  Divided according to window.
            return ROUND_UP(sizeof(Toi) * get_total_k_depth() * _Mround * _nbatches);
        }
    }

    // C working size: One needed per thread.  Not needed if there is no merge step.
    size_t get_c_working_size() const {
        if (MergeStep) {
            return ROUND_UP(sizeof(Tri) * _x_block * strategy::out_height());
        } else {
            return 0;
        }
    }

    // Accumulation buffer size
    size_t get_accumulation_buffer_size() const {
        // We only support an accumulation buffer for non-merge cases.
        if (MergeStep) {
            return 0;
        }

        // Check if we are actually blocking
        if (_k_block == _Ktotal) {
            return 0;
        }

        // We are no-merge, non-quantized with active blocking: accumulation buffer needed.
        size_t size_per_buffer = sizeof(Tab) * strategy::out_height() * strategy::out_width();
        size_t num_buffers = iceildiv(_Msize, strategy::out_height()) * iceildiv(_Nsize, strategy::out_width()) * _nbatches * _nmulti;

        return num_buffers * size_per_buffer;
    }

    // Get pointer into accumulation buffer
    Tab *get_accumulation_buffer(unsigned int M, unsigned int N, unsigned int batch, unsigned int multi) const {
        // Don't do anything if there's no buffer.
        if (_accumulation_buffer == nullptr) {
            return nullptr;
        }

        // Here we are indexing an appropriately sized pointer, so no sizeof() needed to convert to bytes.
        size_t size_per_buffer = strategy::out_height() * strategy::out_width();

        size_t buffer_rows = iceildiv(_Msize, strategy::out_height());
        size_t buffer_cols = iceildiv(_Nsize, strategy::out_width());
        size_t buffers_per_batch = (buffer_rows * buffer_cols);
        size_t buffers_per_multi = buffers_per_batch * _nbatches;

        // M/N must reference the top-left corner of a block.
        size_t row = M / strategy::out_height();
        assert(M % strategy::out_height() == 0);
        size_t col = N / strategy::out_width();
        assert(N % strategy::out_width() == 0);

        size_t buffer_index = multi * buffers_per_multi + batch * buffers_per_batch + row * buffer_cols + col;

        return _accumulation_buffer + (buffer_index * size_per_buffer);
    }
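
    // Worked example of the indexing above (hypothetical numbers): with
    // out_height()=8, out_width()=12, _Msize=64, _Nsize=48 and _nbatches=2,
    // buffer_rows=8, buffer_cols=4 and buffers_per_batch=32; the block at
    // M=16, N=24 in batch 1 of multi 0 lands at buffer index
    // 1*32 + 2*4 + 2 == 42, i.e. an offset of 42*8*12 elements.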

    int32_t row_sum_multiplier() const {
        if (std::is_same<OutputStage, Requantize32>::value) {
            const Requantize32 *qp = reinterpret_cast<const Requantize32 *>(&_os);

            return -qp->b_offset;
        }

        return 0;
    }

    // Heuristics to decide whether to use the 'thread columns' regime
    static bool is_thread_columns(const GemmArgs &args) {
        // For now, there is a template parameter to force it.
        if (ForceThreadColumns) {
            return true;
        }

        // Never do this for single threaded cases.
        if (args._maxthreads == 1) {
            return false;
        }

        // How many blocks of work are available for threading on M?
        int m_blocks = iceildiv(args._Msize, strategy::out_height()) * args._nbatches;

        // If we just can't share the work across threads with the row threading regime.
        if (args._maxthreads > m_blocks) {
            return true;
        }

        // If the row threading regime is too wasteful (20% threshold)
        if (((roundup(m_blocks, args._maxthreads) * 100) / m_blocks) > 120) {
            return true;
        }

        return false;
    }
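
    // Illustration of the 20% threshold above (hypothetical numbers): with
    // m_blocks=5 and _maxthreads=4, roundup(5, 4)=8, so the padded schedule
    // is (8*100)/5 = 160% of the real work - well over 120 - and the column
    // regime is chosen.  With m_blocks=8 the ratio is exactly 100 and row
    // threading is kept.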

    static unsigned int get_ktotal(const GemmArgs &args) {
        return args._Ksections * roundup(args._Ksize, strategy::k_unroll());
    }

    static unsigned int get_k_block_size(const GemmArgs &args) {
        if (args._cfg && args._cfg->inner_block_size) {
            return roundup(args._cfg->inner_block_size, strategy::k_unroll());
        }

        // K blocking not supported if we are requantizing.
        if (std::is_same<OutputStage, Requantize32>::value) {
            return get_ktotal(args);
        }

        // Special blocking for SME
        if (is_sme<strategy>::value) {
            // Don't bother to block below this size threshold, experimentally determined to be 320 for FP32
            unsigned int scaling_threshold = 1280 / sizeof(Toi);

            if (get_ktotal(args) <= scaling_threshold) {
                return get_ktotal(args);
            }

            // Once we are blocking, this (lower) threshold determines when we should use more blocks
            // NOTE: Could be that some factor-based solution would work better here.
            unsigned int max_block_size = 1024 / sizeof(Toi);

            unsigned int num_k_blocks = iceildiv(get_ktotal(args), max_block_size);

            unsigned int k_block = roundup(iceildiv(get_ktotal(args), num_k_blocks), strategy::k_unroll());

            return k_block;
        }

        const unsigned int L1_size = args._ci->get_L1_cache_size();
        unsigned int k_block;

        // k_block: Find out how much of the larger array can be loaded into half the cache.
        // This should account for associative caches.
        k_block = (L1_size / 2) / (sizeof(Toi) * (std::max(strategy::out_width(), strategy::out_height())));

        // Needs to be (at least a single) multiple of the K unroll level.
        k_block /= strategy::k_unroll();
        k_block = std::max(k_block, 1U) * strategy::k_unroll();

        // Now tune to presented problem size; this is how many blocks we need.
        unsigned int num_k_blocks = iceildiv(get_ktotal(args), k_block);

        // So divide the space equally into that many blocks.
        k_block = iceildiv(get_ktotal(args), num_k_blocks);

        // And round UP to the K unroll level required.
        k_block = roundup(k_block, strategy::k_unroll());

        assert(k_block > 0);

        return k_block;
    }
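
    // Worked example of the L1 sizing above (hypothetical numbers): with a
    // 32KB L1, 4-byte Toi, out_width()=12, out_height()=8 and k_unroll()=1,
    // the initial estimate is 16384/(4*12) = 341.  For Ktotal=1024 that
    // gives num_k_blocks = iceildiv(1024, 341) = 4, so the final k_block is
    // iceildiv(1024, 4) = 256 - four equal blocks rather than three of 341
    // plus a ragged one.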

    static unsigned int get_x_block_size(const GemmArgs &args) {
        if (is_thread_columns(args)) {
            // In 2D mode, override X block, because we will process width first.
            return roundup(args._Nsize, strategy::out_width());
        }

        if (args._cfg && args._cfg->outer_block_size) {
            return roundup(args._cfg->outer_block_size, strategy::out_width());
        }

        unsigned int x_block;
        const unsigned int L2_size = args._ci->get_L2_cache_size();
        const unsigned int k_block = get_k_block_size(args);

        // x_block: Work out how many rows (of length k_block) will fit in the L2
        // Don't allocate more than 90% of the L2 to allow for overheads, and subtract off the L1 contents.
        const unsigned int scaled_l2_size = (L2_size * 9) / 10;
        const unsigned int k_block_area = k_block * sizeof(Toi) * (strategy::out_width() + strategy::out_height());

        // .. if the L1 contents is bigger than the L2, just return a minimal size block.
        if (k_block_area > scaled_l2_size) {
            return strategy::out_width();
        }

        x_block = (scaled_l2_size - k_block_area) / (sizeof(Toi) * k_block);

        // Needs to be (at least a single) multiple of the kernel output width.
        x_block /= strategy::out_width();
        x_block = std::max(x_block, 1u) * strategy::out_width();

        // And tune to the presented problem size.
        unsigned int num_x_blocks = iceildiv(args._Nsize, x_block);
        x_block = iceildiv(args._Nsize, num_x_blocks);

        x_block = roundup(x_block, strategy::out_width());

        assert(x_block > 0);

        return x_block;
    }
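
    // Worked example of the L2 sizing above (hypothetical numbers): with a
    // 512KB L2, scaled_l2_size = 471859.  For k_block=256, 4-byte Toi and
    // out_width()+out_height() = 20, k_block_area = 256*4*20 = 20480,
    // leaving (471859-20480)/(4*256) = 440 columns; rounded down to a
    // multiple of out_width()=12 that is 36*12 = 432 before tuning to Nsize.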

public:
    GemmInterleaved(GemmInterleaved &) = delete;
    GemmInterleaved & operator= (GemmInterleaved &) = delete;

    /* Constructor */
    GemmInterleaved(const GemmArgs &args, const OutputStage &os)
                    : _ci(args._ci), _Msize(args._Msize), _Nsize(args._Nsize), _Ksize(args._Ksize),
                      _Ksections(args._Ksections), _Ktotal(get_ktotal(args)),
                      _rounded_Ksize(roundup(_Ksize, strategy::k_unroll())),
                      _nbatches(args._nbatches), _nmulti(args._nmulti), _thread_columns(is_thread_columns(args)),
                      _act(args._act), _maxthreads(args._maxthreads), _nthreads(args._maxthreads),
                      _k_block(get_k_block_size(args)), _x_block(get_x_block_size(args)), _Mround(roundup(args._Msize, strategy::out_height())),
                      _os(os) { }

    /* Constructor without OutputStage */
    GemmInterleaved(const GemmArgs &args)
                    : _ci(args._ci), _Msize(args._Msize), _Nsize(args._Nsize), _Ksize(args._Ksize),
                      _Ksections(args._Ksections), _Ktotal(get_ktotal(args)),
                      _rounded_Ksize(roundup(_Ksize, strategy::k_unroll())),
                      _nbatches(args._nbatches), _nmulti(args._nmulti), _thread_columns(is_thread_columns(args)),
                      _act(args._act), _maxthreads(args._maxthreads), _nthreads(args._maxthreads),
                      _k_block(get_k_block_size(args)), _x_block(get_x_block_size(args)), _Mround(roundup(args._Msize, strategy::out_height())),
                      _os() { }

    // Interface implementation - Compulsory functions

    // Window size: Only the last thread should do a ragged block, so dole
    // out work in units of out_height.  Factor batches into the window, but
    // not multi for now (as this would cause problems with the buffer
    // manager).
    ndrange_t get_window_size() const override {
        unsigned int row_blocks = (_Mround / strategy::out_height()) * _nbatches;

        if (_thread_columns) {
            return { row_blocks, iceildiv(_Nsize, strategy::out_width()) };
        } else {
            // _Mround is a multiple of out_height by definition.
            return { row_blocks };
        }
    }

    // set_nthreads: pass on to buffer manager to avoid it waiting for non-existent threads.
    void set_nthreads(int nthreads) override {
        _nthreads = std::min(nthreads, _maxthreads);
    }

    // Execute
    void execute(const ndcoord_t &work_range, const ndcoord_t &, int threadid) override {
#ifdef CYCLE_PROFILING
        profiler prof;
#endif

        /* Make sure we've been set up correctly. */
        assert(FixedFormat || _B_transposed);
        assert(_working_space);
        int8_t *working_space_bytes = reinterpret_cast<int8_t *>(_working_space);

        /* Align if needed */
        intptr_t working_space_v = reinterpret_cast<intptr_t>(_working_space);
        if (working_space_v & 0x3f) {
            intptr_t alignment_offset = 0x40 - (working_space_v & 0x3f);
            working_space_bytes += alignment_offset;
        }

        strategy strat(_ci);

        const auto start = work_range.get_position(0);
        const auto end   = work_range.get_position_end(0);

        /* Translate 'start' and 'end' into a position within the batches and rows. */
        const unsigned int window_per_batch = _Mround / strategy::out_height();
        unsigned int batch_0   = start / window_per_batch;
        unsigned int batch_end = end   / window_per_batch;

        // In ThreadColumns mode, process work one horizontal strip at a time.
        // Transpose the block of needed rows at the start, then do all the work on that block.
        if (_thread_columns) {
            const auto start_x = work_range.get_position(1) * strategy::out_width();
            const auto end_x = std::min(work_range.get_position_end(1) * strategy::out_width(), _Nsize);

            Tri * const c_panel = reinterpret_cast<Tri *>(working_space_bytes + (threadid * get_c_working_size()));
            Toi * const a_panel = reinterpret_cast<Toi *>(working_space_bytes + (_maxthreads * get_c_working_size()) +
                                       (threadid * sizeof(Toi) * get_total_k_depth() * strategy::out_height()));

            for (unsigned int multi=0; multi<_nmulti; multi++) {
                for (unsigned int k0=0; k0<_Ktotal; k0+=_k_block) {
                    unsigned int kmax=std::min(k0+_k_block, _Ktotal);

                    unsigned int rounded_width = roundup(_Nsize, strategy::out_width());

                    const bool first_pass = (k0==0);
                    const bool last_pass  = (kmax==_Ktotal);

                    // Figure out how many "K" the kernel will actually process.
                    unsigned int kern_k = roundup(kmax - k0, strategy::k_unroll());

                    const Toi *b_ptr = FixedFormat ?
                        reinterpret_cast<const Toi *>(this->_Bptr) + (multi * this->_B_multi_stride) +
                                                     ((start_x / get_stripe_width<strategy, FixedFormat>::get()) * this->_ldb) +
                                                     (k0 * get_stripe_width<strategy, FixedFormat>::get()) :
                        _B_transposed + (rounded_width * _Ktotal * multi) + (k0 * rounded_width) + (start_x * kern_k);

                    unsigned int batch     = batch_0;
                    unsigned int start_row = (start - (batch_0 * window_per_batch)) * strategy::out_height();

                    for (unsigned int p=start; p<end; p++) {
                        unsigned int end_row = std::min(start_row + strategy::out_height(), _Msize);

                        // Set up transposed 'A' block
                        {
#ifdef CYCLE_PROFILING
                            auto p=prof.ScopedProfiler(PROFILE_PREPA, strategy::out_height() * (kmax-k0) * sizeof(Toi));
#endif
                            // See comment above on transform_type<> class: this extracts either 'transforms' or
                            // 'transforms_quantized' as appropriate.
                            typename transform_type<strategy, MergeStep && std::is_same<OutputStage, Requantize32>::value>::type transforms;

                            if (_indirect_buf != nullptr) {
                                transforms.PrepareA_indirect(a_panel,
                                                             _indirect_buf + (multi * _nbatches * _Ksections) + (batch * _Ksections), _Ksize,
                                                             _rounded_Ksize, start_row, end_row, k0, kmax, row_sum_multiplier());
                            } else if (_convolver) {
                                transforms.PrepareA_convolution(a_panel,
                                                                this->_Aptr + (batch * this->_A_batch_stride) + (multi * this->_A_multi_stride),
                                                                this->_lda, *_convolver, _rounded_Ksize, start_row, end_row, k0, kmax, row_sum_multiplier());
                            } else {
                                transforms.PrepareA(a_panel,
                                                    this->_Aptr + (batch * this->_A_batch_stride) + (multi * this->_A_multi_stride),
                                                    this->_lda, start_row, end_row, k0, std::min(kmax, _Ksize), row_sum_multiplier());
                            }
                        }

                        // Perform the kernel and merge step, either separately or together as required.
                        kernel_and_merge<MergeStep, FixedFormat, OutputStage>::run(
                        #ifdef CYCLE_PROFILING
                            prof,
                        #endif
                            // Strategy and panel pointers
                            strat, a_panel, b_ptr, this->_ldb, c_panel,
                            // Result buffer pointers
                            this->_Cptr + (batch * this->_C_batch_stride) + (multi * this->_C_multi_stride), this->_ldc,
                            // K size, and M/N ranges
                            kern_k, start_row, end_row, start_x, end_x,
                            // Only do bias on the first pass
                            ((first_pass && this->_bias) ? this->_bias + (multi * this->_bias_multi_stride) : nullptr),
                            // Only do activation on the last pass, and accumulation on any non-first pass.
                            (last_pass ? _act : Activation()), !first_pass,
                            // Pass in quantization parameters for requantizing kernels (others will ignore)
                            _os, col_bias + (multi * _Nsize),
                            // Accumulation buffer (not yet implemented on this path)
                            static_cast<Tab *>(nullptr));

                        /* Increment to the next block */
                        start_row += strategy::out_height();
                        if (start_row >= _Msize) {
                            start_row = 0;
                            batch++;
                        }
                    }
                }
            }
        } else {
            blockwalker current(*this);

            /* Compute the M values to operate on */
            unsigned int m_0   = (start - (batch_0 * window_per_batch)) * strategy::out_height();
            unsigned int m_max = (end - (batch_end * window_per_batch)) * strategy::out_height();

            // Private buffers.  Treat working_space as an array of C buffers
            // (one per thread) first, followed by the (window-divided) A
            // buffer.
            // Set a_panel to the base of the A buffers - compute offsets into it based on M/batches later.
            Toi * const a_panel = reinterpret_cast<Toi *>(working_space_bytes + (_maxthreads * get_c_working_size()));
            Tri * const c_panel = reinterpret_cast<Tri *>(working_space_bytes + (threadid * get_c_working_size()));

            const Toi *b_panel;
            b_panel = _B_transposed;
            // newkblock() is always true on the first iteration, so these will be set properly on the first loop.

            // kern_k tracks the accumulation depth for the CURRENT K block; a_panel_stride similarly tracks the
            // total stride of the A panel (i.e. with 4 bytes added for cases with embedded row sums).

            // These are distinct from k_block and get_total_k_depth(), which are based on the target K block size
            // and used for addressing inside a_panel.

            // In cases where K blocking is in use and the blocks are not all the same size, the (smaller) final
            // block won't use all the memory allocated.
            unsigned int kern_k = 0;
            unsigned int a_panel_stride = 0;

            for (;!current.done();current.advance()) {
                if (current.newkblock()) {
#ifdef CYCLE_PROFILING
                    auto p=prof.ScopedProfiler(PROFILE_PREPA, (end - start) * strategy::out_height() * (current.kmax()-current.k0()) * sizeof(Toi));
#endif
                    // See comment above on transform_type<> class: this extracts either 'transforms' or
                    // 'transforms_quantized' as appropriate.
                    typename transform_type<strategy, MergeStep && std::is_same<OutputStage, Requantize32>::value>::type transforms;

                    for (unsigned int batch = batch_0; batch <= batch_end; batch++) {
                        unsigned int first_m = (batch == batch_0)   ? m_0   : 0;
                        unsigned int last_m  = (batch == batch_end) ? m_max : _Msize;

                        if (first_m >= last_m)
                            continue;

                        if (_indirect_buf != nullptr) {
                            transforms.PrepareA_indirect(a_panel + ((batch * _Mround + first_m) * get_total_k_depth()),
                                                      _indirect_buf + (current.multi() * _nbatches * _Ksections) + (batch * _Ksections), _Ksize,
                                                      _rounded_Ksize, first_m, last_m, current.k0(), current.kmax(), row_sum_multiplier());
                        } else if (_convolver) {
                            transforms.PrepareA_convolution(a_panel + ((batch * _Mround + first_m) * get_total_k_depth()),
                                                      this->_Aptr + (batch * this->_A_batch_stride) + (current.multi() * this->_A_multi_stride),
                                                      this->_lda, *_convolver, _rounded_Ksize, first_m, last_m, current.k0(), current.kmax(), row_sum_multiplier());
                        } else {
                            transforms.PrepareA(a_panel + ((batch * _Mround + first_m) * get_total_k_depth()),
                                                      this->_Aptr + (batch * this->_A_batch_stride) + (current.multi() * this->_A_multi_stride),
                                                      this->_lda, first_m, last_m, current.k0(), std::min(_Ksize, current.kmax()), row_sum_multiplier());
                        }
                    }

                    // Figure out how many "K" the kernel will actually process.
                    kern_k = roundup(current.kmax() - current.k0(), strategy::k_unroll());

                    // Requantizing GEMMs have the row sums built in to the
                    // transposed data, so the stride between rows is 4 bytes
                    // larger than the (rounded) K value.

                    if(std::is_same<OutputStage, Requantize32>::value) {
                        a_panel_stride = kern_k + (sizeof(int32_t) / sizeof(Toi));
                    } else {
                        a_panel_stride = kern_k;
                    }
                }

                // For FixedFormat cases, figure out the B pointer.  The loop below moves through batches and vertically through the output so this will be the same throughout.
                if (FixedFormat) {
                    b_panel = reinterpret_cast<const Toi *>(this->_Bptr) + (current.multi() * this->_B_multi_stride) +
                                                                           ((current.x0() / get_stripe_width<strategy, FixedFormat>::get()) * this->_ldb) +
                                                                           (current.k0() * get_stripe_width<strategy, FixedFormat>::get());
                }

                /* Do the actual work. */
                for (unsigned int batch = batch_0; batch <= batch_end; batch++) {
                    unsigned int first_m = (batch == batch_0)   ? m_0   : 0;
                    unsigned int last_m  = (batch == batch_end) ? m_max : _Msize;

                    const Toi *a_ptr = a_panel + (batch * _Mround + first_m) * get_total_k_depth();

                    if (first_m >= last_m)
                        continue;

                    // For the merge case we need to do this out_height() rows
                    // at a time, as that is the size of our intermediate
                    // buffer.  If we are not doing that, we can do all the
                    // relevant rows in one go.
                    unsigned int m_step = MergeStep ? strategy::out_height() : (last_m - first_m);

                    // But in the case where we have an accumulation buffer, we can't do that after all, unless
                    // there is no N blocking.
                    if (_accumulation_buffer && ((current.x0() != 0) || (current.xmax() < _Nsize))) {
                        m_step = strategy::out_height();
                    }

                    for (unsigned int y=first_m; y<last_m; y+=m_step) {
                        unsigned int ymax = std::min(_Msize, y + m_step);

                        const bool first_pass = (current.k0() == 0);
                        const bool last_pass  = (current.kmax() == _Ktotal);

                        // Pointer to appropriate part of result array.
                        Tr *result_ptr = this->_Cptr + (batch * this->_C_batch_stride) + (current.multi() * this->_C_multi_stride);

                        // If we are using an accumulation buffer, pass a null result pointer so that the kernel
                        // writes into the accumulation buffer instead, except on the last pass.
                        if (_accumulation_buffer && !last_pass) {
                            result_ptr = nullptr;
                        }

                        // Perform the kernel and merge step, either separately or together as required.
                        kernel_and_merge<MergeStep, FixedFormat, OutputStage>::run(
                        #ifdef CYCLE_PROFILING
                            prof,
                        #endif
                            // Strategy and panel pointers
                            strat, a_ptr, b_panel, this->_ldb, c_panel,
                            // Result buffer pointers
                            result_ptr, this->_ldc,
                            // K size, and M/N ranges
                            kern_k, y, ymax, current.x0(), current.xmax(),
                            // Only do bias on the first pass
                            ((first_pass && this->_bias) ? this->_bias + (current.multi() * this->_bias_multi_stride) : nullptr),
                            // Only do activation on the last pass, and accumulation on any non-first pass.
                            (last_pass ? _act : Activation()), !first_pass,
                            // Pass in quantization parameters for requantizing kernels (others will ignore)
                            _os, col_bias + (current.multi() * _Nsize),
                            // Accumulation buffer
                            get_accumulation_buffer(y, current.x0(), batch, current.multi()) );

                        a_ptr += (strategy::out_height() * a_panel_stride);
                    }
                }

                if (FixedFormat == false) {
                    b_panel += (roundup(current.xmax() - current.x0(), strategy::out_width()) * kern_k);
                }
            }
        }
    }

    // Interface implementation - working space
    size_t get_working_size() const override {
        // In all cases, we need one A buffer plus a C buffer per thread, plus an accumulation buffer.
        size_t size = get_a_working_size() + (get_c_working_size() * _maxthreads) + get_accumulation_buffer_size();

        size += 128; // Add on two cache lines extra for alignment.

        return size;
    }
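
    // The working space is therefore laid out as (addresses ascending, after
    // cache-line alignment): the per-thread C panels, then the A panel(s),
    // then the accumulation buffer - matching the offsets computed in
    // execute() above and set_working_space() below.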

    void set_working_space(void *working_space) override {
        // Make sure everything ends up cache line aligned
        int8_t *working_space_bytes = reinterpret_cast<int8_t *>(working_space);
        intptr_t working_space_int = reinterpret_cast<intptr_t>(working_space);

        size_t diff=0;

        if (working_space_int & 0x3F) {
            diff = 0x40 - (working_space_int & 0x3F);
        }

        working_space_bytes += diff;
        working_space_int += diff;

        // Pretransposed case: just set internal pointer to parameter value.
        _working_space = reinterpret_cast<void *>(working_space_bytes);

        // Set up accumulation buffer
        if (get_accumulation_buffer_size() > 0) {
            intptr_t acc_buff_int = working_space_int + get_a_working_size() + (get_c_working_size() * _maxthreads);
            // Make sure the accumulation buffer is aligned (needed if the other blocks are not a multiple of cache line length)
            if (acc_buff_int & 0x3F) {
                acc_buff_int += (0x40 - (acc_buff_int & 0x3F));
            }
            _accumulation_buffer = reinterpret_cast<Tab *>(acc_buff_int);
        } else {
            _accumulation_buffer = nullptr;
        }
    }

    // Interface implementation - pretransposed
    bool B_is_pretransposed() const override {
        return (FixedFormat == false);
    }

    bool B_pretranspose_required() const override {
        return (FixedFormat == false) && (_B_transposed==nullptr);
    }

    size_t get_B_pretransposed_array_size() const override {
        if (FixedFormat) {
            return 0;
        }

        unsigned int x_size = roundup(_Nsize, strategy::out_width());

        return (x_size * _Ktotal * _nmulti * sizeof(Toi)) + get_col_sum_size();
    }

    void requantize_bias(void *in_buffer, const To *B, const int ldb, const int B_multi_stride) override {
        if (std::is_same<OutputStage, Requantize32>::value) {
            col_bias = reinterpret_cast<int32_t *>(in_buffer);

            Requantize32 *qp_ptr = reinterpret_cast<Requantize32 *>(&_os);

            for (unsigned int i=0; i<_nmulti; i++) {
                // The input is assumed not to have any padding between sections, so straightforward Ksize * Ksections computation gets the total size.
                compute_col_sums(*qp_ptr, _Nsize, _Ksize * _Ksections, B + (i * B_multi_stride), ldb, col_bias + (i * _Nsize), _Ksize * _Ksections, i, 0);
            }
        }
    }

    void pretranspose_B_array(void *in_buffer, const To *B, const int ldb, const int B_multi_stride) override {
        requantize_bias(in_buffer, B, ldb, B_multi_stride);

        // Put the transposed data after the column sums - in non-quantized cases get_col_sum_size() == 0
        uintptr_t buffer_int = reinterpret_cast<uintptr_t>(in_buffer);
        Toi *buffer = reinterpret_cast<Toi *>(buffer_int + get_col_sum_size());
        _B_transposed = buffer;

        blockwalker current(*this);
        strategy strat(_ci);

        do {
            /* Figure out the size of each block. */
            unsigned int k_size = (current.kmax() - current.k0());

            if (_Ksections > 1) {
                // We need to insert padding at the end of each K section.
                // The computation needed is a little delicate - the coordinates from the block walker are expressed in
                // terms of the full, padded, _Ktotal.
                // But we need to transform each section with reference to the original, unpadded, input, letting the
                // transform pad each section as needed.

                // This is needed for computations below.
                const unsigned int rounded_section_size = roundup(_Ksize, strategy::k_unroll());

                // The expected output format is also an entire <out_width> columns interleaved, then the next set of
                // columns, and so on.  This means, as we are breaking it up vertically, we have to do it one column at
                // a time.
                for (unsigned int x0=current.x0(); x0 < current.xmax(); x0 += strategy::out_width() ) {
                    unsigned int xmax = std::min(x0 + strategy::out_width(), current.xmax());

                    // Track where we are and how much work is left.
                    unsigned int kpos  = current.k0();
                    unsigned int kleft = k_size;

                    while (kleft) {
                        // Which section are we in?  Based on the rounded-up section size.
                        unsigned int k_section_base = kpos / rounded_section_size;
                        // How far into the section are we?
                        unsigned int k_offset = kpos - (k_section_base * rounded_section_size);

                        // We will either copy the rest of this section, or to the end of the requested length.
                        unsigned int k_length = std::min(_Ksize - k_offset, kleft);

                        strat.transforms.PrepareB(buffer, B + (current.multi() * B_multi_stride), ldb,
                                                  x0, xmax,
                                                  (k_section_base * _Ksize) + k_offset,               // K starting point - compute row to read based on our section and the true section length.
                                                  (k_section_base * _Ksize) + k_offset + k_length);   // K end point - starting point plus length computed above.

                        // We need to modify our position based on the ROUNDED version of what we just did.
                        unsigned int padded_length = roundup(k_length, strategy::k_unroll());

                        buffer += strategy::out_width() * padded_length;

                        kpos  += padded_length;
                        kleft -= padded_length;
                    }
                }
            } else {
                // In the single K section case, can process the whole lot in one go.
                // Caution: 'blockwalker::kmax()' rounds up, so clamp to valid _Ksize.
                strat.transforms.PrepareB(buffer, B + (current.multi() * B_multi_stride), ldb,
                                          current.x0(), current.xmax(), current.k0(), std::min(current.kmax(), _Ksize));
                buffer += roundup(current.xmax() - current.x0(), strategy::out_width()) * roundup(current.kmax() - current.k0(), strategy::k_unroll());
            }
        } while (current.advance());
    }
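
    // Worked example of the section arithmetic above (hypothetical numbers):
    // with _Ksize=10, k_unroll()=4 and _Ksections=3, rounded_section_size=12
    // and _Ktotal=36.  A walker position kpos=14 therefore maps to section 1
    // (14/12) at offset 2 (14-12), so the transform reads source rows
    // 1*10+2 = 12 onwards, and kpos then advances by the rounded copy length.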

    void set_pretransposed_B_data(void *in_buffer) override {
        // Put the transposed data after the column sums - in non-quantized cases get_col_sum_size() == 0
        uintptr_t buffer_int = reinterpret_cast<uintptr_t>(in_buffer);
        _B_transposed = reinterpret_cast<Toi *>(buffer_int + get_col_sum_size());
        col_bias = reinterpret_cast<int32_t *>(in_buffer);
    }

    void set_quantized_bias(const int32_t *bias, size_t bias_multi_stride) override {
        if (std::is_same<OutputStage, Requantize32>::value) {
            Requantize32 *qp = reinterpret_cast<Requantize32 *>(&_os);

            qp->bias = bias;
            qp->bias_multi_stride = bias_multi_stride;
        }
    }

    void set_indirect_parameters(size_t string_len, const To * const * const *ptr) override {
        assert(string_len == _Ksize);
        _indirect_buf = ptr;
    }

    void set_convolution_parameters(ConvolutionParameters parms) override {
        assert(parms.input_channels == _Ksize);
        _convolver = std::unique_ptr<convolver<To>>(new convolver<To>(parms));
    }

    // Estimate cycles for the given problem using the provided performance parameters.
    template<typename perf_type>
    static uint64_t estimate_cycles(const GemmArgs &args) {
        unsigned int k_blocks = iceildiv(args._Ksize, get_k_block_size(args));

        const PerformanceParameters &params = strategy::template get_performance_parameters<perf_type>(args._ci);

        uint64_t total_macs    = static_cast<uint64_t>(args._nbatches) * args._nmulti * roundup(args._Msize, strategy::out_height()) * roundup(args._Nsize, strategy::out_width()) * get_ktotal(args);
        uint64_t prepare_bytes = static_cast<uint64_t>(args._nbatches) * args._nmulti * roundup(args._Msize, strategy::out_height()) * get_ktotal(args) * sizeof(Toi);
        uint64_t merge_bytes   = static_cast<uint64_t>(args._nbatches) * args._nmulti * k_blocks * args._Msize * roundup(args._Nsize, strategy::out_width()) * sizeof(Tr);

        float mac_cycles     = static_cast<float>(total_macs) / params.kernel_macs_cycle;
        float prepare_cycles = static_cast<float>(prepare_bytes) / params.prepare_bytes_cycle;
        float merge_cycles   = static_cast<float>(merge_bytes) / params.merge_bytes_cycle;

        float total_cycles = mac_cycles + prepare_cycles + merge_cycles;

        // We can't thread over multis or width, which makes this a poor
        // choice in many threaded cases.  Penalize that here.
        float parallelism_available = static_cast<float>(iceildiv(args._Msize, strategy::out_height()) * args._nbatches) * 0.9f;

        if (parallelism_available < args._maxthreads) {
            total_cycles *= (static_cast<float>(args._maxthreads) / parallelism_available);
        }

        return static_cast<uint64_t>(total_cycles);
    }

    GemmConfig get_config() override {
        GemmConfig c;

        c.method = GemmMethod::GEMM_INTERLEAVED;
        c.inner_block_size = _k_block;
        c.outer_block_size = _x_block;
        c.filter = get_type_name<strategy>();
        c.weight_format = get_weight_format(get_kernel_weight_format<strategy, FixedFormat, To>::get(), sizeof(To));

        return c;
    }
};

// Aliases for the variations
template<typename strategy, typename To, typename Tr, typename OutputStage=Nothing>
using GemmInterleavedNoMerge = GemmInterleaved<strategy, To, Tr, OutputStage, false>;

template<typename strategy, typename To, typename Tr, typename OutputStage=Nothing>
using GemmInterleavedFixedFormat = GemmInterleaved<strategy, To, Tr, OutputStage, true, true>;

template<typename strategy, typename To, typename Tr>
using GemmInterleavedPretransposedNoMergeQuantizedInline = GemmInterleaved<strategy, To, Tr, Requantize32, false>;

template<typename strategy, typename To, typename Tr>
using GemmInterleavedQuantized = GemmInterleaved<strategy, To, Tr, Requantize32>;

} // namespace arm_gemm