// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/scalar.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>


void xnn_qs8_gemm_minmax_fp32_ukernel_1x2__scalar_imagic(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);

  const int8_t* a0 = a;
  int8_t* c0 = c;

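  // Each outer-loop iteration produces one 1x2 output tile: two adjacent
  // output channels of the single row this microkernel handles.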
  do {
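    // The packed weights start with 2 int32 bias values; use them to
    // initialize the accumulators, then advance w to the int8 weights.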
    int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0);
    int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1);
    w = (const void*) ((const int32_t*) w + 2);

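    // Inner loop over K: one multiply-accumulate per input element, with
    // the weights for both output channels interleaved in pairs.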
    size_t k = kc;
    do {
      const int32_t va0 = (int32_t) *a0++;

      const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
      const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
      w = (const void*) ((const int8_t*) w + 2);

      vacc0x0 += va0 * vb0;
      vacc0x1 += va0 * vb1;

      k -= sizeof(int8_t);
    } while (k != 0);

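    // Requantization starts here: convert the int32 accumulators to float
    // and apply the combined requantization scale.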
    float vfpacc0x0 = (float) vacc0x0;
    float vfpacc0x1 = (float) vacc0x1;

    const float vscale = params->fp32_scalar_imagic.scale;
    vfpacc0x0 *= vscale;
    vfpacc0x1 *= vscale;

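    // "imagic" trick: adding a large magic bias (typically 0x1.8p+23f =
    // 12582912.0f) pushes the scaled value into the low mantissa bits, so
    // reinterpreting the float's bits as an integer yields the rounded
    // result plus a fixed offset.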
    const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
    vfpacc0x0 += vmagic_bias;
    vfpacc0x1 += vmagic_bias;

    int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0);
    int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1);

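    // Clamp in the integer domain; magic_min/magic_max encode the output
    // range with the magic-bias offset still applied.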
    const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
    vout0x0 = math_max_s32(vout0x0, vmagic_min);
    vout0x1 = math_max_s32(vout0x1, vmagic_min);

    const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
    vout0x0 = math_min_s32(vout0x0, vmagic_max);
    vout0x1 = math_min_s32(vout0x1, vmagic_max);

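    // Remove the magic-bias offset (folded together with the output zero
    // point) to obtain the final quantized value.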
    const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
    vout0x0 -= vmagic_bias_less_zero_point;
    vout0x1 -= vmagic_bias_less_zero_point;

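    // Store the 2 outputs in the common case and rewind a0 for the next
    // column group; otherwise handle the final odd column.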
    if XNN_LIKELY(nc >= 2) {
      c0[0] = (int8_t) vout0x0;
      c0[1] = (int8_t) vout0x1;

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      nc -= 2;
    } else {
      if (nc & 1) {
        c0[0] = (int8_t) vout0x0;
      }

      nc = 0;
    }
  } while (nc != 0);
}