// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/math.h>
#include <xnnpack/gemm.h>


void xnn_qs8_gemm_minmax_fp32_ukernel_2x4__scalar_imagic(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 2);
  assert(nc != 0);
  assert(kc != 0);

  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
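  // If fewer than 2 rows were requested, alias row 1 to row 0 so the
  // duplicated loads and stores stay within valid memory.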
  if XNN_UNPREDICTABLE(mr != 2) {
    a1 = a0;
    c1 = c0;
  }

  do {
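    // Initialize all 2x4 accumulators with the per-channel bias values,
    // which are packed at the start of the weights blob w.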
    int32_t vacc0x0 = ((const int32_t*) w)[0];
    int32_t vacc0x1 = ((const int32_t*) w)[1];
    int32_t vacc0x2 = ((const int32_t*) w)[2];
    int32_t vacc0x3 = ((const int32_t*) w)[3];
    int32_t vacc1x0 = vacc0x0;
    int32_t vacc1x1 = vacc0x1;
    int32_t vacc1x2 = vacc0x2;
    int32_t vacc1x3 = vacc0x3;
    w = (const void*) ((const int32_t*) w + 4);

    size_t k = kc;
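    // Multiply-accumulate over the K dimension: each iteration consumes one
    // int8 element per row of A and 4 packed int8 weights.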
    do {
      const int32_t va0 = (int32_t) *a0++;
      const int32_t va1 = (int32_t) *a1++;

      const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
      const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
      const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
      const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
      w = (const void*) ((const int8_t*) w + 4);

      vacc0x0 += va0 * vb0;
      vacc0x1 += va0 * vb1;
      vacc0x2 += va0 * vb2;
      vacc0x3 += va0 * vb3;
      vacc1x0 += va1 * vb0;
      vacc1x1 += va1 * vb1;
      vacc1x2 += va1 * vb2;
      vacc1x3 += va1 * vb3;

      k -= sizeof(int8_t);
    } while (k != 0);

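    // Requantization, "fp32 imagic" variant: convert the int32 accumulators
    // to float so the scale can be applied as a single multiplication.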
    float vfpacc0x0 = (float) vacc0x0;
    float vfpacc0x1 = (float) vacc0x1;
    float vfpacc0x2 = (float) vacc0x2;
    float vfpacc0x3 = (float) vacc0x3;
    float vfpacc1x0 = (float) vacc1x0;
    float vfpacc1x1 = (float) vacc1x1;
    float vfpacc1x2 = (float) vacc1x2;
    float vfpacc1x3 = (float) vacc1x3;

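    // Apply the precomputed requantization scale.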
    const float vscale = params->fp32_scalar_imagic.scale;
    vfpacc0x0 *= vscale;
    vfpacc0x1 *= vscale;
    vfpacc0x2 *= vscale;
    vfpacc0x3 *= vscale;
    vfpacc1x0 *= vscale;
    vfpacc1x1 *= vscale;
    vfpacc1x2 *= vscale;
    vfpacc1x3 *= vscale;

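    // Adding the large magic bias shifts each value so that its rounded
    // integer part lands in the low mantissa bits of the float representation.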
    const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
    vfpacc0x0 += vmagic_bias;
    vfpacc0x1 += vmagic_bias;
    vfpacc0x2 += vmagic_bias;
    vfpacc0x3 += vmagic_bias;
    vfpacc1x0 += vmagic_bias;
    vfpacc1x1 += vmagic_bias;
    vfpacc1x2 += vmagic_bias;
    vfpacc1x3 += vmagic_bias;

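    // Reinterpret the float bits as int32: the low bits now hold the
    // magic-biased, rounded result.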
    int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0);
    int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1);
    int32_t vout0x2 = (int32_t) float_as_uint32(vfpacc0x2);
    int32_t vout0x3 = (int32_t) float_as_uint32(vfpacc0x3);
    int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0);
    int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1);
    int32_t vout1x2 = (int32_t) float_as_uint32(vfpacc1x2);
    int32_t vout1x3 = (int32_t) float_as_uint32(vfpacc1x3);

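    // Clamp in the integer domain; magic_min/magic_max encode the output
    // min/max bounds in the same magic-biased representation.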
    const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
    vout0x0 = math_max_s32(vout0x0, vmagic_min);
    vout0x1 = math_max_s32(vout0x1, vmagic_min);
    vout0x2 = math_max_s32(vout0x2, vmagic_min);
    vout0x3 = math_max_s32(vout0x3, vmagic_min);
    vout1x0 = math_max_s32(vout1x0, vmagic_min);
    vout1x1 = math_max_s32(vout1x1, vmagic_min);
    vout1x2 = math_max_s32(vout1x2, vmagic_min);
    vout1x3 = math_max_s32(vout1x3, vmagic_min);

    const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
    vout0x0 = math_min_s32(vout0x0, vmagic_max);
    vout0x1 = math_min_s32(vout0x1, vmagic_max);
    vout0x2 = math_min_s32(vout0x2, vmagic_max);
    vout0x3 = math_min_s32(vout0x3, vmagic_max);
    vout1x0 = math_min_s32(vout1x0, vmagic_max);
    vout1x1 = math_min_s32(vout1x1, vmagic_max);
    vout1x2 = math_min_s32(vout1x2, vmagic_max);
    vout1x3 = math_min_s32(vout1x3, vmagic_max);

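    // Subtracting (magic bias bit pattern - output zero point) removes the
    // magic bias and re-centers the result on the output zero point.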
    const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
    vout0x0 -= vmagic_bias_less_zero_point;
    vout0x1 -= vmagic_bias_less_zero_point;
    vout0x2 -= vmagic_bias_less_zero_point;
    vout0x3 -= vmagic_bias_less_zero_point;
    vout1x0 -= vmagic_bias_less_zero_point;
    vout1x1 -= vmagic_bias_less_zero_point;
    vout1x2 -= vmagic_bias_less_zero_point;
    vout1x3 -= vmagic_bias_less_zero_point;

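    // With at least 4 output columns remaining, store the full 2x4 tile,
    // rewind the A pointers, and advance the C pointers to the next tile.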
    if XNN_LIKELY(nc >= 4) {
      c0[0] = (int8_t) vout0x0;
      c0[1] = (int8_t) vout0x1;
      c0[2] = (int8_t) vout0x2;
      c0[3] = (int8_t) vout0x3;
      c1[0] = (int8_t) vout1x0;
      c1[1] = (int8_t) vout1x1;
      c1[2] = (int8_t) vout1x2;
      c1[3] = (int8_t) vout1x3;

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);

      nc -= 4;
    } else {
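      // Fewer than 4 columns remain: store 2 and then 1 as needed, shifting
      // the leftover results down after each partial store.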
      if (nc & 2) {
        c0[0] = (int8_t) vout0x0;
        c0[1] = (int8_t) vout0x1;
        vout0x0 = vout0x2;
        c0 += 2;
        c1[0] = (int8_t) vout1x0;
        c1[1] = (int8_t) vout1x1;
        vout1x0 = vout1x2;
        c1 += 2;
      }
      if (nc & 1) {
        c0[0] = (int8_t) vout0x0;
        c1[0] = (int8_t) vout1x0;
      }

      nc = 0;
    }
  } while (nc != 0);
}