// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/scalar.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

10 #include <assert.h>
11
12 #include <xnnpack/math.h>
13 #include <xnnpack/gemm.h>
14
15
xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_imagic(size_t mr,size_t nc,size_t kc,const int8_t * restrict a,size_t a_stride,const void * restrict w,int8_t * restrict c,size_t cm_stride,size_t cn_stride,const union xnn_qc8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS (1)])16 void xnn_qc8_gemm_minmax_fp32_ukernel_1x4__scalar_imagic(
17 size_t mr,
18 size_t nc,
19 size_t kc,
20 const int8_t* restrict a,
21 size_t a_stride,
22 const void* restrict w,
23 int8_t* restrict c,
24 size_t cm_stride,
25 size_t cn_stride,
26 const union xnn_qc8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
27 {
28 assert(mr != 0);
29 assert(mr <= 1);
30 assert(nc != 0);
31 assert(kc != 0);
32
33 const int8_t* a0 = a;
34 int8_t* c0 = c;
35
36 do {
37 int32_t vacc0x0 = ((const int32_t*) w)[0];
38 int32_t vacc0x1 = ((const int32_t*) w)[1];
39 int32_t vacc0x2 = ((const int32_t*) w)[2];
40 int32_t vacc0x3 = ((const int32_t*) w)[3];
41 w = (const void*) ((const int32_t*) w + 4);
42
43 size_t k = kc;
44 do {
45 const int32_t va0 = (int32_t) *a0++;
46
47 const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
48 const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
49 const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
50 const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
51 w = (const void*) ((const int8_t*) w + 4);
52
53 vacc0x0 += va0 * vb0;
54 vacc0x1 += va0 * vb1;
55 vacc0x2 += va0 * vb2;
56 vacc0x3 += va0 * vb3;
57
58 k -= sizeof(int8_t);
59 } while (k != 0);
60
61 float vfpacc0x0 = (float) vacc0x0;
62 float vfpacc0x1 = (float) vacc0x1;
63 float vfpacc0x2 = (float) vacc0x2;
64 float vfpacc0x3 = (float) vacc0x3;
65
66 const float vscale0 = ((const float*) w)[0];
67 vfpacc0x0 *= vscale0;
68 const float vscale1 = ((const float*) w)[1];
69 vfpacc0x1 *= vscale1;
70 const float vscale2 = ((const float*) w)[2];
71 vfpacc0x2 *= vscale2;
72 const float vscale3 = ((const float*) w)[3];
73 vfpacc0x3 *= vscale3;
74 w = (const void*) ((const float*) w + 4);
75
76 const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
77 vfpacc0x0 += vmagic_bias;
78 vfpacc0x1 += vmagic_bias;
79 vfpacc0x2 += vmagic_bias;
80 vfpacc0x3 += vmagic_bias;
81
82 int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0);
83 int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1);
84 int32_t vout0x2 = (int32_t) float_as_uint32(vfpacc0x2);
85 int32_t vout0x3 = (int32_t) float_as_uint32(vfpacc0x3);
86
87 const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
88 vout0x0 = math_max_s32(vout0x0, vmagic_min);
89 vout0x1 = math_max_s32(vout0x1, vmagic_min);
90 vout0x2 = math_max_s32(vout0x2, vmagic_min);
91 vout0x3 = math_max_s32(vout0x3, vmagic_min);
92
93 const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
94 vout0x0 = math_min_s32(vout0x0, vmagic_max);
95 vout0x1 = math_min_s32(vout0x1, vmagic_max);
96 vout0x2 = math_min_s32(vout0x2, vmagic_max);
97 vout0x3 = math_min_s32(vout0x3, vmagic_max);
98
99 const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
100 vout0x0 -= vmagic_bias_less_zero_point;
101 vout0x1 -= vmagic_bias_less_zero_point;
102 vout0x2 -= vmagic_bias_less_zero_point;
103 vout0x3 -= vmagic_bias_less_zero_point;
104
105 if XNN_LIKELY(nc >= 4) {
106 c0[0] = (int8_t) vout0x0;
107 c0[1] = (int8_t) vout0x1;
108 c0[2] = (int8_t) vout0x2;
109 c0[3] = (int8_t) vout0x3;
110
111 a0 = (const int8_t*) ((uintptr_t) a0 - kc);
112
113 c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
114
115 nc -= 4;
116 } else {
117 if (nc & 2) {
118 c0[0] = (int8_t) vout0x0;
119 c0[1] = (int8_t) vout0x1;
120 vout0x0 = vout0x2;
121 c0 += 2;
122 }
123 if (nc & 1) {
124 c0[0] = (int8_t) vout0x0;
125 }
126
127 nc = 0;
128 }
129 } while (nc != 0);
130 }
131