// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/MRx4c2-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

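// QC8 (per-channel quantized int8) GEMM microkernel. Per XNNPACK's naming
// convention, 1x4c2 means MR=1 row of A, NR=4 columns of C, and KR=2 (K is
// consumed in int8 pairs); "ld128" means weights are loaded 128 bits at a
// time, and requantization is done in fp32 using SSE4.1.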
void xnn_qc8_gemm_minmax_fp32_ukernel_1x4c2__sse41_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qc8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 1);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

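  // Round KC up to a multiple of 2 so the c2 inner loops only ever consume
  // whole int8 pairs; this assumes the weight-packing routine padded the
  // packed weights to the same rounded-up KC.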
  kc = round_up_po2(kc, 2 * sizeof(int8_t));
  const int8_t* a0 = a;
  int8_t* c0 = c;

  do {
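    // The packed weights for each group of 4 output channels start with
    // 4 int32 bias values, which seed the accumulators.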
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    w = (const void*) ((const int32_t*) w + 4);

    size_t k = kc;
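    // Main loop: consume K in blocks of 8 int8 elements (4 KR=2 pairs).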
    while (k >= 8 * sizeof(int8_t)) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
      a0 += 8;

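      // "ld128": load 16 int8 weights in one 128-bit load, then sign-extend
      // the low half with pmovsxbw and the high half with the
      // unpack-high/arithmetic-shift idiom.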
      const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
      const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
      const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
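
      // Broadcast each 2-element slice of the activation row to all four
      // 32-bit lanes and multiply-accumulate against 4 output channels at a
      // time with pmaddwd.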
      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));

      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));

      const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
      const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
      const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);

      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));

      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));

      w = (const void*) ((const int8_t*) w + 32);
      k -= 8 * sizeof(int8_t);
    }
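    // Remainder: handle the last 2, 4, or 6 int8 elements of (rounded) KC.
    // The 8-byte activation load may read past the end of A; XNN_OOB_READS
    // declares that this is permitted.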
    if (k != 0) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
      a0 = (const int8_t*) ((uintptr_t) a0 + k);

      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
      w = (const void*) ((const int8_t*) w + 8);

      vacc0x0123 = _mm_add_epi32(vacc0x0123,
        _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));

      if (k > 2 * sizeof(int8_t)) {
        const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
        const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
        w = (const void*) ((const int8_t*) w + 8);

        vacc0x0123 = _mm_add_epi32(vacc0x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));

        if (k > 4 * sizeof(int8_t)) {
          const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
          const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
          w = (const void*) ((const int8_t*) w + 8);

          vacc0x0123 = _mm_add_epi32(vacc0x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
        }
      }
    }

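    // Requantize: convert the int32 accumulators to fp32, multiply by the
    // per-channel scales stored after the weights, and clamp to the output
    // max while still in fp32, before the rounding conversion back to int32.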
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);

    const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
    w = (const void*) ((const float*) w + 4);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);

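    // Pack to int16 with saturation, add the output zero point, then pack to
    // int8; the output min is applied on the packed int8 values, while the
    // max was already applied in fp32 above.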
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);

    __m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);

    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

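    // Store 4 output channels at once, or peel off a 2-byte and then a
    // 1-byte tail when fewer than 4 columns remain. a0 is rewound by kc so
    // the next NR=4 column block reuses the same activation row.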
    if (nc >= 4) {
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);

      nc -= 4;
    } else {
      if (nc & 2) {
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}