// Auto-generated file. Do not edit!
//   Template: src/qs8-vcvt/sse2.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
#include <xnnpack/unaligned.h>


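// Requantization kernel: for each signed 8-bit element this computes
//   y = sat_int8((bias - (int32_t) x * multiplier) >> 8)
// with x sign-extended to 16 bits. multiplier and bias arrive precomputed in
// params; they are assumed to encode the input/output scale ratio and zero
// points (per the matching params-init routine), which is not visible in
// this file.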
void xnn_qs8_vcvt_ukernel__sse2_x32(
    size_t n,
    const int8_t* x,
    int8_t* y,
    const union xnn_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(int8_t) == 0);
  assert(x != NULL);
  assert(y != NULL);

  const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
  const __m128i vbias = _mm_load_si128((const __m128i*) params->sse2.bias);
  for (; n >= 32 * sizeof(int8_t); n -= 32 * sizeof(int8_t)) {
    const __m128i vx0 = _mm_loadu_si128((const __m128i*) x);
    const __m128i vx1 = _mm_loadu_si128((const __m128i*) (x + 16));
    x += 32;

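    // Sign-extend int8 -> int16: the compare against zero yields 0xFF in each
    // negative lane, so interleaving x with that mask widens every byte to a
    // sign-extended 16-bit lane (low/high halves separately).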
    const __m128i vm0 = _mm_cmpgt_epi8(_mm_setzero_si128(), vx0);
    const __m128i vextx0 = _mm_unpacklo_epi8(vx0, vm0);
    const __m128i vextx1 = _mm_unpackhi_epi8(vx0, vm0);
    const __m128i vm1 = _mm_cmpgt_epi8(_mm_setzero_si128(), vx1);
    const __m128i vextx2 = _mm_unpacklo_epi8(vx1, vm1);
    const __m128i vextx3 = _mm_unpackhi_epi8(vx1, vm1);

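    // 16x16 -> 32-bit multiply: _mm_mullo_epi16/_mm_mulhi_epi16 produce the
    // low and high 16 bits of each product; they are re-interleaved into
    // full 32-bit accumulators below.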
    const __m128i vprodlo0 = _mm_mullo_epi16(vextx0, vmultiplier);
    const __m128i vprodhi0 = _mm_mulhi_epi16(vextx0, vmultiplier);
    const __m128i vprodlo1 = _mm_mullo_epi16(vextx1, vmultiplier);
    const __m128i vprodhi1 = _mm_mulhi_epi16(vextx1, vmultiplier);
    const __m128i vprodlo2 = _mm_mullo_epi16(vextx2, vmultiplier);
    const __m128i vprodhi2 = _mm_mulhi_epi16(vextx2, vmultiplier);
    const __m128i vprodlo3 = _mm_mullo_epi16(vextx3, vmultiplier);
    const __m128i vprodhi3 = _mm_mulhi_epi16(vextx3, vmultiplier);

    __m128i vacc0 = _mm_unpacklo_epi16(vprodlo0, vprodhi0);
    __m128i vacc1 = _mm_unpackhi_epi16(vprodlo0, vprodhi0);
    __m128i vacc2 = _mm_unpacklo_epi16(vprodlo1, vprodhi1);
    __m128i vacc3 = _mm_unpackhi_epi16(vprodlo1, vprodhi1);
    __m128i vacc4 = _mm_unpacklo_epi16(vprodlo2, vprodhi2);
    __m128i vacc5 = _mm_unpackhi_epi16(vprodlo2, vprodhi2);
    __m128i vacc6 = _mm_unpacklo_epi16(vprodlo3, vprodhi3);
    __m128i vacc7 = _mm_unpackhi_epi16(vprodlo3, vprodhi3);

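    // Subtract each product from the bias; the bias is assumed to fold in the
    // output zero point (scaled by 256), the input zero point's contribution,
    // and a rounding term, per the params initialization.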
    vacc0 = _mm_sub_epi32(vbias, vacc0);
    vacc1 = _mm_sub_epi32(vbias, vacc1);
    vacc2 = _mm_sub_epi32(vbias, vacc2);
    vacc3 = _mm_sub_epi32(vbias, vacc3);
    vacc4 = _mm_sub_epi32(vbias, vacc4);
    vacc5 = _mm_sub_epi32(vbias, vacc5);
    vacc6 = _mm_sub_epi32(vbias, vacc6);
    vacc7 = _mm_sub_epi32(vbias, vacc7);

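    // Arithmetic shift right by 8 discards the low 8 fractional bits of the
    // fixed-point result.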
    vacc0 = _mm_srai_epi32(vacc0, 8);
    vacc1 = _mm_srai_epi32(vacc1, 8);
    vacc2 = _mm_srai_epi32(vacc2, 8);
    vacc3 = _mm_srai_epi32(vacc3, 8);
    vacc4 = _mm_srai_epi32(vacc4, 8);
    vacc5 = _mm_srai_epi32(vacc5, 8);
    vacc6 = _mm_srai_epi32(vacc6, 8);
    vacc7 = _mm_srai_epi32(vacc7, 8);

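    // Saturating narrowing: pack 32 -> 16 bits, then 16 -> 8 bits, clamping
    // each result to [-128, 127].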
    vacc0 = _mm_packs_epi32(vacc0, vacc1);
    vacc1 = _mm_packs_epi32(vacc2, vacc3);
    vacc2 = _mm_packs_epi32(vacc4, vacc5);
    vacc3 = _mm_packs_epi32(vacc6, vacc7);

    const __m128i vy0 = _mm_packs_epi16(vacc0, vacc1);
    const __m128i vy1 = _mm_packs_epi16(vacc2, vacc3);

    _mm_storeu_si128((__m128i*) y, vy0);
    _mm_storeu_si128((__m128i*) (y + 16), vy1);
    y += 32;
  }
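  // Same computation as above, processing one 16-byte vector per iteration.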
  for (; n >= 16 * sizeof(int8_t); n -= 16 * sizeof(int8_t)) {
    const __m128i vx = _mm_loadu_si128((const __m128i*) x);
    x += 16;

    const __m128i vm = _mm_cmpgt_epi8(_mm_setzero_si128(), vx);
    const __m128i vextx_lo = _mm_unpacklo_epi8(vx, vm);
    const __m128i vextx_hi = _mm_unpackhi_epi8(vx, vm);

    const __m128i vprodlo_lo = _mm_mullo_epi16(vextx_lo, vmultiplier);
    const __m128i vprodlo_hi = _mm_mullo_epi16(vextx_hi, vmultiplier);
    const __m128i vprodhi_lo = _mm_mulhi_epi16(vextx_lo, vmultiplier);
    const __m128i vprodhi_hi = _mm_mulhi_epi16(vextx_hi, vmultiplier);

    __m128i vacc_ll = _mm_unpacklo_epi16(vprodlo_lo, vprodhi_lo);
    __m128i vacc_lh = _mm_unpackhi_epi16(vprodlo_lo, vprodhi_lo);
    __m128i vacc_hl = _mm_unpacklo_epi16(vprodlo_hi, vprodhi_hi);
    __m128i vacc_hh = _mm_unpackhi_epi16(vprodlo_hi, vprodhi_hi);

    vacc_ll = _mm_sub_epi32(vbias, vacc_ll);
    vacc_lh = _mm_sub_epi32(vbias, vacc_lh);
    vacc_hl = _mm_sub_epi32(vbias, vacc_hl);
    vacc_hh = _mm_sub_epi32(vbias, vacc_hh);

    vacc_ll = _mm_srai_epi32(vacc_ll, 8);
    vacc_lh = _mm_srai_epi32(vacc_lh, 8);
    vacc_hl = _mm_srai_epi32(vacc_hl, 8);
    vacc_hh = _mm_srai_epi32(vacc_hh, 8);

    const __m128i vacc_lo = _mm_packs_epi32(vacc_ll, vacc_lh);
    const __m128i vacc_hi = _mm_packs_epi32(vacc_hl, vacc_hh);

    const __m128i vy = _mm_packs_epi16(vacc_lo, vacc_hi);
    _mm_storeu_si128((__m128i*) y, vy);
    y += 16;
  }
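  // Tail of 1..15 bytes: a full 16-byte load is still issued, which may read
  // past the end of x; the XNN_OOB_READS annotation on this kernel declares
  // that such over-reads are permitted. Exactly n bytes are stored.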
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(int8_t));
    assert(n <= 15 * sizeof(int8_t));

    const __m128i vx = _mm_loadu_si128((const __m128i*) x);

    const __m128i vm = _mm_cmpgt_epi8(_mm_setzero_si128(), vx);
    const __m128i vextx_lo = _mm_unpacklo_epi8(vx, vm);
    const __m128i vextx_hi = _mm_unpackhi_epi8(vx, vm);

    const __m128i vprodlo_lo = _mm_mullo_epi16(vextx_lo, vmultiplier);
    const __m128i vprodlo_hi = _mm_mullo_epi16(vextx_hi, vmultiplier);
    const __m128i vprodhi_lo = _mm_mulhi_epi16(vextx_lo, vmultiplier);
    const __m128i vprodhi_hi = _mm_mulhi_epi16(vextx_hi, vmultiplier);

    __m128i vacc_ll = _mm_unpacklo_epi16(vprodlo_lo, vprodhi_lo);
    __m128i vacc_lh = _mm_unpackhi_epi16(vprodlo_lo, vprodhi_lo);
    __m128i vacc_hl = _mm_unpacklo_epi16(vprodlo_hi, vprodhi_hi);
    __m128i vacc_hh = _mm_unpackhi_epi16(vprodlo_hi, vprodhi_hi);

    vacc_ll = _mm_sub_epi32(vbias, vacc_ll);
    vacc_lh = _mm_sub_epi32(vbias, vacc_lh);
    vacc_hl = _mm_sub_epi32(vbias, vacc_hl);
    vacc_hh = _mm_sub_epi32(vbias, vacc_hh);

    vacc_ll = _mm_srai_epi32(vacc_ll, 8);
    vacc_lh = _mm_srai_epi32(vacc_lh, 8);
    vacc_hl = _mm_srai_epi32(vacc_hl, 8);
    vacc_hh = _mm_srai_epi32(vacc_hh, 8);

    const __m128i vacc_lo = _mm_packs_epi32(vacc_ll, vacc_lh);
    const __m128i vacc_hi = _mm_packs_epi32(vacc_hl, vacc_hh);

    __m128i vy = _mm_packs_epi16(vacc_lo, vacc_hi);
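    // Store the low n bytes by peeling off 8-, 4-, 2-, and 1-byte pieces,
    // shifting the remaining lanes down after each partial store.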
    if (n & (8 * sizeof(int8_t))) {
      _mm_storel_epi64((__m128i*) y, vy);
      vy = _mm_unpackhi_epi64(vy, vy);
      y += 8;
    }
    if (n & (4 * sizeof(int8_t))) {
      unaligned_store_u32(y, (uint32_t) _mm_cvtsi128_si32(vy));
      vy = _mm_srli_epi64(vy, 32);
      y += 4;
    }
    uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
    if (n & (2 * sizeof(int8_t))) {
      unaligned_store_u16(y, (uint16_t) vy_lo);
      vy_lo >>= 16;
      y += 2;
    }
    if (n & (1 * sizeof(int8_t))) {
      *y = (int8_t) vy_lo;
    }
  }
}
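// Illustrative usage sketch (not part of the generated file). It assumes the
// matching params-init helper, xnn_init_qs8_cvt_sse2_params, fills in
// multiplier/bias from the input-to-output scale ratio and the zero points:
//
//   union xnn_qs8_cvt_params params;
//   xnn_init_qs8_cvt_sse2_params(&params, input_scale / output_scale,
//                                input_zero_point, output_zero_point);
//   xnn_qs8_vcvt_ukernel__sse2_x32(batch * sizeof(int8_t), input, output, &params);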