// Auto-generated file. Do not edit!
//   Template: src/qs8-vlrelu/sse2.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vlrelu.h>
#include <xnnpack/unaligned.h>

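// Vectorized leaky-ReLU microkernel for QS8 (signed 8-bit quantized) data:
// rescales n bytes of x into y, 32 elements per main-loop iteration, using
// only SSE2 instructions. Each element is sign-extended to 16 bits,
// recentered around the input zero point, scaled by one of two fixed-point
// multipliers depending on which side of the zero point it falls, and
// requantized to int8.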
void xnn_qs8_vlrelu_ukernel__sse2_x32(
    size_t n,
    const int8_t* x,
    int8_t* y,
    const union xnn_qs8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(int8_t) == 0);
  assert(x != NULL);
  assert(y != NULL);

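  // Parameters precomputed by the init routine; the aligned 128-bit loads and
  // the 16-bit arithmetic below imply each field holds eight int16 lanes with
  // the same value broadcast across them.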
  const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->sse2.input_zero_point);
  const __m128i vmultiplier_diff = _mm_load_si128((const __m128i*) params->sse2.multiplier_diff);
  const __m128i vmultiplier_base = _mm_load_si128((const __m128i*) params->sse2.multiplier_base);
  const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
  const __m128i vzero = _mm_setzero_si128();
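  // Main loop: 32 int8 elements (two 16-byte vectors) per iteration.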
  for (; n >= 32 * sizeof(int8_t); n -= 32 * sizeof(int8_t)) {
    const __m128i vx0 = _mm_loadu_si128((const __m128i*) x);
    const __m128i vx1 = _mm_loadu_si128((const __m128i*) (x + 16));
    x += 32;

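    // Sign-extend int8 to int16: SSE2 has no _mm_cvtepi8_epi16, so build a
    // sign mask with _mm_cmpgt_epi8(0, vx) (all-ones in negative lanes) and
    // interleave the input bytes with it.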
    const __m128i vm0 = _mm_cmpgt_epi8(_mm_setzero_si128(), vx0);
    __m128i vextx0 = _mm_unpacklo_epi8(vx0, vm0);
    __m128i vextx1 = _mm_unpackhi_epi8(vx0, vm0);
    const __m128i vm1 = _mm_cmpgt_epi8(_mm_setzero_si128(), vx1);
    __m128i vextx2 = _mm_unpacklo_epi8(vx1, vm1);
    __m128i vextx3 = _mm_unpackhi_epi8(vx1, vm1);

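    // Per lane: record whether x > input_zero_point in an all-ones mask, then
    // replace x with (input_zero_point - x), the negated, zero-point-centered
    // value that the precomputed multipliers are scaled for.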
    __m128i vmultiplier0 = _mm_cmpgt_epi16(vextx0, vinput_zero_point);
    vextx0 = _mm_sub_epi16(vinput_zero_point, vextx0);
    __m128i vmultiplier1 = _mm_cmpgt_epi16(vextx1, vinput_zero_point);
    vextx1 = _mm_sub_epi16(vinput_zero_point, vextx1);
    __m128i vmultiplier2 = _mm_cmpgt_epi16(vextx2, vinput_zero_point);
    vextx2 = _mm_sub_epi16(vinput_zero_point, vextx2);
    __m128i vmultiplier3 = _mm_cmpgt_epi16(vextx3, vinput_zero_point);
    vextx3 = _mm_sub_epi16(vinput_zero_point, vextx3);

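    // Branchless multiplier selection: (mask & multiplier_diff) ^
    // multiplier_base yields multiplier_base where x <= input_zero_point and
    // multiplier_base ^ multiplier_diff where x > input_zero_point.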
    vmultiplier0 = _mm_and_si128(vmultiplier0, vmultiplier_diff);
    vmultiplier1 = _mm_and_si128(vmultiplier1, vmultiplier_diff);
    vmultiplier2 = _mm_and_si128(vmultiplier2, vmultiplier_diff);
    vmultiplier3 = _mm_and_si128(vmultiplier3, vmultiplier_diff);

    vmultiplier0 = _mm_xor_si128(vmultiplier0, vmultiplier_base);
    vmultiplier1 = _mm_xor_si128(vmultiplier1, vmultiplier_base);
    vmultiplier2 = _mm_xor_si128(vmultiplier2, vmultiplier_base);
    vmultiplier3 = _mm_xor_si128(vmultiplier3, vmultiplier_base);

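    // 16x16-bit multiply, keeping the full 32-bit product as separate low
    // (_mm_mullo_epi16) and high (_mm_mulhi_epi16) halves.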
    __m128i vprodlo0 = _mm_mullo_epi16(vextx0, vmultiplier0);
    __m128i vprodlo1 = _mm_mullo_epi16(vextx1, vmultiplier1);
    __m128i vprodlo2 = _mm_mullo_epi16(vextx2, vmultiplier2);
    __m128i vprodlo3 = _mm_mullo_epi16(vextx3, vmultiplier3);

    vprodlo0 = _mm_srli_epi16(vprodlo0, 7);
    __m128i vprodhi0 = _mm_mulhi_epi16(vextx0, vmultiplier0);
    vprodlo1 = _mm_srli_epi16(vprodlo1, 7);
    __m128i vprodhi1 = _mm_mulhi_epi16(vextx1, vmultiplier1);
    vprodlo2 = _mm_srli_epi16(vprodlo2, 7);
    __m128i vprodhi2 = _mm_mulhi_epi16(vextx2, vmultiplier2);
    vprodlo3 = _mm_srli_epi16(vprodlo3, 7);
    __m128i vprodhi3 = _mm_mulhi_epi16(vextx3, vmultiplier3);

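    // Reassemble a rounding right shift of the product by 8:
    // _mm_avg_epu16(lo >> 7, 0) computes ((lo >> 7) + 1) >> 1, which equals
    // (lo + 128) >> 8, and hi << 8 realigns the high half; summing the two
    // yields ((x * multiplier + 128) >> 8) in each 16-bit lane.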
    vprodhi0 = _mm_slli_epi16(vprodhi0, 8);
    vprodlo0 = _mm_avg_epu16(vprodlo0, vzero);
    vprodhi1 = _mm_slli_epi16(vprodhi1, 8);
    vprodlo1 = _mm_avg_epu16(vprodlo1, vzero);
    vprodhi2 = _mm_slli_epi16(vprodhi2, 8);
    vprodlo2 = _mm_avg_epu16(vprodlo2, vzero);
    vprodhi3 = _mm_slli_epi16(vprodhi3, 8);
    vprodlo3 = _mm_avg_epu16(vprodlo3, vzero);

    __m128i vacc0 = _mm_add_epi16(vprodlo0, vprodhi0);
    __m128i vacc1 = _mm_add_epi16(vprodlo1, vprodhi1);
    __m128i vacc2 = _mm_add_epi16(vprodlo2, vprodhi2);
    __m128i vacc3 = _mm_add_epi16(vprodlo3, vprodhi3);

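    // Requantize: add the output zero point with signed saturation.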
    vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
    vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);
    vacc2 = _mm_adds_epi16(vacc2, voutput_zero_point);
    vacc3 = _mm_adds_epi16(vacc3, voutput_zero_point);

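    // Pack the four int16 vectors back to 32 int8 values with signed
    // saturation and store them.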
    const __m128i vy0 = _mm_packs_epi16(vacc0, vacc1);
    const __m128i vy1 = _mm_packs_epi16(vacc2, vacc3);

    _mm_storeu_si128((__m128i*) y, vy0);
    _mm_storeu_si128((__m128i*) (y + 16), vy1);
    y += 32;
  }
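  // Tail loop: the same computation, one 16-byte vector at a time.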
  for (; n >= 16 * sizeof(int8_t); n -= 16 * sizeof(int8_t)) {
    const __m128i vx = _mm_loadu_si128((const __m128i*) x);
    x += 16;

    const __m128i vm = _mm_cmpgt_epi8(_mm_setzero_si128(), vx);
    __m128i vextx0 = _mm_unpacklo_epi8(vx, vm);
    __m128i vextx1 = _mm_unpackhi_epi8(vx, vm);

    __m128i vmultiplier0 = _mm_cmpgt_epi16(vextx0, vinput_zero_point);
    __m128i vmultiplier1 = _mm_cmpgt_epi16(vextx1, vinput_zero_point);
    vextx0 = _mm_sub_epi16(vinput_zero_point, vextx0);
    vextx1 = _mm_sub_epi16(vinput_zero_point, vextx1);

    vmultiplier0 = _mm_and_si128(vmultiplier0, vmultiplier_diff);
    vmultiplier1 = _mm_and_si128(vmultiplier1, vmultiplier_diff);

    vmultiplier0 = _mm_xor_si128(vmultiplier0, vmultiplier_base);
    vmultiplier1 = _mm_xor_si128(vmultiplier1, vmultiplier_base);

    __m128i vprodlo0 = _mm_mullo_epi16(vextx0, vmultiplier0);
    __m128i vprodlo1 = _mm_mullo_epi16(vextx1, vmultiplier1);

    vprodlo0 = _mm_srli_epi16(vprodlo0, 7);
    vprodlo1 = _mm_srli_epi16(vprodlo1, 7);
    __m128i vprodhi0 = _mm_mulhi_epi16(vextx0, vmultiplier0);
    __m128i vprodhi1 = _mm_mulhi_epi16(vextx1, vmultiplier1);

    vprodhi0 = _mm_slli_epi16(vprodhi0, 8);
    vprodhi1 = _mm_slli_epi16(vprodhi1, 8);
    vprodlo0 = _mm_avg_epu16(vprodlo0, vzero);
    vprodlo1 = _mm_avg_epu16(vprodlo1, vzero);

    __m128i vacc0 = _mm_add_epi16(vprodlo0, vprodhi0);
    __m128i vacc1 = _mm_add_epi16(vprodlo1, vprodhi1);

    vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
    vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);

    const __m128i vy = _mm_packs_epi16(vacc0, vacc1);
    _mm_storeu_si128((__m128i*) y, vy);
    y += 16;
  }
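  // Remainder of 1..15 elements: load a full 16-byte vector (the kernel is
  // annotated XNN_OOB_READS, so reading past the end of x is intentional),
  // compute as usual, then store only the valid output bytes.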
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(int8_t));
    assert(n <= 15 * sizeof(int8_t));

    const __m128i vx = _mm_loadu_si128((const __m128i*) x);

    const __m128i vm = _mm_cmpgt_epi8(_mm_setzero_si128(), vx);
    __m128i vextx0 = _mm_unpacklo_epi8(vx, vm);
    __m128i vextx1 = _mm_unpackhi_epi8(vx, vm);

    __m128i vmultiplier0 = _mm_cmpgt_epi16(vextx0, vinput_zero_point);
    __m128i vmultiplier1 = _mm_cmpgt_epi16(vextx1, vinput_zero_point);
    vextx0 = _mm_sub_epi16(vinput_zero_point, vextx0);
    vextx1 = _mm_sub_epi16(vinput_zero_point, vextx1);

    vmultiplier0 = _mm_and_si128(vmultiplier0, vmultiplier_diff);
    vmultiplier1 = _mm_and_si128(vmultiplier1, vmultiplier_diff);

    vmultiplier0 = _mm_xor_si128(vmultiplier0, vmultiplier_base);
    vmultiplier1 = _mm_xor_si128(vmultiplier1, vmultiplier_base);

    __m128i vprodlo0 = _mm_mullo_epi16(vextx0, vmultiplier0);
    __m128i vprodlo1 = _mm_mullo_epi16(vextx1, vmultiplier1);

    vprodlo0 = _mm_srli_epi16(vprodlo0, 7);
    vprodlo1 = _mm_srli_epi16(vprodlo1, 7);
    __m128i vprodhi0 = _mm_mulhi_epi16(vextx0, vmultiplier0);
    __m128i vprodhi1 = _mm_mulhi_epi16(vextx1, vmultiplier1);

    vprodhi0 = _mm_slli_epi16(vprodhi0, 8);
    vprodhi1 = _mm_slli_epi16(vprodhi1, 8);
    vprodlo0 = _mm_avg_epu16(vprodlo0, vzero);
    vprodlo1 = _mm_avg_epu16(vprodlo1, vzero);

    __m128i vacc0 = _mm_add_epi16(vprodlo0, vprodhi0);
    __m128i vacc1 = _mm_add_epi16(vprodlo1, vprodhi1);

    vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
    vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);

    __m128i vy = _mm_packs_epi16(vacc0, vacc1);
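    // Partial stores driven by the bits of n: 8, then 4, then 2, then 1
    // byte(s), shifting the already-stored lanes out of vy after each store.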
    if (n & (8 * sizeof(int8_t))) {
      _mm_storel_epi64((__m128i*) y, vy);
      vy = _mm_unpackhi_epi64(vy, vy);
      y += 8;
    }
    if (n & (4 * sizeof(int8_t))) {
      unaligned_store_u32(y, (uint32_t) _mm_cvtsi128_si32(vy));
      vy = _mm_srli_epi64(vy, 32);
      y += 4;
    }
    uint32_t vy0 = (uint32_t) _mm_cvtsi128_si32(vy);
    if (n & (2 * sizeof(int8_t))) {
      unaligned_store_u16(y, (uint16_t) vy0);
      vy0 >>= 16;
      y += 2;
    }
    if (n & (1 * sizeof(int8_t))) {
      *y = (int8_t) vy0;
    }
  }
}
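
/*
 * Usage sketch (illustrative only, not part of the generated kernel). It
 * assumes the matching init routine xnn_init_qs8_lrelu_sse2_params() from
 * <xnnpack/microparams-init.h>, which packs the positive/negative scales and
 * zero points into the params->sse2 layout loaded above; verify the exact
 * signature in your tree before relying on it.
 *
 *   union xnn_qs8_lrelu_params params;
 *   xnn_init_qs8_lrelu_sse2_params(&params,
 *       1.0f,   // positive scale (input_scale / output_scale)
 *       0.01f,  // negative scale (positive scale times the negative slope)
 *       0,      // input zero point
 *       0);     // output zero point
 *
 *   int8_t x[40], y[40];
 *   // ... fill x ...
 *   // n is in bytes; 40 exercises the x32 main loop plus the remainder path.
 *   xnn_qs8_vlrelu_ukernel__sse2_x32(40 * sizeof(int8_t), x, y, &params);
 */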