// Auto-generated file. Do not edit!
//   Template: src/qs8-vlrelu/sse2.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vlrelu.h>
#include <xnnpack/unaligned.h>

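// Quantized (QU8) LeakyReLU microkernel: each input byte is compared against
// the input zero point, scaled by one of two fixed-point multipliers (one for
// lanes above the zero point, one for lanes at or below it), and re-centered
// on the output zero point. The x32 suffix means the main loop processes
// 32 elements per iteration, using only SSE2 instructions.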
void xnn_qu8_vlrelu_ukernel__sse2_x32(
    size_t n,
    const uint8_t* x,
    uint8_t* y,
    const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(uint8_t) == 0);
  assert(x != NULL);
  assert(y != NULL);

  const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->sse2.input_zero_point);
  const __m128i vmultiplier_diff = _mm_load_si128((const __m128i*) params->sse2.multiplier_diff);
  const __m128i vmultiplier_base = _mm_load_si128((const __m128i*) params->sse2.multiplier_base);
  const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
  const __m128i vzero = _mm_setzero_si128();
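  // Main loop: 32 bytes per iteration. The 8-bit inputs are zero-extended
  // into four vectors of 16-bit lanes so that 16x16-bit multiplies can be
  // used for the fixed-point scaling below.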
  for (; n >= 32 * sizeof(uint8_t); n -= 32 * sizeof(uint8_t)) {
    const __m128i vx0 = _mm_loadu_si128((const __m128i*) x);
    const __m128i vx1 = _mm_loadu_si128((const __m128i*) (x + 16));
    x += 32;

    __m128i vextx0 = _mm_unpacklo_epi8(vx0, vzero);
    __m128i vextx1 = _mm_unpackhi_epi8(vx0, vzero);
    __m128i vextx2 = _mm_unpacklo_epi8(vx1, vzero);
    __m128i vextx3 = _mm_unpackhi_epi8(vx1, vzero);

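    // Branchless per-lane multiplier selection: the comparison yields an
    // all-ones mask for lanes above the input zero point, so
    // (mask & vmultiplier_diff) ^ vmultiplier_base evaluates to
    // vmultiplier_base for lanes at or below the zero point and to
    // vmultiplier_base ^ vmultiplier_diff for lanes above it. The input is
    // negated to (zero_point - x); the matching params initializer is
    // presumably expected to store correspondingly negated multipliers.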
    __m128i vmultiplier0 = _mm_cmpgt_epi16(vextx0, vinput_zero_point);
    vextx0 = _mm_sub_epi16(vinput_zero_point, vextx0);
    __m128i vmultiplier1 = _mm_cmpgt_epi16(vextx1, vinput_zero_point);
    vextx1 = _mm_sub_epi16(vinput_zero_point, vextx1);
    __m128i vmultiplier2 = _mm_cmpgt_epi16(vextx2, vinput_zero_point);
    vextx2 = _mm_sub_epi16(vinput_zero_point, vextx2);
    __m128i vmultiplier3 = _mm_cmpgt_epi16(vextx3, vinput_zero_point);
    vextx3 = _mm_sub_epi16(vinput_zero_point, vextx3);

    vmultiplier0 = _mm_and_si128(vmultiplier0, vmultiplier_diff);
    vmultiplier1 = _mm_and_si128(vmultiplier1, vmultiplier_diff);
    vmultiplier2 = _mm_and_si128(vmultiplier2, vmultiplier_diff);
    vmultiplier3 = _mm_and_si128(vmultiplier3, vmultiplier_diff);

    vmultiplier0 = _mm_xor_si128(vmultiplier0, vmultiplier_base);
    vmultiplier1 = _mm_xor_si128(vmultiplier1, vmultiplier_base);
    vmultiplier2 = _mm_xor_si128(vmultiplier2, vmultiplier_base);
    vmultiplier3 = _mm_xor_si128(vmultiplier3, vmultiplier_base);

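    // Fixed-point multiply: _mm_mullo_epi16/_mm_mulhi_epi16 produce the low
    // and high 16-bit halves of each 32-bit product. Shifting the low half
    // right by 7 and averaging with zero (avg computes (a + 1) >> 1) gives
    // the low half divided by 256 with rounding; adding the high half
    // shifted left by 8 then reconstructs the full product arithmetically
    // shifted right by 8 with round-half-up (assuming the result fits in
    // 16 bits), i.e. a Q8 fixed-point multiplication.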
    __m128i vprodlo0 = _mm_mullo_epi16(vextx0, vmultiplier0);
    __m128i vprodlo1 = _mm_mullo_epi16(vextx1, vmultiplier1);
    __m128i vprodlo2 = _mm_mullo_epi16(vextx2, vmultiplier2);
    __m128i vprodlo3 = _mm_mullo_epi16(vextx3, vmultiplier3);

    vprodlo0 = _mm_srli_epi16(vprodlo0, 7);
    __m128i vprodhi0 = _mm_mulhi_epi16(vextx0, vmultiplier0);
    vprodlo1 = _mm_srli_epi16(vprodlo1, 7);
    __m128i vprodhi1 = _mm_mulhi_epi16(vextx1, vmultiplier1);
    vprodlo2 = _mm_srli_epi16(vprodlo2, 7);
    __m128i vprodhi2 = _mm_mulhi_epi16(vextx2, vmultiplier2);
    vprodlo3 = _mm_srli_epi16(vprodlo3, 7);
    __m128i vprodhi3 = _mm_mulhi_epi16(vextx3, vmultiplier3);

    vprodhi0 = _mm_slli_epi16(vprodhi0, 8);
    vprodlo0 = _mm_avg_epu16(vprodlo0, vzero);
    vprodhi1 = _mm_slli_epi16(vprodhi1, 8);
    vprodlo1 = _mm_avg_epu16(vprodlo1, vzero);
    vprodhi2 = _mm_slli_epi16(vprodhi2, 8);
    vprodlo2 = _mm_avg_epu16(vprodlo2, vzero);
    vprodhi3 = _mm_slli_epi16(vprodhi3, 8);
    vprodlo3 = _mm_avg_epu16(vprodlo3, vzero);

    __m128i vacc0 = _mm_add_epi16(vprodlo0, vprodhi0);
    __m128i vacc1 = _mm_add_epi16(vprodlo1, vprodhi1);
    __m128i vacc2 = _mm_add_epi16(vprodlo2, vprodhi2);
    __m128i vacc3 = _mm_add_epi16(vprodlo3, vprodhi3);

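    // Requantize: add the output zero point with signed saturation, then
    // pack the 16-bit results back to 8 bits with unsigned saturation.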
    vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
    vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);
    vacc2 = _mm_adds_epi16(vacc2, voutput_zero_point);
    vacc3 = _mm_adds_epi16(vacc3, voutput_zero_point);

    const __m128i vy0 = _mm_packus_epi16(vacc0, vacc1);
    const __m128i vy1 = _mm_packus_epi16(vacc2, vacc3);

    _mm_storeu_si128((__m128i*) y, vy0);
    _mm_storeu_si128((__m128i*) (y + 16), vy1);
    y += 32;
  }
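  // Same computation, one 16-byte vector at a time.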
  for (; n >= 16 * sizeof(uint8_t); n -= 16 * sizeof(uint8_t)) {
    const __m128i vx = _mm_loadu_si128((const __m128i*) x);
    x += 16;

    __m128i vextx0 = _mm_unpacklo_epi8(vx, vzero);
    __m128i vextx1 = _mm_unpackhi_epi8(vx, vzero);

    __m128i vmultiplier0 = _mm_cmpgt_epi16(vextx0, vinput_zero_point);
    __m128i vmultiplier1 = _mm_cmpgt_epi16(vextx1, vinput_zero_point);
    vextx0 = _mm_sub_epi16(vinput_zero_point, vextx0);
    vextx1 = _mm_sub_epi16(vinput_zero_point, vextx1);

    vmultiplier0 = _mm_and_si128(vmultiplier0, vmultiplier_diff);
    vmultiplier1 = _mm_and_si128(vmultiplier1, vmultiplier_diff);

    vmultiplier0 = _mm_xor_si128(vmultiplier0, vmultiplier_base);
    vmultiplier1 = _mm_xor_si128(vmultiplier1, vmultiplier_base);

    __m128i vprodlo0 = _mm_mullo_epi16(vextx0, vmultiplier0);
    __m128i vprodlo1 = _mm_mullo_epi16(vextx1, vmultiplier1);

    vprodlo0 = _mm_srli_epi16(vprodlo0, 7);
    vprodlo1 = _mm_srli_epi16(vprodlo1, 7);
    __m128i vprodhi0 = _mm_mulhi_epi16(vextx0, vmultiplier0);
    __m128i vprodhi1 = _mm_mulhi_epi16(vextx1, vmultiplier1);

    vprodhi0 = _mm_slli_epi16(vprodhi0, 8);
    vprodhi1 = _mm_slli_epi16(vprodhi1, 8);
    vprodlo0 = _mm_avg_epu16(vprodlo0, vzero);
    vprodlo1 = _mm_avg_epu16(vprodlo1, vzero);

    __m128i vacc0 = _mm_add_epi16(vprodlo0, vprodhi0);
    __m128i vacc1 = _mm_add_epi16(vprodlo1, vprodhi1);

    vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
    vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);

    const __m128i vy = _mm_packus_epi16(vacc0, vacc1);
    _mm_storeu_si128((__m128i*) y, vy);
    y += 16;
  }
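  // Tail: 1 to 15 remaining bytes. A full 16-byte vector is still loaded
  // (reading past the end of x, which the XNN_OOB_READS annotation on the
  // function declares intentional) and processed as above; only the valid
  // bytes are stored.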
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(uint8_t));
    assert(n <= 15 * sizeof(uint8_t));

    const __m128i vx = _mm_loadu_si128((const __m128i*) x);

    __m128i vextx0 = _mm_unpacklo_epi8(vx, vzero);
    __m128i vextx1 = _mm_unpackhi_epi8(vx, vzero);

    __m128i vmultiplier0 = _mm_cmpgt_epi16(vextx0, vinput_zero_point);
    __m128i vmultiplier1 = _mm_cmpgt_epi16(vextx1, vinput_zero_point);
    vextx0 = _mm_sub_epi16(vinput_zero_point, vextx0);
    vextx1 = _mm_sub_epi16(vinput_zero_point, vextx1);

    vmultiplier0 = _mm_and_si128(vmultiplier0, vmultiplier_diff);
    vmultiplier1 = _mm_and_si128(vmultiplier1, vmultiplier_diff);

    vmultiplier0 = _mm_xor_si128(vmultiplier0, vmultiplier_base);
    vmultiplier1 = _mm_xor_si128(vmultiplier1, vmultiplier_base);

    __m128i vprodlo0 = _mm_mullo_epi16(vextx0, vmultiplier0);
    __m128i vprodlo1 = _mm_mullo_epi16(vextx1, vmultiplier1);

    vprodlo0 = _mm_srli_epi16(vprodlo0, 7);
    vprodlo1 = _mm_srli_epi16(vprodlo1, 7);
    __m128i vprodhi0 = _mm_mulhi_epi16(vextx0, vmultiplier0);
    __m128i vprodhi1 = _mm_mulhi_epi16(vextx1, vmultiplier1);

    vprodhi0 = _mm_slli_epi16(vprodhi0, 8);
    vprodhi1 = _mm_slli_epi16(vprodhi1, 8);
    vprodlo0 = _mm_avg_epu16(vprodlo0, vzero);
    vprodlo1 = _mm_avg_epu16(vprodlo1, vzero);

    __m128i vacc0 = _mm_add_epi16(vprodlo0, vprodhi0);
    __m128i vacc1 = _mm_add_epi16(vprodlo1, vprodhi1);

    vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
    vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);

    __m128i vy = _mm_packus_epi16(vacc0, vacc1);
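    // Store the remaining n (1..15) bytes by decomposing n into powers of
    // two: an 8-byte store, then a 4-byte store, a 2-byte store, and finally
    // a single byte, shifting consumed lanes out of vy after each store.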
    if (n & (8 * sizeof(uint8_t))) {
      _mm_storel_epi64((__m128i*) y, vy);
      vy = _mm_unpackhi_epi64(vy, vy);
      y += 8;
    }
    if (n & (4 * sizeof(uint8_t))) {
      unaligned_store_u32(y, (uint32_t) _mm_cvtsi128_si32(vy));
      vy = _mm_srli_epi64(vy, 32);
      y += 4;
    }
    uint32_t vy0 = (uint32_t) _mm_cvtsi128_si32(vy);
    if (n & (2 * sizeof(uint8_t))) {
      unaligned_store_u16(y, (uint16_t) vy0);
      vy0 >>= 16;
      y += 2;
    }
    if (n & (1 * sizeof(uint8_t))) {
      *y = (uint8_t) vy0;
    }
  }
}