xref: /aosp_15_r20/external/XNNPACK/src/qu8-vlrelu/gen/vlrelu-ssse3-x32.c (revision 4bdc94577ba0e567308109d787f7fec7b531ce36)
// Auto-generated file. Do not edit!
//   Template: src/qs8-vlrelu/ssse3.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <tmmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vlrelu.h>
#include <xnnpack/unaligned.h>


void xnn_qu8_vlrelu_ukernel__ssse3_x32(
    size_t n,
    const uint8_t* x,
    uint8_t* y,
    const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(uint8_t) == 0);
  assert(x != NULL);
  assert(y != NULL);

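  // 'multiplier_base' and 'multiplier_diff' encode the two per-side multipliers:
  // with the compare mask computed below, (mask & diff) ^ base evaluates to
  // 'base' where the mask is zero and to 'base ^ diff' where it is all-ones,
  // selecting the multiplier per lane without branches.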
  const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->sse2.input_zero_point);
  const __m128i vmultiplier_diff = _mm_load_si128((const __m128i*) params->sse2.multiplier_diff);
  const __m128i vmultiplier_base = _mm_load_si128((const __m128i*) params->sse2.multiplier_base);
  const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
  const __m128i vzero = _mm_setzero_si128();
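  // Main loop: 32 bytes per iteration, handled as four vectors of eight
  // 16-bit lanes each.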
  for (; n >= 32 * sizeof(uint8_t); n -= 32 * sizeof(uint8_t)) {
    const __m128i vx0 = _mm_loadu_si128((const __m128i*) x);
    const __m128i vx1 = _mm_loadu_si128((const __m128i*) (x + 16));
    x += 32;

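    // Zero-extend the 32 uint8 inputs to int16 by interleaving with zero.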
    __m128i vacc0 = _mm_unpacklo_epi8(vx0, vzero);
    __m128i vacc1 = _mm_unpackhi_epi8(vx0, vzero);
    __m128i vacc2 = _mm_unpacklo_epi8(vx1, vzero);
    __m128i vacc3 = _mm_unpackhi_epi8(vx1, vzero);

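    // Per lane: build the selection mask (all-ones where x > input_zero_point)
    // and replace the input with its negated offset, input_zero_point - x.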
    __m128i vmultiplier0 = _mm_cmpgt_epi16(vacc0, vinput_zero_point);
    vacc0 = _mm_sub_epi16(vinput_zero_point, vacc0);
    __m128i vmultiplier1 = _mm_cmpgt_epi16(vacc1, vinput_zero_point);
    vacc1 = _mm_sub_epi16(vinput_zero_point, vacc1);
    __m128i vmultiplier2 = _mm_cmpgt_epi16(vacc2, vinput_zero_point);
    vacc2 = _mm_sub_epi16(vinput_zero_point, vacc2);
    __m128i vmultiplier3 = _mm_cmpgt_epi16(vacc3, vinput_zero_point);
    vacc3 = _mm_sub_epi16(vinput_zero_point, vacc3);

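    // Select the per-lane multiplier branchlessly ((mask & diff) ^ base) and
    // pre-shift the accumulator left by 7, so the Q15 rounding multiply below
    // nets out to a rounded scaling of (input_zero_point - x) by multiplier/256.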
    vmultiplier0 = _mm_and_si128(vmultiplier0, vmultiplier_diff);
    vacc0 = _mm_slli_epi16(vacc0, 7);
    vmultiplier0 = _mm_xor_si128(vmultiplier0, vmultiplier_base);
    vmultiplier1 = _mm_and_si128(vmultiplier1, vmultiplier_diff);
    vacc1 = _mm_slli_epi16(vacc1, 7);
    vmultiplier1 = _mm_xor_si128(vmultiplier1, vmultiplier_base);
    vmultiplier2 = _mm_and_si128(vmultiplier2, vmultiplier_diff);
    vacc2 = _mm_slli_epi16(vacc2, 7);
    vmultiplier2 = _mm_xor_si128(vmultiplier2, vmultiplier_base);
    vmultiplier3 = _mm_and_si128(vmultiplier3, vmultiplier_diff);
    vacc3 = _mm_slli_epi16(vacc3, 7);
    vmultiplier3 = _mm_xor_si128(vmultiplier3, vmultiplier_base);

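    // _mm_mulhrs_epi16 (the SSSE3 instruction this kernel is named for)
    // computes (a * b + 0x4000) >> 15 per lane: a fixed-point multiply with
    // rounding.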
    vacc0 = _mm_mulhrs_epi16(vacc0, vmultiplier0);
    vacc1 = _mm_mulhrs_epi16(vacc1, vmultiplier1);
    vacc2 = _mm_mulhrs_epi16(vacc2, vmultiplier2);
    vacc3 = _mm_mulhrs_epi16(vacc3, vmultiplier3);

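    // Re-add the output zero point with signed saturation.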
    vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
    vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);
    vacc2 = _mm_adds_epi16(vacc2, voutput_zero_point);
    vacc3 = _mm_adds_epi16(vacc3, voutput_zero_point);

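    // Pack the int16 results back to uint8 with unsigned saturation and store
    // all 32 bytes.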
    const __m128i vy0 = _mm_packus_epi16(vacc0, vacc1);
    const __m128i vy1 = _mm_packus_epi16(vacc2, vacc3);

    _mm_storeu_si128((__m128i*) y, vy0);
    _mm_storeu_si128((__m128i*) (y + 16), vy1);
    y += 32;
  }
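  // Same computation, for one 16-byte vector at a time.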
  for (; n >= 16 * sizeof(uint8_t); n -= 16 * sizeof(uint8_t)) {
    const __m128i vx = _mm_loadu_si128((const __m128i*) x);
    x += 16;

    __m128i vacc_lo = _mm_unpacklo_epi8(vx, vzero);
    __m128i vacc_hi = _mm_unpackhi_epi8(vx, vzero);
    __m128i vmultiplier_lo = _mm_cmpgt_epi16(vacc_lo, vinput_zero_point);
    __m128i vmultiplier_hi = _mm_cmpgt_epi16(vacc_hi, vinput_zero_point);
    vacc_lo = _mm_sub_epi16(vinput_zero_point, vacc_lo);
    vacc_hi = _mm_sub_epi16(vinput_zero_point, vacc_hi);
    vmultiplier_lo = _mm_and_si128(vmultiplier_lo, vmultiplier_diff);
    vmultiplier_hi = _mm_and_si128(vmultiplier_hi, vmultiplier_diff);
    vacc_lo = _mm_slli_epi16(vacc_lo, 7);
    vacc_hi = _mm_slli_epi16(vacc_hi, 7);
    vmultiplier_lo = _mm_xor_si128(vmultiplier_lo, vmultiplier_base);
    vmultiplier_hi = _mm_xor_si128(vmultiplier_hi, vmultiplier_base);
    vacc_lo = _mm_mulhrs_epi16(vacc_lo, vmultiplier_lo);
    vacc_hi = _mm_mulhrs_epi16(vacc_hi, vmultiplier_hi);
    vacc_lo = _mm_adds_epi16(vacc_lo, voutput_zero_point);
    vacc_hi = _mm_adds_epi16(vacc_hi, voutput_zero_point);

    const __m128i vy = _mm_packus_epi16(vacc_lo, vacc_hi);
    _mm_storeu_si128((__m128i*) y, vy);
    y += 16;
  }
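  // Tail: 1 to 15 remaining bytes. The full 16-byte load deliberately reads
  // past the end of x; the XNN_OOB_READS annotation on this function marks
  // those out-of-bounds reads as intended. The stores below still write
  // exactly n bytes.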
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(uint8_t));
    assert(n <= 15 * sizeof(uint8_t));

    const __m128i vx = _mm_loadu_si128((const __m128i*) x);

    __m128i vacc_lo = _mm_unpacklo_epi8(vx, vzero);
    __m128i vacc_hi = _mm_unpackhi_epi8(vx, vzero);
    __m128i vmultiplier_lo = _mm_cmpgt_epi16(vacc_lo, vinput_zero_point);
    __m128i vmultiplier_hi = _mm_cmpgt_epi16(vacc_hi, vinput_zero_point);
    vacc_lo = _mm_sub_epi16(vinput_zero_point, vacc_lo);
    vacc_hi = _mm_sub_epi16(vinput_zero_point, vacc_hi);
    vmultiplier_lo = _mm_and_si128(vmultiplier_lo, vmultiplier_diff);
    vmultiplier_hi = _mm_and_si128(vmultiplier_hi, vmultiplier_diff);
    vacc_lo = _mm_slli_epi16(vacc_lo, 7);
    vacc_hi = _mm_slli_epi16(vacc_hi, 7);
    vmultiplier_lo = _mm_xor_si128(vmultiplier_lo, vmultiplier_base);
    vmultiplier_hi = _mm_xor_si128(vmultiplier_hi, vmultiplier_base);
    vacc_lo = _mm_mulhrs_epi16(vacc_lo, vmultiplier_lo);
    vacc_hi = _mm_mulhrs_epi16(vacc_hi, vmultiplier_hi);
    vacc_lo = _mm_adds_epi16(vacc_lo, voutput_zero_point);
    vacc_hi = _mm_adds_epi16(vacc_hi, voutput_zero_point);

    __m128i vy = _mm_packus_epi16(vacc_lo, vacc_hi);
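    // Store 8, 4, 2, and/or 1 byte(s) according to the bits of n, shifting
    // already-stored bytes out of the vector after each step.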
    if (n & (8 * sizeof(uint8_t))) {
      _mm_storel_epi64((__m128i*) y, vy);
      vy = _mm_unpackhi_epi64(vy, vy);
      y += 8;
    }
    if (n & (4 * sizeof(uint8_t))) {
      unaligned_store_u32(y, (uint32_t) _mm_cvtsi128_si32(vy));
      vy = _mm_srli_epi64(vy, 32);
      y += 4;
    }
    uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
    if (n & (2 * sizeof(uint8_t))) {
      unaligned_store_u16(y, (uint16_t) vy_lo);
      vy_lo >>= 16;
      y += 2;
    }
    if (n & (1 * sizeof(uint8_t))) {
      *y = (uint8_t) vy_lo;
    }
  }
}