// Auto-generated file. Do not edit!
//   Template: src/qs8-vlrelu/ssse3.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <tmmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vlrelu.h>
#include <xnnpack/unaligned.h>


void xnn_qs8_vlrelu_ukernel__ssse3_x32(
    size_t n,
    const int8_t* x,
    int8_t* y,
    const union xnn_qs8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
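  // Per-element reference (a sketch for readability, not generated code;
  // izp/ozp, base/diff, sat_i8/sat_add_i16 are shorthand for the input/output
  // zero points, the sse2.multiplier_base/multiplier_diff words, and
  // saturating narrowing helpers -- none are names from this file; the
  // multipliers are assumed to fold in the (izp - x) negation and << 7 scale):
  //
  //   const int16_t vx16 = (int16_t) x[i];                        // sign-extend
  //   const int16_t vmul = (vx16 > izp) ? (base ^ diff) : base;   // slope select
  //   int16_t vacc = (int16_t) ((izp - vx16) << 7);
  //   vacc = (int16_t) (((int32_t) vacc * vmul + 0x4000) >> 15);  // PMULHRSW
  //   y[i] = sat_i8(sat_add_i16(vacc, ozp));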
  assert(n != 0);
  assert(n % sizeof(int8_t) == 0);
  assert(x != NULL);
  assert(y != NULL);

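  // All four parameters are prepared by the params initialization code as
  // pre-broadcast 16-bit vectors, so they are loaded once outside the loops.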
  const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->sse2.input_zero_point);
  const __m128i vmultiplier_diff = _mm_load_si128((const __m128i*) params->sse2.multiplier_diff);
  const __m128i vmultiplier_base = _mm_load_si128((const __m128i*) params->sse2.multiplier_base);
  const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
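  // Main loop: process 32 int8 elements per iteration.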
  for (; n >= 32 * sizeof(int8_t); n -= 32 * sizeof(int8_t)) {
    const __m128i vx0 = _mm_loadu_si128((const __m128i*) x);
    const __m128i vx1 = _mm_loadu_si128((const __m128i*) (x + 16));
    x += 32;

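    // Sign-extend the int8 inputs to int16: compare against zero to build a
    // sign mask, then interleave the input bytes with that mask.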
    const __m128i vm0 = _mm_cmpgt_epi8(_mm_setzero_si128(), vx0);
    __m128i vacc0 = _mm_unpacklo_epi8(vx0, vm0);
    __m128i vacc1 = _mm_unpackhi_epi8(vx0, vm0);
    const __m128i vm1 = _mm_cmpgt_epi8(_mm_setzero_si128(), vx1);
    __m128i vacc2 = _mm_unpacklo_epi8(vx1, vm1);
    __m128i vacc3 = _mm_unpackhi_epi8(vx1, vm1);

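    // Build a per-element selection mask (set where the input is above the
    // input zero point) and re-center each input as (zero_point - x); the
    // negation is compensated by the sign of the precomputed multipliers.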
    __m128i vmultiplier0 = _mm_cmpgt_epi16(vacc0, vinput_zero_point);
    vacc0 = _mm_sub_epi16(vinput_zero_point, vacc0);
    __m128i vmultiplier1 = _mm_cmpgt_epi16(vacc1, vinput_zero_point);
    vacc1 = _mm_sub_epi16(vinput_zero_point, vacc1);
    __m128i vmultiplier2 = _mm_cmpgt_epi16(vacc2, vinput_zero_point);
    vacc2 = _mm_sub_epi16(vinput_zero_point, vacc2);
    __m128i vmultiplier3 = _mm_cmpgt_epi16(vacc3, vinput_zero_point);
    vacc3 = _mm_sub_epi16(vinput_zero_point, vacc3);

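    // Shift the accumulators left by 7 for extra fractional precision in the
    // rounding multiply, and resolve the multipliers branchlessly:
    // (mask & diff) ^ base yields base where the mask is clear and
    // (base ^ diff) where it is set.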
    vmultiplier0 = _mm_and_si128(vmultiplier0, vmultiplier_diff);
    vacc0 = _mm_slli_epi16(vacc0, 7);
    vmultiplier0 = _mm_xor_si128(vmultiplier0, vmultiplier_base);
    vmultiplier1 = _mm_and_si128(vmultiplier1, vmultiplier_diff);
    vacc1 = _mm_slli_epi16(vacc1, 7);
    vmultiplier1 = _mm_xor_si128(vmultiplier1, vmultiplier_base);
    vmultiplier2 = _mm_and_si128(vmultiplier2, vmultiplier_diff);
    vacc2 = _mm_slli_epi16(vacc2, 7);
    vmultiplier2 = _mm_xor_si128(vmultiplier2, vmultiplier_base);
    vmultiplier3 = _mm_and_si128(vmultiplier3, vmultiplier_diff);
    vacc3 = _mm_slli_epi16(vacc3, 7);
    vmultiplier3 = _mm_xor_si128(vmultiplier3, vmultiplier_base);

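    // Fixed-point multiply with rounding: PMULHRSW computes
    // (a * b + 0x4000) >> 15 in each 16-bit lane.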
    vacc0 = _mm_mulhrs_epi16(vacc0, vmultiplier0);
    vacc1 = _mm_mulhrs_epi16(vacc1, vmultiplier1);
    vacc2 = _mm_mulhrs_epi16(vacc2, vmultiplier2);
    vacc3 = _mm_mulhrs_epi16(vacc3, vmultiplier3);

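    // Re-add the output zero point with signed saturation.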
    vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
    vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);
    vacc2 = _mm_adds_epi16(vacc2, voutput_zero_point);
    vacc3 = _mm_adds_epi16(vacc3, voutput_zero_point);

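    // Narrow the 16-bit results back to int8 with saturation and store.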
    const __m128i vy0 = _mm_packs_epi16(vacc0, vacc1);
    const __m128i vy1 = _mm_packs_epi16(vacc2, vacc3);

    _mm_storeu_si128((__m128i*) y, vy0);
    _mm_storeu_si128((__m128i*) (y + 16), vy1);
    y += 32;
  }
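  // Tail loop: same computation on one 16-element vector; runs at most once
  // after the 32-wide main loop.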
  for (; n >= 16 * sizeof(int8_t); n -= 16 * sizeof(int8_t)) {
    const __m128i vx = _mm_loadu_si128((const __m128i*) x);
    x += 16;

    const __m128i vm = _mm_cmpgt_epi8(_mm_setzero_si128(), vx);
    __m128i vacc_lo = _mm_unpacklo_epi8(vx, vm);
    __m128i vacc_hi = _mm_unpackhi_epi8(vx, vm);
    __m128i vmultiplier_lo = _mm_cmpgt_epi16(vacc_lo, vinput_zero_point);
    __m128i vmultiplier_hi = _mm_cmpgt_epi16(vacc_hi, vinput_zero_point);
    vacc_lo = _mm_sub_epi16(vinput_zero_point, vacc_lo);
    vacc_hi = _mm_sub_epi16(vinput_zero_point, vacc_hi);
    vmultiplier_lo = _mm_and_si128(vmultiplier_lo, vmultiplier_diff);
    vmultiplier_hi = _mm_and_si128(vmultiplier_hi, vmultiplier_diff);
    vacc_lo = _mm_slli_epi16(vacc_lo, 7);
    vacc_hi = _mm_slli_epi16(vacc_hi, 7);
    vmultiplier_lo = _mm_xor_si128(vmultiplier_lo, vmultiplier_base);
    vmultiplier_hi = _mm_xor_si128(vmultiplier_hi, vmultiplier_base);
    vacc_lo = _mm_mulhrs_epi16(vacc_lo, vmultiplier_lo);
    vacc_hi = _mm_mulhrs_epi16(vacc_hi, vmultiplier_hi);
    vacc_lo = _mm_adds_epi16(vacc_lo, voutput_zero_point);
    vacc_hi = _mm_adds_epi16(vacc_hi, voutput_zero_point);

    const __m128i vy = _mm_packs_epi16(vacc_lo, vacc_hi);
    _mm_storeu_si128((__m128i*) y, vy);
    y += 16;
  }
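  // Remainder of 1..15 elements. The full 16-byte load below may read past
  // the end of x, which is why the kernel is annotated XNN_OOB_READS; only
  // the n valid results are stored.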
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(int8_t));
    assert(n <= 15 * sizeof(int8_t));

    const __m128i vx = _mm_loadu_si128((const __m128i*) x);

    const __m128i vm = _mm_cmpgt_epi8(_mm_setzero_si128(), vx);
    __m128i vacc_lo = _mm_unpacklo_epi8(vx, vm);
    __m128i vacc_hi = _mm_unpackhi_epi8(vx, vm);
    __m128i vmultiplier_lo = _mm_cmpgt_epi16(vacc_lo, vinput_zero_point);
    __m128i vmultiplier_hi = _mm_cmpgt_epi16(vacc_hi, vinput_zero_point);
    vacc_lo = _mm_sub_epi16(vinput_zero_point, vacc_lo);
    vacc_hi = _mm_sub_epi16(vinput_zero_point, vacc_hi);
    vmultiplier_lo = _mm_and_si128(vmultiplier_lo, vmultiplier_diff);
    vmultiplier_hi = _mm_and_si128(vmultiplier_hi, vmultiplier_diff);
    vacc_lo = _mm_slli_epi16(vacc_lo, 7);
    vacc_hi = _mm_slli_epi16(vacc_hi, 7);
    vmultiplier_lo = _mm_xor_si128(vmultiplier_lo, vmultiplier_base);
    vmultiplier_hi = _mm_xor_si128(vmultiplier_hi, vmultiplier_base);
    vacc_lo = _mm_mulhrs_epi16(vacc_lo, vmultiplier_lo);
    vacc_hi = _mm_mulhrs_epi16(vacc_hi, vmultiplier_hi);
    vacc_lo = _mm_adds_epi16(vacc_lo, voutput_zero_point);
    vacc_hi = _mm_adds_epi16(vacc_hi, voutput_zero_point);

    __m128i vy = _mm_packs_epi16(vacc_lo, vacc_hi);
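    // Store the low n bytes of vy, peeling off 8, 4, 2, and 1 bytes in turn
    // according to the bits of n, and shifting the stored bytes out of vy.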
    if (n & (8 * sizeof(int8_t))) {
      _mm_storel_epi64((__m128i*) y, vy);
      vy = _mm_unpackhi_epi64(vy, vy);
      y += 8;
    }
    if (n & (4 * sizeof(int8_t))) {
      unaligned_store_u32(y, (uint32_t) _mm_cvtsi128_si32(vy));
      vy = _mm_srli_epi64(vy, 32);
      y += 4;
    }
    uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
    if (n & (2 * sizeof(int8_t))) {
      unaligned_store_u16(y, (uint16_t) vy_lo);
      vy_lo >>= 16;
      y += 2;
    }
    if (n & (1 * sizeof(int8_t))) {
      *y = (int8_t) vy_lo;
    }
  }
}