// Auto-generated file. Do not edit!
//   Template: src/qs8-vcvt/sse2.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
#include <xnnpack/unaligned.h>


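// Requantizes n uint8_t elements: each output byte is computed as
// saturate_u8((x * multiplier + bias) >> 8), with the products and the
// arithmetic shift evaluated in 32-bit lanes, 32 elements per main-loop
// iteration.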
void xnn_qu8_vcvt_ukernel__sse2_x32(
    size_t n,
    const uint8_t* x,
    uint8_t* y,
    const union xnn_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(uint8_t) == 0);
  assert(x != NULL);
  assert(y != NULL);

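  // Load the precomputed fixed-point multiplier and bias from the params
  // struct, and a zero vector used to widen uint8_t lanes to uint16_t.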
  const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
  const __m128i vbias = _mm_load_si128((const __m128i*) params->sse2.bias);
  const __m128i vzero = _mm_setzero_si128();
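  // Main loop: convert 32 elements (two 16-byte vectors) per iteration.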
  for (; n >= 32 * sizeof(uint8_t); n -= 32 * sizeof(uint8_t)) {
    const __m128i vx0 = _mm_loadu_si128((const __m128i*) x);
    const __m128i vx1 = _mm_loadu_si128((const __m128i*) (x + 16));
    x += 32;

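    // Zero-extend the 8-bit inputs to 16 bits by interleaving with zeroes.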
    const __m128i vextx0 = _mm_unpacklo_epi8(vx0, vzero);
    const __m128i vextx1 = _mm_unpackhi_epi8(vx0, vzero);
    const __m128i vextx2 = _mm_unpacklo_epi8(vx1, vzero);
    const __m128i vextx3 = _mm_unpackhi_epi8(vx1, vzero);

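    // SSE2 has no 16x16->32-bit multiply, so compute the low and high 16-bit
    // halves of each unsigned product separately.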
    const __m128i vprodlo0 = _mm_mullo_epi16(vextx0, vmultiplier);
    const __m128i vprodhi0 = _mm_mulhi_epu16(vextx0, vmultiplier);
    const __m128i vprodlo1 = _mm_mullo_epi16(vextx1, vmultiplier);
    const __m128i vprodhi1 = _mm_mulhi_epu16(vextx1, vmultiplier);
    const __m128i vprodlo2 = _mm_mullo_epi16(vextx2, vmultiplier);
    const __m128i vprodhi2 = _mm_mulhi_epu16(vextx2, vmultiplier);
    const __m128i vprodlo3 = _mm_mullo_epi16(vextx3, vmultiplier);
    const __m128i vprodhi3 = _mm_mulhi_epu16(vextx3, vmultiplier);

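    // Interleave the low and high product halves to reassemble the full
    // 32-bit products in eight 4-lane accumulators.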
    __m128i vacc0 = _mm_unpacklo_epi16(vprodlo0, vprodhi0);
    __m128i vacc1 = _mm_unpackhi_epi16(vprodlo0, vprodhi0);
    __m128i vacc2 = _mm_unpacklo_epi16(vprodlo1, vprodhi1);
    __m128i vacc3 = _mm_unpackhi_epi16(vprodlo1, vprodhi1);
    __m128i vacc4 = _mm_unpacklo_epi16(vprodlo2, vprodhi2);
    __m128i vacc5 = _mm_unpackhi_epi16(vprodlo2, vprodhi2);
    __m128i vacc6 = _mm_unpacklo_epi16(vprodlo3, vprodhi3);
    __m128i vacc7 = _mm_unpackhi_epi16(vprodlo3, vprodhi3);

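    // Add the bias; the result may go negative, which is why the shift below
    // must be an arithmetic (sign-extending) one.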
    vacc0 = _mm_add_epi32(vacc0, vbias);
    vacc1 = _mm_add_epi32(vacc1, vbias);
    vacc2 = _mm_add_epi32(vacc2, vbias);
    vacc3 = _mm_add_epi32(vacc3, vbias);
    vacc4 = _mm_add_epi32(vacc4, vbias);
    vacc5 = _mm_add_epi32(vacc5, vbias);
    vacc6 = _mm_add_epi32(vacc6, vbias);
    vacc7 = _mm_add_epi32(vacc7, vbias);

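    // Arithmetic shift right by 8 discards the fixed-point fraction bits.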
    vacc0 = _mm_srai_epi32(vacc0, 8);
    vacc1 = _mm_srai_epi32(vacc1, 8);
    vacc2 = _mm_srai_epi32(vacc2, 8);
    vacc3 = _mm_srai_epi32(vacc3, 8);
    vacc4 = _mm_srai_epi32(vacc4, 8);
    vacc5 = _mm_srai_epi32(vacc5, 8);
    vacc6 = _mm_srai_epi32(vacc6, 8);
    vacc7 = _mm_srai_epi32(vacc7, 8);

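    // Pack the 32-bit accumulators down to 16 bits with signed saturation.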
    vacc0 = _mm_packs_epi32(vacc0, vacc1);
    vacc1 = _mm_packs_epi32(vacc2, vacc3);
    vacc2 = _mm_packs_epi32(vacc4, vacc5);
    vacc3 = _mm_packs_epi32(vacc6, vacc7);

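    // Pack the 16-bit values to uint8_t with unsigned saturation and store.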
    const __m128i vy0 = _mm_packus_epi16(vacc0, vacc1);
    const __m128i vy1 = _mm_packus_epi16(vacc2, vacc3);

    _mm_storeu_si128((__m128i*) y, vy0);
    _mm_storeu_si128((__m128i*) (y + 16), vy1);
    y += 32;
  }
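  // Same computation as above, for a single 16-byte vector at a time.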
  for (; n >= 16 * sizeof(uint8_t); n -= 16 * sizeof(uint8_t)) {
    const __m128i vx = _mm_loadu_si128((const __m128i*) x);
    x += 16;

    const __m128i vextx_lo = _mm_unpacklo_epi8(vx, vzero);
    const __m128i vextx_hi = _mm_unpackhi_epi8(vx, vzero);

    const __m128i vprodlo_lo = _mm_mullo_epi16(vextx_lo, vmultiplier);
    const __m128i vprodlo_hi = _mm_mullo_epi16(vextx_hi, vmultiplier);
    const __m128i vprodhi_lo = _mm_mulhi_epu16(vextx_lo, vmultiplier);
    const __m128i vprodhi_hi = _mm_mulhi_epu16(vextx_hi, vmultiplier);

    __m128i vacc_ll = _mm_unpacklo_epi16(vprodlo_lo, vprodhi_lo);
    __m128i vacc_lh = _mm_unpackhi_epi16(vprodlo_lo, vprodhi_lo);
    __m128i vacc_hl = _mm_unpacklo_epi16(vprodlo_hi, vprodhi_hi);
    __m128i vacc_hh = _mm_unpackhi_epi16(vprodlo_hi, vprodhi_hi);

    vacc_ll = _mm_add_epi32(vacc_ll, vbias);
    vacc_lh = _mm_add_epi32(vacc_lh, vbias);
    vacc_hl = _mm_add_epi32(vacc_hl, vbias);
    vacc_hh = _mm_add_epi32(vacc_hh, vbias);

    vacc_ll = _mm_srai_epi32(vacc_ll, 8);
    vacc_lh = _mm_srai_epi32(vacc_lh, 8);
    vacc_hl = _mm_srai_epi32(vacc_hl, 8);
    vacc_hh = _mm_srai_epi32(vacc_hh, 8);

    const __m128i vacc_lo = _mm_packs_epi32(vacc_ll, vacc_lh);
    const __m128i vacc_hi = _mm_packs_epi32(vacc_hl, vacc_hh);

    const __m128i vy = _mm_packus_epi16(vacc_lo, vacc_hi);
    _mm_storeu_si128((__m128i*) y, vy);
    y += 16;
  }
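  // Handle the remaining 1-15 elements.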
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(uint8_t));
    assert(n <= 15 * sizeof(uint8_t));

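    // A full 16-byte load may read up to 15 bytes past the end of x; the
    // XNN_OOB_READS annotation on this kernel marks that as intentional.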
    const __m128i vx = _mm_loadu_si128((const __m128i*) x);

    const __m128i vextx_lo = _mm_unpacklo_epi8(vx, vzero);
    const __m128i vextx_hi = _mm_unpackhi_epi8(vx, vzero);

    const __m128i vprodlo_lo = _mm_mullo_epi16(vextx_lo, vmultiplier);
    const __m128i vprodlo_hi = _mm_mullo_epi16(vextx_hi, vmultiplier);
    const __m128i vprodhi_lo = _mm_mulhi_epu16(vextx_lo, vmultiplier);
    const __m128i vprodhi_hi = _mm_mulhi_epu16(vextx_hi, vmultiplier);

    __m128i vacc_ll = _mm_unpacklo_epi16(vprodlo_lo, vprodhi_lo);
    __m128i vacc_lh = _mm_unpackhi_epi16(vprodlo_lo, vprodhi_lo);
    __m128i vacc_hl = _mm_unpacklo_epi16(vprodlo_hi, vprodhi_hi);
    __m128i vacc_hh = _mm_unpackhi_epi16(vprodlo_hi, vprodhi_hi);

    vacc_ll = _mm_add_epi32(vacc_ll, vbias);
    vacc_lh = _mm_add_epi32(vacc_lh, vbias);
    vacc_hl = _mm_add_epi32(vacc_hl, vbias);
    vacc_hh = _mm_add_epi32(vacc_hh, vbias);

    vacc_ll = _mm_srai_epi32(vacc_ll, 8);
    vacc_lh = _mm_srai_epi32(vacc_lh, 8);
    vacc_hl = _mm_srai_epi32(vacc_hl, 8);
    vacc_hh = _mm_srai_epi32(vacc_hh, 8);

    const __m128i vacc_lo = _mm_packs_epi32(vacc_ll, vacc_lh);
    const __m128i vacc_hi = _mm_packs_epi32(vacc_hl, vacc_hh);

    __m128i vy = _mm_packus_epi16(vacc_lo, vacc_hi);
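    // Store the tail in descending power-of-two chunks (8/4/2/1 bytes),
    // shifting consumed bytes out of vy as we go.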
    if (n & (8 * sizeof(uint8_t))) {
      _mm_storel_epi64((__m128i*) y, vy);
      vy = _mm_unpackhi_epi64(vy, vy);
      y += 8;
    }
    if (n & (4 * sizeof(uint8_t))) {
      unaligned_store_u32(y, (uint32_t) _mm_cvtsi128_si32(vy));
      vy = _mm_srli_epi64(vy, 32);
      y += 4;
    }
    uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
    if (n & (2 * sizeof(uint8_t))) {
      unaligned_store_u16(y, (uint16_t) vy_lo);
      vy_lo >>= 16;
      y += 2;
    }
    if (n & (1 * sizeof(uint8_t))) {
      *y = (uint8_t) vy_lo;
    }
  }
}