// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
#include <xnnpack/unaligned.h>

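// Per element, the SIMD code below computes
//   y = saturate_to_u8((bias + (int32_t) x * multiplier) >> 8)
// with multiplier and bias taken from params->sse2. The scalar sketch below is
// an editor's illustration of that arithmetic, not part of the generated
// kernel; the fixed-point interpretation (8 fractional bits) is inferred from
// the _mm_srai_epi32(..., 8) in this file.
static inline uint8_t qu8_vcvt_scalar_ref(uint8_t x, uint16_t multiplier, int32_t bias) {
  int32_t acc = bias + (int32_t) ((uint32_t) x * multiplier);  // full 32-bit product, plus bias
  acc >>= 8;                     // arithmetic shift, mirrors _mm_srai_epi32(vacc, 8)
  if (acc < 0) acc = 0;          // clamping, mirrors _mm_packs_epi32 + _mm_packus_epi16
  if (acc > 255) acc = 255;
  return (uint8_t) acc;
}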

void xnn_qu8_vcvt_ukernel__sse2_x16(
    size_t n,
    const uint8_t* x,
    uint8_t* y,
    const union xnn_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(uint8_t) == 0);
  assert(x != NULL);
  assert(y != NULL);

  const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
  const __m128i vbias = _mm_load_si128((const __m128i*) params->sse2.bias);
  const __m128i vzero = _mm_setzero_si128();
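  // vmultiplier replicates a 16-bit fixed-point scale across all eight 16-bit
  // lanes; vbias replicates a 32-bit bias that the params initialization
  // pre-folds with the zero-point correction (and, presumably, rounding).
  // Main loop: convert 16 uint8 elements per iteration.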
  for (; n >= 16 * sizeof(uint8_t); n -= 16 * sizeof(uint8_t)) {
    const __m128i vx0 = _mm_loadu_si128((const __m128i*) x);
    x += 16;

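    // Zero-extend the 16 uint8 inputs to uint16 by interleaving with zero.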
    const __m128i vextx0 = _mm_unpacklo_epi8(vx0, vzero);
    const __m128i vextx1 = _mm_unpackhi_epi8(vx0, vzero);

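    // Unsigned 16x16-bit multiply: _mm_mullo_epi16/_mm_mulhi_epu16 produce the
    // low and high 16-bit halves of each 32-bit product.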
    const __m128i vprodlo0 = _mm_mullo_epi16(vextx0, vmultiplier);
    const __m128i vprodhi0 = _mm_mulhi_epu16(vextx0, vmultiplier);
    const __m128i vprodlo1 = _mm_mullo_epi16(vextx1, vmultiplier);
    const __m128i vprodhi1 = _mm_mulhi_epu16(vextx1, vmultiplier);

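    // Interleave the low/high halves to reassemble the full 32-bit products.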
    __m128i vacc0 = _mm_unpacklo_epi16(vprodlo0, vprodhi0);
    __m128i vacc1 = _mm_unpackhi_epi16(vprodlo0, vprodhi0);
    __m128i vacc2 = _mm_unpacklo_epi16(vprodlo1, vprodhi1);
    __m128i vacc3 = _mm_unpackhi_epi16(vprodlo1, vprodhi1);

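    // Add the precomputed bias to each 32-bit product.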
    vacc0 = _mm_add_epi32(vacc0, vbias);
    vacc1 = _mm_add_epi32(vacc1, vbias);
    vacc2 = _mm_add_epi32(vacc2, vbias);
    vacc3 = _mm_add_epi32(vacc3, vbias);

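    // Arithmetic right shift by 8 discards the fixed-point fraction bits.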
    vacc0 = _mm_srai_epi32(vacc0, 8);
    vacc1 = _mm_srai_epi32(vacc1, 8);
    vacc2 = _mm_srai_epi32(vacc2, 8);
    vacc3 = _mm_srai_epi32(vacc3, 8);

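    // Narrow 32->16 with signed saturation, then 16->8 with unsigned
    // saturation; together this clamps each result to [0, 255].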
    vacc0 = _mm_packs_epi32(vacc0, vacc1);
    vacc1 = _mm_packs_epi32(vacc2, vacc3);

    const __m128i vy0 = _mm_packus_epi16(vacc0, vacc1);

    _mm_storeu_si128((__m128i*) y, vy0);
    y += 16;
  }
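  // The shared template appears to also emit this generic 16-element loop; for
  // the x16 tile its condition can never hold after the loop above, so the
  // compiler eliminates it as dead code.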
  for (; n >= 16 * sizeof(uint8_t); n -= 16 * sizeof(uint8_t)) {
    const __m128i vx = _mm_loadu_si128((const __m128i*) x);
    x += 16;

    const __m128i vextx_lo = _mm_unpacklo_epi8(vx, vzero);
    const __m128i vextx_hi = _mm_unpackhi_epi8(vx, vzero);

    const __m128i vprodlo_lo = _mm_mullo_epi16(vextx_lo, vmultiplier);
    const __m128i vprodlo_hi = _mm_mullo_epi16(vextx_hi, vmultiplier);
    const __m128i vprodhi_lo = _mm_mulhi_epu16(vextx_lo, vmultiplier);
    const __m128i vprodhi_hi = _mm_mulhi_epu16(vextx_hi, vmultiplier);

    __m128i vacc_ll = _mm_unpacklo_epi16(vprodlo_lo, vprodhi_lo);
    __m128i vacc_lh = _mm_unpackhi_epi16(vprodlo_lo, vprodhi_lo);
    __m128i vacc_hl = _mm_unpacklo_epi16(vprodlo_hi, vprodhi_hi);
    __m128i vacc_hh = _mm_unpackhi_epi16(vprodlo_hi, vprodhi_hi);

    vacc_ll = _mm_add_epi32(vacc_ll, vbias);
    vacc_lh = _mm_add_epi32(vacc_lh, vbias);
    vacc_hl = _mm_add_epi32(vacc_hl, vbias);
    vacc_hh = _mm_add_epi32(vacc_hh, vbias);

    vacc_ll = _mm_srai_epi32(vacc_ll, 8);
    vacc_lh = _mm_srai_epi32(vacc_lh, 8);
    vacc_hl = _mm_srai_epi32(vacc_hl, 8);
    vacc_hh = _mm_srai_epi32(vacc_hh, 8);

    const __m128i vacc_lo = _mm_packs_epi32(vacc_ll, vacc_lh);
    const __m128i vacc_hi = _mm_packs_epi32(vacc_hl, vacc_hh);

    const __m128i vy = _mm_packus_epi16(vacc_lo, vacc_hi);
    _mm_storeu_si128((__m128i*) y, vy);
    y += 16;
  }
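  // Remainder of 1..15 elements: read a full 16 bytes (the kernel is declared
  // XNN_OOB_READS, so reading past the end of x is permitted), convert them
  // all, and store only the live 8/4/2/1-byte pieces selected by the bits of n.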
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(uint8_t));
    assert(n <= 15 * sizeof(uint8_t));

    const __m128i vx = _mm_loadu_si128((const __m128i*) x);

    const __m128i vextx_lo = _mm_unpacklo_epi8(vx, vzero);
    const __m128i vextx_hi = _mm_unpackhi_epi8(vx, vzero);

    const __m128i vprodlo_lo = _mm_mullo_epi16(vextx_lo, vmultiplier);
    const __m128i vprodlo_hi = _mm_mullo_epi16(vextx_hi, vmultiplier);
    const __m128i vprodhi_lo = _mm_mulhi_epu16(vextx_lo, vmultiplier);
    const __m128i vprodhi_hi = _mm_mulhi_epu16(vextx_hi, vmultiplier);

    __m128i vacc_ll = _mm_unpacklo_epi16(vprodlo_lo, vprodhi_lo);
    __m128i vacc_lh = _mm_unpackhi_epi16(vprodlo_lo, vprodhi_lo);
    __m128i vacc_hl = _mm_unpacklo_epi16(vprodlo_hi, vprodhi_hi);
    __m128i vacc_hh = _mm_unpackhi_epi16(vprodlo_hi, vprodhi_hi);

    vacc_ll = _mm_add_epi32(vacc_ll, vbias);
    vacc_lh = _mm_add_epi32(vacc_lh, vbias);
    vacc_hl = _mm_add_epi32(vacc_hl, vbias);
    vacc_hh = _mm_add_epi32(vacc_hh, vbias);

    vacc_ll = _mm_srai_epi32(vacc_ll, 8);
    vacc_lh = _mm_srai_epi32(vacc_lh, 8);
    vacc_hl = _mm_srai_epi32(vacc_hl, 8);
    vacc_hh = _mm_srai_epi32(vacc_hh, 8);

    const __m128i vacc_lo = _mm_packs_epi32(vacc_ll, vacc_lh);
    const __m128i vacc_hi = _mm_packs_epi32(vacc_hl, vacc_hh);

    __m128i vy = _mm_packus_epi16(vacc_lo, vacc_hi);
    if (n & (8 * sizeof(uint8_t))) {
      _mm_storel_epi64((__m128i*) y, vy);
      vy = _mm_unpackhi_epi64(vy, vy);
      y += 8;
    }
    if (n & (4 * sizeof(uint8_t))) {
      unaligned_store_u32(y, (uint32_t) _mm_cvtsi128_si32(vy));
      vy = _mm_srli_epi64(vy, 32);
      y += 4;
    }
    uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
    if (n & (2 * sizeof(uint8_t))) {
      unaligned_store_u16(y, (uint16_t) vy_lo);
      vy_lo >>= 16;
      y += 2;
    }
    if (n & (1 * sizeof(uint8_t))) {
      *y = (uint8_t) vy_lo;
    }
  }
}