// Auto-generated file. Do not edit!
//   Template: src/qs8-vcvt/sse2.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
#include <xnnpack/unaligned.h>

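// Requantize a vector of signed 8-bit (QS8) values. Per element this
// computes, in effect (a scalar sketch of the vector code below, where
// saturate_to_int8 stands for the signed-saturating narrowing done by the
// _mm_packs_* intrinsics):
//
//   int32_t acc = bias - (int32_t) x[i] * multiplier;
//   y[i] = saturate_to_int8(acc >> 8);
//
// `multiplier` and `bias` arrive precomputed in `params`; they are set up by
// the matching parameter-init routine, which is expected to fold the
// input/output scales and zero points into these two constants.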
void xnn_qs8_vcvt_ukernel__sse2_x16(
    size_t n,
    const int8_t* x,
    int8_t* y,
    const union xnn_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(int8_t) == 0);
  assert(x != NULL);
  assert(y != NULL);

  const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
  const __m128i vbias = _mm_load_si128((const __m128i*) params->sse2.bias);
  for (; n >= 16 * sizeof(int8_t); n -= 16 * sizeof(int8_t)) {
    const __m128i vx0 = _mm_loadu_si128((const __m128i*) x);
    x += 16;

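    // Sign-extend the 8-bit inputs to 16 bits: SSE2 has no direct
    // sign-extension instruction, so build a mask of the sign bits
    // (0xFF where x < 0) and interleave it in as the high bytes.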
    const __m128i vm0 = _mm_cmpgt_epi8(_mm_setzero_si128(), vx0);
    const __m128i vextx0 = _mm_unpacklo_epi8(vx0, vm0);
    const __m128i vextx1 = _mm_unpackhi_epi8(vx0, vm0);

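    // Widening 16x16 -> 32-bit multiply: _mm_mullo_epi16 and _mm_mulhi_epi16
    // produce the low and high 16 bits of each signed product separately.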
    const __m128i vprodlo0 = _mm_mullo_epi16(vextx0, vmultiplier);
    const __m128i vprodhi0 = _mm_mulhi_epi16(vextx0, vmultiplier);
    const __m128i vprodlo1 = _mm_mullo_epi16(vextx1, vmultiplier);
    const __m128i vprodhi1 = _mm_mulhi_epi16(vextx1, vmultiplier);

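    // Interleave the low and high halves to reassemble full 32-bit products.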
    __m128i vacc0 = _mm_unpacklo_epi16(vprodlo0, vprodhi0);
    __m128i vacc1 = _mm_unpackhi_epi16(vprodlo0, vprodhi0);
    __m128i vacc2 = _mm_unpacklo_epi16(vprodlo1, vprodhi1);
    __m128i vacc3 = _mm_unpackhi_epi16(vprodlo1, vprodhi1);

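    // Subtract the products from the bias. In the matching init code the
    // multiplier is expected to be stored negated and pre-scaled by 256, so
    // this effectively evaluates bias + 256 * scale * x.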
    vacc0 = _mm_sub_epi32(vbias, vacc0);
    vacc1 = _mm_sub_epi32(vbias, vacc1);
    vacc2 = _mm_sub_epi32(vbias, vacc2);
    vacc3 = _mm_sub_epi32(vbias, vacc3);

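    // Arithmetic shift right by 8 drops the Q8 fixed-point fraction
    // (a flooring division by 256).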
    vacc0 = _mm_srai_epi32(vacc0, 8);
    vacc1 = _mm_srai_epi32(vacc1, 8);
    vacc2 = _mm_srai_epi32(vacc2, 8);
    vacc3 = _mm_srai_epi32(vacc3, 8);

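    // Saturating two-step narrowing, 32 -> 16 -> 8 bits, clamps each result
    // to the int8 range.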
    vacc0 = _mm_packs_epi32(vacc0, vacc1);
    vacc1 = _mm_packs_epi32(vacc2, vacc3);

    const __m128i vy0 = _mm_packs_epi16(vacc0, vacc1);

    _mm_storeu_si128((__m128i*) y, vy0);
    y += 16;
  }
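  // Note: for this x16 tile the loop below is unreachable, since the loop
  // above already consumes every full 16-byte block (both loops use the same
  // bound). It is kept as emitted by the generator.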
  for (; n >= 16 * sizeof(int8_t); n -= 16 * sizeof(int8_t)) {
    const __m128i vx = _mm_loadu_si128((const __m128i*) x);
    x += 16;

    const __m128i vm = _mm_cmpgt_epi8(_mm_setzero_si128(), vx);
    const __m128i vextx_lo = _mm_unpacklo_epi8(vx, vm);
    const __m128i vextx_hi = _mm_unpackhi_epi8(vx, vm);

    const __m128i vprodlo_lo = _mm_mullo_epi16(vextx_lo, vmultiplier);
    const __m128i vprodlo_hi = _mm_mullo_epi16(vextx_hi, vmultiplier);
    const __m128i vprodhi_lo = _mm_mulhi_epi16(vextx_lo, vmultiplier);
    const __m128i vprodhi_hi = _mm_mulhi_epi16(vextx_hi, vmultiplier);

    __m128i vacc_ll = _mm_unpacklo_epi16(vprodlo_lo, vprodhi_lo);
    __m128i vacc_lh = _mm_unpackhi_epi16(vprodlo_lo, vprodhi_lo);
    __m128i vacc_hl = _mm_unpacklo_epi16(vprodlo_hi, vprodhi_hi);
    __m128i vacc_hh = _mm_unpackhi_epi16(vprodlo_hi, vprodhi_hi);

    vacc_ll = _mm_sub_epi32(vbias, vacc_ll);
    vacc_lh = _mm_sub_epi32(vbias, vacc_lh);
    vacc_hl = _mm_sub_epi32(vbias, vacc_hl);
    vacc_hh = _mm_sub_epi32(vbias, vacc_hh);

    vacc_ll = _mm_srai_epi32(vacc_ll, 8);
    vacc_lh = _mm_srai_epi32(vacc_lh, 8);
    vacc_hl = _mm_srai_epi32(vacc_hl, 8);
    vacc_hh = _mm_srai_epi32(vacc_hh, 8);

    const __m128i vacc_lo = _mm_packs_epi32(vacc_ll, vacc_lh);
    const __m128i vacc_hi = _mm_packs_epi32(vacc_hl, vacc_hh);

    const __m128i vy = _mm_packs_epi16(vacc_lo, vacc_hi);
    _mm_storeu_si128((__m128i*) y, vy);
    y += 16;
  }
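  // Remainder of 1..15 elements: load a full 16 bytes even though only n are
  // valid (the XNN_OOB_READS annotation on the declaration permits reading
  // past the end of x), process them as above, then store 8/4/2/1 bytes
  // according to the bits of n.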
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(int8_t));
    assert(n <= 15 * sizeof(int8_t));

    const __m128i vx = _mm_loadu_si128((const __m128i*) x);

    const __m128i vm = _mm_cmpgt_epi8(_mm_setzero_si128(), vx);
    const __m128i vextx_lo = _mm_unpacklo_epi8(vx, vm);
    const __m128i vextx_hi = _mm_unpackhi_epi8(vx, vm);

    const __m128i vprodlo_lo = _mm_mullo_epi16(vextx_lo, vmultiplier);
    const __m128i vprodlo_hi = _mm_mullo_epi16(vextx_hi, vmultiplier);
    const __m128i vprodhi_lo = _mm_mulhi_epi16(vextx_lo, vmultiplier);
    const __m128i vprodhi_hi = _mm_mulhi_epi16(vextx_hi, vmultiplier);

    __m128i vacc_ll = _mm_unpacklo_epi16(vprodlo_lo, vprodhi_lo);
    __m128i vacc_lh = _mm_unpackhi_epi16(vprodlo_lo, vprodhi_lo);
    __m128i vacc_hl = _mm_unpacklo_epi16(vprodlo_hi, vprodhi_hi);
    __m128i vacc_hh = _mm_unpackhi_epi16(vprodlo_hi, vprodhi_hi);

    vacc_ll = _mm_sub_epi32(vbias, vacc_ll);
    vacc_lh = _mm_sub_epi32(vbias, vacc_lh);
    vacc_hl = _mm_sub_epi32(vbias, vacc_hl);
    vacc_hh = _mm_sub_epi32(vbias, vacc_hh);

    vacc_ll = _mm_srai_epi32(vacc_ll, 8);
    vacc_lh = _mm_srai_epi32(vacc_lh, 8);
    vacc_hl = _mm_srai_epi32(vacc_hl, 8);
    vacc_hh = _mm_srai_epi32(vacc_hh, 8);

    const __m128i vacc_lo = _mm_packs_epi32(vacc_ll, vacc_lh);
    const __m128i vacc_hi = _mm_packs_epi32(vacc_hl, vacc_hh);

    __m128i vy = _mm_packs_epi16(vacc_lo, vacc_hi);
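    // Store cascade: write the low 8/4/2/1 bytes as indicated by n, shifting
    // the remaining result down after each conditional store.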
    if (n & (8 * sizeof(int8_t))) {
      _mm_storel_epi64((__m128i*) y, vy);
      vy = _mm_unpackhi_epi64(vy, vy);
      y += 8;
    }
    if (n & (4 * sizeof(int8_t))) {
      unaligned_store_u32(y, (uint32_t) _mm_cvtsi128_si32(vy));
      vy = _mm_srli_epi64(vy, 32);
      y += 4;
    }
    uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
    if (n & (2 * sizeof(int8_t))) {
      unaligned_store_u16(y, (uint16_t) vy_lo);
      vy_lo >>= 16;
      y += 2;
    }
    if (n & (1 * sizeof(int8_t))) {
      *y = (int8_t) vy_lo;
    }
  }
}