// Auto-generated file. Do not edit!
//   Template: src/qs8-vcvt/avx2.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>


void xnn_qs8_vcvt_ukernel__avx2_x32(
    size_t n,
    const int8_t* x,
    int8_t* y,
    const union xnn_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(int8_t) == 0);
  assert(x != NULL);
  assert(y != NULL);

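  // Load the per-tensor conversion parameters as whole 256-bit vectors; the matching init code is
  // expected to have replicated the input zero point, the 16-bit fixed-point multiplier, and the
  // output zero point across all 16-bit lanes.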
  const __m256i vinput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.input_zero_point);
  const __m256i vmultiplier = _mm256_load_si256((const __m256i*) params->avx2.multiplier);
  const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.output_zero_point);
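  // Main loop: convert 32 int8 elements per iteration. Each group of 16 inputs is sign-extended to
  // 16 bits, re-centered on the input zero point, rescaled with a rounding fixed-point multiply,
  // offset by the output zero point, and packed back to int8 with saturation.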
  for (; n >= 32 * sizeof(int8_t); n -= 32 * sizeof(int8_t)) {
    __m256i vacc0 = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) x));
    __m256i vacc1 = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (x + 16)));
    x += 32;

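    // Re-center the inputs: compute (input_zero_point - x) in every 16-bit lane. The multiplier
    // supplied in params is presumably prepared to account for this reversed operand order.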
    vacc0 = _mm256_sub_epi16(vinput_zero_point, vacc0);
    vacc1 = _mm256_sub_epi16(vinput_zero_point, vacc1);

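    // Pre-shift the differences left by 7 bits so the rounding high multiply below retains precision.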
    vacc0 = _mm256_slli_epi16(vacc0, 7);
    vacc1 = _mm256_slli_epi16(vacc1, 7);

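    // _mm256_mulhrs_epi16 computes ((a * b + 0x4000) >> 15) per 16-bit lane: a fixed-point multiply
    // by the conversion scale, rounded to nearest.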
    vacc0 = _mm256_mulhrs_epi16(vacc0, vmultiplier);
    vacc1 = _mm256_mulhrs_epi16(vacc1, vmultiplier);

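    // Add the output zero point with signed saturation.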
    vacc0 = _mm256_adds_epi16(vacc0, voutput_zero_point);
    vacc1 = _mm256_adds_epi16(vacc1, voutput_zero_point);

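    // Pack the two int16 vectors back to int8 with signed saturation; _mm256_packs_epi16 operates
    // within 128-bit lanes, so the 64-bit permute below restores sequential element order.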
    __m256i vy0 = _mm256_packs_epi16(vacc0, vacc1);

    vy0 = _mm256_permute4x64_epi64(vy0, _MM_SHUFFLE(3, 1, 2, 0));

    _mm256_storeu_si256((__m256i*) y, vy0);
    y += 32;
  }
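  // Secondary loop: apply the same conversion to 16 elements at a time.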
  for (; n >= 16 * sizeof(int8_t); n -= 16 * sizeof(int8_t)) {
    __m256i vacc = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) x));
    vacc = _mm256_sub_epi16(vinput_zero_point, vacc);
    vacc = _mm256_slli_epi16(vacc, 7);
    vacc = _mm256_mulhrs_epi16(vacc, vmultiplier);
    vacc = _mm256_adds_epi16(vacc, voutput_zero_point);
    x += 16;

    const __m128i vacc_hi = _mm256_extracti128_si256(vacc, 1);
    const __m128i vy = _mm_packs_epi16(_mm256_castsi256_si128(vacc), vacc_hi);
    _mm_storeu_si128((__m128i*) y, vy);
    y += 16;
  }
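  // Tail: 1 to 15 remaining elements. The full 16-byte load may read past the end of x; the
  // XNN_OOB_READS annotation on this kernel marks that as acceptable, and only n bytes are stored.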
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(int8_t));
    assert(n <= 15 * sizeof(int8_t));

    __m256i vacc = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) x));
    vacc = _mm256_sub_epi16(vinput_zero_point, vacc);
    vacc = _mm256_slli_epi16(vacc, 7);
    vacc = _mm256_mulhrs_epi16(vacc, vmultiplier);
    vacc = _mm256_adds_epi16(vacc, voutput_zero_point);

    const __m128i vacc_hi = _mm256_extracti128_si256(vacc, 1);
    __m128i vy = _mm_packs_epi16(_mm256_castsi256_si128(vacc), vacc_hi);
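    // Store the low n converted bytes in 8-, 4-, 2-, and 1-byte pieces selected by the bits of n,
    // moving the not-yet-stored bytes down to the bottom of vy after each partial store.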
    if (n & (8 * sizeof(int8_t))) {
      _mm_storel_epi64((__m128i*) y, vy);
      vy = _mm_unpackhi_epi64(vy, vy);
      y += 8;
    }
    if (n & (4 * sizeof(int8_t))) {
      _mm_storeu_si32(y, vy);
      vy = _mm_srli_epi64(vy, 32);
      y += 4;
    }
    if (n & (2 * sizeof(int8_t))) {
      _mm_storeu_si16(y, vy);
      vy = _mm_srli_epi32(vy, 16);
      y += 2;
    }
    if (n & (1 * sizeof(int8_t))) {
      *y = (int8_t) _mm_extract_epi8(vy, 0);
    }
  }
}