// Auto-generated file. Do not edit!
//   Template: src/f32-qs8-vcvt/avx2.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>


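// Converts a buffer of single-precision floats to quantized uint8 values,
// 16 elements per main-loop iteration. Effectively computes
//   y = clamp(round(x * scale) + zero_point, output_min, output_max)
// with rounding to nearest-even (the default MXCSR mode). n is the input size
// in bytes and must be a non-zero multiple of sizeof(float).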
void xnn_f32_qu8_vcvt_ukernel__avx2_x16(
    size_t n,
    const float* x,
    uint8_t* y,
    const union xnn_f32_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  assert(x != NULL);
  assert(y != NULL);

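  // Quantization parameters precomputed by the caller; the aligned loads
  // (_mm256_load_ps / _mm256_load_si256 / _mm_load_si128) assume the params
  // struct is suitably aligned.
  //   scale                      - rescaling factor applied in the float domain
  //   output_max_less_zero_point - output_max - zero_point, clamps from above
  //                                before the zero point is added
  //   output_zero_point          - zero point, added with a saturating int16 add
  //   output_min                 - lower bound, applied in the uint8 domain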
  const __m256 vscale = _mm256_load_ps(params->avx2.scale);
  const __m256 voutput_max_less_zero_point = _mm256_load_ps(params->avx2.output_max_less_zero_point);
  const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.output_zero_point);
  const __m128i voutput_min = _mm_load_si128((const __m128i*) params->avx2.output_min);

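  // Main loop: 16 floats -> 16 uint8 values per iteration. The 256-bit pack
  // (_mm256_packs_epi32, saturating to int16) operates within each 128-bit lane,
  // so groups of four results come out in 0-2-1-3 order (hence the vacc0213 /
  // vy0213 names); _mm_packus_epi16 then saturates to [0, 255], and the final
  // _mm_shuffle_epi32 with _MM_SHUFFLE(3, 1, 2, 0) restores linear order before
  // the lower clamp against output_min.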
  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
    __m256 vx01 = _mm256_loadu_ps(x);
    __m256 vx23 = _mm256_loadu_ps(x + 8);
    x += 16;

    vx01 = _mm256_mul_ps(vx01, vscale);
    vx23 = _mm256_mul_ps(vx23, vscale);

    vx01 = _mm256_min_ps(vx01, voutput_max_less_zero_point);
    vx23 = _mm256_min_ps(vx23, voutput_max_less_zero_point);

    const __m256i vacc01 = _mm256_cvtps_epi32(vx01);
    const __m256i vacc23 = _mm256_cvtps_epi32(vx23);

    __m256i vacc0213 = _mm256_packs_epi32(vacc01, vacc23);

    vacc0213 = _mm256_adds_epi16(vacc0213, voutput_zero_point);

    const __m128i vy0213 = _mm_packus_epi16(_mm256_castsi256_si128(vacc0213), _mm256_extracti128_si256(vacc0213, 1));

    __m128i vy0123 = _mm_shuffle_epi32(vy0213, _MM_SHUFFLE(3, 1, 2, 0));

    vy0123 = _mm_max_epu8(vy0123, voutput_min);

    _mm_storeu_si128((__m128i*) y, vy0123);
    y += 16;
  }
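  // Same conversion for a remaining group of 8 floats; the 8 output bytes end
  // up in the low 64 bits of vy and are stored with _mm_storel_epi64.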
  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    __m256 vx = _mm256_loadu_ps(x);
    vx = _mm256_mul_ps(vx, vscale);
    vx = _mm256_min_ps(vx, voutput_max_less_zero_point);
    x += 8;

    const __m256i vacc = _mm256_cvtps_epi32(vx);

    __m128i vy = _mm_packs_epi32(_mm256_castsi256_si128(vacc), _mm256_extracti128_si256(vacc, 1));
    vy = _mm_adds_epi16(vy, _mm256_castsi256_si128(voutput_zero_point));
    vy = _mm_packus_epi16(vy, vy);
    vy = _mm_max_epu8(vy, voutput_min);

    _mm_storel_epi64((__m128i*) y, vy);
    y += 8;
  }
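  // Tail: 1-7 leftover floats (n is the leftover size in bytes). The offset
  // &mask_table[7] - n selects an 8-word window in which only the first
  // n / sizeof(float) words are all-ones, so _mm256_maskload_ps reads just the
  // valid elements. The converted bytes are written out in 4-, 2-, and 1-byte
  // pieces, shifting vy right to expose the next bytes after each store.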
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(float));
    assert(n <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2.mask_table[7] - n));

    __m256 vx = _mm256_maskload_ps(x, vmask);
    vx = _mm256_mul_ps(vx, vscale);
    vx = _mm256_min_ps(vx, voutput_max_less_zero_point);

    const __m256i vacc = _mm256_cvtps_epi32(vx);

    __m128i vy = _mm_packs_epi32(_mm256_castsi256_si128(vacc), _mm256_extracti128_si256(vacc, 1));
    vy = _mm_adds_epi16(vy, _mm256_castsi256_si128(voutput_zero_point));
    vy = _mm_packus_epi16(vy, vy);
    vy = _mm_max_epu8(vy, voutput_min);

    if (n & (4 * sizeof(float))) {
      _mm_storeu_si32(y, vy);
      y += 4;
      vy = _mm_srli_epi64(vy, 32);
    }
    if (n & (2 * sizeof(float))) {
      _mm_storeu_si16(y, vy);
      y += 2;
      vy = _mm_srli_epi32(vy, 16);
    }
    if (n & (1 * sizeof(float))) {
      *y = (uint8_t) _mm_extract_epi8(vy, 0);
    }
  }
}