// Auto-generated file. Do not edit!
//   Template: src/qs8-vlrelu/avx2.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vlrelu.h>

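// Vectorized LeakyReLU for QU8 (asymmetrically quantized uint8) elements:
// each element is widened to int16, offset by the input zero point, scaled
// in fixed point by either the positive- or the negative-slope multiplier,
// offset by the output zero point, and narrowed back to uint8 with
// saturation.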
void xnn_qu8_vlrelu_ukernel__avx2_x64(
    size_t n,
    const uint8_t* x,
    uint8_t* y,
    const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(uint8_t) == 0);
  assert(x != NULL);
  assert(y != NULL);

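  // Each parameter is pre-broadcast across all 16 int16 lanes (presumably by
  // the params-initialization code), so one aligned 256-bit load fetches it.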
  const __m256i vinput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.input_zero_point);
  const __m256i vpositive_multiplier = _mm256_load_si256((const __m256i*) params->avx2.positive_multiplier);
  const __m256i vnegative_multiplier = _mm256_load_si256((const __m256i*) params->avx2.negative_multiplier);
  const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.output_zero_point);
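  // Main loop: 64 elements per iteration, held in four registers of 16 int16
  // lanes each.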
  for (; n >= 64 * sizeof(uint8_t); n -= 64 * sizeof(uint8_t)) {
    __m256i vacc0 = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i*) x));
    __m256i vacc1 = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i*) (x + 16)));
    __m256i vacc2 = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i*) (x + 32)));
    __m256i vacc3 = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i*) (x + 48)));
    x += 64;

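    // Build a per-lane mask (x > input_zero_point) before the accumulator is
    // overwritten with the negated difference (zero_point - x); the negation
    // is presumably compensated by the sign convention of the multipliers in
    // params.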
    __m256i vmultiplier0 = _mm256_cmpgt_epi16(vacc0, vinput_zero_point);
    vacc0 = _mm256_sub_epi16(vinput_zero_point, vacc0);
    __m256i vmultiplier1 = _mm256_cmpgt_epi16(vacc1, vinput_zero_point);
    vacc1 = _mm256_sub_epi16(vinput_zero_point, vacc1);
    __m256i vmultiplier2 = _mm256_cmpgt_epi16(vacc2, vinput_zero_point);
    vacc2 = _mm256_sub_epi16(vinput_zero_point, vacc2);
    __m256i vmultiplier3 = _mm256_cmpgt_epi16(vacc3, vinput_zero_point);
    vacc3 = _mm256_sub_epi16(vinput_zero_point, vacc3);

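    // Select the positive-slope multiplier where the mask is all-ones (x
    // above the zero point), the negative-slope multiplier elsewhere, then
    // shift the difference left by 7 so the rounding Q15 multiply below
    // effectively scales it by multiplier / 256.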
    vmultiplier0 = _mm256_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier0);
    vacc0 = _mm256_slli_epi16(vacc0, 7);
    vmultiplier1 = _mm256_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier1);
    vacc1 = _mm256_slli_epi16(vacc1, 7);
    vmultiplier2 = _mm256_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier2);
    vacc2 = _mm256_slli_epi16(vacc2, 7);
    vmultiplier3 = _mm256_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier3);
    vacc3 = _mm256_slli_epi16(vacc3, 7);

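    // _mm256_mulhrs_epi16 computes (a * b + 0x4000) >> 15 per int16 lane,
    // i.e. a Q15 fixed-point multiply with rounding.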
    vacc0 = _mm256_mulhrs_epi16(vacc0, vmultiplier0);
    vacc1 = _mm256_mulhrs_epi16(vacc1, vmultiplier1);
    vacc2 = _mm256_mulhrs_epi16(vacc2, vmultiplier2);
    vacc3 = _mm256_mulhrs_epi16(vacc3, vmultiplier3);

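    // Re-add the output zero point with signed saturation.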
    vacc0 = _mm256_adds_epi16(vacc0, voutput_zero_point);
    vacc1 = _mm256_adds_epi16(vacc1, voutput_zero_point);
    vacc2 = _mm256_adds_epi16(vacc2, voutput_zero_point);
    vacc3 = _mm256_adds_epi16(vacc3, voutput_zero_point);

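    // _mm256_packus_epi16 saturates int16 to uint8 but interleaves the
    // 128-bit lanes of its two inputs, so a cross-lane permute is needed to
    // restore the original element order.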
    __m256i vy0 = _mm256_packus_epi16(vacc0, vacc1);
    __m256i vy1 = _mm256_packus_epi16(vacc2, vacc3);

    vy0 = _mm256_permute4x64_epi64(vy0, _MM_SHUFFLE(3, 1, 2, 0));
    vy1 = _mm256_permute4x64_epi64(vy1, _MM_SHUFFLE(3, 1, 2, 0));

    _mm256_storeu_si256((__m256i*) y, vy0);
    _mm256_storeu_si256((__m256i*) (y + 32), vy1);
    y += 64;
  }
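  // Tail loop: the same computation on 16 elements at a time.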
  for (; n >= 16 * sizeof(uint8_t); n -= 16 * sizeof(uint8_t)) {
    __m256i vacc = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i*) x));
    __m256i vmultiplier = _mm256_cmpgt_epi16(vacc, vinput_zero_point);
    vacc = _mm256_sub_epi16(vinput_zero_point, vacc);
    vmultiplier = _mm256_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier);
    vacc = _mm256_slli_epi16(vacc, 7);
    vacc = _mm256_mulhrs_epi16(vacc, vmultiplier);
    vacc = _mm256_adds_epi16(vacc, voutput_zero_point);
    x += 16;

    const __m128i vacc_hi = _mm256_extracti128_si256(vacc, 1);
    const __m128i vy = _mm_packus_epi16(_mm256_castsi256_si128(vacc), vacc_hi);
    _mm_storeu_si128((__m128i*) y, vy);
    y += 16;
  }
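  // Remainder of 1-15 elements: the full 16-byte load may read past the end
  // of x (as signaled by the XNN_OOB_READS annotation on this function);
  // only n bytes are stored below.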
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(uint8_t));
    assert(n <= 15 * sizeof(uint8_t));

    __m256i vacc = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i*) x));
    __m256i vmultiplier = _mm256_cmpgt_epi16(vacc, vinput_zero_point);
    vacc = _mm256_sub_epi16(vinput_zero_point, vacc);
    vmultiplier = _mm256_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier);
    vacc = _mm256_slli_epi16(vacc, 7);
    vacc = _mm256_mulhrs_epi16(vacc, vmultiplier);
    vacc = _mm256_adds_epi16(vacc, voutput_zero_point);

    const __m128i vacc_hi = _mm256_extracti128_si256(vacc, 1);
    __m128i vy = _mm_packus_epi16(_mm256_castsi256_si128(vacc), vacc_hi);
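    // Write out the n valid bytes with progressively narrower stores,
    // shifting the already-stored bytes out of the register after each step.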
    if (n & (8 * sizeof(uint8_t))) {
      _mm_storel_epi64((__m128i*) y, vy);
      vy = _mm_unpackhi_epi64(vy, vy);
      y += 8;
    }
    if (n & (4 * sizeof(uint8_t))) {
      _mm_storeu_si32(y, vy);
      vy = _mm_srli_epi64(vy, 32);
      y += 4;
    }
    if (n & (2 * sizeof(uint8_t))) {
      _mm_storeu_si16(y, vy);
      vy = _mm_srli_epi32(vy, 16);
      y += 2;
    }
    if (n & (1 * sizeof(uint8_t))) {
      *y = (uint8_t) _mm_extract_epi8(vy, 0);
    }
  }
}