// Auto-generated file. Do not edit!
//   Template: src/qs8-vlrelu/sse4.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vlrelu.h>

void xnn_qu8_vlrelu_ukernel__avx_x16(
    size_t n,
    const uint8_t* x,
    uint8_t* y,
    const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(uint8_t) == 0);
  assert(x != NULL);
  assert(y != NULL);

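  // All parameters are pre-broadcast across the vector lanes: the input and
  // output zero points and the fixed-point multipliers for the positive and
  // negative branches of the leaky ReLU.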
  const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->avx.input_zero_point);
  const __m128i vpositive_multiplier = _mm_load_si128((const __m128i*) params->avx.positive_multiplier);
  const __m128i vnegative_multiplier = _mm_load_si128((const __m128i*) params->avx.negative_multiplier);
  const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->avx.output_zero_point);
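  // Main loop: process 16 bytes per iteration, widened to two vectors of
  // eight 16-bit lanes each.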
  for (; n >= 16 * sizeof(uint8_t); n -= 16 * sizeof(uint8_t)) {
    __m128i vacc0 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) x));
    __m128i vacc1 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (x + 8)));
    x += 16;

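    // Build a per-lane selection mask (x > input zero point) and compute the
    // negated difference (zero_point - x); the precomputed multipliers
    // account for the negation.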
    __m128i vmultiplier0 = _mm_cmpgt_epi16(vacc0, vinput_zero_point);
    vacc0 = _mm_sub_epi16(vinput_zero_point, vacc0);
    __m128i vmultiplier1 = _mm_cmpgt_epi16(vacc1, vinput_zero_point);
    vacc1 = _mm_sub_epi16(vinput_zero_point, vacc1);

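    // Select the positive-branch multiplier where the mask is set and the
    // negative-branch multiplier elsewhere; shift left by 7 to use the full
    // Q15 range in the rounding multiply below.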
    vmultiplier0 = _mm_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier0);
    vacc0 = _mm_slli_epi16(vacc0, 7);
    vmultiplier1 = _mm_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier1);
    vacc1 = _mm_slli_epi16(vacc1, 7);

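    // Rounding Q15 high multiply: round(vacc * vmultiplier / 2**15), i.e.
    // round((zero_point - x) * multiplier / 256) given the shift above.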
    vacc0 = _mm_mulhrs_epi16(vacc0, vmultiplier0);
    vacc1 = _mm_mulhrs_epi16(vacc1, vmultiplier1);

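    // Add the output zero point with signed saturation.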
    vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
    vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);

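    // Pack the two 16-bit vectors back into 16 bytes with unsigned saturation.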
    const __m128i vy0 = _mm_packus_epi16(vacc0, vacc1);

    _mm_storeu_si128((__m128i*) y, vy0);
    y += 16;
  }
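  // Tail loop: the same computation on 8 elements at a time.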
  for (; n >= 8 * sizeof(uint8_t); n -= 8 * sizeof(uint8_t)) {
    __m128i vacc = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) x));
    __m128i vmultiplier = _mm_cmpgt_epi16(vacc, vinput_zero_point);
    vacc = _mm_sub_epi16(vinput_zero_point, vacc);
    vmultiplier = _mm_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier);
    vacc = _mm_slli_epi16(vacc, 7);
    vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
    vacc = _mm_adds_epi16(vacc, voutput_zero_point);
    x += 8;

    const __m128i vy = _mm_packus_epi16(vacc, vacc);
    _mm_storel_epi64((__m128i*) y, vy);
    y += 8;
  }
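  // Remainder of 1-7 elements: compute a full 8-lane vector (out-of-bounds
  // reads are permitted per XNN_OOB_READS) and store only the valid bytes.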
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(uint8_t));
    assert(n <= 7 * sizeof(uint8_t));

    __m128i vacc = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) x));
    __m128i vmultiplier = _mm_cmpgt_epi16(vacc, vinput_zero_point);
    vacc = _mm_sub_epi16(vinput_zero_point, vacc);
    vmultiplier = _mm_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier);
    vacc = _mm_slli_epi16(vacc, 7);
    vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
    vacc = _mm_adds_epi16(vacc, voutput_zero_point);

    __m128i vy = _mm_packus_epi16(vacc, vacc);
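    // Store 4, 2, and/or 1 byte(s) depending on the remainder, shifting the
    // written bytes out of the vector after each partial store.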
    if (n & (4 * sizeof(uint8_t))) {
      _mm_storeu_si32(y, vy);
      vy = _mm_srli_epi64(vy, 32);
      y += 4;
    }
    if (n & (2 * sizeof(uint8_t))) {
      _mm_storeu_si16(y, vy);
      vy = _mm_srli_epi32(vy, 16);
      y += 2;
    }
    if (n & (1 * sizeof(uint8_t))) {
      *y = (uint8_t) _mm_extract_epi8(vy, 0);
    }
  }
}