// Auto-generated file. Do not edit!
//   Template: src/qs8-vlrelu/sse2.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vlrelu.h>
#include <xnnpack/unaligned.h>


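// Quantized (QU8) Leaky ReLU: for each element, the difference from the input
// zero point is rescaled by one of two fixed-point multipliers with 8
// fractional bits (one for inputs above the input zero point, one for the
// leaky branch below it), and the result is re-centered on the output zero
// point. Processes 16 uint8 elements per loop iteration.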
void xnn_qu8_vlrelu_ukernel__sse2_x16(
    size_t n,
    const uint8_t* x,
    uint8_t* y,
    const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(uint8_t) == 0);
  assert(x != NULL);
  assert(y != NULL);

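  // All four parameters are pre-broadcast vectors of eight identical int16
  // values. The two branch multipliers are stored as a base value plus an
  // XOR-difference so the branch can be selected with AND/XOR below; SSE2 has
  // no blend instruction.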
  const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->sse2.input_zero_point);
  const __m128i vmultiplier_diff = _mm_load_si128((const __m128i*) params->sse2.multiplier_diff);
  const __m128i vmultiplier_base = _mm_load_si128((const __m128i*) params->sse2.multiplier_base);
  const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
  const __m128i vzero = _mm_setzero_si128();
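  // Main loop: 16 uint8 elements per iteration, processed as two 8-lane
  // int16 halves (suffixes 0 and 1).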
  for (; n >= 16 * sizeof(uint8_t); n -= 16 * sizeof(uint8_t)) {
    const __m128i vx = _mm_loadu_si128((const __m128i*) x);
    x += 16;

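    // Zero-extend the 16 input bytes into two vectors of eight 16-bit lanes;
    // the values (0..255) are non-negative in int16 as well.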
    __m128i vextx0 = _mm_unpacklo_epi8(vx, vzero);
    __m128i vextx1 = _mm_unpackhi_epi8(vx, vzero);

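    // Per-lane branch mask: all-ones where x > input_zero_point. The
    // difference is computed negated (zero_point - x); the multipliers carry
    // the opposite sign to compensate.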
    __m128i vmultiplier0 = _mm_cmpgt_epi16(vextx0, vinput_zero_point);
    __m128i vmultiplier1 = _mm_cmpgt_epi16(vextx1, vinput_zero_point);
    vextx0 = _mm_sub_epi16(vinput_zero_point, vextx0);
    vextx1 = _mm_sub_epi16(vinput_zero_point, vextx1);

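    // Blend-free multiplier select: masked lanes get
    // multiplier_base ^ multiplier_diff (the positive branch), unmasked lanes
    // keep multiplier_base (the leaky branch).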
    vmultiplier0 = _mm_and_si128(vmultiplier0, vmultiplier_diff);
    vmultiplier1 = _mm_and_si128(vmultiplier1, vmultiplier_diff);

    vmultiplier0 = _mm_xor_si128(vmultiplier0, vmultiplier_base);
    vmultiplier1 = _mm_xor_si128(vmultiplier1, vmultiplier_base);

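    // 16x16-bit multiply, split into the low (mullo) and high (mulhi) 16 bits
    // of each 32-bit product.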
    __m128i vprodlo0 = _mm_mullo_epi16(vextx0, vmultiplier0);
    __m128i vprodlo1 = _mm_mullo_epi16(vextx1, vmultiplier1);

    vprodlo0 = _mm_srli_epi16(vprodlo0, 7);
    vprodlo1 = _mm_srli_epi16(vprodlo1, 7);
    __m128i vprodhi0 = _mm_mulhi_epi16(vextx0, vmultiplier0);
    __m128i vprodhi1 = _mm_mulhi_epi16(vextx1, vmultiplier1);

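    // Recombine the halves as a rounded right shift of the full product by 8:
    // _mm_avg_epu16 with zero computes (v + 1) >> 1, so vprodlo ends up as the
    // product's bits 8..15 rounded at bit 7, while vprodhi << 8 supplies bits
    // 16..23.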
    vprodhi0 = _mm_slli_epi16(vprodhi0, 8);
    vprodhi1 = _mm_slli_epi16(vprodhi1, 8);
    vprodlo0 = _mm_avg_epu16(vprodlo0, vzero);
    vprodlo1 = _mm_avg_epu16(vprodlo1, vzero);

    __m128i vacc0 = _mm_add_epi16(vprodlo0, vprodhi0);
    __m128i vacc1 = _mm_add_epi16(vprodlo1, vprodhi1);

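    // Re-center on the output zero point, saturating the signed addition.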
    vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
    vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);

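    // Pack both halves back to uint8, saturating to [0, 255], and store all
    // 16 output bytes.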
    const __m128i vy = _mm_packus_epi16(vacc0, vacc1);
    _mm_storeu_si128((__m128i*) y, vy);
    y += 16;
  }
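  // Remainder of 1..15 elements: a full 16-byte load is still safe because
  // the kernel is declared XNN_OOB_READS; lanes past the end are computed and
  // then dropped by the partial stores below.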
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(uint8_t));
    assert(n <= 15 * sizeof(uint8_t));

    const __m128i vx = _mm_loadu_si128((const __m128i*) x);

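    // Same computation as one iteration of the main loop above.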
    __m128i vextx0 = _mm_unpacklo_epi8(vx, vzero);
    __m128i vextx1 = _mm_unpackhi_epi8(vx, vzero);

    __m128i vmultiplier0 = _mm_cmpgt_epi16(vextx0, vinput_zero_point);
    __m128i vmultiplier1 = _mm_cmpgt_epi16(vextx1, vinput_zero_point);
    vextx0 = _mm_sub_epi16(vinput_zero_point, vextx0);
    vextx1 = _mm_sub_epi16(vinput_zero_point, vextx1);

    vmultiplier0 = _mm_and_si128(vmultiplier0, vmultiplier_diff);
    vmultiplier1 = _mm_and_si128(vmultiplier1, vmultiplier_diff);

    vmultiplier0 = _mm_xor_si128(vmultiplier0, vmultiplier_base);
    vmultiplier1 = _mm_xor_si128(vmultiplier1, vmultiplier_base);

    __m128i vprodlo0 = _mm_mullo_epi16(vextx0, vmultiplier0);
    __m128i vprodlo1 = _mm_mullo_epi16(vextx1, vmultiplier1);

    vprodlo0 = _mm_srli_epi16(vprodlo0, 7);
    vprodlo1 = _mm_srli_epi16(vprodlo1, 7);
    __m128i vprodhi0 = _mm_mulhi_epi16(vextx0, vmultiplier0);
    __m128i vprodhi1 = _mm_mulhi_epi16(vextx1, vmultiplier1);

    vprodhi0 = _mm_slli_epi16(vprodhi0, 8);
    vprodhi1 = _mm_slli_epi16(vprodhi1, 8);
    vprodlo0 = _mm_avg_epu16(vprodlo0, vzero);
    vprodlo1 = _mm_avg_epu16(vprodlo1, vzero);

    __m128i vacc0 = _mm_add_epi16(vprodlo0, vprodhi0);
    __m128i vacc1 = _mm_add_epi16(vprodlo1, vprodhi1);

    vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
    vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);

    __m128i vy = _mm_packus_epi16(vacc0, vacc1);
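    // Store only the low n bytes of vy, decomposing n into 8/4/2/1-byte
    // chunks and shifting the consumed lanes out after each partial store.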
    if (n & (8 * sizeof(uint8_t))) {
      _mm_storel_epi64((__m128i*) y, vy);
      vy = _mm_unpackhi_epi64(vy, vy);
      y += 8;
    }
    if (n & (4 * sizeof(uint8_t))) {
      unaligned_store_u32(y, (uint32_t) _mm_cvtsi128_si32(vy));
      vy = _mm_srli_epi64(vy, 32);
      y += 4;
    }
    uint32_t vy0 = (uint32_t) _mm_cvtsi128_si32(vy);
    if (n & (2 * sizeof(uint8_t))) {
      unaligned_store_u16(y, (uint16_t) vy0);
      vy0 >>= 16;
      y += 2;
    }
    if (n & (1 * sizeof(uint8_t))) {
      *y = (uint8_t) vy0;
    }
  }
}