// Auto-generated file. Do not edit!
//   Template: src/qs8-vlrelu/ssse3.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <tmmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vlrelu.h>
#include <xnnpack/unaligned.h>


void xnn_qu8_vlrelu_ukernel__ssse3_x16(
    size_t n,
    const uint8_t* x,
    uint8_t* y,
    const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(uint8_t) == 0);
  assert(x != NULL);
  assert(y != NULL);

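  // Load the precomputed LeakyReLU parameters: the input/output zero points and the two
  // 16-bit multiplier vectors from which the per-element multiplier is selected.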
  const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->sse2.input_zero_point);
  const __m128i vmultiplier_diff = _mm_load_si128((const __m128i*) params->sse2.multiplier_diff);
  const __m128i vmultiplier_base = _mm_load_si128((const __m128i*) params->sse2.multiplier_base);
  const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
  const __m128i vzero = _mm_setzero_si128();
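  // Main loop: process 16 elements per iteration.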
  for (; n >= 16 * sizeof(uint8_t); n -= 16 * sizeof(uint8_t)) {
    const __m128i vx = _mm_loadu_si128((const __m128i*) x);
    x += 16;

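    // Zero-extend the 16 unsigned 8-bit inputs into two vectors of 16-bit lanes.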
    __m128i vacc_lo = _mm_unpacklo_epi8(vx, vzero);
    __m128i vacc_hi = _mm_unpackhi_epi8(vx, vzero);
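    // Per-lane multiplier selection, interleaved with the shifted difference from the zero point:
    // the comparison mask is all ones where x > input_zero_point, so the multiplier becomes
    // multiplier_base ^ multiplier_diff there and multiplier_base elsewhere, while
    // vacc = (input_zero_point - x) << 7.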
    __m128i vmultiplier_lo = _mm_cmpgt_epi16(vacc_lo, vinput_zero_point);
    __m128i vmultiplier_hi = _mm_cmpgt_epi16(vacc_hi, vinput_zero_point);
    vacc_lo = _mm_sub_epi16(vinput_zero_point, vacc_lo);
    vacc_hi = _mm_sub_epi16(vinput_zero_point, vacc_hi);
    vmultiplier_lo = _mm_and_si128(vmultiplier_lo, vmultiplier_diff);
    vmultiplier_hi = _mm_and_si128(vmultiplier_hi, vmultiplier_diff);
    vacc_lo = _mm_slli_epi16(vacc_lo, 7);
    vacc_hi = _mm_slli_epi16(vacc_hi, 7);
    vmultiplier_lo = _mm_xor_si128(vmultiplier_lo, vmultiplier_base);
    vmultiplier_hi = _mm_xor_si128(vmultiplier_hi, vmultiplier_base);
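    // Rounding Q15 multiply ((vacc * vmultiplier + 0x4000) >> 15), then add the output
    // zero point with signed saturation.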
    vacc_lo = _mm_mulhrs_epi16(vacc_lo, vmultiplier_lo);
    vacc_hi = _mm_mulhrs_epi16(vacc_hi, vmultiplier_hi);
    vacc_lo = _mm_adds_epi16(vacc_lo, voutput_zero_point);
    vacc_hi = _mm_adds_epi16(vacc_hi, voutput_zero_point);

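    // Pack back to unsigned 8 bits with saturation and store 16 outputs.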
    const __m128i vy = _mm_packus_epi16(vacc_lo, vacc_hi);
    _mm_storeu_si128((__m128i*) y, vy);
    y += 16;
  }
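  // Remainder: 1-15 elements are left. The full 16-byte load may read past the end of x;
  // the kernel is annotated XNN_OOB_READS to permit this over-read.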
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(uint8_t));
    assert(n <= 15 * sizeof(uint8_t));

    const __m128i vx = _mm_loadu_si128((const __m128i*) x);

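    // Same computation as the main loop, applied to the (partially valid) tail vector.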
    __m128i vacc_lo = _mm_unpacklo_epi8(vx, vzero);
    __m128i vacc_hi = _mm_unpackhi_epi8(vx, vzero);
    __m128i vmultiplier_lo = _mm_cmpgt_epi16(vacc_lo, vinput_zero_point);
    __m128i vmultiplier_hi = _mm_cmpgt_epi16(vacc_hi, vinput_zero_point);
    vacc_lo = _mm_sub_epi16(vinput_zero_point, vacc_lo);
    vacc_hi = _mm_sub_epi16(vinput_zero_point, vacc_hi);
    vmultiplier_lo = _mm_and_si128(vmultiplier_lo, vmultiplier_diff);
    vmultiplier_hi = _mm_and_si128(vmultiplier_hi, vmultiplier_diff);
    vacc_lo = _mm_slli_epi16(vacc_lo, 7);
    vacc_hi = _mm_slli_epi16(vacc_hi, 7);
    vmultiplier_lo = _mm_xor_si128(vmultiplier_lo, vmultiplier_base);
    vmultiplier_hi = _mm_xor_si128(vmultiplier_hi, vmultiplier_base);
    vacc_lo = _mm_mulhrs_epi16(vacc_lo, vmultiplier_lo);
    vacc_hi = _mm_mulhrs_epi16(vacc_hi, vmultiplier_hi);
    vacc_lo = _mm_adds_epi16(vacc_lo, voutput_zero_point);
    vacc_hi = _mm_adds_epi16(vacc_hi, voutput_zero_point);

    __m128i vy = _mm_packus_epi16(vacc_lo, vacc_hi);
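    // Store the 1-15 output bytes in 8-, 4-, 2-, and 1-byte chunks.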
    if (n & (8 * sizeof(uint8_t))) {
      _mm_storel_epi64((__m128i*) y, vy);
      vy = _mm_unpackhi_epi64(vy, vy);
      y += 8;
    }
    if (n & (4 * sizeof(uint8_t))) {
      unaligned_store_u32(y, (uint32_t) _mm_cvtsi128_si32(vy));
      vy = _mm_srli_epi64(vy, 32);
      y += 4;
    }
    uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
    if (n & (2 * sizeof(uint8_t))) {
      unaligned_store_u16(y, (uint16_t) vy_lo);
      vy_lo >>= 16;
      y += 2;
    }
    if (n & (1 * sizeof(uint8_t))) {
      *y = (uint8_t) vy_lo;
    }
  }
}