// Auto-generated file. Do not edit!
//   Template: src/f16-vlrelu/f16c.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>


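// Vectorized LeakyReLU microkernel for IEEE fp16 inputs: lanes with a
// negative input are scaled by the slope parameter, non-negative lanes pass
// through unchanged. Arithmetic is done in single precision, using the F16C
// instruction set for half <-> single conversion.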
void xnn_f16_vlrelu_ukernel__f16c_x16(
    size_t batch,
    const void* input,
    void* output,
    const union xnn_f16_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
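  // `batch` is measured in bytes; the input holds half-precision values
  // stored as uint16_t, so it must be a whole number of 2-byte elements.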
  assert(batch != 0);
  assert(batch % sizeof(uint16_t) == 0);

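  // The slope appears to be stored pre-broadcast across all 8 single-precision
  // lanes of the params structure, so one aligned load yields the vector.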
  const __m256 vslope = _mm256_load_ps(params->avx.slope);
  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
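  // Main loop: process 16 elements per iteration as two vectors of 8.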
  for (; batch >= 16 * sizeof(uint16_t); batch -= 16 * sizeof(uint16_t)) {
    const __m256 vx01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
    const __m256 vx89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + 8)));
    i += 16;

    __m256 vacc01234567 = _mm256_mul_ps(vx01234567, vslope);
    __m256 vacc89ABCDEF = _mm256_mul_ps(vx89ABCDEF, vslope);

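    // _mm256_blendv_ps selects on the sign bit of its third argument: lanes
    // where vx is negative take the scaled value, the rest keep vx.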
    vacc01234567 = _mm256_blendv_ps(vx01234567, vacc01234567, vx01234567);
    vacc89ABCDEF = _mm256_blendv_ps(vx89ABCDEF, vacc89ABCDEF, vx89ABCDEF);

    _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vacc01234567, _MM_FROUND_NO_EXC));
    _mm_storeu_si128((__m128i*) (o + 8), _mm256_cvtps_ph(vacc89ABCDEF, _MM_FROUND_NO_EXC));
    o += 16;
  }
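  // Remainder loop: handle a full vector of 8 elements, if present.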
  for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) {
    const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));
    i += 8;

    __m256 vacc = _mm256_mul_ps(vx, vslope);
    vacc = _mm256_blendv_ps(vx, vacc, vx);

    _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vacc, _MM_FROUND_NO_EXC));
    o += 8;
  }
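  // Tail of 1-7 elements: the XNN_OOB_READS annotation on this kernel permits
  // the full 16-byte load below to read past the end of the input buffer;
  // only the valid lanes are stored back.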
  if XNN_UNLIKELY(batch != 0) {
    const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i));

    __m256 vacc = _mm256_mul_ps(vx, vslope);
    vacc = _mm256_blendv_ps(vx, vacc, vx);

    __m128i vh = _mm256_cvtps_ph(vacc, _MM_FROUND_NO_EXC);
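    // Store 4, 2, and then 1 element(s) according to the bits of the
    // remaining byte count, moving consumed lanes out of vh as we go.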
    if (batch & (4 * sizeof(uint16_t))) {
      _mm_storel_epi64((__m128i*) o, vh);
      vh = _mm_unpackhi_epi64(vh, vh);
      o += 4;
    }
    if (batch & (2 * sizeof(uint16_t))) {
      _mm_storeu_si32(o, vh);
      vh = _mm_srli_epi64(vh, 32);
      o += 2;
    }
    if (batch & (1 * sizeof(uint16_t))) {
      *o = _mm_extract_epi16(vh, 0);
    }
  }
}
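
// A minimal usage sketch (illustrative only, not part of the generated file),
// assuming fp16 data stored as uint16_t and a params initializer whose exact
// name is assumed here:
//
//   uint16_t input[16], output[16];
//   union xnn_f16_lrelu_params params;
//   xnn_init_f16_lrelu_avx_params(&params, /*slope=*/UINT16_C(0x2E66) /* ~0.1 in fp16 */);
//   xnn_f16_vlrelu_ukernel__f16c_x16(16 * sizeof(uint16_t), input, output, &params);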