// Auto-generated file. Do not edit!
//   Template: src/f16-vhswish/f16c.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>


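// Computes the HardSwish activation on fp16 elements:
//   y = x * min(max(x + 3, 0), 6) / 6
// Inputs and outputs are fp16 bit patterns handled as uint16_t; the arithmetic
// runs in fp32 via F16C conversions, with intermediate rounds back to fp16 so
// results match native half-precision evaluation.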
void xnn_f16_vhswish_ukernel__f16c_x16(
    size_t n,
    const void* restrict x_ptr,
    void* restrict y_ptr,
    const union xnn_f16_hswish_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(uint16_t) == 0);

  const uint16_t* x = (const uint16_t*) x_ptr;
  uint16_t* y = (uint16_t*) y_ptr;

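  // Constants from the parameters structure: 1/6 and 3.0 as fp32 vectors for
  // the arithmetic, and 6.0 as the fp16 bit pattern (0x4600) replicated across
  // int16 lanes so the clamp below can run in the integer domain.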
  const __m256 vsixth = _mm256_load_ps(params->avx.sixth);
  const __m256 vthree = _mm256_load_ps(params->avx.three);
  const __m128i vsix = _mm_load_si128((const __m128i*) params->avx.six);
  const __m128i vzero = _mm_setzero_si128();

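  // Main loop: 16 fp16 elements per iteration, as two batches of 8.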
  for (; n >= 16 * sizeof(uint16_t); n -= 16 * sizeof(uint16_t)) {
    __m256 vx01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) x));
    __m256 vx89ABCDEF = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (x + 8)));
    x += 16;

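    // acc := x + 3, rounded to fp16; x := x * (1/6), rounded to fp16 and
    // widened back to fp32 so the final product matches native fp16 arithmetic.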
    __m128i vacc01234567 = _mm256_cvtps_ph(_mm256_add_ps(vx01234567, vthree), _MM_FROUND_NO_EXC);
    vx01234567 = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vx01234567, vsixth), _MM_FROUND_NO_EXC));
    __m128i vacc89ABCDEF = _mm256_cvtps_ph(_mm256_add_ps(vx89ABCDEF, vthree), _MM_FROUND_NO_EXC);
    vx89ABCDEF = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vx89ABCDEF, vsixth), _MM_FROUND_NO_EXC));

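    // Clamp acc to [0, 6] directly on the fp16 bit patterns: negative fp16
    // values have the sign bit set and thus compare below zero as signed
    // int16, while non-negative fp16 values order the same way as int16.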
    vacc01234567 = _mm_max_epi16(vacc01234567, vzero);
    vacc89ABCDEF = _mm_max_epi16(vacc89ABCDEF, vzero);

    vacc01234567 = _mm_min_epi16(vacc01234567, vsix);
    vacc89ABCDEF = _mm_min_epi16(vacc89ABCDEF, vsix);

    vacc01234567 = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc01234567), vx01234567), _MM_FROUND_NO_EXC);
    vacc89ABCDEF = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc89ABCDEF), vx89ABCDEF), _MM_FROUND_NO_EXC);

    _mm_storeu_si128((__m128i*) y, vacc01234567);
    _mm_storeu_si128((__m128i*) (y + 8), vacc89ABCDEF);
    y += 16;
  }
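  // Same computation, one batch of 8 elements per iteration.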
  for (; n >= 8 * sizeof(uint16_t); n -= 8 * sizeof(uint16_t)) {
    __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) x));
    x += 8;
    __m128i vacc = _mm256_cvtps_ph(_mm256_add_ps(vx, vthree), _MM_FROUND_NO_EXC);
    vx = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vx, vsixth), _MM_FROUND_NO_EXC));
    vacc = _mm_max_epi16(vacc, vzero);
    vacc = _mm_min_epi16(vacc, vsix);
    vacc = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc), vx), _MM_FROUND_NO_EXC);
    _mm_storeu_si128((__m128i*) y, vacc);
    y += 8;
  }
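  // Remainder of 1-7 elements: load a full vector of 8 (reading past the end
  // of x is permitted per the XNN_OOB_READS annotation), compute as above,
  // then store only the valid lanes.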
  if XNN_UNLIKELY(n != 0) {
    __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) x));
    __m128i vacc = _mm256_cvtps_ph(_mm256_add_ps(vx, vthree), _MM_FROUND_NO_EXC);
    vx = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vx, vsixth), _MM_FROUND_NO_EXC));
    vacc = _mm_max_epi16(vacc, vzero);
    vacc = _mm_min_epi16(vacc, vsix);
    vacc = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc), vx), _MM_FROUND_NO_EXC);

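    // Store 4, 2, and/or 1 elements according to the bits of the remaining
    // byte count, shifting consumed lanes out of the vector as we go.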
    if (n & (4 * sizeof(uint16_t))) {
      _mm_storel_epi64((__m128i*) y, vacc);
      vacc = _mm_unpackhi_epi64(vacc, vacc);
      y += 4;
    }
    if (n & (2 * sizeof(uint16_t))) {
      _mm_storeu_si32(y, vacc);
      vacc = _mm_srli_epi64(vacc, 32);
      y += 2;
    }
    if (n & (1 * sizeof(uint16_t))) {
      *y = (uint16_t) _mm_extract_epi16(vacc, 0);
    }
  }
}