xref: /aosp_15_r20/external/XNNPACK/src/f32-vhswish/avx512f.c.in (revision 4bdc94577ba0e567308109d787f7fec7b531ce36)
1// Copyright 2019 Google LLC
2//
3// This source code is licensed under the BSD-style license found in the
4// LICENSE file in the root directory of this source tree.
5
6$assert BATCH_TILE % 16 == 0
7$assert BATCH_TILE >= 16
8$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
9#include <assert.h>
10
11#include <immintrin.h>
12
13#include <xnnpack/common.h>
14#include <xnnpack/intrinsics-polyfill.h>
15#include <xnnpack/vunary.h>
16
17
// Template (xngen) for the AVX512F f32 h-swish micro-kernel:
//
//   y[i] = x[i] * min(max(x[i] * sixth + half, 0.0f), 1.0f)
//
// The constants sixth, half and one are read from params (presumably 1/6,
// 1/2 and 1.0 — they are loaded, not computed, here).  BATCH_TILE (a
// multiple of 16, >= 16) controls how far the main loop is unrolled;
// ${...}, $if and $for lines are expanded by the code generator.
//
// n      - byte count of the x/y arrays (non-zero multiple of sizeof(float))
// x      - input floats
// y      - output floats
// params - precomputed scalar constants for the hswish formula
18void xnn_f32_vhswish_ukernel__avx512f_x${BATCH_TILE}(
19    size_t n,
20    const float* x,
21    float* y,
22    const union xnn_f32_hswish_params params[restrict XNN_MIN_ELEMENTS(1)])
23{
24  assert(n != 0);
25  assert(n % sizeof(float) == 0);
26
  // Broadcast the scalar parameters once; reused by every vector iteration.
27  const __m512 vsixth = _mm512_set1_ps(params->avx512.sixth);
28  const __m512 vhalf = _mm512_set1_ps(params->avx512.half);
29  const __m512 vone = _mm512_set1_ps(params->avx512.one);
30  const __m512 vzero = _mm512_setzero_ps();
31
  // Main loop (only emitted when BATCH_TILE > 16): processes ${BATCH_TILE}
  // elements per iteration as BATCH_TILE/16 independent 16-float __m512
  // registers, each pipeline stage applied to all registers before the next.
32  $if BATCH_TILE > 16:
33    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
34      const __m512 vx${ABC[0:16]} = _mm512_loadu_ps(x);
35      $for N in range(16, BATCH_TILE, 16):
36        const __m512 vx${ABC[N:N+16]} = _mm512_loadu_ps(x + ${N});
37      x += ${BATCH_TILE};
38
39      $for N in range(0, BATCH_TILE, 16):
40        __m512 vacc${ABC[N:N+16]} = _mm512_fmadd_ps(vx${ABC[N:N+16]}, vsixth, vhalf);
41
42      $for N in range(0, BATCH_TILE, 16):
43        vacc${ABC[N:N+16]} = _mm512_max_ps(vacc${ABC[N:N+16]}, vzero);
44
45      $for N in range(0, BATCH_TILE, 16):
46        vacc${ABC[N:N+16]} = _mm512_min_ps(vacc${ABC[N:N+16]}, vone);
47
48      $for N in range(0, BATCH_TILE, 16):
49        vacc${ABC[N:N+16]} = _mm512_mul_ps(vacc${ABC[N:N+16]}, vx${ABC[N:N+16]});
50
51      _mm512_storeu_ps(y, vacc${ABC[0:16]});
52      $for N in range(16, BATCH_TILE, 16):
53        _mm512_storeu_ps(y + ${N}, vacc${ABC[N:N+16]});
54      y += ${BATCH_TILE};
55    }
  // Leftover full vectors: one 16-float __m512 at a time.
56  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
57    const __m512 vx = _mm512_loadu_ps(x);
58    x += 16;
59    __m512 vacc = _mm512_fmadd_ps(vx, vsixth, vhalf);  // x * sixth + half
60    vacc = _mm512_max_ps(vacc, vzero);  // clamp below at 0
61    vacc = _mm512_min_ps(vacc, vone);  // clamp above at 1
62    vacc = _mm512_mul_ps(vacc, vx);  // multiply by the original input
63    _mm512_storeu_ps(y, vacc);
64    y += 16;
65  }
  // Tail of 1..15 floats: use AVX512 lane masking so only the valid
  // elements are loaded and stored (no out-of-bounds memory access).
66  if XNN_UNLIKELY(n != 0) {
67    assert(n >= 1 * sizeof(float));
68    assert(n <= 15 * sizeof(float));
69    // Prepare mask for valid 32-bit elements (depends on n).
70    n >>= 2 /* log2(sizeof(float)) */;
71    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));
72
73    const __m512 vx = _mm512_maskz_loadu_ps(vmask, x);  // masked-off lanes are zeroed
74    __m512 vacc = _mm512_fmadd_ps(vx, vsixth, vhalf);
75    vacc = _mm512_max_ps(vacc, vzero);
76    vacc = _mm512_min_ps(vacc, vone);
77    vacc = _mm512_mul_ps(vacc, vx);
78    _mm512_mask_storeu_ps(y, vmask, vacc);  // only valid lanes written back
79  }
80}
81