// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
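// BATCH_TILE is the number of floats processed per iteration of the main
// loop and must be a positive multiple of the 8-lane AVX vector width.
// ABC supplies the digits used to name the unrolled vector variables, e.g.
// BATCH_TILE=16 expands to vx01234567 and vx89ABCDEF below.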
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>

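// Computes y[i] = sqrt(x[i]) element-wise with the _mm256_sqrt_ps intrinsic
// (hardware VSQRTPS). n is the size of the input in bytes and must be a
// non-zero multiple of sizeof(float).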
void xnn_f32_vsqrt_ukernel__avx_sqrt_x${BATCH_TILE}(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_sqrt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);

  $if BATCH_TILE > 8:
    // Main loop: process BATCH_TILE floats per iteration, unrolled into
    // 8-lane AVX vectors.
    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
      const __m256 vx${ABC[0:8]} = _mm256_loadu_ps(x);
      $for N in range(8, BATCH_TILE, 8):
        const __m256 vx${ABC[N:N+8]} = _mm256_loadu_ps(x + ${N});
      x += ${BATCH_TILE};

      $for N in range(0, BATCH_TILE, 8):
        const __m256 vy${ABC[N:N+8]} = _mm256_sqrt_ps(vx${ABC[N:N+8]});

      _mm256_storeu_ps(y, vy${ABC[0:8]});
      $for N in range(8, BATCH_TILE, 8):
        _mm256_storeu_ps(y + ${N}, vy${ABC[N:N+8]});
      y += ${BATCH_TILE};
    }
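  // Vector loop: process one full 8-float vector at a time. This is the main
  // loop when BATCH_TILE == 8, and handles leftover full vectors otherwise.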
  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    const __m256 vx = _mm256_loadu_ps(x);
    x += 8;
    const __m256 vy = _mm256_sqrt_ps(vx);
    _mm256_storeu_ps(y, vy);
    y += 8;
  }
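  // Remainder of 1-7 floats: load them with a lane mask so no bytes beyond
  // the end of x are read, then store the valid results in 4/2/1-float steps.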
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(float));
    assert(n <= 7 * sizeof(float));
    // Load an 8-lane mask ending at mask_table[7]: the table is expected to
    // hold seven -1 (all-ones) entries followed by seven zeros, so starting
    // n bytes before &mask_table[7] sets exactly the first n / sizeof(float)
    // lanes of the mask.
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - n));

    const __m256 vx = _mm256_maskload_ps(x, vmask);
    const __m256 vy = _mm256_sqrt_ps(vx);

    // Store the 1-7 valid results in 4-, 2-, then 1-float steps, moving the
    // not-yet-stored lanes into the low positions after each store.
    __m128 vy_lo = _mm256_castps256_ps128(vy);
    if (n & (4 * sizeof(float))) {
      _mm_storeu_ps(y, vy_lo);
      vy_lo = _mm256_extractf128_ps(vy, 1);
      y += 4;
    }
    if (n & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) y, vy_lo);
      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      _mm_store_ss(y, vy_lo);
    }
  }
}
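
// A minimal usage sketch, assuming this template is instantiated with
// BATCH_TILE=8 and that params->avx.mask_table has been initialized (the
// initializer name below is an assumption for illustration; mask_table is
// expected to hold seven -1 entries followed by seven 0 entries):
//
//   union xnn_f32_sqrt_params params;
//   xnn_init_f32_sqrt_avx_params(&params);  // assumed initializer name
//   float x[11] = { 0.0f, 1.0f, 4.0f, 9.0f, 16.0f, 25.0f,
//                   36.0f, 49.0f, 64.0f, 81.0f, 100.0f };
//   float y[11];
//   xnn_f32_vsqrt_ukernel__avx_sqrt_x8(11 * sizeof(float), x, y, &params);
//   // y now holds sqrt(x[i]): 8 lanes via the vector loop, 3 via the masked tail.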