// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$SIMD_TILE = BATCH_TILE // 8
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>


void xnn_f32_vsqrt_ukernel__fma3_nr1fma1adj_x${BATCH_TILE}(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_sqrt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);

  const __m256 vhalf = _mm256_load_ps(params->fma.half);
  $if BATCH_TILE > 8:
    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
      const __m256 vx${ABC[0]} = _mm256_loadu_ps(x);
      $for N in range(1, SIMD_TILE):
        const __m256 vx${ABC[N]} = _mm256_loadu_ps(x + ${N * 8});
      x += ${BATCH_TILE};

      // Initial hardware estimate: vrsqrtx ~= 1/sqrt(vx), accurate to about 12 bits.
      $for N in range(SIMD_TILE):
        const __m256 vrsqrtx${ABC[N]} = _mm256_rsqrt_ps(vx${ABC[N]});

      // Derive sqrt(x) ~= x * rsqrt(x) and half of the reciprocal square root.
      $for N in range(SIMD_TILE):
        __m256 vsqrtx${ABC[N]} = _mm256_mul_ps(vrsqrtx${ABC[N]}, vx${ABC[N]});
        __m256 vhalfrsqrtx${ABC[N]} = _mm256_mul_ps(vrsqrtx${ABC[N]}, vhalf);

      // One Newton-Raphson step: residual = 0.5 - sqrtx * halfrsqrtx, then scale
      // both estimates by (1 + residual).
      $for N in range(SIMD_TILE):
        const __m256 vresidual${ABC[N]} = _mm256_fnmadd_ps(vsqrtx${ABC[N]}, vhalfrsqrtx${ABC[N]}, vhalf);

      $for N in range(SIMD_TILE):
        vhalfrsqrtx${ABC[N]} = _mm256_fmadd_ps(vhalfrsqrtx${ABC[N]}, vresidual${ABC[N]}, vhalfrsqrtx${ABC[N]});
        vsqrtx${ABC[N]} = _mm256_fmadd_ps(vsqrtx${ABC[N]}, vresidual${ABC[N]}, vsqrtx${ABC[N]});

      // Final adjustment: y = sqrtx + halfrsqrtx * (x - sqrtx * sqrtx).
      $for N in range(SIMD_TILE):
        const __m256 vadjustment${ABC[N]} = _mm256_fnmadd_ps(vsqrtx${ABC[N]}, vsqrtx${ABC[N]}, vx${ABC[N]});

      $for N in range(SIMD_TILE):
        const __m256 vy${ABC[N]} = _mm256_fmadd_ps(vhalfrsqrtx${ABC[N]}, vadjustment${ABC[N]}, vsqrtx${ABC[N]});

      _mm256_storeu_ps(y, vy${ABC[0]});
      $for N in range(1, SIMD_TILE):
        _mm256_storeu_ps(y + ${N * 8}, vy${ABC[N]});
      y += ${BATCH_TILE};
    }
  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    const __m256 vx = _mm256_loadu_ps(x);
    x += 8;

    // sqrt(x) = x * rsqrt(x), refined with one Newton-Raphson step and a final adjustment.
    const __m256 vrsqrtx = _mm256_rsqrt_ps(vx);
    __m256 vsqrtx = _mm256_mul_ps(vrsqrtx, vx);
    __m256 vhalfrsqrtx = _mm256_mul_ps(vrsqrtx, vhalf);
    const __m256 vresidual = _mm256_fnmadd_ps(vsqrtx, vhalfrsqrtx, vhalf);
    vhalfrsqrtx = _mm256_fmadd_ps(vhalfrsqrtx, vresidual, vhalfrsqrtx);
    vsqrtx = _mm256_fmadd_ps(vsqrtx, vresidual, vsqrtx);
    const __m256 vadjustment = _mm256_fnmadd_ps(vsqrtx, vsqrtx, vx);
    const __m256 vy = _mm256_fmadd_ps(vhalfrsqrtx, vadjustment, vsqrtx);

    _mm256_storeu_ps(y, vy);
    y += 8;
  }
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(float));
    assert(n <= 7 * sizeof(float));
    // Build a mask whose first n / sizeof(float) lanes are all-ones for the partial load.
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->fma.mask_table[7] - n));

    const __m256 vx = _mm256_maskload_ps(x, vmask);

    // Same computation as the 8-element loop above.
    const __m256 vrsqrtx = _mm256_rsqrt_ps(vx);
    __m256 vsqrtx = _mm256_mul_ps(vrsqrtx, vx);
    __m256 vhalfrsqrtx = _mm256_mul_ps(vrsqrtx, vhalf);
    const __m256 vresidual = _mm256_fnmadd_ps(vsqrtx, vhalfrsqrtx, vhalf);
    vhalfrsqrtx = _mm256_fmadd_ps(vhalfrsqrtx, vresidual, vhalfrsqrtx);
    vsqrtx = _mm256_fmadd_ps(vsqrtx, vresidual, vsqrtx);
    const __m256 vadjustment = _mm256_fnmadd_ps(vsqrtx, vsqrtx, vx);
    const __m256 vy = _mm256_fmadd_ps(vhalfrsqrtx, vadjustment, vsqrtx);

    // Store the remaining 1-7 elements in 4-, 2-, and 1-element pieces.
    __m128 vy_lo = _mm256_castps256_ps128(vy);
    if (n & (4 * sizeof(float))) {
      _mm_storeu_ps(y, vy_lo);
      vy_lo = _mm256_extractf128_ps(vy, 1);
      y += 4;
    }
    if (n & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) y, vy_lo);
      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      _mm_store_ss(y, vy_lo);
    }
  }
}
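
// ---------------------------------------------------------------------------
// Illustrative scalar reference (not part of the kernel above): a minimal
// sketch of the same nr1fma1adj recipe, one element at a time, using fmaf()
// from <math.h>. The hypothetical helper f32_sqrt_nr1fma1adj() stands in for
// the vector code; the hardware rsqrt estimate is stubbed with 1.0f/sqrtf(x),
// so this sketch is exact rather than approximate, but the data flow matches
// the FMA sequence above (fmaf(-a, b, c) corresponds to _mm256_fnmadd_ps).
//
// #include <math.h>
//
// static float f32_sqrt_nr1fma1adj(float x) {
//   const float rsqrtx = 1.0f / sqrtf(x);  // stand-in for _mm256_rsqrt_ps
//   float sqrtx = rsqrtx * x;              // ~sqrt(x)
//   float halfrsqrtx = rsqrtx * 0.5f;      // ~0.5/sqrt(x)
//   // One Newton-Raphson step: residual = 0.5 - sqrtx * halfrsqrtx.
//   const float residual = fmaf(-sqrtx, halfrsqrtx, 0.5f);
//   halfrsqrtx = fmaf(halfrsqrtx, residual, halfrsqrtx);
//   sqrtx = fmaf(sqrtx, residual, sqrtx);
//   // Final adjustment: y = sqrtx + halfrsqrtx * (x - sqrtx * sqrtx).
//   const float adjustment = fmaf(-sqrtx, sqrtx, x);
//   return fmaf(halfrsqrtx, adjustment, sqrtx);
// }
// ---------------------------------------------------------------------------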