// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert ELEMENTS_TILE % 8 == 0
$assert ELEMENTS_TILE >= 8
$SIMD_TILE = ELEMENTS_TILE // 8
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/raddstoreexpminusmax.h>


void xnn_f32_raddstoreexpminusmax_ukernel__avx2_rr1_p5_x${ELEMENTS_TILE}${"" if ACCUMULATORS == 1 else "_acc%d" % ACCUMULATORS}(
    size_t elements,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(elements % sizeof(float) == 0);

  const __m256 vi_max = _mm256_broadcast_ss(max);
  const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p5.log2e);
  const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p5.magic_bias);
  const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p5.minus_ln2);
  const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p5.c5);
  const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p5.c4);
  const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p5.c3);
  const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p5.c2);
  const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p5.c1);
  const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p5.denorm_cutoff);

  $for K in range(ACCUMULATORS):
    __m256 vacc${K} = _mm256_setzero_ps();
  for (; elements >= ${ELEMENTS_TILE} * sizeof(float); elements -= ${ELEMENTS_TILE} * sizeof(float)) {
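    // Load ${ELEMENTS_TILE} (${SIMD_TILE}x8) inputs at a time.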
    const __m256 vi0 = _mm256_loadu_ps(input);
    $for N in range(1, SIMD_TILE):
      const __m256 vi${N} = _mm256_loadu_ps(input + ${N * 8});
    input += ${ELEMENTS_TILE};

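    // Subtract maximum input x := i - i_max. This implies x <= 0.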
    $for N in range(SIMD_TILE):
      const __m256 vx${N} = _mm256_sub_ps(vi${N}, vi_max);

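    // Compute reduced argument n := round(x / log(2)).
    // Adding the large magic bias to x * log2(e) rounds the product to an integer in the low mantissa bits.
    // The trick is only exact for |x / log(2)| < 2**22, but inputs that violate this bound are far below the
    // denormal cutoff and get fixed up at the end of the algorithm anyway.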
    $for N in range(SIMD_TILE):
      __m256 vn${N} = _mm256_fmadd_ps(vx${N}, vlog2e, vmagic_bias);

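    // Create a floating-point number s (scale) such that s == 2**n for valid inputs. The magic bias is chosen
    // so that the low bits of vn hold the biased exponent of 2**n; shifting them into the exponent field
    // reconstructs the scale directly.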
    $for N in range(SIMD_TILE):
      const __m256 vs${N} = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn${N}), 23));

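    // Subtract the large magic bias back to get the final n := round(x / log(2)) as a float.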
    $for N in range(SIMD_TILE):
      vn${N} = _mm256_sub_ps(vn${N}, vmagic_bias);

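    // Compute reduced argument t := x - n * log(2).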
    $for N in range(SIMD_TILE):
      __m256 vt${N} = _mm256_fmadd_ps(vn${N}, vminus_ln2, vx${N});

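    // Compute degree-5 polynomial approximation for exp(t) on [-log(2)/2, log(2)/2].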
    $for N in range(SIMD_TILE):
      __m256 vp${N} = _mm256_fmadd_ps(vc5, vt${N}, vc4);

    $for N in range(SIMD_TILE):
      vp${N} = _mm256_fmadd_ps(vp${N}, vt${N}, vc3);

    $for N in range(SIMD_TILE):
      vp${N} = _mm256_fmadd_ps(vp${N}, vt${N}, vc2);

    $for N in range(SIMD_TILE):
      vp${N} = _mm256_fmadd_ps(vp${N}, vt${N}, vc1);

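    // Reconstruct the exp(x - max) value:
    //   f = s * (1 + t * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))))
    //     = s + (t * s) * (c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))))
    //     = s + (t * s) * p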
    $for N in range(SIMD_TILE):
      vt${N} = _mm256_mul_ps(vt${N}, vs${N});

    $for N in range(SIMD_TILE):
      __m256 vf${N} = _mm256_fmadd_ps(vt${N}, vp${N}, vs${N});

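    // For inputs below the denormal cutoff, replace the output with +0.0f.
    // Note that for NaN inputs the comparison result is false, so NaN outputs are left unchanged.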
    $for N in range(SIMD_TILE):
      vf${N} = _mm256_andnot_ps(_mm256_cmp_ps(vx${N}, vdenorm_cutoff, _CMP_LT_OS), vf${N});

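    // Store ${ELEMENTS_TILE} (${SIMD_TILE}x8) outputs at a time.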
    _mm256_storeu_ps(output, vf0);
    $for N in range(1, SIMD_TILE):
      _mm256_storeu_ps(output + ${N * 8}, vf${N});
    output += ${ELEMENTS_TILE};

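    // Accumulate computed exponents.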
    $for N in range(SIMD_TILE):
      vacc${N % ACCUMULATORS} = _mm256_add_ps(vacc${N % ACCUMULATORS}, vf${N});
  }
  $if ACCUMULATORS > 1:
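    // Add up all accumulators to vacc0.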
    $ACC_SLICE = 1
    $while ACC_SLICE < ACCUMULATORS:
      $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
        $if A + ACC_SLICE < ACCUMULATORS:
          vacc${A} = _mm256_add_ps(vacc${A}, vacc${A + ACC_SLICE});
      $ACC_SLICE *= 2

  __m256 vacc = vacc0;
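  // Handle any remaining full groups of 8 elements with the same exp(x - max) computation as the main loop.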
  for (; elements >= 8 * sizeof(float); elements -= 8 * sizeof(float)) {
    const __m256 vi = _mm256_loadu_ps(input);
    input += 8;

    const __m256 vx = _mm256_sub_ps(vi, vi_max);

    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);

    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));

    vn = _mm256_sub_ps(vn, vmagic_bias);

    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);

    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);

    vt = _mm256_mul_ps(vt, vs);
    __m256 vf = _mm256_fmadd_ps(vt, vp, vs);

    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);

    _mm256_storeu_ps(output, vf);
    output += 8;

    vacc = _mm256_add_ps(vacc, vf);
  }
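  // Handle the final 1..7 elements with a masked load and partial stores.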
  if (elements != 0) {
    assert(elements >= 1 * sizeof(float));
    assert(elements <= 7 * sizeof(float));
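    // Prepare a mask for the valid 32-bit elements (depends on the number of remaining elements).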
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p5.mask_table[7] - elements));

    const __m256 vi = _mm256_maskload_ps(input, vmask);

    const __m256 vx = _mm256_sub_ps(vi, vi_max);

    __m256 vn = _mm256_fmadd_ps(vx, vlog2e, vmagic_bias);

    const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));

    vn = _mm256_sub_ps(vn, vmagic_bias);

    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vx);

    __m256 vp = _mm256_fmadd_ps(vc5, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_fmadd_ps(vp, vt, vc1);

    vt = _mm256_mul_ps(vt, vs);
    __m256 vf = _mm256_fmadd_ps(vt, vp, vs);

    vf = _mm256_andnot_ps(_mm256_cmp_ps(vx, vdenorm_cutoff, _CMP_LT_OS), vf);

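    // Store up to 7 outputs: a 4-element chunk, then a 2-element chunk, then a single element, as needed.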
    __m128 vf_lo = _mm256_castps256_ps128(vf);
    if (elements & (4 * sizeof(float))) {
      _mm_storeu_ps(output, vf_lo);
      vf_lo = _mm256_extractf128_ps(vf, 1);
      output += 4;
    }
    if (elements & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) output, vf_lo);
      vf_lo = _mm_movehl_ps(vf_lo, vf_lo);
      output += 2;
    }
    if (elements & (1 * sizeof(float))) {
      _mm_store_ss(output, vf_lo);
    }

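    // Accumulate the computed exponents, masking out the lanes that lie past the end of the input.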
    vacc = _mm256_add_ps(vacc, _mm256_and_ps(vf, _mm256_castsi256_ps(vmask)));
  }
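  // Reduce the 8 accumulator lanes to a single scalar sum and store it.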
  __m128 vacc_lo = _mm_add_ps(_mm256_castps256_ps128(vacc), _mm256_extractf128_ps(vacc, 1));
  vacc_lo = _mm_add_ps(vacc_lo, _mm_movehl_ps(vacc_lo, vacc_lo));
  vacc_lo = _mm_add_ss(vacc_lo, _mm_movehdup_ps(vacc_lo));
  _mm_store_ss(sum, vacc_lo);
  _mm256_zeroupper();
}