// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert CHANNEL_TILE % 8 == 0
$assert CHANNEL_TILE >= 8
$assert ROW_TILE >= 1
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/math.h>
#include <xnnpack/prelu.h>

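// Mask table for the partial-tile remainder: loading 8 consecutive int32 values
// starting c bytes before &mask_table[7] yields a vector whose first
// c / sizeof(float) lanes are all-ones (-1) and whose remaining lanes are zero.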
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};

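// PReLU microkernel: out = in for non-negative inputs, in * w otherwise
// (selected by sign bit), with one weight per channel. Each pass processes up
// to ${ROW_TILE} rows and ${CHANNEL_TILE} channels.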
void xnn_f32_prelu_ukernel__avx_${ROW_TILE}x${CHANNEL_TILE}(
    size_t rows,
    size_t channels,
    const float*restrict input,
    size_t input_stride,
    const float*restrict weights,
    float*restrict output,
    size_t output_stride)
{
  assert(rows != 0);
  assert(channels != 0);
  assert(channels % sizeof(float) == 0);

  const float* i0 = input;
  float* o0 = output;
  $for M in range(1, ROW_TILE):
    const float* i${M} = (const float*) ((uintptr_t) i${M-1} + input_stride);
    float* o${M} = (float*) ((uintptr_t) o${M-1} + output_stride);

  const size_t input_increment = input_stride * ${ROW_TILE} - channels;
  const size_t output_increment = output_stride * ${ROW_TILE} - channels;

  do {
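    // If fewer than ${ROW_TILE} rows remain, alias the extra row pointers to
    // the previous row; the duplicate rows then redundantly recompute and
    // overwrite the same output, which is harmless. The <= and < forms of the
    // check are equivalent.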
    $for M in range(1, ROW_TILE):
      $if M % 2 == 0:
        if XNN_UNPREDICTABLE(rows <= ${M}) {
          i${M} = i${M-1};
          o${M} = o${M-1};
        }
      $else:
        if XNN_UNPREDICTABLE(rows < ${M+1}) {
          i${M} = i${M-1};
          o${M} = o${M-1};
        }

    const float* w = weights;
    size_t c = channels;
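    // Main loop: process ${CHANNEL_TILE} channels at a time across all ${ROW_TILE} rows.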
    for (; c >= ${CHANNEL_TILE} * sizeof(float); c -= ${CHANNEL_TILE} * sizeof(float)) {
      const __m256 vw${ABC[0:8]} = _mm256_load_ps(w);
      $for C in range(8, CHANNEL_TILE, 8):
        const __m256 vw${ABC[C:C+8]} = _mm256_load_ps(w + ${C});
      w += ${CHANNEL_TILE};

      $for M in range(ROW_TILE):
        const __m256 vi${M}x${ABC[0:8]} = _mm256_loadu_ps(i${M});
        $for C in range(8, CHANNEL_TILE, 8):
          const __m256 vi${M}x${ABC[C:C+8]} = _mm256_loadu_ps(i${M} + ${C});
        i${M} += ${CHANNEL_TILE};

      $for M in range(ROW_TILE):
        $for C in range(0, CHANNEL_TILE, 8):
          const __m256 vprod${M}x${ABC[C:C+8]} = _mm256_mul_ps(vi${M}x${ABC[C:C+8]}, vw${ABC[C:C+8]});

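      // _mm256_blendv_ps selects the product lane wherever the input's sign
      // bit is set: negative inputs take in * w, non-negative inputs pass through.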
      $for M in range(ROW_TILE):
        $for C in range(0, CHANNEL_TILE, 8):
          const __m256 vacc${M}x${ABC[C:C+8]} = _mm256_blendv_ps(vi${M}x${ABC[C:C+8]}, vprod${M}x${ABC[C:C+8]}, vi${M}x${ABC[C:C+8]});

      $for M in range(ROW_TILE):
        _mm256_storeu_ps(o${M}, vacc${M}x${ABC[0:8]});
        $for C in range(8, CHANNEL_TILE, 8):
          _mm256_storeu_ps(o${M} + ${C}, vacc${M}x${ABC[C:C+8]});
        o${M} += ${CHANNEL_TILE};
    }
    $if CHANNEL_TILE > 8:
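      // Remainder loop: process 8 channels (one __m256) at a time; generated
      // only when CHANNEL_TILE exceeds the vector width of 8.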
      for (; c >= 8 * sizeof(float); c -= 8 * sizeof(float)) {
        const __m256 vw = _mm256_load_ps(w);
        w += 8;

        $for M in range(ROW_TILE):
          const __m256 vi${M} = _mm256_loadu_ps(i${M});
          i${M} += 8;

        $for M in range(ROW_TILE):
          const __m256 vprod${M} = _mm256_mul_ps(vi${M}, vw);

        $for M in range(ROW_TILE):
          const __m256 vacc${M} = _mm256_blendv_ps(vi${M}, vprod${M}, vi${M});

        $for M in range(ROW_TILE):
          _mm256_storeu_ps(o${M}, vacc${M});
          o${M} += 8;
      }
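    // Tail: 1-7 channels remain; use masked loads and partial stores.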
    if XNN_UNLIKELY(c != 0) {
      assert(c >= 1 * sizeof(float));
      assert(c <= 7 * sizeof(float));
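      // Build a per-lane mask with the low c / sizeof(float) lanes set (see
      // mask_table above).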
      const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - c));

      const __m256 vw = _mm256_maskload_ps(w, vmask);

      $for M in range(ROW_TILE):
        const __m256 vi${M} = _mm256_maskload_ps(i${M}, vmask);
        i${M} = (const float*) ((uintptr_t) i${M} + c);

      $for M in range(ROW_TILE):
        const __m256 vprod${M} = _mm256_mul_ps(vi${M}, vw);

      $for M in range(ROW_TILE):
        __m256 vacc${M} = _mm256_blendv_ps(vi${M}, vprod${M}, vi${M});

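      // Store the remaining 1-7 elements per row, 4/2/1 floats at a time,
      // working from the low 128-bit half of the accumulator.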
      $for M in range(ROW_TILE):
        __m128 vacc${M}_lo = _mm256_castps256_ps128(vacc${M});
      if (c & (4 * sizeof(float))) {
        $for M in range(ROW_TILE):
          _mm_storeu_ps(o${M}, vacc${M}_lo);

        $for M in range(ROW_TILE):
          vacc${M}_lo = _mm256_extractf128_ps(vacc${M}, 1);

        $for M in range(ROW_TILE):
          o${M} += 4;
      }
      if (c & (2 * sizeof(float))) {
        $for M in range(ROW_TILE):
          _mm_storel_pi((__m64*) o${M}, vacc${M}_lo);

        $for M in range(ROW_TILE):
          vacc${M}_lo = _mm_movehl_ps(vacc${M}_lo, vacc${M}_lo);

        $for M in range(ROW_TILE):
          o${M} += 2;
      }
      if (c & (1 * sizeof(float))) {
        $for M in range(ROW_TILE):
          _mm_store_ss(o${M}, vacc${M}_lo);

        $for M in range(ROW_TILE):
          o${M} += 1;
      }
    }
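    // Advance the row pointers past the rows just processed; doz() is
    // difference-or-zero, i.e. saturating subtraction.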
    $for M in range(ROW_TILE):
      i${M} = (const float*) ((uintptr_t) i${M} + input_increment);
      o${M} = (float*) ((uintptr_t) o${M} + output_increment);
    rows = doz(rows, ${ROW_TILE});
  } while (rows != 0);
}