// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert CHANNEL_TILE % 8 == 0
$assert CHANNEL_TILE >= 8
$assert ROW_TILE >= 1
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
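// ${ABC} supplies the lane-index characters (01234567, 89ABCDEF, ...) used to
// suffix the names of the per-8-lane vector variables generated below.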
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/prelu.h>


void xnn_f16_prelu_ukernel__f16c_${ROW_TILE}x${CHANNEL_TILE}(
    size_t rows,
    size_t channels,
    const void* restrict input,
    size_t input_stride,
    const void* restrict weights,
    void* restrict output,
    size_t output_stride) XNN_OOB_READS
{
  assert(rows != 0);
  assert(channels != 0);
  assert(channels % sizeof(uint16_t) == 0);
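  // channels (and the strides) are byte counts; the assert above requires the
  // channel count to cover a whole number of fp16 (uint16_t) elements.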
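  // Up to ${ROW_TILE} rows are processed per outer iteration; each additional
  // row's pointers are derived from the previous row's via the byte strides.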
  const uint16_t* i0 = (const uint16_t*) input;
  uint16_t* o0 = (uint16_t*) output;
  $for M in range(1, ROW_TILE):
    const uint16_t* i${M} = (const uint16_t*) ((uintptr_t) i${M-1} + input_stride);
    uint16_t* o${M} = (uint16_t*) ((uintptr_t) o${M-1} + output_stride);

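  // After one pass over a row's channels, each pointer sits at the end of its
  // row; adding stride * ${ROW_TILE} - channels moves it to the start of that
  // pointer's row in the next tile of rows.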
  const size_t input_increment = input_stride * ${ROW_TILE} - channels;
  const size_t output_increment = output_stride * ${ROW_TILE} - channels;

  do {
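    // When fewer than ${ROW_TILE} rows remain, alias the out-of-range row
    // pointers to the previous row: the redundant loads and stores then hit
    // the same row and are harmless. The <= and < forms below test the same
    // condition, merely spelled two ways.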
    $for M in range(1, ROW_TILE):
      $if M % 2 == 0:
        if XNN_UNPREDICTABLE(rows <= ${M}) {
          i${M} = i${M-1};
          o${M} = o${M-1};
        }
      $else:
        if XNN_UNPREDICTABLE(rows < ${M+1}) {
          i${M} = i${M-1};
          o${M} = o${M-1};
        }

    const uint16_t* w = (const uint16_t*) weights;
    size_t c = channels;
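    // Bulk of the row: widen fp16 weights and inputs to fp32 with F16C,
    // multiply, apply the PReLU select, and narrow the results back to fp16.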
    $if CHANNEL_TILE > 8:
      for (; c >= ${CHANNEL_TILE} * sizeof(uint16_t); c -= ${CHANNEL_TILE} * sizeof(uint16_t)) {
        const __m256 vw${ABC[0:8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) w));
        $for C in range(8, CHANNEL_TILE, 8):
          const __m256 vw${ABC[C:C+8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (w + ${C})));
        w += ${CHANNEL_TILE};

        $for M in range(ROW_TILE):
          const __m256 vi${M}x0${ABC[0:8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i${M}));
          $for C in range(8, CHANNEL_TILE, 8):
            const __m256 vi${M}x0${ABC[C:C+8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i${M} + ${C})));
          i${M} += ${CHANNEL_TILE};

        $for M in range(ROW_TILE):
          $for C in range(0, CHANNEL_TILE, 8):
            __m256 vacc${M}x0${ABC[C:C+8]} = _mm256_mul_ps(vi${M}x0${ABC[C:C+8]}, vw${ABC[C:C+8]});

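        // PReLU select: _mm256_blendv_ps takes the product where the input's
        // sign bit is set (input < 0) and the input itself otherwise, i.e.
        // out = x < 0 ? x * w : x. E.g. x = -2.0 with w = 0.25 yields -0.5.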
        $for M in range(ROW_TILE):
          $for C in range(0, CHANNEL_TILE, 8):
            vacc${M}x0${ABC[C:C+8]} = _mm256_blendv_ps(vi${M}x0${ABC[C:C+8]}, vacc${M}x0${ABC[C:C+8]}, vi${M}x0${ABC[C:C+8]});

        $for M in range(ROW_TILE):
          _mm_storeu_si128((__m128i*) o${M}, _mm256_cvtps_ph(vacc${M}x0${ABC[0:8]}, _MM_FROUND_NO_EXC));
          $for C in range(8, CHANNEL_TILE, 8):
            _mm_storeu_si128((__m128i*) (o${M} + ${C}), _mm256_cvtps_ph(vacc${M}x0${ABC[C:C+8]}, _MM_FROUND_NO_EXC));
          o${M} += ${CHANNEL_TILE};
      }
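    // Vector loop over the remaining channels, 8 fp16 elements at a time; the
    // steps mirror the tile loop above.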
    for (; c >= 8 * sizeof(uint16_t); c -= 8 * sizeof(uint16_t)) {
      const __m256 vw01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) w));
      w += 8;

      $for M in range(ROW_TILE):
        const __m256 vi${M}x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i${M}));
        i${M} += 8;

      $for M in range(ROW_TILE):
        __m256 vacc${M}x01234567 = _mm256_mul_ps(vi${M}x01234567, vw01234567);

      $for M in range(ROW_TILE):
        vacc${M}x01234567 = _mm256_blendv_ps(vi${M}x01234567, vacc${M}x01234567, vi${M}x01234567);

      $for M in range(ROW_TILE):
        _mm_storeu_si128((__m128i*) o${M}, _mm256_cvtps_ph(vacc${M}x01234567, _MM_FROUND_NO_EXC));
        o${M} += 8;
    }
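    // Tail of 1-7 channels: compute a full 8-wide vector (the XNN_OOB_READS
    // annotation on the kernel permits reading past the end of the buffers)
    // and store only the live lanes piecewise below.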
    if XNN_UNLIKELY(c != 0) {
      const __m256 vw01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) w));

      $for M in range(ROW_TILE):
        const __m256 vi${M}x01234567 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i${M}));
        i${M} = (const uint16_t*) ((uintptr_t) i${M} + c);

      $for M in range(ROW_TILE):
        __m256 vacc${M}x01234567 = _mm256_mul_ps(vi${M}x01234567, vw01234567);

      $for M in range(ROW_TILE):
        vacc${M}x01234567 = _mm256_blendv_ps(vi${M}x01234567, vacc${M}x01234567, vi${M}x01234567);

      $for M in range(ROW_TILE):
        __m128i vh${M}x01234567 = _mm256_cvtps_ph(vacc${M}x01234567, _MM_FROUND_NO_EXC);
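      // Store 4, 2, then 1 halves according to the set bits of the remaining
      // byte count, shifting consumed lanes out of vh after each store.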
      if (c & (4 * sizeof(uint16_t))) {
        $for M in range(ROW_TILE):
          _mm_storel_epi64((__m128i*) o${M}, vh${M}x01234567);

        $for M in range(ROW_TILE):
          vh${M}x01234567 = _mm_unpackhi_epi64(vh${M}x01234567, vh${M}x01234567);

        $for M in range(ROW_TILE):
          o${M} += 4;
      }
      if (c & (2 * sizeof(uint16_t))) {
        $for M in range(ROW_TILE):
          _mm_storeu_si32(o${M}, vh${M}x01234567);

        $for M in range(ROW_TILE):
          vh${M}x01234567 = _mm_srli_epi64(vh${M}x01234567, 32);

        $for M in range(ROW_TILE):
          o${M} += 2;
      }
      if (c & (1 * sizeof(uint16_t))) {
        $for M in range(ROW_TILE):
          *o${M} = (uint16_t) _mm_extract_epi16(vh${M}x01234567, 0);

        $for M in range(ROW_TILE):
          o${M} += 1;
      }
    }
    $for M in range(ROW_TILE):
      i${M} = (const uint16_t*) ((uintptr_t) i${M} + input_increment);
      o${M} = (uint16_t*) ((uintptr_t) o${M} + output_increment);
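    // doz() (from <xnnpack/math.h>) is saturating "decrement or zero"
    // subtraction, so rows never wraps below zero.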
    rows = doz(rows, ${ROW_TILE});
  } while (rows != 0);
}
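
// Illustrative call of a generated instantiation (a sketch: the 2x16 shape and
// the tensor sizes below are assumptions for the example, not part of this
// template). Note that channels and both strides are passed in bytes:
//
//   uint16_t input[3 * 16];    // 3 rows of 16 fp16 channels, bit-cast storage
//   uint16_t weights[16];      // one fp16 slope per channel
//   uint16_t output[3 * 16];
//   xnn_f16_prelu_ukernel__f16c_2x16(
//       /*rows=*/3,
//       /*channels=*/16 * sizeof(uint16_t),
//       input, /*input_stride=*/16 * sizeof(uint16_t),
//       weights,
//       output, /*output_stride=*/16 * sizeof(uint16_t));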