// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE >= 64
$assert BATCH_TILE % 64 == 0
$SIMD_TILE = BATCH_TILE // 64
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/lut.h>
#include <xnnpack/common.h>


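// Applies a 256-entry byte lookup table to n bytes of x and writes the result
// to y. VPSHUFB only indexes within 16-byte lanes, so the table is split into
// sixteen 16-byte sub-tables that are probed in sequence and folded together
// with XOR.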
void xnn_x8_lut_ukernel__avx512skx_vpshufb_x${BATCH_TILE}(
    size_t n,
    const uint8_t* x,
    uint8_t* y,
    const uint8_t t[restrict XNN_MIN_ELEMENTS(256)])
{
  assert(n != 0);
  assert(x != NULL);
  assert(y != NULL);

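  // Load the 256-byte table as sixteen 16-byte sub-tables, each broadcast to
  // all four 128-bit lanes of a 512-bit register.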
  const __m512i vt0 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) t));
  $for T in range(1, 16):
    const __m512i vt${ABC[T]} = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + ${T * 16})));

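  // Pre-XOR adjacent sub-tables so successive VPSHUFB lookups telescope: after
  // the round in which an index lands in [0, 15], the accumulated XOR is
  // exactly the matching table entry. For T >= 8, vtable[T-8] is folded in as
  // well to cancel the lookups picked up by indices >= 128, whose signed bytes
  // wrap back into the non-negative range during the first eight subtractions.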
  const __m512i vtable0 = vt0;
  $for T in range(1, 8):
    const __m512i vtable${ABC[T]} = _mm512_xor_si512(vt${ABC[T-1]}, vt${ABC[T]});
  $for T in range(8, 16):
    const __m512i vtable${ABC[T]} = _mm512_xor_si512(_mm512_xor_si512(vt${ABC[T-1]}, vt${ABC[T]}), vtable${ABC[T-8]});

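  // Each lookup round subtracts 16 from the indices so the next sub-table
  // covers the next 16-entry slice of the table; VPSHUFB returns zero for
  // index bytes with the sign bit set, so out-of-range indices leave the
  // accumulated result unchanged.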
  const __m512i voffset = _mm512_set1_epi8(16);
  $if BATCH_TILE > 64:
    for (; n >= ${BATCH_TILE} * sizeof(uint8_t); n -= ${BATCH_TILE} * sizeof(uint8_t)) {
      __m512i vx0 = _mm512_loadu_si512(x);
      $for N in range(1, SIMD_TILE):
        __m512i vx${N} = _mm512_loadu_si512(x + ${N * 64});
      x += ${BATCH_TILE};

      $for N in range(SIMD_TILE):
        __m512i vy${N} = _mm512_shuffle_epi8(vtable0, vx${N});

      $for T in range(1, 9):
        $for N in range(SIMD_TILE):
          vx${N} = _mm512_sub_epi8(vx${N}, voffset);
        $for N in range(SIMD_TILE):
          vy${N} = _mm512_xor_si512(vy${N}, _mm512_shuffle_epi8(vtable${ABC[T]}, vx${N}));

      $for T in range(9, 16):
        $for N in range(SIMD_TILE):
          vx${N} = _mm512_subs_epi8(vx${N}, voffset);
        $for N in range(SIMD_TILE):
          vy${N} = _mm512_xor_si512(vy${N}, _mm512_shuffle_epi8(vtable${ABC[T]}, vx${N}));

      _mm512_storeu_si512(y, vy0);
      $for N in range(1, SIMD_TILE):
        _mm512_storeu_si512(y + ${N * 64}, vy${N});
      y += ${BATCH_TILE};
    }
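  // Process full 64-byte blocks one vector at a time.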
  for (; n >= 64 * sizeof(uint8_t); n -= 64 * sizeof(uint8_t)) {
    __m512i vx = _mm512_loadu_si512(x);
    x += 64;

    __m512i vy = _mm512_shuffle_epi8(vtable0, vx);

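    // Rounds 1-8: shift indices down by 16 with wrapping subtraction and fold
    // in the next sub-table; indices still out of range have the sign bit set,
    // so their VPSHUFB result is zero and the XOR is a no-op.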
    $for T in range(1, 9):
      vx = _mm512_sub_epi8(vx, voffset);
      vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable${ABC[T]}, vx));

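    // Rounds 9-15: switch to saturating subtraction so indices that were
    // already consumed saturate at -128 and keep producing zero lookups
    // instead of wrapping back into the non-negative range.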
    $for T in range(9, 16):
      vx = _mm512_subs_epi8(vx, voffset);
      vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable${ABC[T]}, vx));

    _mm512_storeu_si512(y, vy);
    y += 64;
  }
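  // Handle the final 1-63 bytes with byte-granular masked loads and stores so
  // no memory outside the caller's buffers is touched.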
  if XNN_UNLIKELY(n != 0) {
    assert(n < 64);
    const __mmask64 vmask = _cvtu64_mask64((uint64_t) ((UINT64_C(1) << n) - UINT64_C(1)));

    __m512i vx = _mm512_maskz_loadu_epi8(vmask, x);

    __m512i vy = _mm512_shuffle_epi8(vtable0, vx);

    $for T in range(1, 9):
      vx = _mm512_sub_epi8(vx, voffset);
      vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable${ABC[T]}, vx));

    $for T in range(9, 16):
      vx = _mm512_subs_epi8(vx, voffset);
      vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable${ABC[T]}, vx));

    _mm512_mask_storeu_epi8(y, vmask, vy);
  }
}