// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE >= 16
$assert BATCH_TILE % 16 == 0
$SIMD_TILE = BATCH_TILE // 16
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

$if AVX:
  #include <immintrin.h>
$else:
  #include <tmmintrin.h>

#include <xnnpack/common.h>
$if AVX:
  #include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/lut.h>
$if not AVX:
  #include <xnnpack/unaligned.h>


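// Applies an arbitrary 8-bit -> 8-bit lookup table to n bytes: y[i] = t[x[i]].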
void xnn_x8_lut_ukernel__${"avx" if AVX else "ssse3"}_x${BATCH_TILE}(
    size_t n,
    const uint8_t* x,
    uint8_t* y,
    const uint8_t t[restrict XNN_MIN_ELEMENTS(256)])
{
  assert(n != 0);
  assert(x != NULL);
  assert(y != NULL);

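  // Load the 256-byte lookup table as 16 vectors of 16 bytes each.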
  const __m128i vt0 = _mm_load_si128((const __m128i*) t);
  $for T in range(1, 16):
    const __m128i vt${ABC[T]} = _mm_load_si128((const __m128i*) (t + ${T * 16}));

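  // Rewrite the table as XOR deltas: vtable0 = vt0, vtable[T] = vt[T-1] ^ vt[T] for
  // T = 1..7, with vtable[T-8] additionally XOR-ed in for T = 8..15. XOR-ing one
  // PSHUFB lookup per sub-table then telescopes to the original table entry t[i].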
  const __m128i vtable0 = vt0;
  $for T in range(1, 8):
    const __m128i vtable${ABC[T]} = _mm_xor_si128(vt${ABC[T-1]}, vt${ABC[T]});
  $for T in range(8, 16):
    const __m128i vtable${ABC[T]} = _mm_xor_si128(_mm_xor_si128(vt${ABC[T-1]}, vt${ABC[T]}), vtable${ABC[T-8]});

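  // Offset subtracted from the indices before each of the 15 remaining lookups.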
  const __m128i voffset = _mm_set1_epi8(16);
  $if BATCH_TILE > 16:
    for (; n >= ${BATCH_TILE} * sizeof(uint8_t); n -= ${BATCH_TILE} * sizeof(uint8_t)) {
      __m128i vx0 = _mm_loadu_si128((const __m128i*) x);
      $for N in range(1, SIMD_TILE):
        __m128i vx${N} = _mm_loadu_si128((const __m128i*) (x + ${N * 16}));
      x += ${BATCH_TILE};

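      // First lookup: PSHUFB returns vtable0[vx & 15] in lanes with bit 7 clear
      // and zero in lanes where bit 7 of the index byte is set.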
      $for N in range(SIMD_TILE):
        __m128i vy${N} = _mm_shuffle_epi8(vtable0, vx${N});

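      // Steps 1-8 use a wrapping subtract: for inputs in [128, 255] the index byte
      // wraps past -128 back into PSHUFB range, and the vtable[T-8] terms XOR-ed
      // into the upper tables cancel the extra contributions this produces.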
      $for T in range(1, 9):
        $for N in range(SIMD_TILE):
          vx${N} = _mm_sub_epi8(vx${N}, voffset);
        $for N in range(SIMD_TILE):
          vy${N} = _mm_xor_si128(vy${N}, _mm_shuffle_epi8(vtable${ABC[T]}, vx${N}));

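      // Steps 9-15 use a saturating subtract so indices that reach -128 stay there
      // (bit 7 set) instead of wrapping back into range a second time.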
      $for T in range(9, 16):
        $for N in range(SIMD_TILE):
          vx${N} = _mm_subs_epi8(vx${N}, voffset);
        $for N in range(SIMD_TILE):
          vy${N} = _mm_xor_si128(vy${N}, _mm_shuffle_epi8(vtable${ABC[T]}, vx${N}));

      _mm_storeu_si128((__m128i*) y, vy0);
      $for N in range(1, SIMD_TILE):
        _mm_storeu_si128((__m128i*) (y + ${N * 16}), vy${N});
      y += ${BATCH_TILE};
    }
  for (; n >= 16 * sizeof(uint8_t); n -= 16 * sizeof(uint8_t)) {
    __m128i vx = _mm_loadu_si128((const __m128i*) x);
    x += 16;

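    // XOR-accumulate the 16 per-sub-table PSHUFB lookups for a single vector.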
    __m128i vy = _mm_shuffle_epi8(vtable0, vx);

    $for T in range(1, 9):
      vx = _mm_sub_epi8(vx, voffset);
      vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable${ABC[T]}, vx));

    $for T in range(9, 16):
      vx = _mm_subs_epi8(vx, voffset);
      vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable${ABC[T]}, vx));

    _mm_storeu_si128((__m128i*) y, vy);
    y += 16;
  }
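  // Translate the final 1-15 bytes: load a full 16-byte vector (reading past the
  // last input byte), then store the low n result bytes in progressively smaller
  // pieces according to the bits of n.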
  if XNN_UNLIKELY(n != 0) {
    __m128i vx = _mm_loadu_si128((const __m128i*) x);

    __m128i vy = _mm_shuffle_epi8(vtable0, vx);

    $for T in range(1, 9):
      vx = _mm_sub_epi8(vx, voffset);
      vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable${ABC[T]}, vx));

    $for T in range(9, 16):
      vx = _mm_subs_epi8(vx, voffset);
      vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable${ABC[T]}, vx));

    if (n & (8 * sizeof(uint8_t))) {
      _mm_storel_epi64((__m128i*) y, vy);
      vy = _mm_unpackhi_epi64(vy, vy);
      y += 8;
    }
    if (n & (4 * sizeof(uint8_t))) {
      $if AVX:
        _mm_storeu_si32(y, vy);
      $else:
        unaligned_store_u32(y, (uint32_t) _mm_cvtsi128_si32(vy));
      vy = _mm_srli_epi64(vy, 32);
      y += 4;
    }
    $if AVX:
      if (n & (2 * sizeof(uint8_t))) {
        _mm_storeu_si16(y, vy);
        vy = _mm_srli_epi32(vy, 16);
        y += 2;
      }
      if (n & (1 * sizeof(uint8_t))) {
        *y = (uint8_t) _mm_extract_epi8(vy, 0);
      }
    $else:
      uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
      if (n & (2 * sizeof(uint8_t))) {
        unaligned_store_u16(y, (uint16_t) vy_lo);
        vy_lo >>= 16;
        y += 2;
      }
      if (n & (1 * sizeof(uint8_t))) {
        *y = (uint8_t) vy_lo;
      }
  }
}