// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE >= 32
$assert BATCH_TILE % 32 == 0
$SIMD_TILE = BATCH_TILE // 32
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/lut.h>
#include <xnnpack/common.h>


void xnn_x8_lut_ukernel__avx2_x${BATCH_TILE}(
    size_t n,
    const uint8_t* x,
    uint8_t* y,
    const uint8_t t[restrict XNN_MIN_ELEMENTS(256)])
{
  assert(n != 0);
  assert(x != NULL);
  assert(y != NULL);

  // Load the 256-entry table as 16 rows of 16 bytes, each broadcast to both
  // 128-bit lanes so that VPSHUFB performs the same lookup in either lane.
  const __m256i vt0 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) t));
  $for T in range(1, 16):
    const __m256i vt${ABC[T]} = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + ${T * 16})));

  // Pre-XOR adjacent rows so the 16 per-row VPSHUFB lookups can be combined
  // with XOR: for every input byte, all contributions except the entry from
  // its matching row cancel out. Rows 8-15 additionally fold in vtable[T-8]
  // to cancel the aliased lookups that indices >= 128 make while the shifted
  // index byte wraps through the 0-127 range.
  const __m256i vtable0 = vt0;
  $for T in range(1, 8):
    const __m256i vtable${ABC[T]} = _mm256_xor_si256(vt${ABC[T-1]}, vt${ABC[T]});
  $for T in range(8, 16):
    const __m256i vtable${ABC[T]} = _mm256_xor_si256(_mm256_xor_si256(vt${ABC[T-1]}, vt${ABC[T]}), vtable${ABC[T-8]});

  // Each step below shifts the indices down by one 16-entry row.
  const __m256i voffset = _mm256_set1_epi8(16);
  for (; n >= ${BATCH_TILE} * sizeof(uint8_t); n -= ${BATCH_TILE} * sizeof(uint8_t)) {
    __m256i vx0 = _mm256_loadu_si256((const __m256i*) x);
    $for N in range(1, SIMD_TILE):
      __m256i vx${N} = _mm256_loadu_si256((const __m256i*) (x + ${N * 32}));
    x += ${BATCH_TILE};

    $for N in range(SIMD_TILE):
      __m256i vy${N} = _mm256_shuffle_epi8(vtable0, vx${N});

    // Rows 1-8: wrapping subtraction keeps the index exact modulo 256, so
    // indices >= 128 eventually come back into VPSHUFB's 0-127 window.
    $for T in range(1, 9):
      $for N in range(SIMD_TILE):
        vx${N} = _mm256_sub_epi8(vx${N}, voffset);
      $for N in range(SIMD_TILE):
        vy${N} = _mm256_xor_si256(vy${N}, _mm256_shuffle_epi8(vtable${ABC[T]}, vx${N}));

    // Rows 9-15: saturating subtraction pins exhausted indices at -128, so
    // their sign bit stays set and VPSHUFB keeps producing zeros for them.
    $for T in range(9, 16):
      $for N in range(SIMD_TILE):
        vx${N} = _mm256_subs_epi8(vx${N}, voffset);
      $for N in range(SIMD_TILE):
        vy${N} = _mm256_xor_si256(vy${N}, _mm256_shuffle_epi8(vtable${ABC[T]}, vx${N}));

    _mm256_storeu_si256((__m256i*) y, vy0);
    $for N in range(1, SIMD_TILE):
      _mm256_storeu_si256((__m256i*) (y + ${N * 32}), vy${N});
    y += ${BATCH_TILE};
  }
  // 16-byte tail loop: same algorithm on the low 128-bit lane only.
  for (; n >= 16 * sizeof(uint8_t); n -= 16 * sizeof(uint8_t)) {
    __m128i vx = _mm_loadu_si128((const __m128i*) x);
    x += 16;

    __m128i vy = _mm_shuffle_epi8(_mm256_castsi256_si128(vtable0), vx);

    $for T in range(1, 9):
      vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
      vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable${ABC[T]}), vx));

    $for T in range(9, 16):
      vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
      vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable${ABC[T]}), vx));

    _mm_storeu_si128((__m128i*) y, vy);
    y += 16;
  }
  if XNN_UNLIKELY(n != 0) {
    // This loads a full 16 bytes; only the first n (< 16) results are stored.
    __m128i vx = _mm_loadu_si128((const __m128i*) x);

    __m128i vy = _mm_shuffle_epi8(_mm256_castsi256_si128(vtable0), vx);

    $for T in range(1, 9):
      vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
      vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable${ABC[T]}), vx));

    $for T in range(9, 16):
      vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
      vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable${ABC[T]}), vx));

    // Store the remaining 1-15 bytes, peeling vy in halves via the bits of n.
    if (n & (8 * sizeof(uint8_t))) {
      _mm_storel_epi64((__m128i*) y, vy);
      vy = _mm_unpackhi_epi64(vy, vy);
      y += 8;
    }
    if (n & (4 * sizeof(uint8_t))) {
      _mm_storeu_si32(y, vy);
      vy = _mm_srli_epi64(vy, 32);
      y += 4;
    }
    if (n & (2 * sizeof(uint8_t))) {
      _mm_storeu_si16(y, vy);
      vy = _mm_srli_epi32(vy, 16);
      y += 2;
    }
    if (n & (1 * sizeof(uint8_t))) {
      *y = (uint8_t) _mm_extract_epi8(vy, 0);
    }
  }
}
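
// Note: a minimal sketch of how a template like this is typically
// instantiated with XNNPACK's xngen generator. The exact script name, paths,
// and output file are assumptions for illustration, not taken from this file:
//
//   tools/xngen src/x8-lut/avx2.c.in -D BATCH_TILE=32 -o src/x8-lut/gen/lut-avx2-x32.c
//
// With BATCH_TILE=32 (so SIMD_TILE=1), each $for over SIMD_TILE expands to a
// single vx0/vy0 column; larger BATCH_TILE values unroll the main loop.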