/* slide_hash_neon.c -- Optimized hash table shifting for ARM with support for NEON instructions
 * Copyright (C) 2017-2020 Mika T. Lindqvist
 *
 * Authors:
 * Mika T. Lindqvist <[email protected]>
 * Jun He <[email protected]>
 *
 * For conditions of distribution and use, see copyright notice in zlib.h
 */

#if defined(ARM_NEON_SLIDEHASH)
#ifdef _M_ARM64
#  include <arm64_neon.h>
#else
#  include <arm_neon.h>
#endif
#include "../../zbuild.h"
#include "../../deflate.h"
#include "../../fallback_builtins.h"
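/* NOTE: vld1q_u16_x4/vst1q_u16_x4 are ACLE multi-vector intrinsics that some
 * older compilers lack, and vqsubq_u16_x4_x1 is not a standard intrinsic at
 * all; fallback_builtins.h is expected to supply the missing ones, with
 * vqsubq_u16_x4_x1 applying vqsubq_u16 to each of the four vectors of a
 * uint16x8x4_t. */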

/* SIMD version of hash_chain rebase */
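/* When deflate slides its match window down by wsize bytes, every position
 * stored in a hash table must be rebased by the same amount. vqsubq_u16
 * subtracts with unsigned saturation, so entries that would point below the
 * new window start clamp to 0 (NIL) instead of wrapping. A scalar sketch of
 * the same rebase, for reference only:
 *
 *     for (uint32_t i = 0; i < entries; i++) {
 *         Pos m = table[i];
 *         table[i] = (Pos)(m >= wsize ? m - wsize : 0);
 *     }
 *
 * Processing two uint16x8x4_t groups (64 entries) per iteration is a
 * deliberate unroll; it can help hide load/store latency. */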
static inline void slide_hash_chain(Pos *table, uint32_t entries, uint16_t wsize) {
    Z_REGISTER uint16x8_t v;
    uint16x8x4_t p0, p1;
    Z_REGISTER size_t n;

    size_t size = entries*sizeof(table[0]);
    Assert((size % (sizeof(uint16x8_t) * 8) == 0), "hash table size err");

    Assert(sizeof(Pos) == 2, "Wrong Pos size");
    v = vdupq_n_u16(wsize);   /* broadcast the subtrahend to all 8 lanes */

    n = size / (sizeof(uint16x8_t) * 8);   /* 128 bytes consumed per iteration */
    do {
        /* Load 2x32 entries, rebase them with a saturating subtract and
         * store them back. */
        p0 = vld1q_u16_x4(table);
        p1 = vld1q_u16_x4(table+32);
        vqsubq_u16_x4_x1(p0, p0, v);
        vqsubq_u16_x4_x1(p1, p1, v);
        vst1q_u16_x4(table, p0);
        vst1q_u16_x4(table+32, p1);
        table += 64;
    } while (--n);
}

Z_INTERNAL void slide_hash_neon(deflate_state *s) {
    unsigned int wsize = s->w_size;

    /* head: one entry per hash bucket; prev: one entry per window position */
    slide_hash_chain(s->head, HASH_SIZE, wsize);
    slide_hash_chain(s->prev, wsize, wsize);
}
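/* In zlib-ng this is the slide_hash implementation selected through the
 * runtime functable when NEON support is detected; the generic C version is
 * used otherwise. */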
#endif