/*
 * AArch64-specific checksum implementation using NEON
 *
 * Copyright (c) 2020, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "networking.h"
#include "../chksum_common.h"

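/* arm_neon.h needs the Advanced SIMD extension; if this file is built
   without __ARM_NEON defined, enable it for the code below.  */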
#ifndef __ARM_NEON
#pragma GCC target("+simd")
#endif

#include <arm_neon.h>

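/*
 * Usage sketch (hypothetical caller; the actual declaration lives in
 * networking.h):
 *
 *     unsigned short csum = __chksum_aarch64_simd(buf, len);
 *
 * The return value is the RFC 1071 ones' complement sum of the buffer
 * folded to 16 bits; any final negation and insertion into a packet
 * header is assumed to be the caller's responsibility.
 */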
always_inline
static inline uint64_t
slurp_head64(const void **pptr, uint32_t *nbytes)
{
    Assert(*nbytes >= 8);
    uint64_t sum = 0;
    uint32_t off = (uintptr_t) *pptr % 8;
    if (likely(off != 0))
    {
        /* Get rid of bytes 0..off-1 */
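        /* Load the aligned 8-byte word containing them and zero them out
           with a mask; zero bytes contribute nothing to the sum, so this
           is equivalent to summing only bytes off..7.  */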
        const unsigned char *ptr64 = align_ptr(*pptr, 8);
        uint64_t mask = ALL_ONES << (CHAR_BIT * off);
        uint64_t val = load64(ptr64) & mask;
        /* Fold 64-bit sum to 33 bits */
        sum = val >> 32;
        sum += (uint32_t) val;
        *pptr = ptr64 + 8;
        *nbytes -= 8 - off;
    }
    return sum;
}

always_inline
static inline uint64_t
slurp_tail64(uint64_t sum, const void *ptr, uint32_t nbytes)
{
    Assert(nbytes < 8);
    if (likely(nbytes != 0))
    {
        /* Get rid of bytes nbytes..7 */
        uint64_t mask = ALL_ONES >> (CHAR_BIT * (8 - nbytes));
        Assert(__builtin_popcountl(mask) / CHAR_BIT == nbytes);
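        /* The caller in this file passes an 8-byte aligned ptr, so this
           full-width load stays within one aligned granule and cannot
           cross a page boundary, even though it reads up to 7 bytes past
           the logical end of the buffer.  */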
        uint64_t val = load64(ptr) & mask;
        sum += val >> 32;
        sum += (uint32_t) val;
        nbytes = 0;
    }
    Assert(nbytes == 0);
    return sum;
}

unsigned short
__chksum_aarch64_simd(const void *ptr, unsigned int nbytes)
{
    bool swap = (uintptr_t) ptr & 1;
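    /* Summing from an odd start address produces the same 16-bit result
       with its bytes swapped, so remember the misalignment and swap the
       folded sum back at the end.  */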
    uint64_t sum;

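    /* Very short buffers are not worth the vector setup; the 50-byte
       cutoff is presumably a tuning choice, and below it the generic
       small-buffer path (slurp_small) is used instead.  */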
    if (unlikely(nbytes < 50))
    {
        sum = slurp_small(ptr, nbytes);
        swap = false;
        goto fold;
    }

    /* 8-byte align pointer */
    Assert(nbytes >= 8);
    sum = slurp_head64(&ptr, &nbytes);
    Assert(((uintptr_t) ptr & 7) == 0);

    const uint32_t *may_alias ptr32 = ptr;
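    /* may_alias (from chksum_common.h) is presumably an aliasing-safe
       pointer attribute, so the caller's buffer may legally be read
       through uint32_t loads here.  */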

    uint64x2_t vsum0 = { 0, 0 };
    uint64x2_t vsum1 = { 0, 0 };
    uint64x2_t vsum2 = { 0, 0 };
    uint64x2_t vsum3 = { 0, 0 };
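    /* Four independent accumulators let the pairwise-add-accumulate
       instructions in the main loop overlap instead of serialising on a
       single register dependency.  */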

    /* Sum groups of 64 bytes */
    for (uint32_t i = 0; i < nbytes / 64; i++)
    {
        uint32x4_t vtmp0 = vld1q_u32(ptr32);
        uint32x4_t vtmp1 = vld1q_u32(ptr32 + 4);
        uint32x4_t vtmp2 = vld1q_u32(ptr32 + 8);
        uint32x4_t vtmp3 = vld1q_u32(ptr32 + 12);
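        /* vpadalq_u32 adds adjacent pairs of 32-bit lanes and accumulates
           the results into 64-bit lanes, so carries out of 32 bits are
           never lost.  */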
        vsum0 = vpadalq_u32(vsum0, vtmp0);
        vsum1 = vpadalq_u32(vsum1, vtmp1);
        vsum2 = vpadalq_u32(vsum2, vtmp2);
        vsum3 = vpadalq_u32(vsum3, vtmp3);
        ptr32 += 16;
    }
    nbytes %= 64;

    /* Fold vsum2 and vsum3 into vsum0 and vsum1 */
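    /* Each 64-bit lane is reinterpreted as two 32-bit halves and the
       halves are summed.  Replacing hi * 2^32 + lo with hi + lo changes
       the value but not its residue modulo 0xffff (2^32 == 1 mod 0xffff),
       which is all the final fold depends on.  */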
    vsum0 = vpadalq_u32(vsum0, vreinterpretq_u32_u64(vsum2));
    vsum1 = vpadalq_u32(vsum1, vreinterpretq_u32_u64(vsum3));

    /* Add any trailing group of 32 bytes */
    if (nbytes & 32)
    {
        uint32x4_t vtmp0 = vld1q_u32(ptr32);
        uint32x4_t vtmp1 = vld1q_u32(ptr32 + 4);
        vsum0 = vpadalq_u32(vsum0, vtmp0);
        vsum1 = vpadalq_u32(vsum1, vtmp1);
        ptr32 += 8;
        nbytes -= 32;
    }
    Assert(nbytes < 32);

    /* Fold vsum1 into vsum0 */
    vsum0 = vpadalq_u32(vsum0, vreinterpretq_u32_u64(vsum1));

    /* Add any trailing group of 16 bytes */
    if (nbytes & 16)
    {
        uint32x4_t vtmp = vld1q_u32(ptr32);
        vsum0 = vpadalq_u32(vsum0, vtmp);
        ptr32 += 4;
        nbytes -= 16;
    }
    Assert(nbytes < 16);

    /* Add any trailing group of 8 bytes */
    if (nbytes & 8)
    {
        uint32x2_t vtmp = vld1_u32(ptr32);
        vsum0 = vaddw_u32(vsum0, vtmp);
        ptr32 += 2;
        nbytes -= 8;
    }
    Assert(nbytes < 8);

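    /* Reduce the vector accumulator to a scalar: treat the two 64-bit
       lanes as four 32-bit halves and add them across the vector with a
       widening add (vaddlvq_u32 returns a 64-bit result), then fold the
       scalar to 33 bits as in slurp_head64.  */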
    uint64_t val = vaddlvq_u32(vreinterpretq_u32_u64(vsum0));
    sum += val >> 32;
    sum += (uint32_t) val;

    /* Handle any trailing 0..7 bytes */
    sum = slurp_tail64(sum, ptr32, nbytes);

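    /* fold_and_swap (chksum_common.h) is expected to fold the running sum
       down to 16 bits with end-around carry and byte-swap the result when
       the buffer started at an odd address.  */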
fold:
    return fold_and_swap(sum, swap);
}