/*
 * Armv7-A specific checksum implementation using NEON
 *
 * Copyright (c) 2020, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "networking.h"
#include "../chksum_common.h"

#ifndef __ARM_NEON
#pragma GCC target("+simd")
#endif

#include <arm_neon.h>

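/*
 * Compute a 16-bit ones'-complement sum (RFC 1071 style) over nbytes
 * bytes at ptr: align to 8 bytes, accumulate 32-bit words pairwise into
 * 64-bit NEON lanes, then fold back down with end-around carries.
 * Callers that need the final Internet checksum typically take the
 * complement of the returned value.
 */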
unsigned short
__chksum_arm_simd(const void *ptr, unsigned int nbytes)
{
    bool swap = (uintptr_t) ptr & 1;
    uint64x1_t vsum = { 0 };

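    /*
     * For short buffers the scalar slurp_small path is used; below this
     * threshold the NEON setup and folding overhead is unlikely to pay
     * for itself.
     */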
    if (unlikely(nbytes < 40))
    {
        uint64_t sum = slurp_small(ptr, nbytes);
        return fold_and_swap(sum, false);
    }

    /* 8-byte align pointer */
    /* Inline slurp_head-like code since we use NEON here */
    Assert(nbytes >= 8);
    uint32_t off = (uintptr_t) ptr & 7;
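    /*
     * If ptr is misaligned, load the aligned doubleword containing it
     * and mask off the leading off bytes (the low-order bytes of a
     * little-endian load) so only buffer bytes seed the sum.
     */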
    if (likely(off != 0))
    {
        const uint64_t *may_alias ptr64 = align_ptr(ptr, 8);
        uint64x1_t vword64 = vld1_u64(ptr64);
        /* Get rid of bytes 0..off-1 */
        uint64x1_t vmask = vdup_n_u64(ALL_ONES);
        int64x1_t vshiftl = vdup_n_s64(CHAR_BIT * off);
        vmask = vshl_u64(vmask, vshiftl);
        vword64 = vand_u64(vword64, vmask);
        uint32x2_t vtmp = vreinterpret_u32_u64(vword64);
        /* Set accumulator */
        vsum = vpaddl_u32(vtmp);
        /* Update pointer and remaining size */
        ptr = (char *) ptr64 + 8;
        nbytes -= 8 - off;
    }
    Assert(((uintptr_t) ptr & 7) == 0);

    /* Sum groups of 64 bytes */
    uint64x2_t vsum0 = { 0, 0 };
    uint64x2_t vsum1 = { 0, 0 };
    uint64x2_t vsum2 = { 0, 0 };
    uint64x2_t vsum3 = { 0, 0 };
    const uint32_t *may_alias ptr32 = ptr;
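    /*
     * Four independent accumulators break the dependency chain on
     * vpadalq_u32, letting consecutive iterations overlap in the
     * pipeline.
     */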
    for (uint32_t i = 0; i < nbytes / 64; i++)
    {
        uint32x4_t vtmp0 = vld1q_u32(ptr32);
        uint32x4_t vtmp1 = vld1q_u32(ptr32 + 4);
        uint32x4_t vtmp2 = vld1q_u32(ptr32 + 8);
        uint32x4_t vtmp3 = vld1q_u32(ptr32 + 12);
        vsum0 = vpadalq_u32(vsum0, vtmp0);
        vsum1 = vpadalq_u32(vsum1, vtmp1);
        vsum2 = vpadalq_u32(vsum2, vtmp2);
        vsum3 = vpadalq_u32(vsum3, vtmp3);
        ptr32 += 16;
    }
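    /*
     * Each iteration adds at most 2^33 - 2 to a 64-bit lane, and the
     * loop runs fewer than 2^26 times for a 32-bit nbytes, so the
     * accumulators cannot overflow before the folds below.
     */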
    nbytes %= 64;

    /* Fold vsum1/vsum2/vsum3 into vsum0 */
    vsum0 = vpadalq_u32(vsum0, vreinterpretq_u32_u64(vsum2));
    vsum1 = vpadalq_u32(vsum1, vreinterpretq_u32_u64(vsum3));
    vsum0 = vpadalq_u32(vsum0, vreinterpretq_u32_u64(vsum1));
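    /*
     * Treating each 64-bit lane as two 32-bit halves and accumulating
     * them pairwise adds the carries (the high halves) back into the
     * sum: the end-around carry that keeps a ones'-complement total
     * exact.
     */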

    /* Add any trailing 16-byte groups */
    while (likely(nbytes >= 16))
    {
        uint32x4_t vtmp0 = vld1q_u32(ptr32);
        vsum0 = vpadalq_u32(vsum0, vtmp0);
        ptr32 += 4;
        nbytes -= 16;
    }
    Assert(nbytes < 16);

    /* Fold vsum0 into vsum */
    {
        /* 4xu32 (4x32b) -> 2xu64 (2x33b) */
        vsum0 = vpaddlq_u32(vreinterpretq_u32_u64(vsum0));
        /* 4xu32 (2x(1b+32b)) -> 2xu64 (2x(0b+32b)) */
        vsum0 = vpaddlq_u32(vreinterpretq_u32_u64(vsum0));
        /* Narrow 2xu64 (2x(0b+32b)) -> 2xu32 (2x32b) */
        Assert((vgetq_lane_u64(vsum0, 0) >> 32) == 0);
        Assert((vgetq_lane_u64(vsum0, 1) >> 32) == 0);
        uint32x2_t vtmp = vmovn_u64(vsum0);
        /* Add to accumulator */
        vsum = vpadal_u32(vsum, vtmp);
    }

    /* Add any trailing group of 8 bytes */
    if (nbytes & 8)
    {
        uint32x2_t vtmp = vld1_u32(ptr32);
        /* Add to accumulator */
        vsum = vpadal_u32(vsum, vtmp);
        ptr32 += 2;
        nbytes -= 8;
    }
    Assert(nbytes < 8);

    /* Handle any trailing 1..7 bytes */
    if (likely(nbytes != 0))
    {
        Assert(((uintptr_t) ptr32 & 7) == 0);
        Assert(nbytes < 8);
        uint64x1_t vword64 = vld1_u64((const uint64_t *) ptr32);
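        /*
         * The doubleword load above may read past the end of the
         * buffer, but the pointer is 8-byte aligned, so the load cannot
         * cross a page boundary; the mask below discards the extra
         * bytes.
         */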
        /* Get rid of bytes nbytes..7 */
        uint64x1_t vmask = vdup_n_u64(ALL_ONES);
        int64x1_t vshiftr = vdup_n_s64(-CHAR_BIT * (8 - nbytes));
        vmask = vshl_u64(vmask, vshiftr); /* Negative count shifts right */
        vword64 = vand_u64(vword64, vmask);
        /* Fold 64-bit sum to 33 bits */
        vword64 = vpaddl_u32(vreinterpret_u32_u64(vword64));
        /* Add to accumulator */
        vsum = vpadal_u32(vsum, vreinterpret_u32_u64(vword64));
    }

    /* Fold 64-bit vsum to 32 bits */
    vsum = vpaddl_u32(vreinterpret_u32_u64(vsum));
    vsum = vpaddl_u32(vreinterpret_u32_u64(vsum));
    Assert(vget_lane_u32(vreinterpret_u32_u64(vsum), 1) == 0);

    /* Fold 32-bit vsum to 16 bits */
    uint32x2_t vsum32 = vreinterpret_u32_u64(vsum);
    vsum32 = vpaddl_u16(vreinterpret_u16_u32(vsum32));
    vsum32 = vpaddl_u16(vreinterpret_u16_u32(vsum32));
    Assert(vget_lane_u16(vreinterpret_u16_u32(vsum32), 1) == 0);
    Assert(vget_lane_u16(vreinterpret_u16_u32(vsum32), 2) == 0);
    Assert(vget_lane_u16(vreinterpret_u16_u32(vsum32), 3) == 0);
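    /*
     * Two pairwise 16-bit folds implement the end-around carry at word
     * granularity: the second pass absorbs the carry bit produced by
     * the first, so the result fits in 16 bits (as the asserts check).
     */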

    /* Convert to 16-bit scalar */
    uint16_t sum = vget_lane_u16(vreinterpret_u16_u32(vsum32), 0);

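    /*
     * If the buffer started at an odd address, every 16-bit word was
     * summed one byte out of phase. The ones'-complement sum commutes
     * with byte swapping, so byte-swapping the folded result corrects
     * for the misalignment.
     */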
    if (unlikely(swap)) /* Odd base pointer is unexpected */
    {
        sum = bswap16(sum);
    }
    return sum;
}