/*
 * Armv7-A specific checksum implementation using NEON
 *
 * Copyright (c) 2020, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

#include "networking.h"
#include "../chksum_common.h"

#ifndef __ARM_NEON
#pragma GCC target("+simd")
#endif

#include <arm_neon.h>

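/*
 * Compute a 16-bit checksum of nbytes bytes at ptr using NEON: the data
 * is summed into wide accumulators and folded to 16 bits with end-around
 * carry, as in the ones'-complement Internet checksum (RFC 1071).  When
 * ptr is odd, the bytes are summed at a one-byte offset and the final
 * sum is byte-swapped to compensate.
 */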
unsigned short
__chksum_arm_simd(const void *ptr, unsigned int nbytes)
{
    bool swap = (uintptr_t) ptr & 1;
    uint64x1_t vsum = { 0 };

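    /*
     * Short buffers fall back to the generic scalar helpers from
     * chksum_common.h; below roughly 40 bytes the NEON setup and final
     * folds would likely cost more than they save (the exact threshold
     * is presumably a tuning choice rather than a hard requirement).
     */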
    if (unlikely(nbytes < 40))
    {
        uint64_t sum = slurp_small(ptr, nbytes);
        return fold_and_swap(sum, false);
    }

    /* 8-byte align pointer */
    /* Inline slurp_head-like code since we use NEON here */
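    /*
     * Note: when ptr is misaligned, the head is handled with an aligned
     * 8-byte load that starts up to 7 bytes below ptr.  The overread
     * stays within the same naturally aligned 8-byte granule (and hence
     * the same page), and the bytes before ptr are masked to zero so
     * they do not affect the sum.
     */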
    Assert(nbytes >= 8);
    uint32_t off = (uintptr_t) ptr & 7;
    if (likely(off != 0))
    {
        const uint64_t *may_alias ptr64 = align_ptr(ptr, 8);
        uint64x1_t vword64 = vld1_u64(ptr64);
        /* Get rid of bytes 0..off-1 */
        uint64x1_t vmask = vdup_n_u64(ALL_ONES);
        int64x1_t vshiftl = vdup_n_s64(CHAR_BIT * off);
        vmask = vshl_u64(vmask, vshiftl);
        vword64 = vand_u64(vword64, vmask);
        uint32x2_t vtmp = vreinterpret_u32_u64(vword64);
        /* Set accumulator */
        vsum = vpaddl_u32(vtmp);
        /* Update pointer and remaining size */
        ptr = (char *) ptr64 + 8;
        nbytes -= 8 - off;
    }
    Assert(((uintptr_t) ptr & 7) == 0);

    /* Sum groups of 64 bytes */
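    /*
     * Four independent accumulators let consecutive vpadalq operations
     * avoid a serial dependency on a single register.  vpadalq_u32 adds
     * adjacent pairs of 32-bit lanes into 64-bit lanes, so even for a
     * maximal 32-bit nbytes the per-lane totals stay well below 2^64.
     */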
    uint64x2_t vsum0 = { 0, 0 };
    uint64x2_t vsum1 = { 0, 0 };
    uint64x2_t vsum2 = { 0, 0 };
    uint64x2_t vsum3 = { 0, 0 };
    const uint32_t *may_alias ptr32 = ptr;
    for (uint32_t i = 0; i < nbytes / 64; i++)
    {
        uint32x4_t vtmp0 = vld1q_u32(ptr32);
        uint32x4_t vtmp1 = vld1q_u32(ptr32 + 4);
        uint32x4_t vtmp2 = vld1q_u32(ptr32 + 8);
        uint32x4_t vtmp3 = vld1q_u32(ptr32 + 12);
        vsum0 = vpadalq_u32(vsum0, vtmp0);
        vsum1 = vpadalq_u32(vsum1, vtmp1);
        vsum2 = vpadalq_u32(vsum2, vtmp2);
        vsum3 = vpadalq_u32(vsum3, vtmp3);
        ptr32 += 16;
    }
    nbytes %= 64;

    /* Fold vsum1/vsum2/vsum3 into vsum0 */
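    /*
     * Reinterpreting a 64-bit partial sum as two 32-bit halves and
     * pairwise-adding them folds it modulo 2^32 - 1.  Since 2^16 - 1
     * divides 2^32 - 1, this preserves the final 16-bit
     * ones'-complement result.
     */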
    vsum0 = vpadalq_u32(vsum0, vreinterpretq_u32_u64(vsum2));
    vsum1 = vpadalq_u32(vsum1, vreinterpretq_u32_u64(vsum3));
    vsum0 = vpadalq_u32(vsum0, vreinterpretq_u32_u64(vsum1));

    /* Add any trailing 16-byte groups */
    while (likely(nbytes >= 16))
    {
        uint32x4_t vtmp0 = vld1q_u32(ptr32);
        vsum0 = vpadalq_u32(vsum0, vtmp0);
        ptr32 += 4;
        nbytes -= 16;
    }
    Assert(nbytes < 16);

    /* Fold vsum0 into vsum */
    {
        /* Fold each 64-bit lane: 4xu32 (4x32b) -> 2xu64 (2x at most 33b) */
        vsum0 = vpaddlq_u32(vreinterpretq_u32_u64(vsum0));
        /* Fold again: 4xu32 (2x(1b+32b)) -> 2xu64 (2x at most 32b) */
        vsum0 = vpaddlq_u32(vreinterpretq_u32_u64(vsum0));
        /* Each 64-bit lane now fits in 32 bits, so narrowing is lossless */
        Assert((vgetq_lane_u64(vsum0, 0) >> 32) == 0);
        Assert((vgetq_lane_u64(vsum0, 1) >> 32) == 0);
        uint32x2_t vtmp = vmovn_u64(vsum0);
        /* Add to accumulator */
        vsum = vpadal_u32(vsum, vtmp);
    }

    /* Add any trailing group of 8 bytes */
    if (nbytes & 8)
    {
        uint32x2_t vtmp = vld1_u32(ptr32);
        /* Add to accumulator */
        vsum = vpadal_u32(vsum, vtmp);
        ptr32 += 2;
        nbytes -= 8;
    }
    Assert(nbytes < 8);

    /* Handle any trailing 1..7 bytes */
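    /*
     * As with the head, this does a full aligned 8-byte load and masks
     * off the bytes beyond the end of the buffer.  The overread of up to
     * 7 bytes stays within the same aligned 8-byte granule, so it cannot
     * cross a page boundary.
     */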
    if (likely(nbytes != 0))
    {
        Assert(((uintptr_t) ptr32 & 7) == 0);
        Assert(nbytes < 8);
        uint64x1_t vword64 = vld1_u64((const uint64_t *) ptr32);
        /* Get rid of bytes nbytes..7 */
        uint64x1_t vmask = vdup_n_u64(ALL_ONES);
        int64x1_t vshiftr = vdup_n_s64(-CHAR_BIT * (8 - nbytes));
        vmask = vshl_u64(vmask, vshiftr); /* Negative shift count = shift right */
        vword64 = vand_u64(vword64, vmask);
        /* Fold 64-bit sum to 33 bits */
        vword64 = vpaddl_u32(vreinterpret_u32_u64(vword64));
        /* Add to accumulator */
        vsum = vpadal_u32(vsum, vreinterpret_u32_u64(vword64));
    }

    /* Fold 64-bit vsum to 32 bits */
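    /*
     * Each pairwise add below folds the high half into the low half
     * ("end-around carry" in ones'-complement terms); doing it twice
     * ensures the carry out of the first fold is folded in as well.
     */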
    vsum = vpaddl_u32(vreinterpret_u32_u64(vsum));
    vsum = vpaddl_u32(vreinterpret_u32_u64(vsum));
    Assert(vget_lane_u32(vreinterpret_u32_u64(vsum), 1) == 0);

    /* Fold 32-bit vsum to 16 bits */
    uint32x2_t vsum32 = vreinterpret_u32_u64(vsum);
    vsum32 = vpaddl_u16(vreinterpret_u16_u32(vsum32));
    vsum32 = vpaddl_u16(vreinterpret_u16_u32(vsum32));
    Assert(vget_lane_u16(vreinterpret_u16_u32(vsum32), 1) == 0);
    Assert(vget_lane_u16(vreinterpret_u16_u32(vsum32), 2) == 0);
    Assert(vget_lane_u16(vreinterpret_u16_u32(vsum32), 3) == 0);

    /* Convert to 16-bit scalar */
    uint16_t sum = vget_lane_u16(vreinterpret_u16_u32(vsum32), 0);

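    /*
     * Summing from an odd base pointer swaps every byte's position
     * within its 16-bit word; the ones'-complement sum computed that way
     * is the byte swap of the true sum (a property noted in RFC 1071),
     * so bswap16 below recovers the correct result.
     */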
    if (unlikely(swap)) /* Odd base pointer is unexpected */
    {
        sum = bswap16(sum);
    }
    return sum;
}
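
/*
 * Usage sketch (illustrative only): when this helper is used to produce
 * an RFC 1071 Internet checksum, the caller would typically take the
 * ones' complement of the returned 16-bit sum, for example:
 *
 *     uint16_t cksum = (uint16_t) ~__chksum_arm_simd(buf, len);
 *
 * Here buf and len are placeholder names for the data being summed.
 */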