/*
 * Derived from crc32c.c version 1.1 by Mark Adler.
 *
 * Copyright (C) 2013 Mark Adler
 *
 * This software is provided 'as-is', without any express or implied warranty.
 * In no event will the author be held liable for any damages arising from the
 * use of this software.
 *
 * Permission is granted to anyone to use this software for any purpose,
 * including commercial applications, and to alter it and redistribute it
 * freely, subject to the following restrictions:
 *
 * 1. The origin of this software must not be misrepresented; you must not
 *    claim that you wrote the original software. If you use this software
 *    in a product, an acknowledgment in the product documentation would be
 *    appreciated but is not required.
 * 2. Altered source versions must be plainly marked as such, and must not be
 *    misrepresented as being the original software.
 * 3. This notice may not be removed or altered from any source distribution.
 *
 * Mark Adler
 * madler@alumni.caltech.edu
 */

#include <sys/cdefs.h>
/*
 * This file is compiled in userspace in order to run ATF unit tests.
 */
#ifndef _KERNEL
#include <stdint.h>
#include <stdlib.h>
#else
#include <sys/param.h>
#include <sys/kernel.h>
#endif
#include <sys/gsb_crc32.h>

static __inline uint32_t
_mm_crc32_u8(uint32_t x, uint8_t y)
{
	/*
	 * clang (at least 3.9.[0-1]) pessimizes "rm" (y) and "m" (y)
	 * significantly and "r" (y) a lot by copying y to a different
	 * local variable (on the stack or in a register), so only use
	 * the latter.  This costs a register and an instruction but
	 * not a uop.
	 */
	__asm("crc32b %1,%0" : "+r" (x) : "r" (y));
	return (x);
}

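/*
 * Wrappers for the wider SSE4.2 crc32 forms, named after the Intel
 * intrinsics they stand in for: amd64 consumes 8 bytes per crc32q,
 * while i386 consumes 4 bytes per crc32l.
 */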
#ifdef __amd64__
static __inline uint64_t
_mm_crc32_u64(uint64_t x, uint64_t y)
{
	__asm("crc32q %1,%0" : "+r" (x) : "r" (y));
	return (x);
}
#else
static __inline uint32_t
_mm_crc32_u32(uint32_t x, uint32_t y)
{
	__asm("crc32l %1,%0" : "+r" (x) : "r" (y));
	return (x);
}
#endif

/* CRC-32C (iSCSI) polynomial in reversed bit order. */
#define POLY	0x82f63b78

/*
 * Block sizes for three-way parallel crc computation.  LONG and SHORT must
 * both be powers of two.
 */
#define LONG	128
#define SHORT	64

/*
 * Tables for updating a crc for LONG, 2 * LONG, SHORT and 2 * SHORT bytes
 * of value 0 later in the input stream, in the same way that the hardware
 * would, but in software without calculating intermediate steps.
 */
static uint32_t crc32c_long[4][256];
static uint32_t crc32c_2long[4][256];
static uint32_t crc32c_short[4][256];
static uint32_t crc32c_2short[4][256];
/*
 * Multiply a matrix times a vector over the Galois field of two elements,
 * GF(2).  Each element is a bit in an unsigned integer.  mat must have at
 * least as many entries as the power of two for the most significant one
 * bit in vec.
 */
static inline uint32_t
gf2_matrix_times(uint32_t *mat, uint32_t vec)
{
	uint32_t sum;

	sum = 0;
	while (vec) {
		if (vec & 1)
			sum ^= *mat;
		vec >>= 1;
		mat++;
	}
	return (sum);
}

/*
 * Multiply a matrix by itself over GF(2).  Both mat and square must have 32
 * rows.
 */
static inline void
gf2_matrix_square(uint32_t *square, uint32_t *mat)
{
	int n;

	for (n = 0; n < 32; n++)
		square[n] = gf2_matrix_times(mat, mat[n]);
}

/*
 * Construct an operator to apply len zeros to a crc.  len must be a power of
 * two.  If len is not a power of two, then the result is the same as for the
 * largest power of two less than len.  The result for len == 0 is the same as
 * for len == 1.  A version of this routine could be easily written for any
 * len, but that is not needed for this application.
 */
static void
crc32c_zeros_op(uint32_t *even, size_t len)
{
	uint32_t odd[32];	/* odd-power-of-two zeros operator */
	uint32_t row;
	int n;

	/* put operator for one zero bit in odd */
	odd[0] = POLY;		/* CRC-32C polynomial */
	row = 1;
	for (n = 1; n < 32; n++) {
		odd[n] = row;
		row <<= 1;
	}

	/* put operator for two zero bits in even */
	gf2_matrix_square(even, odd);

	/* put operator for four zero bits in odd */
	gf2_matrix_square(odd, even);

	/*
	 * first square will put the operator for one zero byte (eight zero
	 * bits), in even -- next square puts operator for two zero bytes in
	 * odd, and so on, until len has been rotated down to zero
	 */
	do {
		gf2_matrix_square(even, odd);
		len >>= 1;
		if (len == 0)
			return;
		gf2_matrix_square(odd, even);
		len >>= 1;
	} while (len);

	/* answer ended up in odd -- copy to even */
	for (n = 0; n < 32; n++)
		even[n] = odd[n];
}

/*
 * Take a length and build four lookup tables for applying the zeros operator
 * for that length, byte-by-byte on the operand.
 */
static void
crc32c_zeros(uint32_t zeros[][256], size_t len)
{
	uint32_t op[32];
	uint32_t n;

	crc32c_zeros_op(op, len);
	for (n = 0; n < 256; n++) {
		zeros[0][n] = gf2_matrix_times(op, n);
		zeros[1][n] = gf2_matrix_times(op, n << 8);
		zeros[2][n] = gf2_matrix_times(op, n << 16);
		zeros[3][n] = gf2_matrix_times(op, n << 24);
	}
}

/* Apply the zeros operator table to crc. */
static inline uint32_t
crc32c_shift(uint32_t zeros[][256], uint32_t crc)
{

	return (zeros[0][crc & 0xff] ^ zeros[1][(crc >> 8) & 0xff] ^
	    zeros[2][(crc >> 16) & 0xff] ^ zeros[3][crc >> 24]);
}
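
/*
 * Note: crc32c_shift(crc32c_long, crc) produces the crc that the hardware
 * would compute after processing LONG zero bytes starting from state crc,
 * using only four table lookups.  The *_2long and *_2short tables do the
 * same for 2 * LONG and 2 * SHORT zero bytes; they are what allows the
 * three partial crcs from the parallel loops below to be recombined.
 */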

/* Initialize tables for shifting crcs. */
static void
#ifndef _KERNEL
__attribute__((__constructor__))
#endif
crc32c_init_hw(void)
{
	crc32c_zeros(crc32c_long, LONG);
	crc32c_zeros(crc32c_2long, 2 * LONG);
	crc32c_zeros(crc32c_short, SHORT);
	crc32c_zeros(crc32c_2short, 2 * SHORT);
}
#ifdef _KERNEL
SYSINIT(crc32c_sse42, SI_SUB_LOCK, SI_ORDER_ANY, crc32c_init_hw, NULL);
#endif

/* Compute CRC-32C using the Intel hardware instruction. */
uint32_t
sse42_crc32c(uint32_t crc, const unsigned char *buf, unsigned len)
{
#ifdef __amd64__
	const size_t align = 8;
#else
	const size_t align = 4;
#endif
	const unsigned char *next, *end;
#ifdef __amd64__
	uint64_t crc0, crc1, crc2;
#else
	uint32_t crc0, crc1, crc2;
#endif

	next = buf;
	crc0 = crc;

	/* Compute the crc to bring the data pointer to an aligned boundary. */
	while (len && ((uintptr_t)next & (align - 1)) != 0) {
		crc0 = _mm_crc32_u8(crc0, *next);
		next++;
		len--;
	}

#if LONG > SHORT
	/*
	 * Compute the crc on sets of LONG*3 bytes, executing three independent
	 * crc instructions, each on LONG bytes -- this is optimized for the
	 * Nehalem, Westmere, Sandy Bridge, and Ivy Bridge architectures, which
	 * have a throughput of one crc per cycle, but a latency of three
	 * cycles.
	 */
	crc = 0;
	while (len >= LONG * 3) {
		crc1 = 0;
		crc2 = 0;
		end = next + LONG;
		do {
#ifdef __amd64__
			crc0 = _mm_crc32_u64(crc0, *(const uint64_t *)next);
			crc1 = _mm_crc32_u64(crc1,
			    *(const uint64_t *)(next + LONG));
			crc2 = _mm_crc32_u64(crc2,
			    *(const uint64_t *)(next + (LONG * 2)));
#else
			crc0 = _mm_crc32_u32(crc0, *(const uint32_t *)next);
			crc1 = _mm_crc32_u32(crc1,
			    *(const uint32_t *)(next + LONG));
			crc2 = _mm_crc32_u32(crc2,
			    *(const uint32_t *)(next + (LONG * 2)));
#endif
			next += align;
		} while (next < end);
		/*-
		 * Update the crc.  Try to do it in parallel with the inner
		 * loop.  'crc' is used to accumulate crc0 and crc1
		 * produced by the inner loop so that the next iteration
		 * of the loop doesn't depend on anything except crc2.
		 *
		 * The full expression for the update is:
		 *     crc = S*S*S*crc + S*S*crc0 + S*crc1
		 * where the terms are polynomials modulo the CRC polynomial.
		 * We regroup this subtly as:
		 *     crc = S*S * (S*crc + crc0) + S*crc1.
		 * This has an extra dependency which reduces possible
		 * parallelism for the expression, but it turns out to be
		 * best to intentionally delay evaluation of this expression
		 * so that it competes less with the inner loop.
		 *
		 * We also intentionally reduce parallelism by feeding back
		 * crc2 to the inner loop as crc0 instead of accumulating
		 * it in crc.  This synchronizes the loop with the crc update.
		 * CPU and/or compiler schedulers produced bad order without
		 * this.
		 *
		 * Shifts take about 12 cycles each, so 3 here with 2
		 * parallelizable take about 24 cycles and the crc update
		 * takes slightly longer.  8 dependent crc32 instructions
		 * can run in 24 cycles, so the 3-way blocking is worse
		 * than useless for sizes less than 8 * <word size> = 64
		 * on amd64.  In practice, SHORT = 32 confirms these
		 * timing calculations by giving a small improvement
		 * starting at size 96.  Then the inner loop takes about
		 * 12 cycles and the crc update about 24, but these are
		 * partly in parallel so the total time is less than the
		 * 36 cycles that 12 dependent crc32 instructions would
		 * take.
		 *
		 * To have a chance of completely hiding the overhead for
		 * the crc update, the inner loop must take considerably
		 * longer than 24 cycles.  LONG = 64 makes the inner loop
		 * take about 24 cycles, so is not quite large enough.
		 * LONG = 128 works OK.  Unhideable overheads are about
		 * 12 cycles per inner loop.  All assuming timing like
		 * Haswell.
		 */
		crc = crc32c_shift(crc32c_long, crc) ^ crc0;
		crc1 = crc32c_shift(crc32c_long, crc1);
		crc = crc32c_shift(crc32c_2long, crc) ^ crc1;
		crc0 = crc2;
		next += LONG * 2;
		len -= LONG * 3;
	}
	crc0 ^= crc;
#endif /* LONG > SHORT */

	/*
	 * Do the same thing, but now on SHORT*3 blocks for the remaining data
	 * less than a LONG*3 block
	 */
	crc = 0;
	while (len >= SHORT * 3) {
		crc1 = 0;
		crc2 = 0;
		end = next + SHORT;
		do {
#ifdef __amd64__
			crc0 = _mm_crc32_u64(crc0, *(const uint64_t *)next);
			crc1 = _mm_crc32_u64(crc1,
			    *(const uint64_t *)(next + SHORT));
			crc2 = _mm_crc32_u64(crc2,
			    *(const uint64_t *)(next + (SHORT * 2)));
#else
			crc0 = _mm_crc32_u32(crc0, *(const uint32_t *)next);
			crc1 = _mm_crc32_u32(crc1,
			    *(const uint32_t *)(next + SHORT));
			crc2 = _mm_crc32_u32(crc2,
			    *(const uint32_t *)(next + (SHORT * 2)));
#endif
			next += align;
		} while (next < end);
		crc = crc32c_shift(crc32c_short, crc) ^ crc0;
		crc1 = crc32c_shift(crc32c_short, crc1);
		crc = crc32c_shift(crc32c_2short, crc) ^ crc1;
		crc0 = crc2;
		next += SHORT * 2;
		len -= SHORT * 3;
	}
	crc0 ^= crc;

	/* Compute the crc on the remaining bytes at native word size. */
	end = next + (len - (len & (align - 1)));
	while (next < end) {
#ifdef __amd64__
		crc0 = _mm_crc32_u64(crc0, *(const uint64_t *)next);
#else
		crc0 = _mm_crc32_u32(crc0, *(const uint32_t *)next);
#endif
		next += align;
	}
	len &= (align - 1);

	/* Compute the crc for any trailing bytes. */
	while (len) {
		crc0 = _mm_crc32_u8(crc0, *next);
		next++;
		len--;
	}

	return ((uint32_t)crc0);
}
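
/*
 * Illustrative only: a minimal sketch of how sse42_crc32c() might be
 * exercised from userspace, using the standard CRC-32C check vector
 * ("123456789" -> 0xe3069283 with an all-ones initial value and final
 * inversion).  The CRC32C_EXAMPLE guard is not part of this file's build;
 * it exists only so the sketch does not affect the kernel or ATF builds.
 */
#if defined(CRC32C_EXAMPLE) && !defined(_KERNEL)
#include <stdio.h>

int
main(void)
{
	static const unsigned char msg[] = "123456789";
	uint32_t crc;

	/* crc32c_init_hw() already ran as a constructor in userspace. */
	crc = sse42_crc32c(~0U, msg, sizeof(msg) - 1) ^ ~0U;
	printf("crc32c(\"123456789\") = 0x%08x (expect 0xe3069283)\n", crc);
	return (crc == 0xe3069283 ? 0 : 1);
}
#endif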