Lines Matching +full:1 +full:- +full:v0
2 // Accelerated CRC-T10DIF using arm64 NEON and Crypto Extensions instructions
5 // Copyright (C) 2019-2024 Google LLC
17 // Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions
65 // /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
72 .arch armv8-a+crypto
91 pmull2 \c64\().1q, \a16\().2d, \b64\().2d
92 pmull \b64\().1q, \a16\().1d, \b64\().1d
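The two pmull/pmull2 lines above are, judging by the \a16/\b64/\c64 operand names, the body of a helper that multiplies 16-bit values (held in the 64-bit lanes of one register) by 64-bit values: pmull works on the low 64-bit lanes, pmull2 on the high ones, and each result fits in 80 bits because one operand has only 16 significant bits. A minimal bit-serial C sketch of the underlying 64x64 carry-less (polynomial) multiply, for reference only; the function name is illustrative:

#include <stdint.h>

/*
 * Bit-serial 64x64 -> 128-bit carry-less multiply, the operation that a
 * single pmull/pmull2 performs on one pair of 64-bit lanes.  The result is
 * returned as two 64-bit halves.
 */
static void clmul64(uint64_t a, uint64_t b, uint64_t *lo, uint64_t *hi)
{
	uint64_t rl = 0, rh = 0;

	for (int i = 0; i < 64; i++) {
		if (b & ((uint64_t)1 << i)) {
			rl ^= a << i;
			rh ^= i ? a >> (64 - i) : 0;
		}
	}
	*lo = rl;
	*hi = rh;
}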
96 * Pairwise long polynomial multiplication of two 16-bit values
100 * by two 64-bit values
114 * 1 (w0*x1 ^ w1*x0) << 8 ^ | (y0*z1 ^ y1*z0) << 8 ^
128 * and after performing 8x8->16 bit long polynomial multiplication of
130 * we obtain the following four vectors of 16-bit elements:
141 * 80-bit results.
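Lines 96-141 (only partially matched here; line 114 is the rank-1 row of the decomposition table) describe the fallback used when the 64-bit PMULL form is unavailable: a 16x64-bit carry-less product is assembled from 8x8-bit products, one per byte rank, giving the 80-bit results mentioned above. A hedged C sketch of that decomposition, with illustrative names (clmul8, clmul16x64), assuming the byte-rank scheme outlined in the comment:

#include <stdint.h>

/* 8x8 -> 16-bit carry-less multiply; one NEON pmull on byte lanes does
 * eight of these at once. */
static uint16_t clmul8(uint8_t a, uint8_t b)
{
	uint16_t r = 0;

	for (int i = 0; i < 8; i++)
		if (b & (1 << i))
			r ^= (uint16_t)a << i;
	return r;
}

/*
 * 16x64 -> 80-bit carry-less multiply built from 8x8 products: the 16-bit
 * value w = w0 + w1*x^8 times each byte x_i of the 64-bit value contributes
 * (w0*x_i) << 8i ^ (w1*x_i) << 8(i+1).  Result returned as lo/hi halves of
 * a 128-bit value.
 */
static void clmul16x64(uint16_t w, uint64_t x, uint64_t *lo, uint64_t *hi)
{
	uint8_t w0 = w & 0xff, w1 = w >> 8;
	uint64_t rl = 0, rh = 0;

	for (int i = 0; i < 8; i++) {
		uint8_t xi = (x >> (8 * i)) & 0xff;
		uint32_t term = clmul8(w0, xi) ^ ((uint32_t)clmul8(w1, xi) << 8);

		/* XOR the (at most 23-bit) term in at bit position 8*i */
		rl ^= (uint64_t)term << (8 * i);
		if (i >= 6)
			rh ^= (uint64_t)term >> (64 - 8 * i);
	}
	*lo = rl;
	*hi = rh;
}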
149 ext t7.16b, \b64\().16b, \b64\().16b, #1
223 CPU_LE( rev64 v0.16b, v0.16b )
231 CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 )
243 eor v0.16b, v0.16b, v8.16b
252 // While >= 128 data bytes remain (not counting v0-v7), fold the 128
253 // bytes v0-v7 into them, storing the result back into v0-v7.
255 fold_32_bytes \p, v0, v1
263 // Now fold the 112 bytes in v0-v6 into the 16 bytes in v7.
268 fold_16_bytes \p, v0, v4
271 fold_16_bytes \p, v3, v7, 1
274 fold_16_bytes \p, v5, v7, 1
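Lines 252-274 are the folding phase: an accumulator A = A_hi*x^64 + A_lo is advanced past a block of data using A*x^128 == A_hi*(x^192 mod G) + A_lo*(x^128 mod G) (mod G), so the state stays 128 bits wide while remaining congruent to the whole message. Below is a hedged bit-serial C sketch of one 16-byte fold; per the comments above, the main loop actually keeps eight such accumulators (v0-v7) with constants for the larger 128-byte stride, but the identity is the same. Names are illustrative; the 16-byte-stride constants are the 0xa010/0x1faa pair listed near the end of the file:

#include <stdint.h>

/* Bit-serial 64x64 -> 128-bit carry-less multiply (same as the earlier sketch). */
static void clmul64(uint64_t a, uint64_t b, uint64_t *lo, uint64_t *hi)
{
	*lo = *hi = 0;
	for (int i = 0; i < 64; i++)
		if (b & ((uint64_t)1 << i)) {
			*lo ^= a << i;
			*hi ^= i ? a >> (64 - i) : 0;
		}
}

/*
 * One fold step: acc (128 bits, {lo, hi} halves of the polynomial) is
 * advanced past 16 message bytes.  Because
 *   acc*x^128 == acc_hi*(x^192 mod G) ^ acc_lo*(x^128 mod G)   (mod G),
 * the result stays 128 bits wide while remaining congruent to the full
 * message.  k128/k192 are the per-stride constants from the table at the
 * bottom of the file (0xa010 and 0x1faa for a 16-byte stride).
 */
static void fold16(uint64_t acc[2], const uint64_t data[2],
		   uint64_t k128, uint64_t k192)
{
	uint64_t lo0, hi0, lo1, hi1;

	clmul64(acc[0], k128, &lo0, &hi0);	/* low  half * (x^128 mod G) */
	clmul64(acc[1], k192, &lo1, &hi1);	/* high half * (x^192 mod G) */
	acc[0] = lo0 ^ lo1 ^ data[0];
	acc[1] = hi0 ^ hi1 ^ data[1];
}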
282 adds len, len, #(128-16)
291 CPU_LE( rev64 v0.16b, v0.16b )
292 CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 )
293 eor v7.16b, v7.16b, v0.16b
304 // Reduce the last '16 + len' bytes where 1 <= len <= 15 and the first
310 // v0 = last 16 original data bytes
312 ldr q0, [buf, #-16]
313 CPU_LE( rev64 v0.16b, v0.16b )
314 CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 )
316 // v1 = high order part of second chunk: v7 left-shifted by 'len' bytes.
322 // v3 = first chunk: v7 right-shifted by '16-len' bytes.
327 // Convert to 8-bit masks: 'len' 0x00 bytes, then '16-len' 0xff bytes.
330 // v2 = second chunk: 'len' bytes from v0 (low-order bytes),
331 // then '16-len' bytes from v1 (high-order bytes).
332 bsl v2.16b, v1.16b, v0.16b
335 pmull16x64_\p fold_consts, v3, v0
336 eor v7.16b, v3.16b, v0.16b
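Lines 304-336 handle a final partial block: when 1 <= len <= 15 bytes remain beyond the data already folded into v7, the (16+len)-byte sequence formed by v7 followed by those bytes has its leading len bytes sitting exactly 128 bits above its trailing 16 bytes, so one more across-16-bytes fold finishes the job. The assembly builds the two chunks with tbl index vectors and bsl; below is a heavily hedged byte-array sketch of just the chunk construction, ignoring the little-endian byte reversal, after which 'first' would be folded into 'second' exactly as in the fold16() sketch above:

#include <stdint.h>
#include <string.h>

/*
 * Conceptual splitting of the final partial block, most significant byte
 * first: 'acc' is the 16-byte accumulator and 'tail' holds the last 'len'
 * (1..15) message bytes.  acc||tail (16+len bytes) equals
 * first*x^128 ^ second, so 'first' can then be folded into 'second' with
 * the ordinary across-16-bytes constants (0xa010 / 0x1faa).
 */
static void split_partial(const uint8_t acc[16], const uint8_t *tail, int len,
			  uint8_t first[16], uint8_t second[16])
{
	memset(first, 0, 16);
	memcpy(first + 16 - len, acc, len);	/* leading 'len' bytes, right-aligned */
	memcpy(second, acc + len, 16 - len);	/* low 16-len bytes of acc ...        */
	memcpy(second + 16 - len, tail, len);	/* ... followed by the tail bytes     */
}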
351 movi v0.16b, #0
352 mov v0.h[7], init_crc
353 eor v7.16b, v7.16b, v0.16b
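Lines 351-353 fold the caller's running CRC into the data instead of tracking it separately: after the byte reversal, halfword 7 is the most significant 16 bits of the first block, so the XOR adds init_crc * x^(8*n - 16) to the message polynomial M(x) of an n-byte buffer. Since the routine ultimately returns x^16 * M(x) mod G(x) (line 432), this gives

    (M(x) + init_crc * x^(8*n - 16)) * x^16  =  M(x) * x^16  +  init_crc * x^(8*n)

('+' denoting XOR, as in the comments above), which matches the usual MSB-first, non-reflected CRC definition with initial value init_crc.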
355 // Load the fold-across-16-bytes constants.
374 frame_push 1
376 // Compose { 0,0,0,0, 8,8,8,8, 1,1,1,1, 9,9,9,9 }
378 orr perm.2s, #1, lsl #16
379 orr perm.2s, #1, lsl #24
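Lines 376-379 build the tbl permutation used by the 8-bit-pmull fallback. Only part of the sequence is matched here; assuming a preceding movi that puts 8 into bytes 1 and 3 of each 32-bit lane (consistent with the target pattern in the comment) and a pair of zip1 instructions afterwards, the construction can be checked with a small C model:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Sketch of how the permutation vector commented above can be built: each
 * 32-bit lane starts as bytes {0, 8, 0, 8} (from an assumed movi that is
 * not among the matched lines), the two orr-immediates above then OR 1 into
 * bytes 2 and 3, giving the seed {0, 8, 1, 9}, and interleaving the vector
 * with itself twice repeats every seed byte four times.
 */
int main(void)
{
	uint8_t v[16] = { 0, 8, 0, 8, 0, 8, 0, 8 };	/* assumed movi perm.4h, #8, lsl #8 */

	v[2] |= 1; v[6] |= 1;			/* orr perm.2s, #1, lsl #16 */
	v[3] |= 1; v[7] |= 1;			/* orr perm.2s, #1, lsl #24 */

	for (int pass = 0; pass < 2; pass++) {	/* two zip1 perm, perm, perm steps */
		uint8_t t[16];

		for (int i = 0; i < 8; i++)
			t[2 * i] = t[2 * i + 1] = v[i];
		memcpy(v, t, 16);
	}

	for (int i = 0; i < 16; i++)
		printf("%d ", v[i]);		/* 0 0 0 0 8 8 8 8 1 1 1 1 9 9 9 9 */
	printf("\n");
	return 0;
}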
402 // Reduce the 128-bit value M(x), stored in v7, to the final 16-bit CRC.
410 // x^64. This produces a 128-bit value congruent to x^64 * M(x) and
412 ext v0.16b, v2.16b, v7.16b, #8
413 pmull2 v7.1q, v7.2d, fold_consts.2d // high bits * x^48 * (x^80 mod G(x))
414 eor v0.16b, v0.16b, v7.16b // + low bits * x^64
416 // Fold the high 32 bits into the low 96 bits. This produces a 96-bit
418 ext v1.16b, v0.16b, v2.16b, #12 // extract high 32 bits
419 mov v0.s[3], v2.s[0] // zero high 32 bits
420 pmull v1.1q, v1.1d, fold_consts.1d // high 32 bits * x^48 * (x^48 mod G(x))
421 eor v0.16b, v0.16b, v1.16b // + low bits
427 pmull2 v1.1q, v0.2d, fold_consts.2d // high 32 bits * floor(x^48 / G(x))
429 pmull v1.1q, v1.1d, fold_consts.1d // *= G(x)
430 ushr v0.2d, v0.2d, #48
431 eor v0.16b, v0.16b, v1.16b // + low 16 nonzero bits
432 // Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of v0.
434 umov w0, v0.h[0]
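Lines 402-434 shrink the 128-bit folded value M(x) in v7 to the 16-bit CRC with two more carry-less multiplies and a Barrett reduction; per the comment at line 432, the answer is (x^16 * M(x)) mod G(x). A plain bit-serial C reference for that value (no Barrett trick, just long division), useful for checking the fast path:

#include <stdint.h>

/*
 * Bit-serial reference for the final reduction: given the 128-bit folded
 * value M(x) as hi/lo 64-bit halves, return (x^16 * M(x)) mod G(x), with
 * G(x) = 0x18bb7 (the polynomial listed near the bottom of the file).
 */
static uint16_t reduce128(uint64_t m_hi, uint64_t m_lo)
{
	uint32_t rem = 0;	/* 17-bit working remainder */

	for (int i = 0; i < 128 + 16; i++) {
		int bit;

		if (i < 64)
			bit = (m_hi >> (63 - i)) & 1;
		else if (i < 128)
			bit = (m_lo >> (127 - i)) & 1;
		else
			bit = 0;	/* the trailing multiplication by x^16 */

		rem = (rem << 1) | bit;
		if (rem & 0x10000)
			rem ^= 0x18bb7;
	}
	return (uint16_t)rem;
}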
442 // G(x) = x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1 + x^0
453 .quad 0x000000000000a010 // x^(1*128) mod G(x)
454 .quad 0x0000000000001faa // x^(1*128+64) mod G(x)
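The two constants above, like the larger-stride entries elsewhere in the table, are simply x^k mod G(x) for the distance being folded across. A small C generator that should reproduce them from the polynomial given at line 442:

#include <stdint.h>
#include <stdio.h>

/* Compute x^k mod G(x) over GF(2) for G(x) = 0x18bb7 (degree 16). */
static uint16_t xpow_mod_g(unsigned int k)
{
	uint32_t r = 1;			/* the polynomial "1" */

	while (k--) {
		r <<= 1;		/* multiply by x      */
		if (r & 0x10000)
			r ^= 0x18bb7;	/* reduce mod G(x)    */
	}
	return (uint16_t)r;
}

int main(void)
{
	printf("%#06x %#06x\n", xpow_mod_g(128), xpow_mod_g(192));
	/* expected to print the two constants above: 0xa010 0x1faa */
	return 0;
}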
462 // For 1 <= len <= 15, the 16-byte vector beginning at &byteshift_table[16 -
464 // ..., 0x80} XOR the index vector to shift right by '16 - len' bytes.
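The property described above can be modelled with a few lines of C: a tbl index byte with its top bit set selects zero, so a single index vector doubles as both shift lookups once it is XORed with 0x80. The index formula below is derived from the stated property rather than copied from the (unmatched) table contents, so treat it as an assumption; 'up'/'down' refer to byte-index order, which in the byte-reversed representation this code uses should correspond to the left/right shifts mentioned in the comments:

#include <stdint.h>

/* Emulate the NEON tbl instruction: out-of-range indices (here, any index
 * with the top bit set) produce zero bytes. */
static void tbl16(uint8_t dst[16], const uint8_t src[16], const uint8_t idx[16])
{
	for (int i = 0; i < 16; i++)
		dst[i] = (idx[i] < 16) ? src[idx[i]] : 0;
}

/*
 * One way to realize the stated property, for 1 <= len <= 15:
 * idx[i] = ((i - len) & 0xf), with 0x80 ORed in for the positions that must
 * read as zero.  Used as-is the vector moves source bytes up by 'len'
 * positions; XORed with 0x80 it instead moves them down by '16 - len'
 * positions, because the XOR exactly swaps the valid and the zeroed lanes.
 */
static void make_shift_idx(uint8_t idx[16], int len)
{
	for (int i = 0; i < 16; i++)
		idx[i] = ((i - len) & 0xf) | (i < len ? 0x80 : 0);
}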