Lines Matching +full:16 +full:g
96 * Pairwise long polynomial multiplication of two 16-bit values
115 * 2 (w0*x2 ^ w1*x1) << 16 ^ | (y0*z2 ^ y1*z1) << 16 ^
128 * and after performing 8x8->16 bit long polynomial multiplication of
130 * we obtain the following four vectors of 16-bit elements:
143 * (*) NOTE: the 16x64 bit polynomial multiply below is not equivalent
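The comment excerpts above (file lines 96-143) describe how the __pmull_p8_16x64 macro builds a 16x64-bit polynomial multiplication out of 8x8-bit PMULL operations, with the NOTE at line 143 cautioning that the macro's raw outputs are not a straight product on their own. As a point of reference only, a plain bit-serial carryless multiply of a 16-bit value by a 64-bit value can be sketched in C as below (clmul_16x64 is an illustrative name, not a kernel function):

#include <stdint.h>

/*
 * Illustrative reference: carryless (GF(2)) multiplication of a 16-bit
 * polynomial 'a' by a 64-bit polynomial 'b'.  The 80-bit product is
 * returned as hi:lo 64-bit halves.
 */
static void clmul_16x64(uint16_t a, uint64_t b, uint64_t *hi, uint64_t *lo)
{
	*hi = 0;
	*lo = 0;
	for (int i = 0; i < 16; i++) {
		if (a & (1u << i)) {
			*lo ^= b << i;
			if (i)
				*hi ^= b >> (64 - i);
		}
	}
}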
149 ext t7.16b, \b64\().16b, \b64\().16b, #1
150 tbl t5.16b, {\a16\().16b}, perm.16b
151 uzp1 t7.16b, \b64\().16b, t7.16b
153 ext \b64\().16b, t4.16b, t4.16b, #15
154 eor \c64\().16b, t8.16b, t5.16b
158 ext t6.16b, t5.16b, t5.16b, #8
162 pmull2 t5.8h, t7.16b, t5.16b
163 pmull2 t6.8h, t7.16b, t6.16b
165 ext t8.16b, t3.16b, t3.16b, #8
166 eor t4.16b, t4.16b, t6.16b
167 ext t7.16b, t5.16b, t5.16b, #8
168 ext t6.16b, t4.16b, t4.16b, #8
172 ext t5.16b, t5.16b, t5.16b, #14
184 CPU_LE( rev64 v11.16b, v11.16b )
185 CPU_LE( rev64 v12.16b, v12.16b )
189 CPU_LE( ext v11.16b, v11.16b, v11.16b, #8 )
190 CPU_LE( ext v12.16b, v12.16b, v12.16b, #8 )
192 eor \reg1\().16b, \reg1\().16b, v8.16b
193 eor \reg2\().16b, \reg2\().16b, v9.16b
194 eor \reg1\().16b, \reg1\().16b, v11.16b
195 eor \reg2\().16b, \reg2\().16b, v12.16b
202 ld1 {fold_consts.2d}, [fold_consts_ptr], #16
204 eor \dst_reg\().16b, \dst_reg\().16b, v8.16b
205 eor \dst_reg\().16b, \dst_reg\().16b, \src_reg\().16b
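The fold_16_bytes step excerpted at file lines 202-205 is the classic CRC folding move: split the 128-bit accumulator into 64-bit halves, carryless-multiply each half by a precomputed power of x reduced modulo G(x), and XOR both products into the next 16 data bytes. A minimal C model, assuming a hypothetical clmul_64x64 helper and the x^(1*128) / x^(1*128+64) constants from the table at the end of the file:

#include <stdint.h>

/* Illustrative helper: 64x64 -> 128-bit carryless multiply. */
static __uint128_t clmul_64x64(uint64_t a, uint64_t b)
{
	__uint128_t r = 0;

	for (int i = 0; i < 64; i++)
		if (a & (1ULL << i))
			r ^= (__uint128_t)b << i;
	return r;
}

/*
 * Model of one fold-across-16-bytes step.  'acc' and 'data' are 128-bit
 * polynomials with the most significant coefficients in the high bits.
 */
static __uint128_t fold_16(__uint128_t acc, __uint128_t data)
{
	uint64_t hi = (uint64_t)(acc >> 64);
	uint64_t lo = (uint64_t)acc;

	return clmul_64x64(hi, 0x1faa) ^	/* x^(1*128+64) mod G(x) */
	       clmul_64x64(lo, 0xa010) ^	/* x^(1*128)    mod G(x) */
	       data;
}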
223 CPU_LE( rev64 v0.16b, v0.16b )
224 CPU_LE( rev64 v1.16b, v1.16b )
225 CPU_LE( rev64 v2.16b, v2.16b )
226 CPU_LE( rev64 v3.16b, v3.16b )
227 CPU_LE( rev64 v4.16b, v4.16b )
228 CPU_LE( rev64 v5.16b, v5.16b )
229 CPU_LE( rev64 v6.16b, v6.16b )
230 CPU_LE( rev64 v7.16b, v7.16b )
231 CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 )
232 CPU_LE( ext v1.16b, v1.16b, v1.16b, #8 )
233 CPU_LE( ext v2.16b, v2.16b, v2.16b, #8 )
234 CPU_LE( ext v3.16b, v3.16b, v3.16b, #8 )
235 CPU_LE( ext v4.16b, v4.16b, v4.16b, #8 )
236 CPU_LE( ext v5.16b, v5.16b, v5.16b, #8 )
237 CPU_LE( ext v6.16b, v6.16b, v6.16b, #8 )
238 CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
240 // XOR the first 16 data *bits* with the initial CRC value.
241 movi v8.16b, #0
243 eor v0.16b, v0.16b, v8.16b
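File lines 240-243 fold the initial CRC value into the data stream itself rather than carrying a separate register: for an MSB-first CRC with no final XOR, seeding the register with init_crc is equivalent to XOR'ing init_crc into the first 16 message bits and starting from zero. In scalar terms (illustrative names only):

/* Illustrative scalar equivalent of the "XOR the first 16 data *bits*" step. */
static void seed_first_block(uint8_t first16[16], uint16_t init_crc)
{
	first16[0] ^= init_crc >> 8;	/* high byte of the seed */
	first16[1] ^= init_crc & 0xff;	/* low byte of the seed */
}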
263 // Now fold the 112 bytes in v0-v6 into the 16 bytes in v7.
266 add fold_consts_ptr, fold_consts_ptr, #16
267 ld1 {fold_consts.2d}, [fold_consts_ptr], #16
275 // Fold across 16 bytes.
280 // Then subtract 16 to simplify the termination condition of the
282 adds len, len, #(128-16)
284 // While >= 16 data bytes remain (not counting v7), fold the 16 bytes v7
289 eor v7.16b, v7.16b, v8.16b
290 ldr q0, [buf], #16
291 CPU_LE( rev64 v0.16b, v0.16b )
292 CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 )
293 eor v7.16b, v7.16b, v0.16b
294 subs len, len, #16
298 // Add 16 to get the correct number of data bytes remaining in 0...15
299 // (not counting v7), following the previous extra subtraction by 16.
300 adds len, len, #16
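In C terms, the main loop excerpted at file lines 284-300 is simply repeated application of the fold_16() model above; a minimal sketch (load_be128 is an illustrative helper, not a kernel function):

#include <stddef.h>
#include <stdint.h>

/* Illustrative helper: read 16 bytes, most significant byte first. */
static __uint128_t load_be128(const uint8_t *p)
{
	__uint128_t v = 0;

	for (int i = 0; i < 16; i++)
		v = (v << 8) | p[i];
	return v;
}

/* Fold whole 16-byte blocks into the accumulator, as the loop above does. */
static __uint128_t fold_all(__uint128_t acc, const uint8_t *buf, size_t len)
{
	while (len >= 16) {
		acc = fold_16(acc, load_be128(buf));
		buf += 16;
		len -= 16;
	}
	return acc;
}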
304 // Reduce the last '16 + len' bytes where 1 <= len <= 15 and the first
305 // 16 bytes are in v7 and the rest are the remaining data in 'buf'. To
308 // chunk of 16 bytes, then fold the first chunk into the second.
310 // v0 = last 16 original data bytes
312 ldr q0, [buf, #-16]
313 CPU_LE( rev64 v0.16b, v0.16b )
314 CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 )
317 adr_l x4, .Lbyteshift_table + 16
319 ld1 {v2.16b}, [x4]
320 tbl v1.16b, {v7.16b}, v2.16b
322 // v3 = first chunk: v7 right-shifted by '16-len' bytes.
323 movi v3.16b, #0x80
324 eor v2.16b, v2.16b, v3.16b
325 tbl v3.16b, {v7.16b}, v2.16b
327 // Convert to 8-bit masks: 'len' 0x00 bytes, then '16-len' 0xff bytes.
328 sshr v2.16b, v2.16b, #7
331 // then '16-len' bytes from v1 (high-order bytes).
332 bsl v2.16b, v1.16b, v0.16b
336 eor v7.16b, v3.16b, v0.16b
337 eor v7.16b, v7.16b, v2.16b
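The partial-block handling excerpted at file lines 304-337 avoids a scalar tail loop: the last '16 + len' bytes are split into a first chunk (the high-order 'len' bytes of the accumulator) and a second chunk (the remaining accumulator bytes followed by the final 'len' data bytes), and the first chunk is folded into the second. A C model of the same algebra, reusing the fold_16() sketch from above (names are illustrative):

/*
 * Model of the final partial block: 'acc' is the 16-byte accumulator and
 * 'tail' holds the last 'len' data bytes, 1 <= len <= 15.  Both chunks are
 * treated as polynomials with the most significant byte first.
 */
static __uint128_t fold_partial_block(__uint128_t acc, const uint8_t *tail,
				      unsigned int len)
{
	/* first chunk: the high-order 'len' bytes of the accumulator */
	__uint128_t first = acc >> (8 * (16 - len));

	/* second chunk: low '16 - len' accumulator bytes, then the tail */
	__uint128_t second = acc << (8 * len);

	for (unsigned int i = 0; i < len; i++)
		second |= (__uint128_t)tail[i] << (8 * (len - 1 - i));

	return fold_16(first, second);
}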
341 // Checksumming a buffer of length 16...255 bytes
345 // Load the first 16 data bytes.
347 CPU_LE( rev64 v7.16b, v7.16b )
348 CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
350 // XOR the first 16 data *bits* with the initial CRC value.
351 movi v0.16b, #0
353 eor v7.16b, v7.16b, v0.16b
355 // Load the fold-across-16-bytes constants.
356 ld1 {fold_consts.2d}, [fold_consts_ptr], #16
358 cmp len, #16
359 b.eq .Lreduce_final_16_bytes_\@ // len == 16
362 add len, len, #16
371 // Assumes len >= 16.
378 orr perm.2s, #1, lsl #16
380 zip1 perm.16b, perm.16b, perm.16b
381 zip1 perm.16b, perm.16b, perm.16b
385 CPU_LE( rev64 v7.16b, v7.16b )
386 CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
397 // Assumes len >= 16.
402 // Reduce the 128-bit value M(x), stored in v7, to the final 16-bit CRC.
404 movi v2.16b, #0 // init zero register
406 // Load 'x^48 * (x^48 mod G(x))' and 'x^48 * (x^80 mod G(x))'.
407 ld1 {fold_consts.2d}, [fold_consts_ptr], #16
412 ext v0.16b, v2.16b, v7.16b, #8
413 pmull2 v7.1q, v7.2d, fold_consts.2d // high bits * x^48 * (x^80 mod G(x))
414 eor v0.16b, v0.16b, v7.16b // + low bits * x^64
418 ext v1.16b, v0.16b, v2.16b, #12 // extract high 32 bits
420 pmull v1.1q, v1.1d, fold_consts.1d // high 32 bits * x^48 * (x^48 mod G(x))
421 eor v0.16b, v0.16b, v1.16b // + low bits
423 // Load G(x) and floor(x^48 / G(x)).
427 pmull2 v1.1q, v0.2d, fold_consts.2d // high 32 bits * floor(x^48 / G(x))
429 pmull v1.1q, v1.1d, fold_consts.1d // *= G(x)
431 eor v0.16b, v0.16b, v1.16b // + low 16 nonzero bits
432 // Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of v0.
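The reduction excerpted at file lines 397-432 evaluates (x^16 * M(x)) mod G(x) with two more PMULLs and a Barrett step. Functionally the result is the same as plain bit-serial long division, which makes a convenient cross-check (illustrative code, not how the assembly computes it):

#include <stdint.h>

/* (x^16 * M(x)) mod G(x) for a 128-bit M given as hi:lo halves. */
static uint16_t crc_of_128bits(uint64_t m_hi, uint64_t m_lo)
{
	uint32_t rem = 0;

	for (int i = 127; i >= 0; i--) {
		int bit = (i >= 64) ? (m_hi >> (i - 64)) & 1 : (m_lo >> i) & 1;

		rem = (rem << 1) | bit;
		if (rem & 0x10000)
			rem ^= 0x18bb7;		/* subtract G(x) */
	}
	for (int i = 0; i < 16; i++) {		/* the trailing x^16 factor */
		rem <<= 1;
		if (rem & 0x10000)
			rem ^= 0x18bb7;
	}
	return (uint16_t)rem;
}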
442 // G(x) = x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1 + x^0
444 .quad 0x0000000000006123 // x^(8*128) mod G(x)
445 .quad 0x0000000000002295 // x^(8*128+64) mod G(x)
447 .quad 0x0000000000001069 // x^(4*128) mod G(x)
448 .quad 0x000000000000dd31 // x^(4*128+64) mod G(x)
450 .quad 0x000000000000857d // x^(2*128) mod G(x)
451 .quad 0x0000000000007acc // x^(2*128+64) mod G(x)
453 .quad 0x000000000000a010 // x^(1*128) mod G(x)
454 .quad 0x0000000000001faa // x^(1*128+64) mod G(x)
456 .quad 0x1368000000000000 // x^48 * (x^48 mod G(x))
457 .quad 0x2d56000000000000 // x^48 * (x^80 mod G(x))
459 .quad 0x0000000000018bb7 // G(x)
460 .quad 0x00000001f65a57f8 // floor(x^48 / G(x))
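All of the constants above can be re-derived with ordinary GF(2) polynomial arithmetic on G(x) = 0x18bb7; the x^48 * (x^48 mod G(x)) and x^48 * (x^80 mod G(x)) entries are simply those remainders shifted left by 48 bits. The sketch below (illustrative helpers, not part of the kernel) computes x^k mod G(x) and floor(x^n / G(x)) bit-serially; per the table, xpow_mod_g(128) should print 0xa010 and xpow_div_g(48) should print 0x1f65a57f8.

#include <stdint.h>
#include <stdio.h>

#define CRCT10DIF_POLY	0x18bb7		/* G(x), degree 16 */

/* x^k mod G(x), bit-serial. */
static uint32_t xpow_mod_g(unsigned int k)
{
	uint32_t r = 1;			/* the polynomial '1' */

	while (k--) {
		r <<= 1;
		if (r & 0x10000)
			r ^= CRCT10DIF_POLY;
	}
	return r;
}

/* floor(x^n / G(x)), bit-serial long division. */
static uint64_t xpow_div_g(unsigned int n)
{
	uint64_t q = 0;
	uint32_t r = 1;			/* leading term of x^n brought down */

	while (n--) {
		r <<= 1;
		q <<= 1;
		if (r & 0x10000) {
			r ^= CRCT10DIF_POLY;
			q |= 1;
		}
	}
	return q;
}

int main(void)
{
	printf("x^(1*128) mod G(x) = 0x%x\n", xpow_mod_g(128));
	printf("floor(x^48 / G(x)) = 0x%llx\n",
	       (unsigned long long)xpow_div_g(48));
	return 0;
}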
462 // For 1 <= len <= 15, the 16-byte vector beginning at &byteshift_table[16 -
463 // len] is the index vector to shift left by 'len' bytes, and is also {0x80,
464 // ..., 0x80} XOR the index vector to shift right by '16 - len' bytes.