//
// Accelerated CRC-T10DIF using ARM NEON and Crypto Extensions instructions
//
// Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
// Copyright (C) 2019 Google LLC <ebiggers@google.com>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License version 2 as
// published by the Free Software Foundation.
//

// Derived from the x86 version:
//
// Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions
//
// Copyright (c) 2013, Intel Corporation
//
// Authors:
//     Erdinc Ozturk <erdinc.ozturk@intel.com>
//     Vinodh Gopal <vinodh.gopal@intel.com>
//     James Guilford <james.guilford@intel.com>
//     Tim Chen <tim.c.chen@linux.intel.com>
//
// This software is available to you under a choice of one of two
// licenses.  You may choose to be licensed under the terms of the GNU
// General Public License (GPL) Version 2, available from the file
// COPYING in the main directory of this source tree, or the
// OpenIB.org BSD license below:
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
//   notice, this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
//   notice, this list of conditions and the following disclaimer in the
//   documentation and/or other materials provided with the
//   distribution.
//
// * Neither the name of the Intel Corporation nor the names of its
//   contributors may be used to endorse or promote products derived from
//   this software without specific prior written permission.
//
//
// THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Reference paper titled "Fast CRC Computation for Generic
// Polynomials Using PCLMULQDQ Instruction"
// URL: http://www.intel.com/content/dam/www/public/us/en/documents
// /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
//

#include <linux/linkage.h>
#include <asm/assembler.h>

#ifdef CONFIG_CPU_ENDIAN_BE8
#define CPU_LE(code...)
#else
#define CPU_LE(code...)		code
#endif

	.text
	.arch		armv8-a
	.fpu		crypto-neon-fp-armv8

	init_crc	.req	r0
	buf		.req	r1
	len		.req	r2

	fold_consts_ptr	.req	ip

	q0l		.req	d0
	q0h		.req	d1
	q1l		.req	d2
	q1h		.req	d3
	q2l		.req	d4
	q2h		.req	d5
	q3l		.req	d6
	q3h		.req	d7
	q4l		.req	d8
	q4h		.req	d9
	q5l		.req	d10
	q5h		.req	d11
	q6l		.req	d12
	q6h		.req	d13
	q7l		.req	d14
	q7h		.req	d15
	q8l		.req	d16
	q8h		.req	d17
	q9l		.req	d18
	q9h		.req	d19
	q10l		.req	d20
	q10h		.req	d21
	q11l		.req	d22
	q11h		.req	d23
	q12l		.req	d24
	q12h		.req	d25

	FOLD_CONSTS	.req	q10
	FOLD_CONST_L	.req	q10l
	FOLD_CONST_H	.req	q10h

	/*
	 * Pairwise long polynomial multiplication of two 16-bit values
	 *
	 *   { w0, w1 }, { y0, y1 }
	 *
	 * by two 64-bit values
	 *
	 *   { x0, x1, x2, x3, x4, x5, x6, x7 }, { z0, z1, z2, z3, z4, z5, z6, z7 }
	 *
	 * where each vector element is a byte, ordered from least to most
	 * significant.  The resulting 80-bit vectors are XOR'ed together.
	 *
	 * This can be implemented using 8x8 long polynomial multiplication, by
	 * reorganizing the input so that each pairwise 8x8 multiplication
	 * produces one of the terms from the decomposition below, and
	 * combining the results of each rank and shifting them into place.
	 *
	 * Rank
	 *  0            w0*x0 ^              |        y0*z0 ^
	 *  1       (w0*x1 ^ w1*x0) <<  8 ^   |   (y0*z1 ^ y1*z0) <<  8 ^
	 *  2       (w0*x2 ^ w1*x1) << 16 ^   |   (y0*z2 ^ y1*z1) << 16 ^
	 *  3       (w0*x3 ^ w1*x2) << 24 ^   |   (y0*z3 ^ y1*z2) << 24 ^
	 *  4       (w0*x4 ^ w1*x3) << 32 ^   |   (y0*z4 ^ y1*z3) << 32 ^
	 *  5       (w0*x5 ^ w1*x4) << 40 ^   |   (y0*z5 ^ y1*z4) << 40 ^
	 *  6       (w0*x6 ^ w1*x5) << 48 ^   |   (y0*z6 ^ y1*z5) << 48 ^
	 *  7       (w0*x7 ^ w1*x6) << 56 ^   |   (y0*z7 ^ y1*z6) << 56 ^
	 *  8            w1*x7 << 64          |        y1*z7 << 64
	 *
	 * The inputs can be reorganized into
	 *
	 *   { w0, w0, w0, w0, y0, y0, y0, y0 }, { w1, w1, w1, w1, y1, y1, y1, y1 }
	 *   { x0, x2, x4, x6, z0, z2, z4, z6 }, { x1, x3, x5, x7, z1, z3, z5, z7 }
	 *
	 * and after performing 8x8->16 bit long polynomial multiplication of
	 * each of the halves of the first vector with those of the second one,
	 * we obtain the following four vectors of 16-bit elements:
	 *
	 *   a := { w0*x0, w0*x2, w0*x4, w0*x6 }, { y0*z0, y0*z2, y0*z4, y0*z6 }
	 *   b := { w0*x1, w0*x3, w0*x5, w0*x7 }, { y0*z1, y0*z3, y0*z5, y0*z7 }
	 *   c := { w1*x0, w1*x2, w1*x4, w1*x6 }, { y1*z0, y1*z2, y1*z4, y1*z6 }
	 *   d := { w1*x1, w1*x3, w1*x5, w1*x7 }, { y1*z1, y1*z3, y1*z5, y1*z7 }
	 *
	 * Results b and c can be XORed together, as the vector elements have
	 * matching ranks.  Then, the final XOR can be pulled forward, and
	 * applied between the halves of each of the remaining three vectors,
	 * which are then shifted into place, and XORed together to produce the
	 * final 80-bit result.
	 */
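
	/*
	 * For reference only (not part of the build): a minimal C sketch of a
	 * single 16x64 carryless multiplication built from 8x8 multiplies,
	 * following the rank decomposition above.  The pairwise variant used
	 * below is simply two of these XOR'ed together.  The helper names
	 * clmul8() and clmul16x64() are illustrative, not existing kernel APIs.
	 *
	 *	#include <stdint.h>
	 *
	 *	// 8x8 -> 16 bit carryless multiply (one lane of vmull.p8)
	 *	static uint16_t clmul8(uint8_t a, uint8_t b)
	 *	{
	 *		uint16_t r = 0;
	 *
	 *		for (int i = 0; i < 8; i++)
	 *			if (b & (1 << i))
	 *				r ^= (uint16_t)a << i;
	 *		return r;
	 *	}
	 *
	 *	// 16x64 -> 80 bit carryless multiply; 'lo' receives bits 0..63
	 *	// and 'hi' receives bits 64..79 of the product
	 *	static void clmul16x64(uint16_t w, uint64_t x,
	 *			       uint64_t *lo, uint64_t *hi)
	 *	{
	 *		uint8_t w0 = w & 0xff, w1 = w >> 8;
	 *
	 *		*lo = *hi = 0;
	 *		for (int rank = 0; rank <= 8; rank++) {
	 *			uint16_t term = 0;
	 *
	 *			if (rank < 8)
	 *				term ^= clmul8(w0, x >> (8 * rank));
	 *			if (rank > 0)
	 *				term ^= clmul8(w1, x >> (8 * (rank - 1)));
	 *
	 *			// XOR the 16-bit term in at bit offset 8*rank
	 *			if (rank < 8)
	 *				*lo ^= (uint64_t)term << (8 * rank);
	 *			if (rank >= 7)
	 *				*hi ^= (uint64_t)term >> (8 * (8 - rank));
	 *		}
	 *	}
	 */
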
	.macro		pmull16x64_p8, v16, v64
	vext.8		q11, \v64, \v64, #1
	vld1.64		{q12}, [r4, :128]
	vuzp.8		q11, \v64
	vtbl.8		d24, {\v16\()_L-\v16\()_H}, d24
	vtbl.8		d25, {\v16\()_L-\v16\()_H}, d25
	bl		__pmull16x64_p8
	veor		\v64, q12, q14
	.endm

__pmull16x64_p8:
	vmull.p8	q13, d23, d24
	vmull.p8	q14, d23, d25
	vmull.p8	q15, d22, d24
	vmull.p8	q12, d22, d25

	veor		q14, q14, q15
	veor		d24, d24, d25
	veor		d26, d26, d27
	veor		d28, d28, d29
	vmov.i32	d25, #0
	vmov.i32	d29, #0
	vext.8		q12, q12, q12, #14
	vext.8		q14, q14, q14, #15
	veor		d24, d24, d26
	bx		lr
ENDPROC(__pmull16x64_p8)

	.macro		pmull16x64_p64, v16, v64
	vmull.p64	q11, \v64\()l, \v16\()_L
	vmull.p64	\v64, \v64\()h, \v16\()_H
	veor		\v64, \v64, q11
	.endm

	// Fold reg1, reg2 into the next 32 data bytes, storing the result back
	// into reg1, reg2.
	.macro		fold_32_bytes, reg1, reg2, p
	vld1.64		{q8-q9}, [buf]!

	pmull16x64_\p	FOLD_CONST, \reg1
	pmull16x64_\p	FOLD_CONST, \reg2

CPU_LE(	vrev64.8	q8, q8	)
CPU_LE(	vrev64.8	q9, q9	)
	vswp		q8l, q8h
	vswp		q9l, q9h

	veor.8		\reg1, \reg1, q8
	veor.8		\reg2, \reg2, q9
	.endm

	// Fold src_reg into dst_reg, optionally loading the next fold constants
	.macro		fold_16_bytes, src_reg, dst_reg, p, load_next_consts
	pmull16x64_\p	FOLD_CONST, \src_reg
	.ifnb		\load_next_consts
	vld1.64		{FOLD_CONSTS}, [fold_consts_ptr, :128]!
	.endif
	veor.8		\dst_reg, \dst_reg, \src_reg
	.endm
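
	/*
	 * The macros above implement one CRC "folding" step: a 16-byte block A
	 * that lies N bytes earlier in the message than the block it is folded
	 * into must be multiplied by x^(8N) modulo G(x).  Writing A as
	 * A_hi * x^64 + A_lo, that product is congruent to
	 * A_hi * (x^(8N+64) mod G(x)) ^ A_lo * (x^(8N) mod G(x)), which is what
	 * pmull16x64 computes using the two halves of FOLD_CONSTS.  A C sketch
	 * for N = 16, assuming the clmul16x64() helper sketched above and
	 * values already in polynomial bit order (i.e. after the vrev64.8/vswp
	 * done on load):
	 *
	 *	static void fold_step(uint64_t acc[2],		// A, acc[1] = high half
	 *			      const uint64_t next[2],	// next 16 data bytes
	 *			      uint16_t c_lo,		// x^128 mod G(x)
	 *			      uint16_t c_hi)		// x^(128+64) mod G(x)
	 *	{
	 *		uint64_t lo0, hi0, lo1, hi1;
	 *
	 *		clmul16x64(c_lo, acc[0], &lo0, &hi0);
	 *		clmul16x64(c_hi, acc[1], &lo1, &hi1);
	 *
	 *		acc[0] = lo0 ^ lo1 ^ next[0];
	 *		acc[1] = hi0 ^ hi1 ^ next[1];
	 *	}
	 *
	 * fold_32_bytes applies this to two registers at once, against freshly
	 * loaded data; fold_16_bytes folds one register into another register
	 * instead.
	 */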

	.macro		crct10dif, p
	// For sizes less than 256 bytes, we can't fold 128 bytes at a time.
	cmp		len, #256
	blt		.Lless_than_256_bytes\@

	mov_l		fold_consts_ptr, .Lfold_across_128_bytes_consts

	// Load the first 128 data bytes.  Byte swapping is necessary to make
	// the bit order match the polynomial coefficient order.
	vld1.64		{q0-q1}, [buf]!
	vld1.64		{q2-q3}, [buf]!
	vld1.64		{q4-q5}, [buf]!
	vld1.64		{q6-q7}, [buf]!
CPU_LE(	vrev64.8	q0, q0	)
CPU_LE(	vrev64.8	q1, q1	)
CPU_LE(	vrev64.8	q2, q2	)
CPU_LE(	vrev64.8	q3, q3	)
CPU_LE(	vrev64.8	q4, q4	)
CPU_LE(	vrev64.8	q5, q5	)
CPU_LE(	vrev64.8	q6, q6	)
CPU_LE(	vrev64.8	q7, q7	)
	vswp		q0l, q0h
	vswp		q1l, q1h
	vswp		q2l, q2h
	vswp		q3l, q3h
	vswp		q4l, q4h
	vswp		q5l, q5h
	vswp		q6l, q6h
	vswp		q7l, q7h

	// XOR the first 16 data *bits* with the initial CRC value.
	vmov.i8		q8h, #0
	vmov.u16	q8h[3], init_crc
	veor		q0h, q0h, q8h

	// Load the constants for folding across 128 bytes.
	vld1.64		{FOLD_CONSTS}, [fold_consts_ptr, :128]!

	// Subtract 128 for the 128 data bytes just consumed.  Subtract another
	// 128 to simplify the termination condition of the following loop.
	sub		len, len, #256

	// While >= 128 data bytes remain (not counting q0-q7), fold the 128
	// bytes q0-q7 into them, storing the result back into q0-q7.
.Lfold_128_bytes_loop\@:
	fold_32_bytes	q0, q1, \p
	fold_32_bytes	q2, q3, \p
	fold_32_bytes	q4, q5, \p
	fold_32_bytes	q6, q7, \p
	subs		len, len, #128
	bge		.Lfold_128_bytes_loop\@

	// Now fold the 112 bytes in q0-q6 into the 16 bytes in q7.

	// Fold across 64 bytes.
	vld1.64		{FOLD_CONSTS}, [fold_consts_ptr, :128]!
	fold_16_bytes	q0, q4, \p
	fold_16_bytes	q1, q5, \p
	fold_16_bytes	q2, q6, \p
	fold_16_bytes	q3, q7, \p, 1
	// Fold across 32 bytes.
	fold_16_bytes	q4, q6, \p
	fold_16_bytes	q5, q7, \p, 1
	// Fold across 16 bytes.
	fold_16_bytes	q6, q7, \p

	// Add 128 to get the correct number of data bytes remaining in 0...127
	// (not counting q7), following the previous extra subtraction by 128.
	// Then subtract 16 to simplify the termination condition of the
	// following loop.
	adds		len, len, #(128-16)

	// While >= 16 data bytes remain (not counting q7), fold the 16 bytes q7
	// into them, storing the result back into q7.
	blt		.Lfold_16_bytes_loop_done\@
.Lfold_16_bytes_loop\@:
	pmull16x64_\p	FOLD_CONST, q7
	vld1.64		{q0}, [buf]!
CPU_LE(	vrev64.8	q0, q0	)
	vswp		q0l, q0h
	veor.8		q7, q7, q0
	subs		len, len, #16
	bge		.Lfold_16_bytes_loop\@

.Lfold_16_bytes_loop_done\@:
	// Add 16 to get the correct number of data bytes remaining in 0...15
	// (not counting q7), following the previous extra subtraction by 16.
	adds		len, len, #16
	beq		.Lreduce_final_16_bytes\@

.Lhandle_partial_segment\@:
	// Reduce the last '16 + len' bytes where 1 <= len <= 15 and the first
	// 16 bytes are in q7 and the rest are the remaining data in 'buf'.  To
	// do this without needing a fold constant for each possible 'len',
	// redivide the bytes into a first chunk of 'len' bytes and a second
	// chunk of 16 bytes, then fold the first chunk into the second.

	// q0 = last 16 original data bytes
	add		buf, buf, len
	sub		buf, buf, #16
	vld1.64		{q0}, [buf]
CPU_LE(	vrev64.8	q0, q0	)
	vswp		q0l, q0h

	// q1 = high order part of second chunk: q7 left-shifted by 'len' bytes.
	mov_l		r1, .Lbyteshift_table + 16
	sub		r1, r1, len
	vld1.8		{q2}, [r1]
	vtbl.8		q1l, {q7l-q7h}, q2l
	vtbl.8		q1h, {q7l-q7h}, q2h

	// q3 = first chunk: q7 right-shifted by '16-len' bytes.
	vmov.i8		q3, #0x80
	veor.8		q2, q2, q3
	vtbl.8		q3l, {q7l-q7h}, q2l
	vtbl.8		q3h, {q7l-q7h}, q2h

	// Convert to 8-bit masks: 'len' 0x00 bytes, then '16-len' 0xff bytes.
	vshr.s8		q2, q2, #7

	// q2 = second chunk: 'len' bytes from q0 (low-order bytes),
	// then '16-len' bytes from q1 (high-order bytes).
	vbsl.8		q2, q1, q0

	// Fold the first chunk into the second chunk, storing the result in q7.
	pmull16x64_\p	FOLD_CONST, q3
	veor.8		q7, q3, q2
	b		.Lreduce_final_16_bytes\@

.Lless_than_256_bytes\@:
	// Checksumming a buffer of length 16...255 bytes

	mov_l		fold_consts_ptr, .Lfold_across_16_bytes_consts

	// Load the first 16 data bytes.
	vld1.64		{q7}, [buf]!
CPU_LE(	vrev64.8	q7, q7	)
	vswp		q7l, q7h

	// XOR the first 16 data *bits* with the initial CRC value.
	vmov.i8		q0h, #0
	vmov.u16	q0h[3], init_crc
	veor.8		q7h, q7h, q0h

	// Load the fold-across-16-bytes constants.
	vld1.64		{FOLD_CONSTS}, [fold_consts_ptr, :128]!

	cmp		len, #16
	beq		.Lreduce_final_16_bytes\@	// len == 16
	subs		len, len, #32
	addlt		len, len, #16
	blt		.Lhandle_partial_segment\@	// 17 <= len <= 31
	b		.Lfold_16_bytes_loop\@		// 32 <= len <= 255

.Lreduce_final_16_bytes\@:
	.endm

//
// u16 crc_t10dif_pmull(u16 init_crc, const u8 *buf, size_t len);
//
// Assumes len >= 16.
//
ENTRY(crc_t10dif_pmull64)
	crct10dif	p64

	// Reduce the 128-bit value M(x), stored in q7, to the final 16-bit CRC.

	// Load 'x^48 * (x^48 mod G(x))' and 'x^48 * (x^80 mod G(x))'.
	vld1.64		{FOLD_CONSTS}, [fold_consts_ptr, :128]!

	// Fold the high 64 bits into the low 64 bits, while also multiplying by
	// x^64.  This produces a 128-bit value congruent to x^64 * M(x) and
	// whose low 48 bits are 0.
	vmull.p64	q0, q7h, FOLD_CONST_H	// high bits * x^48 * (x^80 mod G(x))
	veor.8		q0h, q0h, q7l		// + low bits * x^64

	// Fold the high 32 bits into the low 96 bits.  This produces a 96-bit
	// value congruent to x^64 * M(x) and whose low 48 bits are 0.
	vmov.i8		q1, #0
	vmov		s4, s3			// extract high 32 bits
	vmov		s3, s5			// zero high 32 bits
	vmull.p64	q1, q1l, FOLD_CONST_L	// high 32 bits * x^48 * (x^48 mod G(x))
	veor.8		q0, q0, q1		// + low bits

	// Load G(x) and floor(x^48 / G(x)).
	vld1.64		{FOLD_CONSTS}, [fold_consts_ptr, :128]

	// Use Barrett reduction to compute the final CRC value.
	vmull.p64	q1, q0h, FOLD_CONST_H	// high 32 bits * floor(x^48 / G(x))
	vshr.u64	q1l, q1l, #32		// /= x^32
	vmull.p64	q1, q1l, FOLD_CONST_L	// *= G(x)
	vshr.u64	q0l, q0l, #48
	veor.8		q0l, q0l, q1l		// + low 16 nonzero bits
	// Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of q0.

	vmov.u16	r0, q0l[0]
	bx		lr
ENDPROC(crc_t10dif_pmull64)
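
// For reference only (not part of the build): the folding and reduction above
// must agree with the plain bit-at-a-time definition of CRC-T10DIF.  A C model
// of that definition (a sketch of the generic algorithm, not of this file's
// Barrett reduction; crc_t10dif_ref() is an illustrative name):
//
//	#include <stddef.h>
//	#include <stdint.h>
//
//	static uint16_t crc_t10dif_ref(uint16_t crc, const uint8_t *buf,
//				       size_t len)
//	{
//		// G(x) = 0x18bb7, processed MSB first; 0x8bb7 omits the x^16 term
//		for (size_t i = 0; i < len; i++) {
//			crc ^= (uint16_t)buf[i] << 8;
//			for (int bit = 0; bit < 8; bit++)
//				crc = (crc & 0x8000) ? (crc << 1) ^ 0x8bb7
//						     : crc << 1;
//		}
//		return crc;
//	}
//
// crc_t10dif_pmull64(init_crc, buf, len) is expected to return the same value
// as crc_t10dif_ref(init_crc, buf, len) for any len >= 16.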

ENTRY(crc_t10dif_pmull8)
	push		{r4, lr}
	mov_l		r4, .L16x64perm

	crct10dif	p8

CPU_LE(	vrev64.8	q7, q7	)
	vswp		q7l, q7h
	vst1.64		{q7}, [r3, :128]
	pop		{r4, pc}
ENDPROC(crc_t10dif_pmull8)

	.section	".rodata", "a"
	.align		4

// Fold constants precomputed from the polynomial 0x18bb7
// G(x) = x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1 + x^0
.Lfold_across_128_bytes_consts:
	.quad		0x0000000000006123	// x^(8*128)    mod G(x)
	.quad		0x0000000000002295	// x^(8*128+64) mod G(x)
// .Lfold_across_64_bytes_consts:
	.quad		0x0000000000001069	// x^(4*128)    mod G(x)
	.quad		0x000000000000dd31	// x^(4*128+64) mod G(x)
// .Lfold_across_32_bytes_consts:
	.quad		0x000000000000857d	// x^(2*128)    mod G(x)
	.quad		0x0000000000007acc	// x^(2*128+64) mod G(x)
.Lfold_across_16_bytes_consts:
	.quad		0x000000000000a010	// x^(1*128)    mod G(x)
	.quad		0x0000000000001faa	// x^(1*128+64) mod G(x)
// .Lfinal_fold_consts:
	.quad		0x1368000000000000	// x^48 * (x^48 mod G(x))
	.quad		0x2d56000000000000	// x^48 * (x^80 mod G(x))
// .Lbarrett_reduction_consts:
	.quad		0x0000000000018bb7	// G(x)
	.quad		0x00000001f65a57f8	// floor(x^48 / G(x))

// For 1 <= len <= 15, the 16-byte vector beginning at &byteshift_table[16 -
// len] is the index vector to shift left by 'len' bytes, and is also {0x80,
// ..., 0x80} XOR the index vector to shift right by '16 - len' bytes.
.Lbyteshift_table:
	.byte		 0x0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87
	.byte		0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f
	.byte		 0x0,  0x1,  0x2,  0x3,  0x4,  0x5,  0x6,  0x7
	.byte		 0x8,  0x9,  0xa,  0xb,  0xc,  0xd,  0xe,  0x0

.L16x64perm:
	.quad		0x808080800000000, 0x909090901010101
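
// For reference only (not part of the build): the fold constants above are
// powers of x reduced modulo G(x), and can be reproduced with a simple
// shift-and-reduce helper (xpow_mod_g() is an illustrative name, not an
// existing kernel API):
//
//	#include <stdint.h>
//
//	static uint16_t xpow_mod_g(unsigned int n)	// x^n mod G(x)
//	{
//		uint32_t r = 1;				// the polynomial '1'
//
//		while (n--) {
//			r <<= 1;			// multiply by x
//			if (r & 0x10000)
//				r ^= 0x18bb7;		// reduce modulo G(x)
//		}
//		return r;
//	}
//
// e.g. xpow_mod_g(8*128) should reproduce the 0x6123 entry of
// .Lfold_across_128_bytes_consts, and xpow_mod_g(1*128+64) the 0x1faa entry
// of .Lfold_across_16_bytes_consts.
//
// Worked example for .Lbyteshift_table: for len = 2, the vector at
// &byteshift_table[16 - 2] is { 0x8e, 0x8f, 0x0, 0x1, ..., 0xd }.  Used as a
// vtbl index vector it shifts a 16-byte vector left by 2 bytes (indices with
// the top bit set select zero), while the same vector XOR'ed with
// { 0x80, ..., 0x80 } gives { 0x0e, 0x0f, 0x80, ..., 0x8d }, which shifts
// right by 16 - 2 = 14 bytes, as used in .Lhandle_partial_segment.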