//
// Accelerated CRC-T10DIF using arm64 NEON and Crypto Extensions instructions
//
// Copyright (C) 2016 Linaro Ltd
// Copyright (C) 2019-2024 Google LLC
//
// Authors: Ard Biesheuvel <ardb@google.com>
//          Eric Biggers <ebiggers@google.com>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License version 2 as
// published by the Free Software Foundation.
//

// Derived from the x86 version:
//
// Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions
//
// Copyright (c) 2013, Intel Corporation
//
// Authors:
//     Erdinc Ozturk <erdinc.ozturk@intel.com>
//     Vinodh Gopal <vinodh.gopal@intel.com>
//     James Guilford <james.guilford@intel.com>
//     Tim Chen <tim.c.chen@linux.intel.com>
//
// This software is available to you under a choice of one of two
// licenses.  You may choose to be licensed under the terms of the GNU
// General Public License (GPL) Version 2, available from the file
// COPYING in the main directory of this source tree, or the
// OpenIB.org BSD license below:
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
//   notice, this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
//   notice, this list of conditions and the following disclaimer in the
//   documentation and/or other materials provided with the
//   distribution.
//
// * Neither the name of the Intel Corporation nor the names of its
//   contributors may be used to endorse or promote products derived from
//   this software without specific prior written permission.
//
//
// THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Reference paper titled "Fast CRC Computation for Generic
//	Polynomials Using PCLMULQDQ Instruction"
// URL: http://www.intel.com/content/dam/www/public/us/en/documents
//  /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
//

#include <linux/linkage.h>
#include <asm/assembler.h>

	.text
	.arch		armv8-a+crypto

	init_crc	.req	w0
	buf		.req	x1
	len		.req	x2
	fold_consts_ptr	.req	x5

	fold_consts	.req	v10

	t3		.req	v17
	t4		.req	v18
	t5		.req	v19
	t6		.req	v20
	t7		.req	v21
	t8		.req	v22

	perm		.req	v27

	.macro		pmull16x64_p64, a16, b64, c64
	pmull2		\c64\().1q, \a16\().2d, \b64\().2d
	pmull		\b64\().1q, \a16\().1d, \b64\().1d
	.endm

	/*
	 * Pairwise long polynomial multiplication of two 16-bit values
	 *
	 *   { w0, w1 }, { y0, y1 }
	 *
	 * by two 64-bit values
	 *
	 *   { x0, x1, x2, x3, x4, x5, x6, x7 }, { z0, z1, z2, z3, z4, z5, z6, z7 }
	 *
	 * where each vector element is a byte, ordered from least to most
	 * significant.
	 *
	 * This can be implemented using 8x8 long polynomial multiplication, by
	 * reorganizing the input so that each pairwise 8x8 multiplication
	 * produces one of the terms from the decomposition below, and
	 * combining the results of each rank and shifting them into place.
	 *
	 * Rank
	 *  0            w0*x0 ^              |        y0*z0 ^
	 *  1       (w0*x1 ^ w1*x0) <<  8 ^   |   (y0*z1 ^ y1*z0) <<  8 ^
	 *  2       (w0*x2 ^ w1*x1) << 16 ^   |   (y0*z2 ^ y1*z1) << 16 ^
	 *  3       (w0*x3 ^ w1*x2) << 24 ^   |   (y0*z3 ^ y1*z2) << 24 ^
	 *  4       (w0*x4 ^ w1*x3) << 32 ^   |   (y0*z4 ^ y1*z3) << 32 ^
	 *  5       (w0*x5 ^ w1*x4) << 40 ^   |   (y0*z5 ^ y1*z4) << 40 ^
	 *  6       (w0*x6 ^ w1*x5) << 48 ^   |   (y0*z6 ^ y1*z5) << 48 ^
	 *  7       (w0*x7 ^ w1*x6) << 56 ^   |   (y0*z7 ^ y1*z6) << 56 ^
	 *  8            w1*x7 << 64          |        y1*z7 << 64
	 *
	 * The inputs can be reorganized into
	 *
	 *   { w0, w0, w0, w0, y0, y0, y0, y0 }, { w1, w1, w1, w1, y1, y1, y1, y1 }
	 *   { x0, x2, x4, x6, z0, z2, z4, z6 }, { x1, x3, x5, x7, z1, z3, z5, z7 }
	 *
	 * and after performing 8x8->16 bit long polynomial multiplication of
	 * each of the halves of the first vector with those of the second one,
	 * we obtain the following four vectors of 16-bit elements:
	 *
	 *   a := { w0*x0, w0*x2, w0*x4, w0*x6 }, { y0*z0, y0*z2, y0*z4, y0*z6 }
	 *   b := { w0*x1, w0*x3, w0*x5, w0*x7 }, { y0*z1, y0*z3, y0*z5, y0*z7 }
	 *   c := { w1*x0, w1*x2, w1*x4, w1*x6 }, { y1*z0, y1*z2, y1*z4, y1*z6 }
	 *   d := { w1*x1, w1*x3, w1*x5, w1*x7 }, { y1*z1, y1*z3, y1*z5, y1*z7 }
	 *
	 * Results b and c can be XORed together, as the vector elements have
	 * matching ranks.  Then, the final XOR (*) can be pulled forward, and
	 * applied between the halves of each of the remaining three vectors,
	 * which are then shifted into place, and combined to produce two
	 * 80-bit results.
	 *
	 * (*) NOTE: the 16x64 bit polynomial multiply below is not equivalent
	 * to the 64x64 bit one above, but XOR'ing the outputs together will
	 * produce the expected result, and this is sufficient in the context
	 * of this algorithm.
	 */
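
	/*
	 * For reference only (not part of the build): the math of a single
	 * 16x64 bit carryless multiplication, as performed pairwise by the
	 * pmull16x64_* macros in this file, corresponds to the C sketch below.
	 * The 80-bit product is shown split across *lo (bits 0..63) and *hi
	 * (bits 64..79); the helper name clmul_16x64 is purely illustrative.
	 *
	 *	static void clmul_16x64(u16 a, u64 b, u64 *lo, u64 *hi)
	 *	{
	 *		*lo = *hi = 0;
	 *		for (int i = 0; i < 16; i++) {
	 *			if (a & (1U << i)) {
	 *				*lo ^= b << i;
	 *				if (i)
	 *					*hi ^= b >> (64 - i);
	 *			}
	 *		}
	 *	}
	 */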
	.macro		pmull16x64_p8, a16, b64, c64
	ext		t7.16b, \b64\().16b, \b64\().16b, #1
	tbl		t5.16b, {\a16\().16b}, perm.16b
	uzp1		t7.16b, \b64\().16b, t7.16b
	bl		__pmull_p8_16x64
	ext		\b64\().16b, t4.16b, t4.16b, #15
	eor		\c64\().16b, t8.16b, t5.16b
	.endm

SYM_FUNC_START_LOCAL(__pmull_p8_16x64)
	ext		t6.16b, t5.16b, t5.16b, #8

	pmull		t3.8h, t7.8b, t5.8b
	pmull		t4.8h, t7.8b, t6.8b
	pmull2		t5.8h, t7.16b, t5.16b
	pmull2		t6.8h, t7.16b, t6.16b

	ext		t8.16b, t3.16b, t3.16b, #8
	eor		t4.16b, t4.16b, t6.16b
	ext		t7.16b, t5.16b, t5.16b, #8
	ext		t6.16b, t4.16b, t4.16b, #8
	eor		t8.8b, t8.8b, t3.8b
	eor		t5.8b, t5.8b, t7.8b
	eor		t4.8b, t4.8b, t6.8b
	ext		t5.16b, t5.16b, t5.16b, #14
	ret
SYM_FUNC_END(__pmull_p8_16x64)


	// Fold reg1, reg2 into the next 32 data bytes, storing the result back
	// into reg1, reg2.
	.macro		fold_32_bytes, p, reg1, reg2
	ldp		q11, q12, [buf], #0x20

	pmull16x64_\p	fold_consts, \reg1, v8

CPU_LE(	rev64		v11.16b, v11.16b		)
CPU_LE(	rev64		v12.16b, v12.16b		)

	pmull16x64_\p	fold_consts, \reg2, v9

CPU_LE(	ext		v11.16b, v11.16b, v11.16b, #8	)
CPU_LE(	ext		v12.16b, v12.16b, v12.16b, #8	)

	eor		\reg1\().16b, \reg1\().16b, v8.16b
	eor		\reg2\().16b, \reg2\().16b, v9.16b
	eor		\reg1\().16b, \reg1\().16b, v11.16b
	eor		\reg2\().16b, \reg2\().16b, v12.16b
	.endm

	// Fold src_reg into dst_reg, optionally loading the next fold constants
	.macro		fold_16_bytes, p, src_reg, dst_reg, load_next_consts
	pmull16x64_\p	fold_consts, \src_reg, v8
	.ifnb		\load_next_consts
	ld1		{fold_consts.2d}, [fold_consts_ptr], #16
	.endif
	eor		\dst_reg\().16b, \dst_reg\().16b, v8.16b
	eor		\dst_reg\().16b, \dst_reg\().16b, \src_reg\().16b
	.endm

	.macro		crc_t10dif_pmull, p

	// For sizes less than 256 bytes, we can't fold 128 bytes at a time.
	cmp		len, #256
	b.lt		.Lless_than_256_bytes_\@

	adr_l		fold_consts_ptr, .Lfold_across_128_bytes_consts

	// Load the first 128 data bytes.  Byte swapping is necessary to make
	// the bit order match the polynomial coefficient order.
	ldp		q0, q1, [buf]
	ldp		q2, q3, [buf, #0x20]
	ldp		q4, q5, [buf, #0x40]
	ldp		q6, q7, [buf, #0x60]
	add		buf, buf, #0x80
CPU_LE(	rev64		v0.16b, v0.16b			)
CPU_LE(	rev64		v1.16b, v1.16b			)
CPU_LE(	rev64		v2.16b, v2.16b			)
CPU_LE(	rev64		v3.16b, v3.16b			)
CPU_LE(	rev64		v4.16b, v4.16b			)
CPU_LE(	rev64		v5.16b, v5.16b			)
CPU_LE(	rev64		v6.16b, v6.16b			)
CPU_LE(	rev64		v7.16b, v7.16b			)
CPU_LE(	ext		v0.16b, v0.16b, v0.16b, #8	)
CPU_LE(	ext		v1.16b, v1.16b, v1.16b, #8	)
CPU_LE(	ext		v2.16b, v2.16b, v2.16b, #8	)
CPU_LE(	ext		v3.16b, v3.16b, v3.16b, #8	)
CPU_LE(	ext		v4.16b, v4.16b, v4.16b, #8	)
CPU_LE(	ext		v5.16b, v5.16b, v5.16b, #8	)
CPU_LE(	ext		v6.16b, v6.16b, v6.16b, #8	)
CPU_LE(	ext		v7.16b, v7.16b, v7.16b, #8	)

	// XOR the first 16 data *bits* with the initial CRC value.
	movi		v8.16b, #0
	mov		v8.h[7], init_crc
	eor		v0.16b, v0.16b, v8.16b

	// Load the constants for folding across 128 bytes.
	ld1		{fold_consts.2d}, [fold_consts_ptr]

	// Subtract 128 for the 128 data bytes just consumed.  Subtract another
	// 128 to simplify the termination condition of the following loop.
	sub		len, len, #256

	// While >= 128 data bytes remain (not counting v0-v7), fold the 128
	// bytes v0-v7 into them, storing the result back into v0-v7.
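	//
	// Each fold step relies on the congruence
	//
	//   (A(x)*x^1024 + B(x)) mod G(x) == (A(x)*(x^1024 mod G(x)) + B(x)) mod G(x)
	//
	// so a 128-bit chunk lying 128 bytes (1024 bits) behind the newly
	// loaded data can be carryless-multiplied by the precomputed constants
	// x^1024 mod G(x) and x^1088 mod G(x) (one per 64-bit half) and XOR'ed
	// into it, deferring the full reduction mod G(x) until the very end.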
.Lfold_128_bytes_loop_\@:
	fold_32_bytes	\p, v0, v1
	fold_32_bytes	\p, v2, v3
	fold_32_bytes	\p, v4, v5
	fold_32_bytes	\p, v6, v7

	subs		len, len, #128
	b.ge		.Lfold_128_bytes_loop_\@

	// Now fold the 112 bytes in v0-v6 into the 16 bytes in v7.

	// Fold across 64 bytes.
	add		fold_consts_ptr, fold_consts_ptr, #16
	ld1		{fold_consts.2d}, [fold_consts_ptr], #16
	fold_16_bytes	\p, v0, v4
	fold_16_bytes	\p, v1, v5
	fold_16_bytes	\p, v2, v6
	fold_16_bytes	\p, v3, v7, 1
	// Fold across 32 bytes.
	fold_16_bytes	\p, v4, v6
	fold_16_bytes	\p, v5, v7, 1
	// Fold across 16 bytes.
	fold_16_bytes	\p, v6, v7

	// Add 128 to get the correct number of data bytes remaining in 0...127
	// (not counting v7), following the previous extra subtraction by 128.
	// Then subtract 16 to simplify the termination condition of the
	// following loop.
	adds		len, len, #(128-16)

	// While >= 16 data bytes remain (not counting v7), fold the 16 bytes v7
	// into them, storing the result back into v7.
	b.lt		.Lfold_16_bytes_loop_done_\@
.Lfold_16_bytes_loop_\@:
	pmull16x64_\p	fold_consts, v7, v8
	eor		v7.16b, v7.16b, v8.16b
	ldr		q0, [buf], #16
CPU_LE(	rev64		v0.16b, v0.16b			)
CPU_LE(	ext		v0.16b, v0.16b, v0.16b, #8	)
	eor		v7.16b, v7.16b, v0.16b
	subs		len, len, #16
	b.ge		.Lfold_16_bytes_loop_\@

.Lfold_16_bytes_loop_done_\@:
	// Add 16 to get the correct number of data bytes remaining in 0...15
	// (not counting v7), following the previous extra subtraction by 16.
	adds		len, len, #16
	b.eq		.Lreduce_final_16_bytes_\@

.Lhandle_partial_segment_\@:
	// Reduce the last '16 + len' bytes where 1 <= len <= 15 and the first
	// 16 bytes are in v7 and the rest are the remaining data in 'buf'.  To
	// do this without needing a fold constant for each possible 'len',
	// redivide the bytes into a first chunk of 'len' bytes and a second
	// chunk of 16 bytes, then fold the first chunk into the second.

	// v0 = last 16 original data bytes
	add		buf, buf, len
	ldr		q0, [buf, #-16]
CPU_LE(	rev64		v0.16b, v0.16b			)
CPU_LE(	ext		v0.16b, v0.16b, v0.16b, #8	)

	// v1 = high order part of second chunk: v7 left-shifted by 'len' bytes.
	adr_l		x4, .Lbyteshift_table + 16
	sub		x4, x4, len
	ld1		{v2.16b}, [x4]
	tbl		v1.16b, {v7.16b}, v2.16b

	// v3 = first chunk: v7 right-shifted by '16-len' bytes.
	movi		v3.16b, #0x80
	eor		v2.16b, v2.16b, v3.16b
	tbl		v3.16b, {v7.16b}, v2.16b

	// Convert to 8-bit masks: 'len' 0x00 bytes, then '16-len' 0xff bytes.
	sshr		v2.16b, v2.16b, #7

	// v2 = second chunk: 'len' bytes from v0 (low-order bytes),
	// then '16-len' bytes from v1 (high-order bytes).
	bsl		v2.16b, v1.16b, v0.16b

	// Fold the first chunk into the second chunk, storing the result in v7.
	pmull16x64_\p	fold_consts, v3, v0
	eor		v7.16b, v3.16b, v0.16b
	eor		v7.16b, v7.16b, v2.16b
	b		.Lreduce_final_16_bytes_\@

.Lless_than_256_bytes_\@:
	// Checksumming a buffer of length 16...255 bytes

	adr_l		fold_consts_ptr, .Lfold_across_16_bytes_consts

	// Load the first 16 data bytes.
	ldr		q7, [buf], #0x10
CPU_LE(	rev64		v7.16b, v7.16b			)
CPU_LE(	ext		v7.16b, v7.16b, v7.16b, #8	)

	// XOR the first 16 data *bits* with the initial CRC value.
	movi		v0.16b, #0
	mov		v0.h[7], init_crc
	eor		v7.16b, v7.16b, v0.16b
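
	// The XOR above works because CRC arithmetic is linear over GF(2):
	// a nonzero initial CRC is equivalent to the same 16 bits having been
	// XOR'ed into the first 16 bits of the message.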

	// Load the fold-across-16-bytes constants.
	ld1		{fold_consts.2d}, [fold_consts_ptr], #16

	cmp		len, #16
	b.eq		.Lreduce_final_16_bytes_\@	// len == 16
	subs		len, len, #32
	b.ge		.Lfold_16_bytes_loop_\@		// 32 <= len <= 255
	add		len, len, #16
	b		.Lhandle_partial_segment_\@	// 17 <= len <= 31

.Lreduce_final_16_bytes_\@:
	.endm

//
// u16 crc_t10dif_pmull_p8(u16 init_crc, const u8 *buf, size_t len,
//			   u8 out[16]);
//
// Assumes len >= 16.
//
SYM_FUNC_START(crc_t10dif_pmull_p8)
	frame_push	1

	// Compose { 0,0,0,0, 8,8,8,8, 1,1,1,1, 9,9,9,9 }
	movi		perm.4h, #8, lsl #8
	orr		perm.2s, #1, lsl #16
	orr		perm.2s, #1, lsl #24
	zip1		perm.16b, perm.16b, perm.16b
	zip1		perm.16b, perm.16b, perm.16b

	crc_t10dif_pmull p8

CPU_LE(	rev64		v7.16b, v7.16b			)
CPU_LE(	ext		v7.16b, v7.16b, v7.16b, #8	)
	str		q7, [x3]

	frame_pop
	ret
SYM_FUNC_END(crc_t10dif_pmull_p8)

	.align		5
//
// u16 crc_t10dif_pmull_p64(u16 init_crc, const u8 *buf, size_t len);
//
// Assumes len >= 16.
//
SYM_FUNC_START(crc_t10dif_pmull_p64)
	crc_t10dif_pmull p64

	// Reduce the 128-bit value M(x), stored in v7, to the final 16-bit CRC.

	movi		v2.16b, #0		// init zero register

	// Load 'x^48 * (x^48 mod G(x))' and 'x^48 * (x^80 mod G(x))'.
	ld1		{fold_consts.2d}, [fold_consts_ptr], #16

	// Fold the high 64 bits into the low 64 bits, while also multiplying by
	// x^64.  This produces a 128-bit value congruent to x^64 * M(x) and
	// whose low 48 bits are 0.
	ext		v0.16b, v2.16b, v7.16b, #8
	pmull2		v7.1q, v7.2d, fold_consts.2d	// high bits * x^48 * (x^80 mod G(x))
	eor		v0.16b, v0.16b, v7.16b		// + low bits * x^64

	// Fold the high 32 bits into the low 96 bits.  This produces a 96-bit
	// value congruent to x^64 * M(x) and whose low 48 bits are 0.
	ext		v1.16b, v0.16b, v2.16b, #12	// extract high 32 bits
	mov		v0.s[3], v2.s[0]		// zero high 32 bits
	pmull		v1.1q, v1.1d, fold_consts.1d	// high 32 bits * x^48 * (x^48 mod G(x))
	eor		v0.16b, v0.16b, v1.16b		// + low bits

	// Load G(x) and floor(x^48 / G(x)).
	ld1		{fold_consts.2d}, [fold_consts_ptr]

	// Use Barrett reduction to compute the final CRC value.
	pmull2		v1.1q, v0.2d, fold_consts.2d	// high 32 bits * floor(x^48 / G(x))
	ushr		v1.2d, v1.2d, #32		// /= x^32
	pmull		v1.1q, v1.1d, fold_consts.1d	// *= G(x)
	ushr		v0.2d, v0.2d, #48
	eor		v0.16b, v0.16b, v1.16b		// + low 16 nonzero bits
	// Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of v0.

	umov		w0, v0.h[0]
	ret
SYM_FUNC_END(crc_t10dif_pmull_p64)
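
// Illustrative C-level usage of the two entry points above (a sketch only;
// 'have_pmull64' and the final call to crc_t10dif_generic() stand in for
// whatever the actual C glue code does with the p8 variant's 16 output
// bytes):
//
//	u16 crc = init_crc;
//
//	if (have_pmull64) {
//		crc = crc_t10dif_pmull_p64(crc, data, len);	/* len >= 16 */
//	} else {
//		u8 folded[16];
//
//		/* the p8 variant leaves the folded 16 bytes unreduced */
//		crc_t10dif_pmull_p8(crc, data, len, folded);
//		crc = crc_t10dif_generic(0, folded, 16);
//	}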

	.section	".rodata", "a"
	.align		4

// Fold constants precomputed from the polynomial 0x18bb7
// G(x) = x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x^1 + x^0
.Lfold_across_128_bytes_consts:
	.quad		0x0000000000006123	// x^(8*128)	mod G(x)
	.quad		0x0000000000002295	// x^(8*128+64)	mod G(x)
// .Lfold_across_64_bytes_consts:
	.quad		0x0000000000001069	// x^(4*128)	mod G(x)
	.quad		0x000000000000dd31	// x^(4*128+64)	mod G(x)
// .Lfold_across_32_bytes_consts:
	.quad		0x000000000000857d	// x^(2*128)	mod G(x)
	.quad		0x0000000000007acc	// x^(2*128+64)	mod G(x)
.Lfold_across_16_bytes_consts:
	.quad		0x000000000000a010	// x^(1*128)	mod G(x)
	.quad		0x0000000000001faa	// x^(1*128+64)	mod G(x)
// .Lfinal_fold_consts:
	.quad		0x1368000000000000	// x^48 * (x^48 mod G(x))
	.quad		0x2d56000000000000	// x^48 * (x^80 mod G(x))
// .Lbarrett_reduction_consts:
	.quad		0x0000000000018bb7	// G(x)
	.quad		0x00000001f65a57f8	// floor(x^48 / G(x))

// For 1 <= len <= 15, the 16-byte vector beginning at &byteshift_table[16 -
// len] is the index vector to shift left by 'len' bytes, and is also {0x80,
// ..., 0x80} XOR the index vector to shift right by '16 - len' bytes.
.Lbyteshift_table:
	.byte		 0x0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87
	.byte		0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f
	.byte		 0x0,  0x1,  0x2,  0x3,  0x4,  0x5,  0x6,  0x7
	.byte		 0x8,  0x9,  0xa,  0xb,  0xc,  0xd,  0xe,  0x0
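
// Example: for len = 2, the 16-byte vector at &byteshift_table[14] is
//	{ 0x8e, 0x8f, 0x0, 0x1, ..., 0xd }
// Using it as tbl indices on a vector v yields { 0, 0, v[0], ..., v[13] }
// (v left-shifted by 2 bytes, since indices with the top bit set select 0),
// and after XOR with 0x80 it becomes { 0xe, 0xf, 0x80, 0x81, ..., 0x8d },
// which yields { v[14], v[15], 0, ..., 0 } (v right-shifted by 14 bytes).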