/* SPDX-License-Identifier: Apache-2.0 OR BSD-2-Clause */
//
// VAES and VPCLMULQDQ optimized AES-GCM for x86_64
//
// Copyright 2024 Google LLC
//
// Author: Eric Biggers <ebiggers@google.com>
//
//------------------------------------------------------------------------------
//
// This file is dual-licensed, meaning that you can use it under your choice of
// either of the following two licenses:
//
// Licensed under the Apache License 2.0 (the "License").  You may obtain a copy
// of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// or
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
//    this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
//------------------------------------------------------------------------------
//
// This file implements AES-GCM (Galois/Counter Mode) for x86_64 CPUs that
// support VAES (vector AES), VPCLMULQDQ (vector carryless multiplication), and
// either AVX512 or AVX10.  Some of the functions, notably the encryption and
// decryption update functions which are the most performance-critical, are
// provided in two variants generated from a macro: one using 256-bit vectors
// (suffix: vaes_avx10_256) and one using 512-bit vectors (vaes_avx10_512).  The
// other, "shared" functions (vaes_avx10) use at most 256-bit vectors.
//
// The functions that use 512-bit vectors are intended for CPUs that support
// 512-bit vectors *and* where using them doesn't cause significant
// downclocking.  They require the following CPU features:
//
//	VAES && VPCLMULQDQ && BMI2 && ((AVX512BW && AVX512VL) || AVX10/512)
//
// The other functions require the following CPU features:
//
//	VAES && VPCLMULQDQ && BMI2 && ((AVX512BW && AVX512VL) || AVX10/256)
//
// All functions use the "System V" ABI.  The Windows ABI is not supported.
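//
// A minimal sketch (in C, as a comment only) of the kind of CPU feature check
// the C glue code is assumed to do before using the 256-bit functions; this
// shows only the AVX512 path of the requirements above, and the real glue code
// also has to verify that the OS has enabled the corresponding AVX-512
// register state:
//
//	if (boot_cpu_has(X86_FEATURE_VAES) &&
//	    boot_cpu_has(X86_FEATURE_VPCLMULQDQ) &&
//	    boot_cpu_has(X86_FEATURE_BMI2) &&
//	    boot_cpu_has(X86_FEATURE_AVX512BW) &&
//	    boot_cpu_has(X86_FEATURE_AVX512VL))
//		/* ...register the vaes_avx10_256 algorithms... */;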
//
// Note that we use "avx10" in the names of the functions as a shorthand to
// really mean "AVX10 or a certain set of AVX512 features".  Due to Intel's
// introduction of AVX512 and then its replacement by AVX10, there doesn't seem
// to be a simple way to name things that makes sense on all CPUs.
//
// Note that the macros that support both 256-bit and 512-bit vectors could
// fairly easily be changed to support 128-bit too.  However, this would *not*
// be sufficient to allow the code to run on CPUs without AVX512 or AVX10,
// because the code heavily uses several features of these extensions other than
// the vector length: the increase in the number of SIMD registers from 16 to
// 32, masking support, and new instructions such as vpternlogd (which can do a
// three-argument XOR).  These features are very useful for AES-GCM.

#include <linux/linkage.h>

.section .rodata
.p2align 6

	// A shuffle mask that reflects the bytes of 16-byte blocks
.Lbswap_mask:
	.octa	0x000102030405060708090a0b0c0d0e0f

	// This is the GHASH reducing polynomial without its constant term, i.e.
	// x^128 + x^7 + x^2 + x, represented using the backwards mapping
	// between bits and polynomial coefficients.
	//
	// Alternatively, it can be interpreted as the naturally-ordered
	// representation of the polynomial x^127 + x^126 + x^121 + 1, i.e. the
	// "reversed" GHASH reducing polynomial without its x^128 term.
.Lgfpoly:
	.octa	0xc2000000000000000000000000000001

	// Same as above, but with the (1 << 64) bit set.
.Lgfpoly_and_internal_carrybit:
	.octa	0xc2000000000000010000000000000001

	// The below constants are used for incrementing the counter blocks.
	// ctr_pattern points to the four 128-bit values [0, 1, 2, 3].
	// inc_2blocks and inc_4blocks point to the single 128-bit values 2 and
	// 4.  Note that the same '2' is reused in ctr_pattern and inc_2blocks.
.Lctr_pattern:
	.octa	0
	.octa	1
.Linc_2blocks:
	.octa	2
	.octa	3
.Linc_4blocks:
	.octa	4

// Number of powers of the hash key stored in the key struct.  The powers are
// stored from highest (H^NUM_H_POWERS) to lowest (H^1).
#define NUM_H_POWERS		16

// Offset to AES key length (in bytes) in the key struct
#define OFFSETOF_AESKEYLEN	480

// Offset to start of hash key powers array in the key struct
#define OFFSETOF_H_POWERS	512

// Offset to end of hash key powers array in the key struct.
//
// This is immediately followed by three zeroized padding blocks, which are
// included so that partial vectors can be handled more easily.  E.g. if VL=64
// and two blocks remain, we load the 4 values [H^2, H^1, 0, 0].  The most
// padding blocks needed is 3, which occurs if [H^1, 0, 0, 0] is loaded.
#define OFFSETOFEND_H_POWERS	(OFFSETOF_H_POWERS + (NUM_H_POWERS * 16))

.text

// Set the vector length in bytes.  This sets the VL variable and defines
// register aliases V0-V31 that map to the ymm or zmm registers.
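//
// For example (an illustrative sketch, not code used verbatim in this file),
// after "_set_veclen 32" the alias V0 expands to %ymm0 and VL is 32, so:
//
//	_set_veclen	32
//	vpxord		V0, V0, V0	// assembles as: vpxord %ymm0, %ymm0, %ymm0
//
// whereas after "_set_veclen 64", V0 expands to %zmm0 and VL is 64.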
.macro	_set_veclen	vl
	.set	VL,	\vl
.irp i, 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, \
	16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
.if VL == 32
	.set	V\i,	%ymm\i
.elseif VL == 64
	.set	V\i,	%zmm\i
.else
	.error "Unsupported vector length"
.endif
.endr
.endm

// The _ghash_mul_step macro does one step of GHASH multiplication of the
// 128-bit lanes of \a by the corresponding 128-bit lanes of \b, storing the
// reduced products in \dst.  \t0, \t1, and \t2 are temporary registers of the
// same size as \a and \b.  To complete all steps, this must be invoked with
// \i=0 through \i=9.  The division into steps allows users of this macro to
// optionally interleave the computation with other instructions.  Users of this
// macro must preserve the parameter registers across steps.
//
// The multiplications are done in GHASH's representation of the finite field
// GF(2^128).  Elements of GF(2^128) are represented as binary polynomials
// (i.e. polynomials whose coefficients are bits) modulo a reducing polynomial
// G.  The GCM specification uses G = x^128 + x^7 + x^2 + x + 1.  Addition is
// just XOR, while multiplication is more complex and has two parts: (a) do
// carryless multiplication of two 128-bit input polynomials to get a 256-bit
// intermediate product polynomial, and (b) reduce the intermediate product to
// 128 bits by adding multiples of G that cancel out terms in it.  (Adding
// multiples of G doesn't change which field element the polynomial represents.)
//
// Unfortunately, the GCM specification maps bits to/from polynomial
// coefficients backwards from the natural order.  In each byte it specifies the
// highest bit to be the lowest order polynomial coefficient, *not* the highest!
// This makes it nontrivial to work with the GHASH polynomials.  We could
// reflect the bits, but x86 doesn't have an instruction that does that.
//
// Instead, we operate on the values without bit-reflecting them.  This *mostly*
// just works, since XOR and carryless multiplication are symmetric with respect
// to bit order, but it has some consequences.  First, due to GHASH's byte
// order, by skipping bit reflection, *byte* reflection becomes necessary to
// give the polynomial terms a consistent order.  E.g., considering an N-bit
// value interpreted using the G = x^128 + x^7 + x^2 + x + 1 convention, bits 0
// through N-1 of the byte-reflected value represent the coefficients of x^(N-1)
// through x^0, whereas bits 0 through N-1 of the non-byte-reflected value
// represent x^7...x^0, x^15...x^8, ..., x^(N-1)...x^(N-8) which can't be worked
// with.  Fortunately, x86's vpshufb instruction can do byte reflection.
//
// Second, forgoing the bit reflection causes an extra multiple of x (still
// using the G = x^128 + x^7 + x^2 + x + 1 convention) to be introduced by each
// multiplication.  This is because an M-bit by N-bit carryless multiplication
// really produces a (M+N-1)-bit product, but in practice it's zero-extended to
// M+N bits.  In the G = x^128 + x^7 + x^2 + x + 1 convention, which maps bits
// to polynomial coefficients backwards, this zero-extension actually changes
// the product by introducing an extra factor of x.  Therefore, users of this
// macro must ensure that one of the inputs has an extra factor of x^-1, i.e.
// the multiplicative inverse of x, to cancel out the extra x.
//
// Third, the backwards coefficients convention is just confusing to work with,
// since it makes "low" and "high" in the polynomial math mean the opposite of
// their normal meaning in computer programming.  This can be solved by using an
// alternative interpretation: the polynomial coefficients are understood to be
// in the natural order, and the multiplication is actually \a * \b * x^-128 mod
// x^128 + x^127 + x^126 + x^121 + 1.  This doesn't change the inputs, outputs,
// or the implementation at all; it just changes the mathematical interpretation
// of what each instruction is doing.  Starting from here, we'll use this
// alternative interpretation, as it's easier to understand the code that way.
//
// Moving onto the implementation, the vpclmulqdq instruction does 64 x 64 =>
// 128-bit carryless multiplication, so we break the 128 x 128 multiplication
// into parts as follows (the _L and _H suffixes denote low and high 64 bits):
//
//	LO = a_L * b_L
//	MI = (a_L * b_H) + (a_H * b_L)
//	HI = a_H * b_H
//
// The 256-bit product is x^128*HI + x^64*MI + LO.  LO, MI, and HI are 128-bit.
// Note that MI "overlaps" with LO and HI.  We don't consolidate MI into LO and
// HI right away, since the way the reduction works makes that unnecessary.
//
// For the reduction, we cancel out the low 128 bits by adding multiples of G =
// x^128 + x^127 + x^126 + x^121 + 1.  This is done by two iterations, each of
// which cancels out the next lowest 64 bits.  Consider a value x^64*A + B,
// where A and B are 128-bit.  Adding B_L*G to that value gives:
//
//	x^64*A + B + B_L*G
//	= x^64*A + x^64*B_H + B_L + B_L*(x^128 + x^127 + x^126 + x^121 + 1)
//	= x^64*A + x^64*B_H + B_L + x^128*B_L + x^64*B_L*(x^63 + x^62 + x^57) + B_L
//	= x^64*A + x^64*B_H + x^128*B_L + x^64*B_L*(x^63 + x^62 + x^57) + B_L + B_L
//	= x^64*(A + B_H + x^64*B_L + B_L*(x^63 + x^62 + x^57))
//
// So: if we sum A, B with its halves swapped, and the low half of B times x^63
// + x^62 + x^57, we get a 128-bit value C where x^64*C is congruent to the
// original value x^64*A + B.  I.e., the low 64 bits got canceled out.
//
// We just need to apply this twice: first to fold LO into MI, and second to
// fold the updated MI into HI.
//
// The needed three-argument XORs are done using the vpternlogd instruction with
// immediate 0x96, since this is faster than two vpxord instructions.
//
// A potential optimization, assuming that b is fixed per-key (if a is fixed
// per-key it would work the other way around), is to use one iteration of the
// reduction described above to precompute a value c such that x^64*c = b mod G,
// and then multiply a_L by c (and implicitly by x^64) instead of by b:
//
//	MI = (a_L * c_L) + (a_H * b_L)
//	HI = (a_L * c_H) + (a_H * b_H)
//
// This would eliminate the LO part of the intermediate product, which would
// eliminate the need to fold LO into MI.  This would save two instructions,
// including a vpclmulqdq.  However, we currently don't use this optimization
// because it would require twice as many per-key precomputed values.
//
// Using Karatsuba multiplication instead of "schoolbook" multiplication
// similarly would save a vpclmulqdq but does not seem to be worth it.
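//
// For reference, here is the complete multiplication and reduction expressed
// as a C-style sketch (comment only; clmul64() is a notional helper doing a
// 64 x 64 => 128-bit carryless multiplication, and 128-bit values are treated
// as pairs of 64-bit halves {lo, hi}):
//
//	LO = clmul64(a.lo, b.lo);
//	MI = clmul64(a.lo, b.hi) ^ clmul64(a.hi, b.lo);
//	HI = clmul64(a.hi, b.hi);
//
//	// Fold LO into MI.  0xc200000000000000 is x^63 + x^62 + x^57, i.e. the
//	// high half of .Lgfpoly.
//	t = clmul64(LO.lo, 0xc200000000000000);
//	MI.lo ^= t.lo ^ LO.hi;		// XOR in t and LO with halves swapped
//	MI.hi ^= t.hi ^ LO.lo;
//
//	// Fold MI into HI in the same way.  HI is then the reduced product.
//	t = clmul64(MI.lo, 0xc200000000000000);
//	HI.lo ^= t.lo ^ MI.hi;
//	HI.hi ^= t.hi ^ MI.lo;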
.macro	_ghash_mul_step	i, a, b, dst, gfpoly, t0, t1, t2
.if \i == 0
	vpclmulqdq	$0x00, \a, \b, \t0	  // LO = a_L * b_L
	vpclmulqdq	$0x01, \a, \b, \t1	  // MI_0 = a_L * b_H
.elseif \i == 1
	vpclmulqdq	$0x10, \a, \b, \t2	  // MI_1 = a_H * b_L
.elseif \i == 2
	vpxord		\t2, \t1, \t1		  // MI = MI_0 + MI_1
.elseif \i == 3
	vpclmulqdq	$0x01, \t0, \gfpoly, \t2  // LO_L*(x^63 + x^62 + x^57)
.elseif \i == 4
	vpshufd		$0x4e, \t0, \t0		  // Swap halves of LO
.elseif \i == 5
	vpternlogd	$0x96, \t2, \t0, \t1	  // Fold LO into MI
.elseif \i == 6
	vpclmulqdq	$0x11, \a, \b, \dst	  // HI = a_H * b_H
.elseif \i == 7
	vpclmulqdq	$0x01, \t1, \gfpoly, \t0  // MI_L*(x^63 + x^62 + x^57)
.elseif \i == 8
	vpshufd		$0x4e, \t1, \t1		  // Swap halves of MI
.elseif \i == 9
	vpternlogd	$0x96, \t0, \t1, \dst	  // Fold MI into HI
.endif
.endm

// GHASH-multiply the 128-bit lanes of \a by the 128-bit lanes of \b and store
// the reduced products in \dst.  See _ghash_mul_step for full explanation.
.macro	_ghash_mul	a, b, dst, gfpoly, t0, t1, t2
.irp i, 0,1,2,3,4,5,6,7,8,9
	_ghash_mul_step	\i, \a, \b, \dst, \gfpoly, \t0, \t1, \t2
.endr
.endm

// GHASH-multiply the 128-bit lanes of \a by the 128-bit lanes of \b and add the
// *unreduced* products to \lo, \mi, and \hi.
.macro	_ghash_mul_noreduce	a, b, lo, mi, hi, t0, t1, t2, t3
	vpclmulqdq	$0x00, \a, \b, \t0	// a_L * b_L
	vpclmulqdq	$0x01, \a, \b, \t1	// a_L * b_H
	vpclmulqdq	$0x10, \a, \b, \t2	// a_H * b_L
	vpclmulqdq	$0x11, \a, \b, \t3	// a_H * b_H
	vpxord		\t0, \lo, \lo
	vpternlogd	$0x96, \t2, \t1, \mi
	vpxord		\t3, \hi, \hi
.endm

// Reduce the unreduced products from \lo, \mi, and \hi and store the 128-bit
// reduced products in \hi.  See _ghash_mul_step for explanation of reduction.
.macro	_ghash_reduce	lo, mi, hi, gfpoly, t0
	vpclmulqdq	$0x01, \lo, \gfpoly, \t0
	vpshufd		$0x4e, \lo, \lo
	vpternlogd	$0x96, \t0, \lo, \mi
	vpclmulqdq	$0x01, \mi, \gfpoly, \t0
	vpshufd		$0x4e, \mi, \mi
	vpternlogd	$0x96, \t0, \mi, \hi
.endm

// void aes_gcm_precompute_##suffix(struct aes_gcm_key_avx10 *key);
//
// Given the expanded AES key |key->aes_key|, this function derives the GHASH
// subkey and initializes |key->ghash_key_powers| with powers of it.
//
// The number of key powers initialized is NUM_H_POWERS, and they are stored in
// the order H^NUM_H_POWERS to H^1.  The zeroized padding blocks after the key
// powers themselves are also initialized.
//
// This macro supports both VL=32 and VL=64.  _set_veclen must have been invoked
// with the desired length.  In the VL=32 case, the function computes twice as
// many key powers as are actually used by the VL=32 GCM update functions.
// This is done to keep the key format the same regardless of vector length.
.macro	_aes_gcm_precompute

	// Function arguments
	.set	KEY,		%rdi

	// Additional local variables.  V0-V2 and %rax are used as temporaries.
	.set	POWERS_PTR,	%rsi
	.set	RNDKEYLAST_PTR,	%rdx
	.set	H_CUR,		V3
	.set	H_CUR_YMM,	%ymm3
	.set	H_CUR_XMM,	%xmm3
	.set	H_INC,		V4
	.set	H_INC_YMM,	%ymm4
	.set	H_INC_XMM,	%xmm4
	.set	GFPOLY,		V5
	.set	GFPOLY_YMM,	%ymm5
	.set	GFPOLY_XMM,	%xmm5

	// Get pointer to lowest set of key powers (located at end of array).
	lea		OFFSETOFEND_H_POWERS-VL(KEY), POWERS_PTR

	// Encrypt an all-zeroes block to get the raw hash subkey.
	movl		OFFSETOF_AESKEYLEN(KEY), %eax
	lea		6*16(KEY,%rax,4), RNDKEYLAST_PTR
	vmovdqu		(KEY), %xmm0	// Zero-th round key XOR all-zeroes block
	add		$16, KEY
1:
	vaesenc		(KEY), %xmm0, %xmm0
	add		$16, KEY
	cmp		KEY, RNDKEYLAST_PTR
	jne		1b
	vaesenclast	(RNDKEYLAST_PTR), %xmm0, %xmm0

	// Reflect the bytes of the raw hash subkey.
	vpshufb		.Lbswap_mask(%rip), %xmm0, H_CUR_XMM

	// Zeroize the padding blocks.
	vpxor		%xmm0, %xmm0, %xmm0
	vmovdqu		%ymm0, VL(POWERS_PTR)
	vmovdqu		%xmm0, VL+2*16(POWERS_PTR)

	// Finish preprocessing the first key power, H^1.  Since this GHASH
	// implementation operates directly on values with the backwards bit
	// order specified by the GCM standard, it's necessary to preprocess the
	// raw key as follows.  First, reflect its bytes.  Second, multiply it
	// by x^-1 mod x^128 + x^7 + x^2 + x + 1 (if using the backwards
	// interpretation of polynomial coefficients), which can also be
	// interpreted as multiplication by x mod x^128 + x^127 + x^126 + x^121
	// + 1 using the alternative, natural interpretation of polynomial
	// coefficients.  For details, see the comment above _ghash_mul_step.
	//
	// Either way, for the multiplication the concrete operation performed
	// is a left shift of the 128-bit value by 1 bit, then an XOR with (0xc2
	// << 120) | 1 if a 1 bit was carried out.  However, there's no 128-bit
	// wide shift instruction, so instead double each of the two 64-bit
	// halves and incorporate the internal carry bit into the value XOR'd.
	vpshufd		$0xd3, H_CUR_XMM, %xmm0
	vpsrad		$31, %xmm0, %xmm0
	vpaddq		H_CUR_XMM, H_CUR_XMM, H_CUR_XMM
	vpand		.Lgfpoly_and_internal_carrybit(%rip), %xmm0, %xmm0
	vpxor		%xmm0, H_CUR_XMM, H_CUR_XMM

	// Load the gfpoly constant.
	vbroadcasti32x4	.Lgfpoly(%rip), GFPOLY

	// Square H^1 to get H^2.
	//
	// Note that as with H^1, all higher key powers also need an extra
	// factor of x^-1 (or x using the natural interpretation).  Nothing
	// special needs to be done to make this happen, though: H^1 * H^1 would
	// end up with two factors of x^-1, but the multiplication consumes one.
	// So the product H^2 ends up with the desired one factor of x^-1.
	_ghash_mul	H_CUR_XMM, H_CUR_XMM, H_INC_XMM, GFPOLY_XMM, \
			%xmm0, %xmm1, %xmm2

	// Create H_CUR_YMM = [H^2, H^1] and H_INC_YMM = [H^2, H^2].
	vinserti128	$1, H_CUR_XMM, H_INC_YMM, H_CUR_YMM
	vinserti128	$1, H_INC_XMM, H_INC_YMM, H_INC_YMM

.if VL == 64
	// Create H_CUR = [H^4, H^3, H^2, H^1] and H_INC = [H^4, H^4, H^4, H^4].
	_ghash_mul	H_INC_YMM, H_CUR_YMM, H_INC_YMM, GFPOLY_YMM, \
			%ymm0, %ymm1, %ymm2
	vinserti64x4	$1, H_CUR_YMM, H_INC, H_CUR
	vshufi64x2	$0, H_INC, H_INC, H_INC
.endif

	// Store the lowest set of key powers.
	vmovdqu8	H_CUR, (POWERS_PTR)

	// Compute and store the remaining key powers.  With VL=32, repeatedly
	// multiply [H^(i+1), H^i] by [H^2, H^2] to get [H^(i+3), H^(i+2)].
	// With VL=64, repeatedly multiply [H^(i+3), H^(i+2), H^(i+1), H^i] by
	// [H^4, H^4, H^4, H^4] to get [H^(i+7), H^(i+6), H^(i+5), H^(i+4)].
	mov		$(NUM_H_POWERS*16/VL) - 1, %eax
.Lprecompute_next\@:
	sub		$VL, POWERS_PTR
	_ghash_mul	H_INC, H_CUR, H_CUR, GFPOLY, V0, V1, V2
	vmovdqu8	H_CUR, (POWERS_PTR)
	dec		%eax
	jnz		.Lprecompute_next\@

	vzeroupper	// This is needed after using ymm or zmm registers.
	RET
.endm

// XOR together the 128-bit lanes of \src (whose low lane is \src_xmm) and store
// the result in \dst_xmm.  This implicitly zeroizes the other lanes of \dst_xmm.
.macro	_horizontal_xor	src, src_xmm, dst_xmm, t0_xmm, t1_xmm, t2_xmm
	vextracti32x4	$1, \src, \t0_xmm
.if VL == 32
	vpxord		\t0_xmm, \src_xmm, \dst_xmm
.elseif VL == 64
	vextracti32x4	$2, \src, \t1_xmm
	vextracti32x4	$3, \src, \t2_xmm
	vpxord		\t0_xmm, \src_xmm, \dst_xmm
	vpternlogd	$0x96, \t1_xmm, \t2_xmm, \dst_xmm
.else
	.error "Unsupported vector length"
.endif
.endm

// Do one step of the GHASH update of the data blocks given in the vector
// registers GHASHDATA[0-3].  \i specifies the step to do, 0 through 9.  The
// division into steps allows users of this macro to optionally interleave the
// computation with other instructions.  This macro uses the vector register
// GHASH_ACC as input/output; GHASHDATA[0-3] as inputs that are clobbered;
// H_POW[4-1], GFPOLY, and BSWAP_MASK as inputs that aren't clobbered; and
// GHASHTMP[0-2] as temporaries.  This macro handles the byte-reflection of the
// data blocks.  The parameter registers must be preserved across steps.
//
// The GHASH update does: GHASH_ACC = H_POW4*(GHASHDATA0 + GHASH_ACC) +
// H_POW3*GHASHDATA1 + H_POW2*GHASHDATA2 + H_POW1*GHASHDATA3, where the
// operations are vectorized operations on vectors of 16-byte blocks.  E.g.,
// with VL=32 there are 2 blocks per vector and the vectorized terms correspond
// to the following non-vectorized terms:
//
//	H_POW4*(GHASHDATA0 + GHASH_ACC) => H^8*(blk0 + GHASH_ACC_XMM) and H^7*(blk1 + 0)
//	H_POW3*GHASHDATA1 => H^6*blk2 and H^5*blk3
//	H_POW2*GHASHDATA2 => H^4*blk4 and H^3*blk5
//	H_POW1*GHASHDATA3 => H^2*blk6 and H^1*blk7
//
// With VL=64, we use 4 blocks/vector, H^16 through H^1, and blk0 through blk15.
//
// More concretely, this code does:
//	- Do vectorized "schoolbook" multiplications to compute the intermediate
//	  256-bit product of each block and its corresponding hash key power.
//	  There are 4*VL/16 of these intermediate products.
//	- Sum (XOR) the intermediate 256-bit products across vectors.  This leaves
//	  VL/16 256-bit intermediate values.
//	- Do a vectorized reduction of these 256-bit intermediate values to
//	  128-bits each.  This leaves VL/16 128-bit intermediate values.
//	- Sum (XOR) these values and store the 128-bit result in GHASH_ACC_XMM.
//
// See _ghash_mul_step for the full explanation of the operations performed for
// each individual finite field multiplication and reduction.
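//
// As an illustration of why the computation is split into steps, the main
// en/decryption loop below interleaves these steps with AES rounds roughly
// like this (a sketch of the pattern only; see _aes_gcm_update for the real
// code):
//
//	.irp i, 9,8,7,6,5
//		_vaesenc_4x	RNDKEY_M\i
//		_ghash_step_4x	(9 - \i)
//	.endr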
.macro	_ghash_step_4x	i
.if \i == 0
	vpshufb		BSWAP_MASK, GHASHDATA0, GHASHDATA0
	vpxord		GHASH_ACC, GHASHDATA0, GHASHDATA0
	vpshufb		BSWAP_MASK, GHASHDATA1, GHASHDATA1
	vpshufb		BSWAP_MASK, GHASHDATA2, GHASHDATA2
.elseif \i == 1
	vpshufb		BSWAP_MASK, GHASHDATA3, GHASHDATA3
	vpclmulqdq	$0x00, H_POW4, GHASHDATA0, GHASH_ACC	// LO_0
	vpclmulqdq	$0x00, H_POW3, GHASHDATA1, GHASHTMP0	// LO_1
	vpclmulqdq	$0x00, H_POW2, GHASHDATA2, GHASHTMP1	// LO_2
.elseif \i == 2
	vpxord		GHASHTMP0, GHASH_ACC, GHASH_ACC		// sum(LO_{1,0})
	vpclmulqdq	$0x00, H_POW1, GHASHDATA3, GHASHTMP2	// LO_3
	vpternlogd	$0x96, GHASHTMP2, GHASHTMP1, GHASH_ACC	// LO = sum(LO_{3,2,1,0})
	vpclmulqdq	$0x01, H_POW4, GHASHDATA0, GHASHTMP0	// MI_0
.elseif \i == 3
	vpclmulqdq	$0x01, H_POW3, GHASHDATA1, GHASHTMP1	// MI_1
	vpclmulqdq	$0x01, H_POW2, GHASHDATA2, GHASHTMP2	// MI_2
	vpternlogd	$0x96, GHASHTMP2, GHASHTMP1, GHASHTMP0	// sum(MI_{2,1,0})
	vpclmulqdq	$0x01, H_POW1, GHASHDATA3, GHASHTMP1	// MI_3
.elseif \i == 4
	vpclmulqdq	$0x10, H_POW4, GHASHDATA0, GHASHTMP2	// MI_4
	vpternlogd	$0x96, GHASHTMP2, GHASHTMP1, GHASHTMP0	// sum(MI_{4,3,2,1,0})
	vpclmulqdq	$0x10, H_POW3, GHASHDATA1, GHASHTMP1	// MI_5
	vpclmulqdq	$0x10, H_POW2, GHASHDATA2, GHASHTMP2	// MI_6
.elseif \i == 5
	vpternlogd	$0x96, GHASHTMP2, GHASHTMP1, GHASHTMP0	// sum(MI_{6,5,4,3,2,1,0})
	vpclmulqdq	$0x01, GHASH_ACC, GFPOLY, GHASHTMP2	// LO_L*(x^63 + x^62 + x^57)
	vpclmulqdq	$0x10, H_POW1, GHASHDATA3, GHASHTMP1	// MI_7
	vpxord		GHASHTMP1, GHASHTMP0, GHASHTMP0		// MI = sum(MI_{7,6,5,4,3,2,1,0})
.elseif \i == 6
	vpshufd		$0x4e, GHASH_ACC, GHASH_ACC		// Swap halves of LO
	vpclmulqdq	$0x11, H_POW4, GHASHDATA0, GHASHDATA0	// HI_0
	vpclmulqdq	$0x11, H_POW3, GHASHDATA1, GHASHDATA1	// HI_1
	vpclmulqdq	$0x11, H_POW2, GHASHDATA2, GHASHDATA2	// HI_2
.elseif \i == 7
	vpternlogd	$0x96, GHASHTMP2, GHASH_ACC, GHASHTMP0	// Fold LO into MI
	vpclmulqdq	$0x11, H_POW1, GHASHDATA3, GHASHDATA3	// HI_3
	vpternlogd	$0x96, GHASHDATA2, GHASHDATA1, GHASHDATA0 // sum(HI_{2,1,0})
	vpclmulqdq	$0x01, GHASHTMP0, GFPOLY, GHASHTMP1	// MI_L*(x^63 + x^62 + x^57)
.elseif \i == 8
	vpxord		GHASHDATA3, GHASHDATA0, GHASH_ACC	// HI = sum(HI_{3,2,1,0})
	vpshufd		$0x4e, GHASHTMP0, GHASHTMP0		// Swap halves of MI
	vpternlogd	$0x96, GHASHTMP1, GHASHTMP0, GHASH_ACC	// Fold MI into HI
.elseif \i == 9
	_horizontal_xor	GHASH_ACC, GHASH_ACC_XMM, GHASH_ACC_XMM, \
			GHASHDATA0_XMM, GHASHDATA1_XMM, GHASHDATA2_XMM
.endif
.endm

// Do one non-last round of AES encryption on the counter blocks in V0-V3 using
// the round key that has been broadcast to all 128-bit lanes of \round_key.
.macro	_vaesenc_4x	round_key
	vaesenc		\round_key, V0, V0
	vaesenc		\round_key, V1, V1
	vaesenc		\round_key, V2, V2
	vaesenc		\round_key, V3, V3
.endm

// Start the AES encryption of four vectors of counter blocks.
.macro	_ctr_begin_4x

	// Increment LE_CTR four times to generate four vectors of little-endian
	// counter blocks, swap each to big-endian, and store them in V0-V3.
	vpshufb		BSWAP_MASK, LE_CTR, V0
	vpaddd		LE_CTR_INC, LE_CTR, LE_CTR
	vpshufb		BSWAP_MASK, LE_CTR, V1
	vpaddd		LE_CTR_INC, LE_CTR, LE_CTR
	vpshufb		BSWAP_MASK, LE_CTR, V2
	vpaddd		LE_CTR_INC, LE_CTR, LE_CTR
	vpshufb		BSWAP_MASK, LE_CTR, V3
	vpaddd		LE_CTR_INC, LE_CTR, LE_CTR

	// AES "round zero": XOR in the zero-th round key.
	vpxord		RNDKEY0, V0, V0
	vpxord		RNDKEY0, V1, V1
	vpxord		RNDKEY0, V2, V2
	vpxord		RNDKEY0, V3, V3
.endm

// void aes_gcm_{enc,dec}_update_##suffix(const struct aes_gcm_key_avx10 *key,
//					   const u32 le_ctr[4], u8 ghash_acc[16],
//					   const u8 *src, u8 *dst, int datalen);
//
// This macro generates a GCM encryption or decryption update function with the
// above prototype (with \enc selecting which one).  This macro supports both
// VL=32 and VL=64.  _set_veclen must have been invoked with the desired length.
//
// This function computes the next portion of the CTR keystream, XOR's it with
// |datalen| bytes from |src|, and writes the resulting encrypted or decrypted
// data to |dst|.  It also updates the GHASH accumulator |ghash_acc| using the
// next |datalen| ciphertext bytes.
//
// |datalen| must be a multiple of 16, except on the last call where it can be
// any length.  The caller must do any buffering needed to ensure this.  Both
// in-place and out-of-place en/decryption are supported.
//
// |le_ctr| must give the current counter in little-endian format.  For a new
// message, the low word of the counter must be 2.  This function loads the
// counter from |le_ctr| and increments the loaded counter as needed, but it
// does *not* store the updated counter back to |le_ctr|.  The caller must
// update |le_ctr| if any more data segments follow.  Internally, only the low
// 32-bit word of the counter is incremented, following the GCM standard.
.macro	_aes_gcm_update	enc

	// Function arguments
	.set	KEY,		%rdi
	.set	LE_CTR_PTR,	%rsi
	.set	GHASH_ACC_PTR,	%rdx
	.set	SRC,		%rcx
	.set	DST,		%r8
	.set	DATALEN,	%r9d
	.set	DATALEN64,	%r9	// Zero-extend DATALEN before using!

	// Additional local variables

	// %rax and %k1 are used as temporary registers.  LE_CTR_PTR is also
	// available as a temporary register after the counter is loaded.

	// AES key length in bytes
	.set	AESKEYLEN,	%r10d
	.set	AESKEYLEN64,	%r10

	// Pointer to the last AES round key for the chosen AES variant
	.set	RNDKEYLAST_PTR,	%r11

	// In the main loop, V0-V3 are used as AES input and output.  Elsewhere
	// they are used as temporary registers.

	// GHASHDATA[0-3] hold the ciphertext blocks and GHASH input data.
	.set	GHASHDATA0,	V4
	.set	GHASHDATA0_XMM,	%xmm4
	.set	GHASHDATA1,	V5
	.set	GHASHDATA1_XMM,	%xmm5
	.set	GHASHDATA2,	V6
	.set	GHASHDATA2_XMM,	%xmm6
	.set	GHASHDATA3,	V7

	// BSWAP_MASK is the shuffle mask for byte-reflecting 128-bit values
	// using vpshufb, copied to all 128-bit lanes.
	.set	BSWAP_MASK,	V8

	// RNDKEY temporarily holds the next AES round key.
	.set	RNDKEY,		V9

	// GHASH_ACC is the accumulator variable for GHASH.  When fully reduced,
	// only the lowest 128-bit lane can be nonzero.  When not fully reduced,
	// more than one lane may be used, and they need to be XOR'd together.
	.set	GHASH_ACC,	V10
	.set	GHASH_ACC_XMM,	%xmm10

	// LE_CTR_INC is the vector of 32-bit words that need to be added to a
	// vector of little-endian counter blocks to advance it forwards.
	.set	LE_CTR_INC,	V11

	// LE_CTR contains the next set of little-endian counter blocks.
	.set	LE_CTR,		V12

	// RNDKEY0, RNDKEYLAST, and RNDKEY_M[9-5] contain cached AES round keys,
	// copied to all 128-bit lanes.  RNDKEY0 is the zero-th round key,
	// RNDKEYLAST the last, and RNDKEY_M\i the one \i-th from the last.
	.set	RNDKEY0,	V13
	.set	RNDKEYLAST,	V14
	.set	RNDKEY_M9,	V15
	.set	RNDKEY_M8,	V16
	.set	RNDKEY_M7,	V17
	.set	RNDKEY_M6,	V18
	.set	RNDKEY_M5,	V19

	// RNDKEYLAST[0-3] temporarily store the last AES round key XOR'd with
	// the corresponding block of source data.  This is useful because
	// vaesenclast(key, a) ^ b == vaesenclast(key ^ b, a), and key ^ b can
	// be computed in parallel with the AES rounds.
	.set	RNDKEYLAST0,	V20
	.set	RNDKEYLAST1,	V21
	.set	RNDKEYLAST2,	V22
	.set	RNDKEYLAST3,	V23

	// GHASHTMP[0-2] are temporary variables used by _ghash_step_4x.  These
	// cannot coincide with anything used for AES encryption, since for
	// performance reasons GHASH and AES encryption are interleaved.
	.set	GHASHTMP0,	V24
	.set	GHASHTMP1,	V25
	.set	GHASHTMP2,	V26

	// H_POW[4-1] contain the powers of the hash key H^(4*VL/16)...H^1.  The
	// descending numbering reflects the order of the key powers.
	.set	H_POW4,		V27
	.set	H_POW3,		V28
	.set	H_POW2,		V29
	.set	H_POW1,		V30

	// GFPOLY contains the .Lgfpoly constant, copied to all 128-bit lanes.
	.set	GFPOLY,		V31

	// Load some constants.
	vbroadcasti32x4	.Lbswap_mask(%rip), BSWAP_MASK
	vbroadcasti32x4	.Lgfpoly(%rip), GFPOLY

	// Load the GHASH accumulator and the starting counter.
	vmovdqu		(GHASH_ACC_PTR), GHASH_ACC_XMM
	vbroadcasti32x4	(LE_CTR_PTR), LE_CTR

	// Load the AES key length in bytes.
	movl		OFFSETOF_AESKEYLEN(KEY), AESKEYLEN

	// Make RNDKEYLAST_PTR point to the last AES round key.  This is the
	// round key with index 10, 12, or 14 for AES-128, AES-192, or AES-256
	// respectively.  Then load the zero-th and last round keys.
	lea		6*16(KEY,AESKEYLEN64,4), RNDKEYLAST_PTR
	vbroadcasti32x4	(KEY), RNDKEY0
	vbroadcasti32x4	(RNDKEYLAST_PTR), RNDKEYLAST

	// Finish initializing LE_CTR by adding [0, 1, ...] to its low words.
	vpaddd		.Lctr_pattern(%rip), LE_CTR, LE_CTR

	// Initialize LE_CTR_INC to contain VL/16 in all 128-bit lanes.
.if VL == 32
	vbroadcasti32x4	.Linc_2blocks(%rip), LE_CTR_INC
.elseif VL == 64
	vbroadcasti32x4	.Linc_4blocks(%rip), LE_CTR_INC
.else
	.error "Unsupported vector length"
.endif

	// If there are at least 4*VL bytes of data, then continue into the loop
	// that processes 4*VL bytes of data at a time.  Otherwise skip it.
	//
	// Pre-subtracting 4*VL from DATALEN saves an instruction from the main
	// loop and also ensures that at least one write always occurs to
	// DATALEN, zero-extending it and allowing DATALEN64 to be used later.
	sub		$4*VL, DATALEN
	jl		.Lcrypt_loop_4x_done\@

	// Load powers of the hash key.
	vmovdqu8	OFFSETOFEND_H_POWERS-4*VL(KEY), H_POW4
	vmovdqu8	OFFSETOFEND_H_POWERS-3*VL(KEY), H_POW3
	vmovdqu8	OFFSETOFEND_H_POWERS-2*VL(KEY), H_POW2
	vmovdqu8	OFFSETOFEND_H_POWERS-1*VL(KEY), H_POW1

	// Main loop: en/decrypt and hash 4 vectors at a time.
	//
	// When possible, interleave the AES encryption of the counter blocks
	// with the GHASH update of the ciphertext blocks.  This improves
	// performance on many CPUs because the execution ports used by the VAES
	// instructions often differ from those used by vpclmulqdq and other
	// instructions used in GHASH.  For example, many Intel CPUs dispatch
	// vaesenc to ports 0 and 1 and vpclmulqdq to port 5.
	//
	// The interleaving is easiest to do during decryption, since during
	// decryption the ciphertext blocks are immediately available.  For
	// encryption, instead encrypt the first set of blocks, then hash those
	// blocks while encrypting the next set of blocks, repeat that as
	// needed, and finally hash the last set of blocks.

.if \enc
	// Encrypt the first 4 vectors of plaintext blocks.  Leave the resulting
	// ciphertext in GHASHDATA[0-3] for GHASH.
	_ctr_begin_4x
	lea		16(KEY), %rax
1:
	vbroadcasti32x4	(%rax), RNDKEY
	_vaesenc_4x	RNDKEY
	add		$16, %rax
	cmp		%rax, RNDKEYLAST_PTR
	jne		1b
	vpxord		0*VL(SRC), RNDKEYLAST, RNDKEYLAST0
	vpxord		1*VL(SRC), RNDKEYLAST, RNDKEYLAST1
	vpxord		2*VL(SRC), RNDKEYLAST, RNDKEYLAST2
	vpxord		3*VL(SRC), RNDKEYLAST, RNDKEYLAST3
	vaesenclast	RNDKEYLAST0, V0, GHASHDATA0
	vaesenclast	RNDKEYLAST1, V1, GHASHDATA1
	vaesenclast	RNDKEYLAST2, V2, GHASHDATA2
	vaesenclast	RNDKEYLAST3, V3, GHASHDATA3
	vmovdqu8	GHASHDATA0, 0*VL(DST)
	vmovdqu8	GHASHDATA1, 1*VL(DST)
	vmovdqu8	GHASHDATA2, 2*VL(DST)
	vmovdqu8	GHASHDATA3, 3*VL(DST)
	add		$4*VL, SRC
	add		$4*VL, DST
	sub		$4*VL, DATALEN
	jl		.Lghash_last_ciphertext_4x\@
.endif

	// Cache as many additional AES round keys as possible.
.irp i, 9,8,7,6,5
	vbroadcasti32x4	-\i*16(RNDKEYLAST_PTR), RNDKEY_M\i
.endr

.Lcrypt_loop_4x\@:

	// If decrypting, load more ciphertext blocks into GHASHDATA[0-3].  If
	// encrypting, GHASHDATA[0-3] already contain the previous ciphertext.
.if !\enc
	vmovdqu8	0*VL(SRC), GHASHDATA0
	vmovdqu8	1*VL(SRC), GHASHDATA1
	vmovdqu8	2*VL(SRC), GHASHDATA2
	vmovdqu8	3*VL(SRC), GHASHDATA3
.endif

	// Start the AES encryption of the counter blocks.
	_ctr_begin_4x
	cmp		$24, AESKEYLEN
	jl		128f		// AES-128?
	je		192f		// AES-192?
	// AES-256
	vbroadcasti32x4	-13*16(RNDKEYLAST_PTR), RNDKEY
	_vaesenc_4x	RNDKEY
	vbroadcasti32x4	-12*16(RNDKEYLAST_PTR), RNDKEY
	_vaesenc_4x	RNDKEY
192:
	vbroadcasti32x4	-11*16(RNDKEYLAST_PTR), RNDKEY
	_vaesenc_4x	RNDKEY
	vbroadcasti32x4	-10*16(RNDKEYLAST_PTR), RNDKEY
	_vaesenc_4x	RNDKEY
128:

	// XOR the source data with the last round key, saving the result in
	// RNDKEYLAST[0-3].  This reduces latency by taking advantage of the
	// property vaesenclast(key, a) ^ b == vaesenclast(key ^ b, a).
.if \enc
	vpxord		0*VL(SRC), RNDKEYLAST, RNDKEYLAST0
	vpxord		1*VL(SRC), RNDKEYLAST, RNDKEYLAST1
	vpxord		2*VL(SRC), RNDKEYLAST, RNDKEYLAST2
	vpxord		3*VL(SRC), RNDKEYLAST, RNDKEYLAST3
.else
	vpxord		GHASHDATA0, RNDKEYLAST, RNDKEYLAST0
	vpxord		GHASHDATA1, RNDKEYLAST, RNDKEYLAST1
	vpxord		GHASHDATA2, RNDKEYLAST, RNDKEYLAST2
	vpxord		GHASHDATA3, RNDKEYLAST, RNDKEYLAST3
.endif

	// Finish the AES encryption of the counter blocks in V0-V3, interleaved
	// with the GHASH update of the ciphertext blocks in GHASHDATA[0-3].
.irp i, 9,8,7,6,5
	_vaesenc_4x	RNDKEY_M\i
	_ghash_step_4x	(9 - \i)
.endr
.irp i, 4,3,2,1
	vbroadcasti32x4	-\i*16(RNDKEYLAST_PTR), RNDKEY
	_vaesenc_4x	RNDKEY
	_ghash_step_4x	(9 - \i)
.endr
	_ghash_step_4x	9

	// Do the last AES round.  This handles the XOR with the source data
	// too, as per the optimization described above.
	vaesenclast	RNDKEYLAST0, V0, GHASHDATA0
	vaesenclast	RNDKEYLAST1, V1, GHASHDATA1
	vaesenclast	RNDKEYLAST2, V2, GHASHDATA2
	vaesenclast	RNDKEYLAST3, V3, GHASHDATA3

	// Store the en/decrypted data to DST.
	vmovdqu8	GHASHDATA0, 0*VL(DST)
	vmovdqu8	GHASHDATA1, 1*VL(DST)
	vmovdqu8	GHASHDATA2, 2*VL(DST)
	vmovdqu8	GHASHDATA3, 3*VL(DST)

	add		$4*VL, SRC
	add		$4*VL, DST
	sub		$4*VL, DATALEN
	jge		.Lcrypt_loop_4x\@

.if \enc
.Lghash_last_ciphertext_4x\@:
	// Update GHASH with the last set of ciphertext blocks.
.irp i, 0,1,2,3,4,5,6,7,8,9
	_ghash_step_4x	\i
.endr
.endif

.Lcrypt_loop_4x_done\@:

	// Undo the extra subtraction by 4*VL and check whether data remains.
	add		$4*VL, DATALEN
	jz		.Ldone\@

	// The data length isn't a multiple of 4*VL.  Process the remaining data
	// of length 1 <= DATALEN < 4*VL, up to one vector (VL bytes) at a time.
	// Going one vector at a time may seem inefficient compared to having
	// separate code paths for each possible number of vectors remaining.
	// However, using a loop keeps the code size down, and it performs
	// surprisingly well; modern CPUs will start executing the next iteration
	// before the previous one finishes and also predict the number of loop
	// iterations.  For a similar reason, we roll up the AES rounds.
	//
	// On the last iteration, the remaining length may be less than VL.
	// Handle this using masking.
	//
	// Since there are enough key powers available for all remaining data,
	// there is no need to do a GHASH reduction after each iteration.
	// Instead, multiply each remaining block by its own key power, and only
	// do a GHASH reduction at the very end.

	// Make POWERS_PTR point to the key powers [H^N, H^(N-1), ...] where N
	// is the number of blocks that remain.
	.set	POWERS_PTR,	LE_CTR_PTR	// LE_CTR_PTR is free to be reused.
	mov		DATALEN, %eax
	neg		%rax
	and		$~15, %rax	// -round_up(DATALEN, 16)
	lea		OFFSETOFEND_H_POWERS(KEY,%rax), POWERS_PTR

	// Start collecting the unreduced GHASH intermediate value LO, MI, HI.
	.set	LO, GHASHDATA0
	.set	LO_XMM, GHASHDATA0_XMM
	.set	MI, GHASHDATA1
	.set	MI_XMM, GHASHDATA1_XMM
	.set	HI, GHASHDATA2
	.set	HI_XMM, GHASHDATA2_XMM
	vpxor		LO_XMM, LO_XMM, LO_XMM
	vpxor		MI_XMM, MI_XMM, MI_XMM
	vpxor		HI_XMM, HI_XMM, HI_XMM

.Lcrypt_loop_1x\@:

	// Select the appropriate mask for this iteration: all 1's if
	// DATALEN >= VL, otherwise DATALEN 1's.  Do this branchlessly using the
	// bzhi instruction from BMI2.  (This relies on DATALEN <= 255.)
.if VL < 64
	mov		$-1, %eax
	bzhi		DATALEN, %eax, %eax
	kmovd		%eax, %k1
.else
	mov		$-1, %rax
	bzhi		DATALEN64, %rax, %rax
	kmovq		%rax, %k1
.endif

	// Encrypt a vector of counter blocks.  This does not need to be masked.
	vpshufb		BSWAP_MASK, LE_CTR, V0
	vpaddd		LE_CTR_INC, LE_CTR, LE_CTR
	vpxord		RNDKEY0, V0, V0
	lea		16(KEY), %rax
1:
	vbroadcasti32x4	(%rax), RNDKEY
	vaesenc		RNDKEY, V0, V0
	add		$16, %rax
	cmp		%rax, RNDKEYLAST_PTR
	jne		1b
	vaesenclast	RNDKEYLAST, V0, V0

	// XOR the data with the appropriate number of keystream bytes.
	vmovdqu8	(SRC), V1{%k1}{z}
	vpxord		V1, V0, V0
	vmovdqu8	V0, (DST){%k1}

	// Update GHASH with the ciphertext block(s), without reducing.
	//
	// In the case of DATALEN < VL, the ciphertext is zero-padded to VL.
	// (If decrypting, it's done by the above masked load.  If encrypting,
	// it's done by the below masked register-to-register move.)  Note that
	// if DATALEN <= VL - 16, there will be additional padding beyond the
	// padding of the last block specified by GHASH itself; i.e., there may
	// be whole block(s) that get processed by the GHASH multiplication and
	// reduction instructions but should not actually be included in the
	// GHASH.  However, any such blocks are all-zeroes, and the values that
	// they're multiplied with are also all-zeroes.  Therefore they just add
	// 0 * 0 = 0 to the final GHASH result, which makes no difference.
	vmovdqu8	(POWERS_PTR), H_POW1
.if \enc
	vmovdqu8	V0, V1{%k1}{z}
.endif
	vpshufb		BSWAP_MASK, V1, V0
	vpxord		GHASH_ACC, V0, V0
	_ghash_mul_noreduce	H_POW1, V0, LO, MI, HI, GHASHDATA3, V1, V2, V3
	vpxor		GHASH_ACC_XMM, GHASH_ACC_XMM, GHASH_ACC_XMM

	add		$VL, POWERS_PTR
	add		$VL, SRC
	add		$VL, DST
	sub		$VL, DATALEN
	jg		.Lcrypt_loop_1x\@

	// Finally, do the GHASH reduction.
	_ghash_reduce	LO, MI, HI, GFPOLY, V0
	_horizontal_xor	HI, HI_XMM, GHASH_ACC_XMM, %xmm0, %xmm1, %xmm2

.Ldone\@:
	// Store the updated GHASH accumulator back to memory.
	vmovdqu		GHASH_ACC_XMM, (GHASH_ACC_PTR)

	vzeroupper	// This is needed after using ymm or zmm registers.
	RET
.endm

// void aes_gcm_enc_final_vaes_avx10(const struct aes_gcm_key_avx10 *key,
//				      const u32 le_ctr[4], u8 ghash_acc[16],
//				      u64 total_aadlen, u64 total_datalen);
// bool aes_gcm_dec_final_vaes_avx10(const struct aes_gcm_key_avx10 *key,
//				      const u32 le_ctr[4],
//				      const u8 ghash_acc[16],
//				      u64 total_aadlen, u64 total_datalen,
//				      const u8 tag[16], int taglen);
//
// This macro generates one of the above two functions (with \enc selecting
// which one).  Both functions finish computing the GCM authentication tag by
// updating GHASH with the lengths block and encrypting the GHASH accumulator.
// |total_aadlen| and |total_datalen| must be the total length of the additional
// authenticated data and the en/decrypted data in bytes, respectively.
//
// The encryption function then stores the full-length (16-byte) computed
// authentication tag to |ghash_acc|.  The decryption function instead loads the
// expected authentication tag (the one that was transmitted) from the 16-byte
// buffer |tag|, compares the first 4 <= |taglen| <= 16 bytes of it to the
// computed tag in constant time, and returns true if and only if they match.
.macro	_aes_gcm_final	enc

	// Function arguments
	.set	KEY,		%rdi
	.set	LE_CTR_PTR,	%rsi
	.set	GHASH_ACC_PTR,	%rdx
	.set	TOTAL_AADLEN,	%rcx
	.set	TOTAL_DATALEN,	%r8
	.set	TAG,		%r9
	.set	TAGLEN,		%r10d	// Originally at 8(%rsp)

	// Additional local variables.
	// %rax, %xmm0-%xmm3, and %k1 are used as temporary registers.
	.set	AESKEYLEN,	%r11d
	.set	AESKEYLEN64,	%r11
	.set	GFPOLY,		%xmm4
	.set	BSWAP_MASK,	%xmm5
	.set	LE_CTR,		%xmm6
	.set	GHASH_ACC,	%xmm7
	.set	H_POW1,		%xmm8

	// Load some constants.
	vmovdqa		.Lgfpoly(%rip), GFPOLY
	vmovdqa		.Lbswap_mask(%rip), BSWAP_MASK

	// Load the AES key length in bytes.
	movl		OFFSETOF_AESKEYLEN(KEY), AESKEYLEN

	// Set up a counter block with 1 in the low 32-bit word.  This is the
	// counter that produces the ciphertext needed to encrypt the auth tag.
	// GFPOLY has 1 in the low word, so grab the 1 from there using a blend.
	vpblendd	$0xe, (LE_CTR_PTR), GFPOLY, LE_CTR

	// Build the lengths block and XOR it with the GHASH accumulator.
	// Although the lengths block is defined as the AAD length followed by
	// the en/decrypted data length, both in big-endian byte order, a byte
	// reflection of the full block is needed because of the way we compute
	// GHASH (see _ghash_mul_step).  By using little-endian values in the
	// opposite order, we avoid having to reflect any bytes here.
	vmovq		TOTAL_DATALEN, %xmm0
	vpinsrq		$1, TOTAL_AADLEN, %xmm0, %xmm0
	vpsllq		$3, %xmm0, %xmm0	// Bytes to bits
	vpxor		(GHASH_ACC_PTR), %xmm0, GHASH_ACC

	// Load the first hash key power (H^1), which is stored last.
	vmovdqu8	OFFSETOFEND_H_POWERS-16(KEY), H_POW1

.if !\enc
	// Prepare a mask of TAGLEN one bits.
	movl		8(%rsp), TAGLEN
	mov		$-1, %eax
	bzhi		TAGLEN, %eax, %eax
	kmovd		%eax, %k1
.endif

	// Make %rax point to the last AES round key for the chosen AES variant.
	lea		6*16(KEY,AESKEYLEN64,4), %rax

	// Start the AES encryption of the counter block by swapping the counter
	// block to big-endian and XOR-ing it with the zero-th AES round key.
	vpshufb		BSWAP_MASK, LE_CTR, %xmm0
	vpxor		(KEY), %xmm0, %xmm0

	// Complete the AES encryption and multiply GHASH_ACC by H^1.
	// Interleave the AES and GHASH instructions to improve performance.
	cmp		$24, AESKEYLEN
	jl		128f		// AES-128?
	je		192f		// AES-192?
	// AES-256
	vaesenc		-13*16(%rax), %xmm0, %xmm0
	vaesenc		-12*16(%rax), %xmm0, %xmm0
192:
	vaesenc		-11*16(%rax), %xmm0, %xmm0
	vaesenc		-10*16(%rax), %xmm0, %xmm0
128:
.irp i, 0,1,2,3,4,5,6,7,8
	_ghash_mul_step	\i, H_POW1, GHASH_ACC, GHASH_ACC, GFPOLY, \
			%xmm1, %xmm2, %xmm3
	vaesenc		(\i-9)*16(%rax), %xmm0, %xmm0
.endr
	_ghash_mul_step	9, H_POW1, GHASH_ACC, GHASH_ACC, GFPOLY, \
			%xmm1, %xmm2, %xmm3

	// Undo the byte reflection of the GHASH accumulator.
	vpshufb		BSWAP_MASK, GHASH_ACC, GHASH_ACC

	// Do the last AES round and XOR the resulting keystream block with the
	// GHASH accumulator to produce the full computed authentication tag.
	//
	// Reduce latency by taking advantage of the property vaesenclast(key,
	// a) ^ b == vaesenclast(key ^ b, a).  I.e., XOR GHASH_ACC into the last
	// round key, instead of XOR'ing the final AES output with GHASH_ACC.
	//
	// enc_final then returns the computed auth tag, while dec_final
	// compares it with the transmitted one and returns a bool.  To compare
	// the tags, dec_final XORs them together and uses vptest to check
	// whether the result is all-zeroes.  This should be constant-time.
	// dec_final applies the vaesenclast optimization to this additional
	// value XOR'd too, using vpternlogd to XOR the last round key, GHASH
	// accumulator, and transmitted auth tag together in one instruction.
.if \enc
	vpxor		(%rax), GHASH_ACC, %xmm1
	vaesenclast	%xmm1, %xmm0, GHASH_ACC
	vmovdqu		GHASH_ACC, (GHASH_ACC_PTR)
.else
	vmovdqu		(TAG), %xmm1
	vpternlogd	$0x96, (%rax), GHASH_ACC, %xmm1
	vaesenclast	%xmm1, %xmm0, %xmm0
	xor		%eax, %eax
	vmovdqu8	%xmm0, %xmm0{%k1}{z}	// Truncate to TAGLEN bytes
	vptest		%xmm0, %xmm0
	sete		%al
.endif
	// No need for vzeroupper here, since only xmm registers were used.
	RET
.endm

_set_veclen 32
SYM_FUNC_START(aes_gcm_precompute_vaes_avx10_256)
	_aes_gcm_precompute
SYM_FUNC_END(aes_gcm_precompute_vaes_avx10_256)
SYM_FUNC_START(aes_gcm_enc_update_vaes_avx10_256)
	_aes_gcm_update	1
SYM_FUNC_END(aes_gcm_enc_update_vaes_avx10_256)
SYM_FUNC_START(aes_gcm_dec_update_vaes_avx10_256)
	_aes_gcm_update	0
SYM_FUNC_END(aes_gcm_dec_update_vaes_avx10_256)

_set_veclen 64
SYM_FUNC_START(aes_gcm_precompute_vaes_avx10_512)
	_aes_gcm_precompute
SYM_FUNC_END(aes_gcm_precompute_vaes_avx10_512)
SYM_FUNC_START(aes_gcm_enc_update_vaes_avx10_512)
	_aes_gcm_update	1
SYM_FUNC_END(aes_gcm_enc_update_vaes_avx10_512)
SYM_FUNC_START(aes_gcm_dec_update_vaes_avx10_512)
	_aes_gcm_update	0
SYM_FUNC_END(aes_gcm_dec_update_vaes_avx10_512)

// void aes_gcm_aad_update_vaes_avx10(const struct aes_gcm_key_avx10 *key,
//				       u8 ghash_acc[16],
//				       const u8 *aad, int aadlen);
//
// This function processes the AAD (Additional Authenticated Data) in GCM.
// Using the key |key|, it updates the GHASH accumulator |ghash_acc| with the
// data given by |aad| and |aadlen|.  |key->ghash_key_powers| must have been
// initialized.  On the first call, |ghash_acc| must be all zeroes.  |aadlen|
// must be a multiple of 16, except on the last call where it can be any length.
// The caller must do any buffering needed to ensure this.
//
// AES-GCM is almost always used with small amounts of AAD, less than 32 bytes.
// Therefore, for AAD processing we currently only provide this implementation
// which uses 256-bit vectors (ymm registers) and only has a 1x-wide loop.  This
// keeps the code size down, and it enables some micro-optimizations, e.g. using
// VEX-coded instructions instead of EVEX-coded to save some instruction bytes.
// To optimize for large amounts of AAD, we could implement a 4x-wide loop and
// provide a version using 512-bit vectors, but that doesn't seem to be useful.
SYM_FUNC_START(aes_gcm_aad_update_vaes_avx10)

	// Function arguments
	.set	KEY,		%rdi
	.set	GHASH_ACC_PTR,	%rsi
	.set	AAD,		%rdx
	.set	AADLEN,		%ecx
	.set	AADLEN64,	%rcx	// Zero-extend AADLEN before using!

	// Additional local variables.
	// %rax, %ymm0-%ymm3, and %k1 are used as temporary registers.
	.set	BSWAP_MASK,	%ymm4
	.set	GFPOLY,		%ymm5
	.set	GHASH_ACC,	%ymm6
	.set	GHASH_ACC_XMM,	%xmm6
	.set	H_POW1,		%ymm7

	// Load some constants.
	vbroadcasti128	.Lbswap_mask(%rip), BSWAP_MASK
	vbroadcasti128	.Lgfpoly(%rip), GFPOLY

	// Load the GHASH accumulator.
	vmovdqu		(GHASH_ACC_PTR), GHASH_ACC_XMM

	// Update GHASH with 32 bytes of AAD at a time.
	//
	// Pre-subtracting 32 from AADLEN saves an instruction from the loop and
	// also ensures that at least one write always occurs to AADLEN,
	// zero-extending it and allowing AADLEN64 to be used later.
	sub		$32, AADLEN
	jl		.Laad_loop_1x_done
	vmovdqu8	OFFSETOFEND_H_POWERS-32(KEY), H_POW1	// [H^2, H^1]
.Laad_loop_1x:
	vmovdqu		(AAD), %ymm0
	vpshufb		BSWAP_MASK, %ymm0, %ymm0
	vpxor		%ymm0, GHASH_ACC, GHASH_ACC
	_ghash_mul	H_POW1, GHASH_ACC, GHASH_ACC, GFPOLY, \
			%ymm0, %ymm1, %ymm2
	vextracti128	$1, GHASH_ACC, %xmm0
	vpxor		%xmm0, GHASH_ACC_XMM, GHASH_ACC_XMM
	add		$32, AAD
	sub		$32, AADLEN
	jge		.Laad_loop_1x
.Laad_loop_1x_done:
	add		$32, AADLEN
	jz		.Laad_done

	// Update GHASH with the remaining 1 <= AADLEN < 32 bytes of AAD.
	mov		$-1, %eax
	bzhi		AADLEN, %eax, %eax
	kmovd		%eax, %k1
	vmovdqu8	(AAD), %ymm0{%k1}{z}
	neg		AADLEN64
	and		$~15, AADLEN64	// -round_up(AADLEN, 16)
	vmovdqu8	OFFSETOFEND_H_POWERS(KEY,AADLEN64), H_POW1
	vpshufb		BSWAP_MASK, %ymm0, %ymm0
	vpxor		%ymm0, GHASH_ACC, GHASH_ACC
	_ghash_mul	H_POW1, GHASH_ACC, GHASH_ACC, GFPOLY, \
			%ymm0, %ymm1, %ymm2
	vextracti128	$1, GHASH_ACC, %xmm0
	vpxor		%xmm0, GHASH_ACC_XMM, GHASH_ACC_XMM

.Laad_done:
	// Store the updated GHASH accumulator back to memory.
	vmovdqu		GHASH_ACC_XMM, (GHASH_ACC_PTR)

	vzeroupper	// This is needed after using ymm or zmm registers.
	RET
SYM_FUNC_END(aes_gcm_aad_update_vaes_avx10)

SYM_FUNC_START(aes_gcm_enc_final_vaes_avx10)
	_aes_gcm_final	1
SYM_FUNC_END(aes_gcm_enc_final_vaes_avx10)
SYM_FUNC_START(aes_gcm_dec_final_vaes_avx10)
	_aes_gcm_final	0
SYM_FUNC_END(aes_gcm_dec_final_vaes_avx10)
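
// For reference, a rough sketch (in C, as a comment only) of how the C glue
// code is assumed to call these functions for a one-shot encryption.  The real
// callers also handle buffering of partial blocks, multi-segment updates, and
// deriving le_ctr[1..3] from the IV:
//
//	struct aes_gcm_key_avx10 *key = ...;		// AES key already expanded
//	aes_gcm_precompute_vaes_avx10_512(key);		// or the _256 variant
//
//	u8 ghash_acc[16] = {};
//	u32 le_ctr[4] = ...;				// derived from the IV
//	aes_gcm_aad_update_vaes_avx10(key, ghash_acc, aad, aadlen);
//
//	le_ctr[0] = 2;					// low word starts at 2 for the data
//	aes_gcm_enc_update_vaes_avx10_512(key, le_ctr, ghash_acc,
//					  src, dst, datalen);
//
//	// The final function forces the low counter word to 1 itself.
//	aes_gcm_enc_final_vaes_avx10(key, le_ctr, ghash_acc, aadlen, datalen);
//	// ghash_acc now contains the full 16-byte authentication tag.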