/* SPDX-License-Identifier: Apache-2.0 OR BSD-2-Clause */
//
// AES-GCM implementation for x86_64 CPUs that support the following CPU
// features: VAES && VPCLMULQDQ && AVX512BW && AVX512VL && BMI2
//
// Copyright 2024 Google LLC
//
// Author: Eric Biggers <ebiggers@google.com>
//
//------------------------------------------------------------------------------
//
// This file is dual-licensed, meaning that you can use it under your choice of
// either of the following two licenses:
//
// Licensed under the Apache License 2.0 (the "License").  You may obtain a copy
// of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// or
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
//    this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.

#include <linux/linkage.h>

.section .rodata
.p2align 6

	// A shuffle mask that reflects the bytes of 16-byte blocks
.Lbswap_mask:
	.octa	0x000102030405060708090a0b0c0d0e0f

	// This is the GHASH reducing polynomial without its constant term, i.e.
	// x^128 + x^7 + x^2 + x, represented using the backwards mapping
	// between bits and polynomial coefficients.
	//
	// Alternatively, it can be interpreted as the naturally-ordered
	// representation of the polynomial x^127 + x^126 + x^121 + 1, i.e. the
	// "reversed" GHASH reducing polynomial without its x^128 term.
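	//
	// As a quick check on the constant below: its top byte is 0xc2 =
	// 0b11000010, which sets bits 127, 126, and 121 of the 128-bit value
	// (the x^127, x^126, and x^121 terms in the natural interpretation),
	// and its low bit provides the final "+ 1" term.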
.Lgfpoly:
	.octa	0xc2000000000000000000000000000001

	// Same as above, but with the (1 << 64) bit set.
.Lgfpoly_and_internal_carrybit:
	.octa	0xc2000000000000010000000000000001

	// Values needed to prepare the initial vector of counter blocks.
.Lctr_pattern:
	.octa	0
	.octa	1
	.octa	2
	.octa	3

	// The number of AES blocks per vector, as a 128-bit value.
.Linc_4blocks:
	.octa	4

// Number of powers of the hash key stored in the key struct.  The powers are
// stored from highest (H^NUM_H_POWERS) to lowest (H^1).
#define NUM_H_POWERS		16

// Offset to AES key length (in bytes) in the key struct
#define OFFSETOF_AESKEYLEN	480

// Offset to start of hash key powers array in the key struct
#define OFFSETOF_H_POWERS	512

// Offset to end of hash key powers array in the key struct.
//
// This is immediately followed by three zeroized padding blocks, which are
// included so that partial vectors can be handled more easily.  E.g. if two
// blocks remain, we load the 4 values [H^2, H^1, 0, 0].  The most padding
// blocks needed is 3, which occurs if [H^1, 0, 0, 0] is loaded.
#define OFFSETOFEND_H_POWERS	(OFFSETOF_H_POWERS + (NUM_H_POWERS * 16))
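
// For illustration only: the offsets above assume a key struct laid out
// roughly as follows.  The authoritative definition is the C struct
// aes_gcm_key_vaes_avx512 in the glue code; only the field names referenced
// elsewhere in this file (|key->base.aes_key|, |key->h_powers|,
// |key->padding|) are taken from the source, the rest is an assumption.
//
//	offset   0: the expanded AES round keys (|key->base.aes_key|), with
//	            the AES key length in bytes at OFFSETOF_AESKEYLEN (480)
//	offset 512: |key->h_powers|: NUM_H_POWERS 16-byte blocks holding
//	            [H^16, H^15, ..., H^1]
//	then:       |key->padding|: three zeroized 16-byte blocks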

.text

// The _ghash_mul_step macro does one step of GHASH multiplication of the
// 128-bit lanes of \a by the corresponding 128-bit lanes of \b and stores the
// reduced products in \dst.  \t0, \t1, and \t2 are temporary registers of the
// same size as \a and \b.  To complete all steps, this must be invoked with
// \i=0 through \i=9.  The division into steps allows users of this macro to
// optionally interleave the computation with other instructions.  Users of
// this macro must preserve the parameter registers across steps.
//
// The multiplications are done in GHASH's representation of the finite field
// GF(2^128).  Elements of GF(2^128) are represented as binary polynomials
// (i.e. polynomials whose coefficients are bits) modulo a reducing polynomial
// G.  The GCM specification uses G = x^128 + x^7 + x^2 + x + 1.  Addition is
// just XOR, while multiplication is more complex and has two parts: (a) do
// carryless multiplication of two 128-bit input polynomials to get a 256-bit
// intermediate product polynomial, and (b) reduce the intermediate product to
// 128 bits by adding multiples of G that cancel out terms in it.  (Adding
// multiples of G doesn't change which field element the polynomial represents.)
//
// Unfortunately, the GCM specification maps bits to/from polynomial
// coefficients backwards from the natural order.  In each byte it specifies the
// highest bit to be the lowest order polynomial coefficient, *not* the highest!
// This makes it nontrivial to work with the GHASH polynomials.  We could
// reflect the bits, but x86 doesn't have an instruction that does that.
//
// Instead, we operate on the values without bit-reflecting them.  This *mostly*
// just works, since XOR and carryless multiplication are symmetric with respect
// to bit order, but it has some consequences.  First, due to GHASH's byte
// order, by skipping bit reflection, *byte* reflection becomes necessary to
// give the polynomial terms a consistent order.  E.g., considering an N-bit
// value interpreted using the G = x^128 + x^7 + x^2 + x + 1 convention, bits 0
// through N-1 of the byte-reflected value represent the coefficients of x^(N-1)
// through x^0, whereas bits 0 through N-1 of the non-byte-reflected value
// represent x^7...x^0, x^15...x^8, ..., x^(N-1)...x^(N-8) which can't be worked
// with.  Fortunately, x86's vpshufb instruction can do byte reflection.
//
// Second, forgoing the bit reflection causes an extra multiple of x (still
// using the G = x^128 + x^7 + x^2 + x + 1 convention) to be introduced by each
// multiplication.  This is because an M-bit by N-bit carryless multiplication
// really produces a (M+N-1)-bit product, but in practice it's zero-extended to
// M+N bits.  In the G = x^128 + x^7 + x^2 + x + 1 convention, which maps bits
// to polynomial coefficients backwards, this zero-extension actually changes
// the product by introducing an extra factor of x.  Therefore, users of this
// macro must ensure that one of the inputs has an extra factor of x^-1, i.e.
// the multiplicative inverse of x, to cancel out the extra x.
//
// Third, the backwards coefficients convention is just confusing to work with,
// since it makes "low" and "high" in the polynomial math mean the opposite of
// their normal meaning in computer programming.  This can be solved by using an
// alternative interpretation: the polynomial coefficients are understood to be
// in the natural order, and the multiplication is actually \a * \b * x^-128 mod
// x^128 + x^127 + x^126 + x^121 + 1.  This doesn't change the inputs, outputs,
// or the implementation at all; it just changes the mathematical interpretation
// of what each instruction is doing.  Starting from here, we'll use this
// alternative interpretation, as it's easier to understand the code that way.
//
// Moving onto the implementation, the vpclmulqdq instruction does 64 x 64 =>
// 128-bit carryless multiplication, so we break the 128 x 128 multiplication
// into parts as follows (the _L and _H suffixes denote low and high 64 bits):
//
//     LO = a_L * b_L
//     MI = (a_L * b_H) + (a_H * b_L)
//     HI = a_H * b_H
//
// The 256-bit product is x^128*HI + x^64*MI + LO.  LO, MI, and HI are 128-bit.
// Note that MI "overlaps" with LO and HI.  We don't consolidate MI into LO and
// HI right away, since the way the reduction works makes that unnecessary.
//
// For the reduction, we cancel out the low 128 bits by adding multiples of G =
// x^128 + x^127 + x^126 + x^121 + 1.  This is done by two iterations, each of
// which cancels out the next lowest 64 bits.  Consider a value x^64*A + B,
// where A and B are 128-bit.  Adding B_L*G to that value gives:
//
//       x^64*A + B + B_L*G
//     = x^64*A + x^64*B_H + B_L + B_L*(x^128 + x^127 + x^126 + x^121 + 1)
//     = x^64*A + x^64*B_H + B_L + x^128*B_L + x^64*B_L*(x^63 + x^62 + x^57) + B_L
//     = x^64*A + x^64*B_H + x^128*B_L + x^64*B_L*(x^63 + x^62 + x^57) + B_L + B_L
//     = x^64*(A + B_H + x^64*B_L + B_L*(x^63 + x^62 + x^57))
//
// So: if we sum A, B with its halves swapped, and the low half of B times x^63
// + x^62 + x^57, we get a 128-bit value C where x^64*C is congruent to the
// original value x^64*A + B.  I.e., the low 64 bits got canceled out.
//
// We just need to apply this twice: first to fold LO into MI, and second to
// fold the updated MI into HI.
//
// The needed three-argument XORs are done using the vpternlogd instruction with
// immediate 0x96, since this is faster than two vpxord instructions.
//
// A potential optimization, assuming that b is fixed per-key (if a is fixed
// per-key it would work the other way around), is to use one iteration of the
// reduction described above to precompute a value c such that x^64*c = b mod G,
// and then multiply a_L by c (and implicitly by x^64) instead of by b:
//
//     MI = (a_L * c_L) + (a_H * b_L)
//     HI = (a_L * c_H) + (a_H * b_H)
//
// This would eliminate the LO part of the intermediate product, which would
// eliminate the need to fold LO into MI.  This would save two instructions,
// including a vpclmulqdq.  However, we currently don't use this optimization
// because it would require twice as many per-key precomputed values.
//
// Using Karatsuba multiplication instead of "schoolbook" multiplication
// similarly would save a vpclmulqdq but does not seem to be worth it.
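//
// Putting the ten steps below together, one full invocation of this macro
// computes, per 128-bit lane (a summary of the per-instruction comments
// below, for reference):
//
//     LO  = a_L * b_L
//     MI  = (a_L * b_H) + (a_H * b_L)
//     MI += swap_halves(LO) + LO_L*(x^63 + x^62 + x^57)   // fold LO into MI
//     HI  = a_H * b_H
//     HI += swap_halves(MI) + MI_L*(x^63 + x^62 + x^57)   // fold MI into HI
//     \dst = HI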
.macro	_ghash_mul_step	i, a, b, dst, gfpoly, t0, t1, t2
.if \i == 0
	vpclmulqdq	$0x00, \a, \b, \t0	  // LO = a_L * b_L
	vpclmulqdq	$0x01, \a, \b, \t1	  // MI_0 = a_L * b_H
.elseif \i == 1
	vpclmulqdq	$0x10, \a, \b, \t2	  // MI_1 = a_H * b_L
.elseif \i == 2
	vpxord		\t2, \t1, \t1		  // MI = MI_0 + MI_1
.elseif \i == 3
	vpclmulqdq	$0x01, \t0, \gfpoly, \t2  // LO_L*(x^63 + x^62 + x^57)
.elseif \i == 4
	vpshufd		$0x4e, \t0, \t0		  // Swap halves of LO
.elseif \i == 5
	vpternlogd	$0x96, \t2, \t0, \t1	  // Fold LO into MI
.elseif \i == 6
	vpclmulqdq	$0x11, \a, \b, \dst	  // HI = a_H * b_H
.elseif \i == 7
	vpclmulqdq	$0x01, \t1, \gfpoly, \t0  // MI_L*(x^63 + x^62 + x^57)
.elseif \i == 8
	vpshufd		$0x4e, \t1, \t1		  // Swap halves of MI
.elseif \i == 9
	vpternlogd	$0x96, \t0, \t1, \dst	  // Fold MI into HI
.endif
.endm

// GHASH-multiply the 128-bit lanes of \a by the 128-bit lanes of \b and store
// the reduced products in \dst.  See _ghash_mul_step for full explanation.
.macro	_ghash_mul	a, b, dst, gfpoly, t0, t1, t2
.irp i, 0,1,2,3,4,5,6,7,8,9
	_ghash_mul_step	\i, \a, \b, \dst, \gfpoly, \t0, \t1, \t2
.endr
.endm

// GHASH-multiply the 128-bit lanes of \a by the 128-bit lanes of \b and add the
// *unreduced* products to \lo, \mi, and \hi.
.macro	_ghash_mul_noreduce	a, b, lo, mi, hi, t0, t1, t2, t3
	vpclmulqdq	$0x00, \a, \b, \t0	// a_L * b_L
	vpclmulqdq	$0x01, \a, \b, \t1	// a_L * b_H
	vpclmulqdq	$0x10, \a, \b, \t2	// a_H * b_L
	vpclmulqdq	$0x11, \a, \b, \t3	// a_H * b_H
	vpxord		\t0, \lo, \lo
	vpternlogd	$0x96, \t2, \t1, \mi
	vpxord		\t3, \hi, \hi
.endm

// Reduce the unreduced products from \lo, \mi, and \hi and store the 128-bit
// reduced products in \hi.  See _ghash_mul_step for explanation of reduction.
.macro	_ghash_reduce	lo, mi, hi, gfpoly, t0
	vpclmulqdq	$0x01, \lo, \gfpoly, \t0
	vpshufd		$0x4e, \lo, \lo
	vpternlogd	$0x96, \t0, \lo, \mi
	vpclmulqdq	$0x01, \mi, \gfpoly, \t0
	vpshufd		$0x4e, \mi, \mi
	vpternlogd	$0x96, \t0, \mi, \hi
.endm

// This is a specialized version of _ghash_mul that computes \a * \a, i.e. it
// squares \a.  It skips computing MI = (a_L * a_H) + (a_H * a_L) = 0.
.macro	_ghash_square	a, dst, gfpoly, t0, t1
	vpclmulqdq	$0x00, \a, \a, \t0	  // LO = a_L * a_L
	vpclmulqdq	$0x11, \a, \a, \dst	  // HI = a_H * a_H
	vpclmulqdq	$0x01, \t0, \gfpoly, \t1  // LO_L*(x^63 + x^62 + x^57)
	vpshufd		$0x4e, \t0, \t0		  // Swap halves of LO
	vpxord		\t0, \t1, \t1		  // Fold LO into MI
	vpclmulqdq	$0x01, \t1, \gfpoly, \t0  // MI_L*(x^63 + x^62 + x^57)
	vpshufd		$0x4e, \t1, \t1		  // Swap halves of MI
	vpternlogd	$0x96, \t0, \t1, \dst	  // Fold MI into HI
.endm

// void aes_gcm_precompute_vaes_avx512(struct aes_gcm_key_vaes_avx512 *key);
//
// Given the expanded AES key |key->base.aes_key|, derive the GHASH subkey and
// initialize |key->h_powers| and |key->padding|.
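//
// At a high level, the steps below are (an illustrative summary; see the
// inline comments for the details):
//
//	H = AES-encrypt(aes_key, 0^128)		// raw hash subkey
//	H^1 = byteswap(H) * x			// preprocessed first key power
//	compute [H^4, H^3, H^2, H^1] and H_INC = [H^4, H^4, H^4, H^4]
//	3 times: multiply the lowest 4 powers computed so far by H_INC to get
//		the next higher 4 powers, filling |key->h_powers| backwards
//	also zeroize the three |key->padding| blocks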
SYM_FUNC_START(aes_gcm_precompute_vaes_avx512)

	// Function arguments
	.set	KEY,		%rdi

	// Additional local variables.
	// %zmm[0-2] and %rax are used as temporaries.
	.set	POWERS_PTR,	%rsi
	.set	RNDKEYLAST_PTR,	%rdx
	.set	H_CUR,		%zmm3
	.set	H_CUR_YMM,	%ymm3
	.set	H_CUR_XMM,	%xmm3
	.set	H_INC,		%zmm4
	.set	H_INC_YMM,	%ymm4
	.set	H_INC_XMM,	%xmm4
	.set	GFPOLY,		%zmm5
	.set	GFPOLY_YMM,	%ymm5
	.set	GFPOLY_XMM,	%xmm5

	// Get pointer to lowest set of key powers (located at end of array).
	lea		OFFSETOFEND_H_POWERS-64(KEY), POWERS_PTR

	// Encrypt an all-zeroes block to get the raw hash subkey.
	movl		OFFSETOF_AESKEYLEN(KEY), %eax
	lea		6*16(KEY,%rax,4), RNDKEYLAST_PTR
	vmovdqu		(KEY), %xmm0  // Zero-th round key XOR all-zeroes block
	add		$16, KEY
1:
	vaesenc		(KEY), %xmm0, %xmm0
	add		$16, KEY
	cmp		KEY, RNDKEYLAST_PTR
	jne		1b
	vaesenclast	(RNDKEYLAST_PTR), %xmm0, %xmm0

	// Reflect the bytes of the raw hash subkey.
	vpshufb		.Lbswap_mask(%rip), %xmm0, H_CUR_XMM

	// Zeroize the padding blocks.
	vpxor		%xmm0, %xmm0, %xmm0
	vmovdqu		%ymm0, 64(POWERS_PTR)
	vmovdqu		%xmm0, 64+2*16(POWERS_PTR)

	// Finish preprocessing the first key power, H^1.  Since this GHASH
	// implementation operates directly on values with the backwards bit
	// order specified by the GCM standard, it's necessary to preprocess the
	// raw key as follows.  First, reflect its bytes.  Second, multiply it
	// by x^-1 mod x^128 + x^7 + x^2 + x + 1 (if using the backwards
	// interpretation of polynomial coefficients), which can also be
	// interpreted as multiplication by x mod x^128 + x^127 + x^126 + x^121
	// + 1 using the alternative, natural interpretation of polynomial
	// coefficients.  For details, see the comment above _ghash_mul_step.
	//
	// Either way, for the multiplication the concrete operation performed
	// is a left shift of the 128-bit value by 1 bit, then an XOR with (0xc2
	// << 120) | 1 if a 1 bit was carried out.  However, there's no 128-bit
	// wide shift instruction, so instead double each of the two 64-bit
	// halves and incorporate the internal carry bit into the value XOR'd.
	vpshufd		$0xd3, H_CUR_XMM, %xmm0
	vpsrad		$31, %xmm0, %xmm0
	vpaddq		H_CUR_XMM, H_CUR_XMM, H_CUR_XMM
	// H_CUR_XMM ^= xmm0 & gfpoly_and_internal_carrybit
	vpternlogd	$0x78, .Lgfpoly_and_internal_carrybit(%rip), %xmm0, H_CUR_XMM
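	// In scalar terms, the four instructions above compute the following,
	// where h_H:h_L are the two 64-bit halves of H_CUR (an illustrative
	// sketch only):
	//
	//	carry_out = h_H >> 63;	// bit carried out of the 128-bit value
	//	carry_int = h_L >> 63;	// carry from the low half into the high
	//	h_L <<= 1;  h_H <<= 1;	// done by the single vpaddq
	//	if (carry_int)  h_H ^= 1;			  // internal carry bit
	//	if (carry_out)  { h_H ^= 0xc2 << 56;  h_L ^= 1; } // (0xc2 << 120) | 1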

	// Load the gfpoly constant.
	vbroadcasti32x4	.Lgfpoly(%rip), GFPOLY

	// Square H^1 to get H^2.
	//
	// Note that as with H^1, all higher key powers also need an extra
	// factor of x^-1 (or x using the natural interpretation).  Nothing
	// special needs to be done to make this happen, though: H^1 * H^1 would
	// end up with two factors of x^-1, but the multiplication consumes one.
	// So the product H^2 ends up with the desired one factor of x^-1.
	_ghash_square	H_CUR_XMM, H_INC_XMM, GFPOLY_XMM, %xmm0, %xmm1

	// Create H_CUR_YMM = [H^2, H^1] and H_INC_YMM = [H^2, H^2].
	vinserti128	$1, H_CUR_XMM, H_INC_YMM, H_CUR_YMM
	vinserti128	$1, H_INC_XMM, H_INC_YMM, H_INC_YMM

	// Create H_CUR = [H^4, H^3, H^2, H^1] and H_INC = [H^4, H^4, H^4, H^4].
	_ghash_mul	H_INC_YMM, H_CUR_YMM, H_INC_YMM, GFPOLY_YMM, \
			%ymm0, %ymm1, %ymm2
	vinserti64x4	$1, H_CUR_YMM, H_INC, H_CUR
	vshufi64x2	$0, H_INC, H_INC, H_INC

	// Store the lowest set of key powers.
	vmovdqu8	H_CUR, (POWERS_PTR)

	// Compute and store the remaining key powers.
	// Repeatedly multiply [H^(i+3), H^(i+2), H^(i+1), H^i] by
	// [H^4, H^4, H^4, H^4] to get [H^(i+7), H^(i+6), H^(i+5), H^(i+4)].
	mov		$3, %eax
.Lprecompute_next:
	sub		$64, POWERS_PTR
	_ghash_mul	H_INC, H_CUR, H_CUR, GFPOLY, %zmm0, %zmm1, %zmm2
	vmovdqu8	H_CUR, (POWERS_PTR)
	dec		%eax
	jnz		.Lprecompute_next

	vzeroupper	// This is needed after using ymm or zmm registers.
	RET
SYM_FUNC_END(aes_gcm_precompute_vaes_avx512)

// XOR together the 128-bit lanes of \src (whose low lane is \src_xmm) and store
// the result in \dst_xmm.  This implicitly zeroizes the other lanes of dst.
.macro	_horizontal_xor	src, src_xmm, dst_xmm, t0_xmm, t1_xmm, t2_xmm
	vextracti32x4	$1, \src, \t0_xmm
	vextracti32x4	$2, \src, \t1_xmm
	vextracti32x4	$3, \src, \t2_xmm
	vpxord		\t0_xmm, \src_xmm, \dst_xmm
	vpternlogd	$0x96, \t1_xmm, \t2_xmm, \dst_xmm
.endm

// Do one step of the GHASH update of the data blocks given in the vector
// registers GHASHDATA[0-3].  \i specifies the step to do, 0 through 9.  The
// division into steps allows users of this macro to optionally interleave the
// computation with other instructions.  This macro uses the vector register
// GHASH_ACC as input/output; GHASHDATA[0-3] as inputs that are clobbered;
// H_POW[4-1], GFPOLY, and BSWAP_MASK as inputs that aren't clobbered; and
// GHASHTMP[0-2] as temporaries.  This macro handles the byte-reflection of the
// data blocks.  The parameter registers must be preserved across steps.
//
// The GHASH update does: GHASH_ACC = H_POW4*(GHASHDATA0 + GHASH_ACC) +
// H_POW3*GHASHDATA1 + H_POW2*GHASHDATA2 + H_POW1*GHASHDATA3, where the
// operations are vectorized operations on 512-bit vectors of 128-bit blocks.
// The vectorized terms correspond to the following non-vectorized terms:
//
//       H_POW4*(GHASHDATA0 + GHASH_ACC) => H^16*(blk0 + GHASH_ACC_XMM),
//              H^15*(blk1 + 0), H^14*(blk2 + 0), and H^13*(blk3 + 0)
//       H_POW3*GHASHDATA1 => H^12*blk4, H^11*blk5, H^10*blk6, and H^9*blk7
//       H_POW2*GHASHDATA2 => H^8*blk8,  H^7*blk9,  H^6*blk10, and H^5*blk11
//       H_POW1*GHASHDATA3 => H^4*blk12, H^3*blk13, H^2*blk14, and H^1*blk15
//
// More concretely, this code does:
//   - Do vectorized "schoolbook" multiplications to compute the intermediate
//     256-bit product of each block and its corresponding hash key power.
//   - Sum (XOR) the intermediate 256-bit products across vectors.
//   - Do a vectorized reduction of these 256-bit intermediate values to
//     128-bits each.
//   - Sum (XOR) these values and store the 128-bit result in GHASH_ACC_XMM.
//
// See _ghash_mul_step for the full explanation of the operations performed for
// each individual finite field multiplication and reduction.
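//
// For reference, this vectorized form is equivalent to applying the standard
// GHASH recurrence acc = (acc + blk) * H once per block, since
//
//       (...((acc + blk0)*H + blk1)*H + ... + blk15)*H
//     = H^16*(acc + blk0) + H^15*blk1 + ... + H^1*blk15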
.macro	_ghash_step_4x	i
.if \i == 0
	vpshufb		BSWAP_MASK, GHASHDATA0, GHASHDATA0
	vpxord		GHASH_ACC, GHASHDATA0, GHASHDATA0
	vpshufb		BSWAP_MASK, GHASHDATA1, GHASHDATA1
	vpshufb		BSWAP_MASK, GHASHDATA2, GHASHDATA2
.elseif \i == 1
	vpshufb		BSWAP_MASK, GHASHDATA3, GHASHDATA3
	vpclmulqdq	$0x00, H_POW4, GHASHDATA0, GHASH_ACC	// LO_0
	vpclmulqdq	$0x00, H_POW3, GHASHDATA1, GHASHTMP0	// LO_1
	vpclmulqdq	$0x00, H_POW2, GHASHDATA2, GHASHTMP1	// LO_2
.elseif \i == 2
	vpxord		GHASHTMP0, GHASH_ACC, GHASH_ACC		// sum(LO_{1,0})
	vpclmulqdq	$0x00, H_POW1, GHASHDATA3, GHASHTMP2	// LO_3
	vpternlogd	$0x96, GHASHTMP2, GHASHTMP1, GHASH_ACC	// LO = sum(LO_{3,2,1,0})
	vpclmulqdq	$0x01, H_POW4, GHASHDATA0, GHASHTMP0	// MI_0
.elseif \i == 3
	vpclmulqdq	$0x01, H_POW3, GHASHDATA1, GHASHTMP1	// MI_1
	vpclmulqdq	$0x01, H_POW2, GHASHDATA2, GHASHTMP2	// MI_2
	vpternlogd	$0x96, GHASHTMP2, GHASHTMP1, GHASHTMP0	// sum(MI_{2,1,0})
	vpclmulqdq	$0x01, H_POW1, GHASHDATA3, GHASHTMP1	// MI_3
.elseif \i == 4
	vpclmulqdq	$0x10, H_POW4, GHASHDATA0, GHASHTMP2	// MI_4
	vpternlogd	$0x96, GHASHTMP2, GHASHTMP1, GHASHTMP0	// sum(MI_{4,3,2,1,0})
	vpclmulqdq	$0x10, H_POW3, GHASHDATA1, GHASHTMP1	// MI_5
	vpclmulqdq	$0x10, H_POW2, GHASHDATA2, GHASHTMP2	// MI_6
.elseif \i == 5
	vpternlogd	$0x96, GHASHTMP2, GHASHTMP1, GHASHTMP0	// sum(MI_{6,5,4,3,2,1,0})
	vpclmulqdq	$0x01, GHASH_ACC, GFPOLY, GHASHTMP2	// LO_L*(x^63 + x^62 + x^57)
	vpclmulqdq	$0x10, H_POW1, GHASHDATA3, GHASHTMP1	// MI_7
	vpxord		GHASHTMP1, GHASHTMP0, GHASHTMP0		// MI = sum(MI_{7,6,5,4,3,2,1,0})
.elseif \i == 6
	vpshufd		$0x4e, GHASH_ACC, GHASH_ACC		// Swap halves of LO
	vpclmulqdq	$0x11, H_POW4, GHASHDATA0, GHASHDATA0	// HI_0
	vpclmulqdq	$0x11, H_POW3, GHASHDATA1, GHASHDATA1	// HI_1
	vpclmulqdq	$0x11, H_POW2, GHASHDATA2, GHASHDATA2	// HI_2
.elseif \i == 7
	vpternlogd	$0x96, GHASHTMP2, GHASH_ACC, GHASHTMP0	// Fold LO into MI
	vpclmulqdq	$0x11, H_POW1, GHASHDATA3, GHASHDATA3	// HI_3
	vpternlogd	$0x96, GHASHDATA2, GHASHDATA1, GHASHDATA0 // sum(HI_{2,1,0})
	vpclmulqdq	$0x01, GHASHTMP0, GFPOLY, GHASHTMP1	// MI_L*(x^63 + x^62 + x^57)
.elseif \i == 8
	vpxord		GHASHDATA3, GHASHDATA0, GHASH_ACC	// HI = sum(HI_{3,2,1,0})
	vpshufd		$0x4e, GHASHTMP0, GHASHTMP0		// Swap halves of MI
	vpternlogd	$0x96, GHASHTMP1, GHASHTMP0, GHASH_ACC	// Fold MI into HI
.elseif \i == 9
	_horizontal_xor	GHASH_ACC, GHASH_ACC_XMM, GHASH_ACC_XMM, \
			GHASHDATA0_XMM, GHASHDATA1_XMM, GHASHDATA2_XMM
.endif
.endm

// Update GHASH with four vectors of data blocks.  See _ghash_step_4x for full
// explanation.
.macro	_ghash_4x
.irp i, 0,1,2,3,4,5,6,7,8,9
	_ghash_step_4x	\i
.endr
.endm

// void aes_gcm_aad_update_vaes_avx512(const struct aes_gcm_key_vaes_avx512 *key,
//				       u8 ghash_acc[16],
//				       const u8 *aad, int aadlen);
//
// This function processes the AAD (Additional Authenticated Data) in GCM.
// Using the key |key|, it updates the GHASH accumulator |ghash_acc| with the
// data given by |aad| and |aadlen|.  On the first call, |ghash_acc| must be all
// zeroes.  |aadlen| must be a multiple of 16, except on the last call where it
// can be any length.  The caller must do any buffering needed to ensure this.
//
// This handles large amounts of AAD efficiently, while also keeping overhead
// low for small amounts, which is the common case.  TLS and IPsec use less
// than one block of AAD, but (uncommonly) other use cases may use much more.
SYM_FUNC_START(aes_gcm_aad_update_vaes_avx512)

	// Function arguments
	.set	KEY,		%rdi
	.set	GHASH_ACC_PTR,	%rsi
	.set	AAD,		%rdx
	.set	AADLEN,		%ecx
	.set	AADLEN64,	%rcx	// Zero-extend AADLEN before using!

	// Additional local variables.
	// %rax and %k1 are used as temporary registers.
	.set	GHASHDATA0,	%zmm0
	.set	GHASHDATA0_XMM,	%xmm0
	.set	GHASHDATA1,	%zmm1
	.set	GHASHDATA1_XMM,	%xmm1
	.set	GHASHDATA2,	%zmm2
	.set	GHASHDATA2_XMM,	%xmm2
	.set	GHASHDATA3,	%zmm3
	.set	BSWAP_MASK,	%zmm4
	.set	BSWAP_MASK_XMM,	%xmm4
	.set	GHASH_ACC,	%zmm5
	.set	GHASH_ACC_XMM,	%xmm5
	.set	H_POW4,		%zmm6
	.set	H_POW3,		%zmm7
	.set	H_POW2,		%zmm8
	.set	H_POW1,		%zmm9
	.set	H_POW1_XMM,	%xmm9
	.set	GFPOLY,		%zmm10
	.set	GFPOLY_XMM,	%xmm10
	.set	GHASHTMP0,	%zmm11
	.set	GHASHTMP1,	%zmm12
	.set	GHASHTMP2,	%zmm13

	// Load the GHASH accumulator.
	vmovdqu		(GHASH_ACC_PTR), GHASH_ACC_XMM

	// Check for the common case of AADLEN <= 16, as well as AADLEN == 0.
	cmp		$16, AADLEN
	jg		.Laad_more_than_16bytes
	test		AADLEN, AADLEN
	jz		.Laad_done

	// Fast path: update GHASH with 1 <= AADLEN <= 16 bytes of AAD.
	vmovdqu		.Lbswap_mask(%rip), BSWAP_MASK_XMM
	vmovdqu		.Lgfpoly(%rip), GFPOLY_XMM
	mov		$-1, %eax
	bzhi		AADLEN, %eax, %eax
	kmovd		%eax, %k1
	vmovdqu8	(AAD), GHASHDATA0_XMM{%k1}{z}
	vmovdqu		OFFSETOFEND_H_POWERS-16(KEY), H_POW1_XMM
	vpshufb		BSWAP_MASK_XMM, GHASHDATA0_XMM, GHASHDATA0_XMM
	vpxor		GHASHDATA0_XMM, GHASH_ACC_XMM, GHASH_ACC_XMM
	_ghash_mul	H_POW1_XMM, GHASH_ACC_XMM, GHASH_ACC_XMM, GFPOLY_XMM, \
			GHASHDATA0_XMM, GHASHDATA1_XMM, GHASHDATA2_XMM
	jmp		.Laad_done

.Laad_more_than_16bytes:
	vbroadcasti32x4	.Lbswap_mask(%rip), BSWAP_MASK
	vbroadcasti32x4	.Lgfpoly(%rip), GFPOLY

	// If AADLEN >= 256, update GHASH with 256 bytes of AAD at a time.
	sub		$256, AADLEN
	jl		.Laad_loop_4x_done
	vmovdqu8	OFFSETOFEND_H_POWERS-4*64(KEY), H_POW4
	vmovdqu8	OFFSETOFEND_H_POWERS-3*64(KEY), H_POW3
	vmovdqu8	OFFSETOFEND_H_POWERS-2*64(KEY), H_POW2
	vmovdqu8	OFFSETOFEND_H_POWERS-1*64(KEY), H_POW1
.Laad_loop_4x:
	vmovdqu8	0*64(AAD), GHASHDATA0
	vmovdqu8	1*64(AAD), GHASHDATA1
	vmovdqu8	2*64(AAD), GHASHDATA2
	vmovdqu8	3*64(AAD), GHASHDATA3
	_ghash_4x
	add		$256, AAD
	sub		$256, AADLEN
	jge		.Laad_loop_4x
.Laad_loop_4x_done:

	// If AADLEN >= 64, update GHASH with 64 bytes of AAD at a time.
	add		$192, AADLEN
	jl		.Laad_loop_1x_done
	vmovdqu8	OFFSETOFEND_H_POWERS-1*64(KEY), H_POW1
.Laad_loop_1x:
	vmovdqu8	(AAD), GHASHDATA0
	vpshufb		BSWAP_MASK, GHASHDATA0, GHASHDATA0
	vpxord		GHASHDATA0, GHASH_ACC, GHASH_ACC
	_ghash_mul	H_POW1, GHASH_ACC, GHASH_ACC, GFPOLY, \
			GHASHDATA0, GHASHDATA1, GHASHDATA2
	_horizontal_xor GHASH_ACC, GHASH_ACC_XMM, GHASH_ACC_XMM, \
			GHASHDATA0_XMM, GHASHDATA1_XMM, GHASHDATA2_XMM
	add		$64, AAD
	sub		$64, AADLEN
	jge		.Laad_loop_1x
.Laad_loop_1x_done:

	// Update GHASH with the remaining 0 <= AADLEN < 64 bytes of AAD.
	add		$64, AADLEN
	jz		.Laad_done
	mov		$-1, %rax
	bzhi		AADLEN64, %rax, %rax
	kmovq		%rax, %k1
	vmovdqu8	(AAD), GHASHDATA0{%k1}{z}
	neg		AADLEN64
	and		$~15, AADLEN64  // -round_up(AADLEN, 16)
	vmovdqu8	OFFSETOFEND_H_POWERS(KEY,AADLEN64), H_POW1
	vpshufb		BSWAP_MASK, GHASHDATA0, GHASHDATA0
	vpxord		GHASHDATA0, GHASH_ACC, GHASH_ACC
	_ghash_mul	H_POW1, GHASH_ACC, GHASH_ACC, GFPOLY, \
			GHASHDATA0, GHASHDATA1, GHASHDATA2
	_horizontal_xor GHASH_ACC, GHASH_ACC_XMM, GHASH_ACC_XMM, \
			GHASHDATA0_XMM, GHASHDATA1_XMM, GHASHDATA2_XMM

.Laad_done:
	// Store the updated GHASH accumulator back to memory.
	vmovdqu		GHASH_ACC_XMM, (GHASH_ACC_PTR)

	vzeroupper	// This is needed after using ymm or zmm registers.
	RET
SYM_FUNC_END(aes_gcm_aad_update_vaes_avx512)

// Do one non-last round of AES encryption on the blocks in %zmm[0-3] using the
// round key that has been broadcast to all 128-bit lanes of \round_key.
.macro	_vaesenc_4x	round_key
	vaesenc		\round_key, %zmm0, %zmm0
	vaesenc		\round_key, %zmm1, %zmm1
	vaesenc		\round_key, %zmm2, %zmm2
	vaesenc		\round_key, %zmm3, %zmm3
.endm

// Start the AES encryption of four vectors of counter blocks.
.macro	_ctr_begin_4x

	// Increment LE_CTR four times to generate four vectors of little-endian
	// counter blocks, swap each to big-endian, and store them in %zmm[0-3].
	vpshufb		BSWAP_MASK, LE_CTR, %zmm0
	vpaddd		LE_CTR_INC, LE_CTR, LE_CTR
	vpshufb		BSWAP_MASK, LE_CTR, %zmm1
	vpaddd		LE_CTR_INC, LE_CTR, LE_CTR
	vpshufb		BSWAP_MASK, LE_CTR, %zmm2
	vpaddd		LE_CTR_INC, LE_CTR, LE_CTR
	vpshufb		BSWAP_MASK, LE_CTR, %zmm3
	vpaddd		LE_CTR_INC, LE_CTR, LE_CTR

	// AES "round zero": XOR in the zero-th round key.
	vpxord		RNDKEY0, %zmm0, %zmm0
	vpxord		RNDKEY0, %zmm1, %zmm1
	vpxord		RNDKEY0, %zmm2, %zmm2
	vpxord		RNDKEY0, %zmm3, %zmm3
.endm

// Do the last AES round for four vectors of counter blocks %zmm[0-3], XOR
// source data with the resulting keystream, and write the result to DST and
// GHASHDATA[0-3].  (Implementation differs slightly, but has the same effect.)
.macro	_aesenclast_and_xor_4x
	// XOR the source data with the last round key, saving the result in
	// GHASHDATA[0-3].  This reduces latency by taking advantage of the
	// property vaesenclast(key, a) ^ b == vaesenclast(key ^ b, a).
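	// (This identity holds because the last AES round computes
	// ShiftRows(SubBytes(a)) XOR key, so a value XOR'd with the result can
	// equivalently be XOR'd into the key operand beforehand.)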
	vpxord		0*64(SRC), RNDKEYLAST, GHASHDATA0
	vpxord		1*64(SRC), RNDKEYLAST, GHASHDATA1
	vpxord		2*64(SRC), RNDKEYLAST, GHASHDATA2
	vpxord		3*64(SRC), RNDKEYLAST, GHASHDATA3

	// Do the last AES round.  This handles the XOR with the source data
	// too, as per the optimization described above.
	vaesenclast	GHASHDATA0, %zmm0, GHASHDATA0
	vaesenclast	GHASHDATA1, %zmm1, GHASHDATA1
	vaesenclast	GHASHDATA2, %zmm2, GHASHDATA2
	vaesenclast	GHASHDATA3, %zmm3, GHASHDATA3

	// Store the en/decrypted data to DST.
	vmovdqu8	GHASHDATA0, 0*64(DST)
	vmovdqu8	GHASHDATA1, 1*64(DST)
	vmovdqu8	GHASHDATA2, 2*64(DST)
	vmovdqu8	GHASHDATA3, 3*64(DST)
.endm

// void aes_gcm_{enc,dec}_update_vaes_avx512(const struct aes_gcm_key_vaes_avx512 *key,
//					     const u32 le_ctr[4], u8 ghash_acc[16],
//					     const u8 *src, u8 *dst, int datalen);
//
// This macro generates a GCM encryption or decryption update function with the
// above prototype (with \enc selecting which one).  The function computes the
// next portion of the CTR keystream, XOR's it with |datalen| bytes from |src|,
// and writes the resulting encrypted or decrypted data to |dst|.  It also
// updates the GHASH accumulator |ghash_acc| using the next |datalen| ciphertext
// bytes.
//
// |datalen| must be a multiple of 16, except on the last call where it can be
// any length.  The caller must do any buffering needed to ensure this.  Both
// in-place and out-of-place en/decryption are supported.
//
// |le_ctr| must give the current counter in little-endian format.  This
// function loads the counter from |le_ctr| and increments the loaded counter as
// needed, but it does *not* store the updated counter back to |le_ctr|.  The
// caller must update |le_ctr| if any more data segments follow.  Internally,
// only the low 32-bit word of the counter is incremented, following the GCM
// standard.
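//
// For illustration, a caller encrypting a message in multiple segments would
// do roughly the following (hypothetical pseudocode; the real callers live in
// the C glue code):
//
//	le_ctr = initial counter;  ghash_acc = all zeroes;
//	for each segment (all but the last a multiple of 16 bytes in length):
//		aes_gcm_enc_update_vaes_avx512(key, le_ctr, ghash_acc,
//					       src, dst, seglen);
//		le_ctr[0] += seglen / 16;	// if more segments follow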
.macro	_aes_gcm_update	enc

	// Function arguments
	.set	KEY,		%rdi
	.set	LE_CTR_PTR,	%rsi
	.set	GHASH_ACC_PTR,	%rdx
	.set	SRC,		%rcx
	.set	DST,		%r8
	.set	DATALEN,	%r9d
	.set	DATALEN64,	%r9	// Zero-extend DATALEN before using!

	// Additional local variables

	// %rax and %k1 are used as temporary registers.  LE_CTR_PTR is also
	// available as a temporary register after the counter is loaded.

	// AES key length in bytes
	.set	AESKEYLEN,	%r10d
	.set	AESKEYLEN64,	%r10

	// Pointer to the last AES round key for the chosen AES variant
	.set	RNDKEYLAST_PTR,	%r11

	// In the main loop, %zmm[0-3] are used as AES input and output.
	// Elsewhere they are used as temporary registers.

	// GHASHDATA[0-3] hold the ciphertext blocks and GHASH input data.
	.set	GHASHDATA0,	%zmm4
	.set	GHASHDATA0_XMM,	%xmm4
	.set	GHASHDATA1,	%zmm5
	.set	GHASHDATA1_XMM,	%xmm5
	.set	GHASHDATA2,	%zmm6
	.set	GHASHDATA2_XMM,	%xmm6
	.set	GHASHDATA3,	%zmm7

	// BSWAP_MASK is the shuffle mask for byte-reflecting 128-bit values
	// using vpshufb, copied to all 128-bit lanes.
	.set	BSWAP_MASK,	%zmm8

	// RNDKEY temporarily holds the next AES round key.
	.set	RNDKEY,		%zmm9

	// GHASH_ACC is the accumulator variable for GHASH.  When fully reduced,
	// only the lowest 128-bit lane can be nonzero.  When not fully reduced,
	// more than one lane may be used, and they need to be XOR'd together.
	.set	GHASH_ACC,	%zmm10
	.set	GHASH_ACC_XMM,	%xmm10

	// LE_CTR_INC is the vector of 32-bit words that need to be added to a
	// vector of little-endian counter blocks to advance it forwards.
	.set	LE_CTR_INC,	%zmm11

	// LE_CTR contains the next set of little-endian counter blocks.
	.set	LE_CTR,		%zmm12

	// RNDKEY0, RNDKEYLAST, and RNDKEY_M[9-1] contain cached AES round keys,
	// copied to all 128-bit lanes.  RNDKEY0 is the zero-th round key,
	// RNDKEYLAST the last, and RNDKEY_M\i the one \i-th from the last.
	.set	RNDKEY0,	%zmm13
	.set	RNDKEYLAST,	%zmm14
	.set	RNDKEY_M9,	%zmm15
	.set	RNDKEY_M8,	%zmm16
	.set	RNDKEY_M7,	%zmm17
	.set	RNDKEY_M6,	%zmm18
	.set	RNDKEY_M5,	%zmm19
	.set	RNDKEY_M4,	%zmm20
	.set	RNDKEY_M3,	%zmm21
	.set	RNDKEY_M2,	%zmm22
	.set	RNDKEY_M1,	%zmm23

	// GHASHTMP[0-2] are temporary variables used by _ghash_step_4x.  These
	// cannot coincide with anything used for AES encryption, since for
	// performance reasons GHASH and AES encryption are interleaved.
	.set	GHASHTMP0,	%zmm24
	.set	GHASHTMP1,	%zmm25
	.set	GHASHTMP2,	%zmm26

	// H_POW[4-1] contain the powers of the hash key H^16...H^1.  The
	// descending numbering reflects the order of the key powers.
	.set	H_POW4,		%zmm27
	.set	H_POW3,		%zmm28
	.set	H_POW2,		%zmm29
	.set	H_POW1,		%zmm30

	// GFPOLY contains the .Lgfpoly constant, copied to all 128-bit lanes.
	.set	GFPOLY,		%zmm31

	// Load some constants.
	vbroadcasti32x4	.Lbswap_mask(%rip), BSWAP_MASK
	vbroadcasti32x4	.Lgfpoly(%rip), GFPOLY

	// Load the GHASH accumulator and the starting counter.
	vmovdqu		(GHASH_ACC_PTR), GHASH_ACC_XMM
	vbroadcasti32x4	(LE_CTR_PTR), LE_CTR

	// Load the AES key length in bytes.
	movl		OFFSETOF_AESKEYLEN(KEY), AESKEYLEN

	// Make RNDKEYLAST_PTR point to the last AES round key.  This is the
	// round key with index 10, 12, or 14 for AES-128, AES-192, or AES-256
	// respectively.  Then load the zero-th and last round keys.
	lea		6*16(KEY,AESKEYLEN64,4), RNDKEYLAST_PTR
	vbroadcasti32x4	(KEY), RNDKEY0
	vbroadcasti32x4	(RNDKEYLAST_PTR), RNDKEYLAST

	// Finish initializing LE_CTR by adding [0, 1, ...] to its low words.
	vpaddd		.Lctr_pattern(%rip), LE_CTR, LE_CTR

	// Load 4 into all 128-bit lanes of LE_CTR_INC.
	vbroadcasti32x4	.Linc_4blocks(%rip), LE_CTR_INC

	// If there are at least 256 bytes of data, then continue into the loop
	// that processes 256 bytes of data at a time.  Otherwise skip it.
	//
	// Pre-subtracting 256 from DATALEN saves an instruction from the main
	// loop and also ensures that at least one write always occurs to
	// DATALEN, zero-extending it and allowing DATALEN64 to be used later.
	sub		$256, DATALEN
	jl		.Lcrypt_loop_4x_done\@

	// Load powers of the hash key.
	vmovdqu8	OFFSETOFEND_H_POWERS-4*64(KEY), H_POW4
	vmovdqu8	OFFSETOFEND_H_POWERS-3*64(KEY), H_POW3
	vmovdqu8	OFFSETOFEND_H_POWERS-2*64(KEY), H_POW2
	vmovdqu8	OFFSETOFEND_H_POWERS-1*64(KEY), H_POW1

	// Main loop: en/decrypt and hash 4 vectors at a time.
	//
	// When possible, interleave the AES encryption of the counter blocks
	// with the GHASH update of the ciphertext blocks.  This improves
	// performance on many CPUs because the execution ports used by the VAES
	// instructions often differ from those used by vpclmulqdq and other
	// instructions used in GHASH.  For example, many Intel CPUs dispatch
	// vaesenc to ports 0 and 1 and vpclmulqdq to port 5.
	//
	// The interleaving is easiest to do during decryption, since during
	// decryption the ciphertext blocks are immediately available.  For
	// encryption, instead encrypt the first set of blocks, then hash those
	// blocks while encrypting the next set of blocks, repeat that as
	// needed, and finally hash the last set of blocks.
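	//
	// Schematically, per 256-byte set i (E = encrypt, H = GHASH update):
	//
	//	Decryption:  E(0)+H(0), E(1)+H(1), ..., E(n)+H(n)
	//	Encryption:  E(0), E(1)+H(0), E(2)+H(1), ..., E(n)+H(n-1), H(n)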
83212beec21SEric Biggers
83312beec21SEric Biggers.if \enc
83412beec21SEric Biggers	// Encrypt the first 4 vectors of plaintext blocks.  Leave the resulting
83512beec21SEric Biggers	// ciphertext in GHASHDATA[0-3] for GHASH.
83612beec21SEric Biggers	_ctr_begin_4x
83712beec21SEric Biggers	lea		16(KEY), %rax
83812beec21SEric Biggers1:
83912beec21SEric Biggers	vbroadcasti32x4	(%rax), RNDKEY
84012beec21SEric Biggers	_vaesenc_4x	RNDKEY
84112beec21SEric Biggers	add		$16, %rax
84212beec21SEric Biggers	cmp		%rax, RNDKEYLAST_PTR
84312beec21SEric Biggers	jne		1b
84412beec21SEric Biggers	_aesenclast_and_xor_4x
8454b582e0fSEric Biggers	add		$256, SRC
8464b582e0fSEric Biggers	add		$256, DST
8474b582e0fSEric Biggers	sub		$256, DATALEN
84812beec21SEric Biggers	jl		.Lghash_last_ciphertext_4x\@
84912beec21SEric Biggers.endif
85012beec21SEric Biggers
85112beec21SEric Biggers	// Cache as many additional AES round keys as possible.
85212beec21SEric Biggers.irp i, 9,8,7,6,5,4,3,2,1
85312beec21SEric Biggers	vbroadcasti32x4	-\i*16(RNDKEYLAST_PTR), RNDKEY_M\i
85412beec21SEric Biggers.endr
85512beec21SEric Biggers
85612beec21SEric Biggers.Lcrypt_loop_4x\@:
85712beec21SEric Biggers
85812beec21SEric Biggers	// If decrypting, load more ciphertext blocks into GHASHDATA[0-3].  If
85912beec21SEric Biggers	// encrypting, GHASHDATA[0-3] already contain the previous ciphertext.
86012beec21SEric Biggers.if !\enc
8614b582e0fSEric Biggers	vmovdqu8	0*64(SRC), GHASHDATA0
8624b582e0fSEric Biggers	vmovdqu8	1*64(SRC), GHASHDATA1
8634b582e0fSEric Biggers	vmovdqu8	2*64(SRC), GHASHDATA2
8644b582e0fSEric Biggers	vmovdqu8	3*64(SRC), GHASHDATA3
86512beec21SEric Biggers.endif
86612beec21SEric Biggers
86712beec21SEric Biggers	// Start the AES encryption of the counter blocks.
86812beec21SEric Biggers	_ctr_begin_4x
86912beec21SEric Biggers	cmp		$24, AESKEYLEN
87012beec21SEric Biggers	jl		128f	// AES-128?
87112beec21SEric Biggers	je		192f	// AES-192?
87212beec21SEric Biggers	// AES-256
87312beec21SEric Biggers	vbroadcasti32x4	-13*16(RNDKEYLAST_PTR), RNDKEY
87412beec21SEric Biggers	_vaesenc_4x	RNDKEY
87512beec21SEric Biggers	vbroadcasti32x4	-12*16(RNDKEYLAST_PTR), RNDKEY
87612beec21SEric Biggers	_vaesenc_4x	RNDKEY
87712beec21SEric Biggers192:
87812beec21SEric Biggers	vbroadcasti32x4	-11*16(RNDKEYLAST_PTR), RNDKEY
87912beec21SEric Biggers	_vaesenc_4x	RNDKEY
88012beec21SEric Biggers	vbroadcasti32x4	-10*16(RNDKEYLAST_PTR), RNDKEY
88112beec21SEric Biggers	_vaesenc_4x	RNDKEY
88212beec21SEric Biggers128:
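	// (Round-count note: AES uses AESKEYLEN/4 + 6 rounds, i.e. 10, 12, or 14.
	//  The initial XOR with the zeroth round key was done in _ctr_begin_4x and
	//  the last 10 rounds are shared below, so only the 0, 2, or 4 extra early
	//  rounds differ by key length here.)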
88312beec21SEric Biggers
8844b582e0fSEric Biggers	// Finish the AES encryption of the counter blocks in %zmm[0-3],
8854b582e0fSEric Biggers	// interleaved with the GHASH update of the ciphertext blocks in
8864b582e0fSEric Biggers	// GHASHDATA[0-3].
88712beec21SEric Biggers.irp i, 9,8,7,6,5,4,3,2,1
88812beec21SEric Biggers	_ghash_step_4x  (9 - \i)
88912beec21SEric Biggers	_vaesenc_4x	RNDKEY_M\i
89012beec21SEric Biggers.endr
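	// (The .irp above interleaves the two computations: GHASH step 0 with the
	//  vaesenc of RNDKEY_M9, GHASH step 1 with RNDKEY_M8, and so on through
	//  GHASH step 8 with RNDKEY_M1.)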
89112beec21SEric Biggers	_ghash_step_4x	9
89212beec21SEric Biggers	_aesenclast_and_xor_4x
8934b582e0fSEric Biggers	add		$256, SRC
8944b582e0fSEric Biggers	add		$256, DST
8954b582e0fSEric Biggers	sub		$256, DATALEN
89612beec21SEric Biggers	jge		.Lcrypt_loop_4x\@
89712beec21SEric Biggers
89812beec21SEric Biggers.if \enc
89912beec21SEric Biggers.Lghash_last_ciphertext_4x\@:
90012beec21SEric Biggers	// Update GHASH with the last set of ciphertext blocks.
901*05794985SEric Biggers	_ghash_4x
90212beec21SEric Biggers.endif
90312beec21SEric Biggers
90412beec21SEric Biggers.Lcrypt_loop_4x_done\@:
90512beec21SEric Biggers
9064b582e0fSEric Biggers	// Undo the extra subtraction by 256 and check whether data remains.
9074b582e0fSEric Biggers	add		$256, DATALEN
90812beec21SEric Biggers	jz		.Ldone\@
90912beec21SEric Biggers
9104b582e0fSEric Biggers	// The data length isn't a multiple of 256 bytes.  Process the remaining
9114b582e0fSEric Biggers	// data of length 1 <= DATALEN < 256, up to one 64-byte vector at a
9124b582e0fSEric Biggers	// time.  Going one vector at a time may seem inefficient compared to
9134b582e0fSEric Biggers	// having separate code paths for each possible number of vectors
9144b582e0fSEric Biggers	// remaining.  However, using a loop keeps the code size down, and it
9154b582e0fSEric Biggers	// performs surprisingly well; modern CPUs will start executing the next
9164b582e0fSEric Biggers	// iteration before the previous one finishes and also predict the
9174b582e0fSEric Biggers	// number of loop iterations.  For a similar reason, we roll up the AES
9184b582e0fSEric Biggers	// rounds.
91912beec21SEric Biggers	//
9204b582e0fSEric Biggers	// On the last iteration, the remaining length may be less than 64
9214b582e0fSEric Biggers	// bytes.  Handle this using masking.
92212beec21SEric Biggers	//
92312beec21SEric Biggers	// Since there are enough key powers available for all remaining data,
92412beec21SEric Biggers	// there is no need to do a GHASH reduction after each iteration.
92512beec21SEric Biggers	// Instead, multiply each remaining block by its own key power, and only
92612beec21SEric Biggers	// do a GHASH reduction at the very end.
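	//
	// (This is valid because the reduction modulo the GHASH polynomial is
	//  GF(2)-linear: roughly, reduce(A) xor reduce(B) == reduce(A xor B), so
	//  the per-block unreduced products can be XOR-accumulated and reduced
	//  just once.)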
92712beec21SEric Biggers
92812beec21SEric Biggers	// Make POWERS_PTR point to the key powers [H^N, H^(N-1), ...] where N
92912beec21SEric Biggers	// is the number of blocks that remain.
93012beec21SEric Biggers	.set		POWERS_PTR, LE_CTR_PTR	// LE_CTR_PTR is free to be reused.
93112beec21SEric Biggers	mov		DATALEN, %eax
93212beec21SEric Biggers	neg		%rax
93312beec21SEric Biggers	and		$~15, %rax  // -round_up(DATALEN, 16)
93412beec21SEric Biggers	lea		OFFSETOFEND_H_POWERS(KEY,%rax), POWERS_PTR
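	// For example, if DATALEN = 100, then -round_up(100, 16) = -112, so
	// N = 7 blocks remain and POWERS_PTR = KEY + OFFSETOFEND_H_POWERS - 7*16,
	// i.e. it points to H^7 (the powers end with H^1 just below
	// OFFSETOFEND_H_POWERS).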
93512beec21SEric Biggers
93612beec21SEric Biggers	// Start collecting the unreduced GHASH intermediate value LO, MI, HI.
93712beec21SEric Biggers	.set		LO, GHASHDATA0
93812beec21SEric Biggers	.set		LO_XMM, GHASHDATA0_XMM
93912beec21SEric Biggers	.set		MI, GHASHDATA1
94012beec21SEric Biggers	.set		MI_XMM, GHASHDATA1_XMM
94112beec21SEric Biggers	.set		HI, GHASHDATA2
94212beec21SEric Biggers	.set		HI_XMM, GHASHDATA2_XMM
94312beec21SEric Biggers	vpxor		LO_XMM, LO_XMM, LO_XMM
94412beec21SEric Biggers	vpxor		MI_XMM, MI_XMM, MI_XMM
94512beec21SEric Biggers	vpxor		HI_XMM, HI_XMM, HI_XMM
94612beec21SEric Biggers
94712beec21SEric Biggers.Lcrypt_loop_1x\@:
94812beec21SEric Biggers
94912beec21SEric Biggers	// Select the appropriate mask for this iteration: all 1's if
9504b582e0fSEric Biggers	// DATALEN >= 64, otherwise DATALEN 1's.  Do this branchlessly using the
95112beec21SEric Biggers	// bzhi instruction from BMI2.  (This relies on DATALEN <= 255.)
95212beec21SEric Biggers	mov		$-1, %rax
95312beec21SEric Biggers	bzhi		DATALEN64, %rax, %rax
95412beec21SEric Biggers	kmovq		%rax, %k1
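	// (Roughly, in C: mask = datalen >= 64 ? ~0ULL : (1ULL << datalen) - 1,
	//  where datalen is the remaining byte count.)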
95512beec21SEric Biggers
95612beec21SEric Biggers	// Encrypt a vector of counter blocks.  This does not need to be masked.
9574b582e0fSEric Biggers	vpshufb		BSWAP_MASK, LE_CTR, %zmm0
95812beec21SEric Biggers	vpaddd		LE_CTR_INC, LE_CTR, LE_CTR
9594b582e0fSEric Biggers	vpxord		RNDKEY0, %zmm0, %zmm0
96012beec21SEric Biggers	lea		16(KEY), %rax
96112beec21SEric Biggers1:
96212beec21SEric Biggers	vbroadcasti32x4	(%rax), RNDKEY
9634b582e0fSEric Biggers	vaesenc		RNDKEY, %zmm0, %zmm0
96412beec21SEric Biggers	add		$16, %rax
96512beec21SEric Biggers	cmp		%rax, RNDKEYLAST_PTR
96612beec21SEric Biggers	jne		1b
9674b582e0fSEric Biggers	vaesenclast	RNDKEYLAST, %zmm0, %zmm0
96812beec21SEric Biggers
96912beec21SEric Biggers	// XOR the data with the appropriate number of keystream bytes.
9704b582e0fSEric Biggers	vmovdqu8	(SRC), %zmm1{%k1}{z}
9714b582e0fSEric Biggers	vpxord		%zmm1, %zmm0, %zmm0
9724b582e0fSEric Biggers	vmovdqu8	%zmm0, (DST){%k1}
97312beec21SEric Biggers
97412beec21SEric Biggers	// Update GHASH with the ciphertext block(s), without reducing.
97512beec21SEric Biggers	//
9764b582e0fSEric Biggers	// In the case of DATALEN < 64, the ciphertext is zero-padded to 64
9774b582e0fSEric Biggers	// bytes.  (If decrypting, it's done by the above masked load.  If
9784b582e0fSEric Biggers	// encrypting, it's done by the below masked register-to-register move.)
9794b582e0fSEric Biggers	// Note that if DATALEN <= 48, there will be additional padding beyond
9804b582e0fSEric Biggers	// the padding of the last block specified by GHASH itself; i.e., there
9814b582e0fSEric Biggers	// may be whole block(s) that get processed by the GHASH multiplication
9824b582e0fSEric Biggers	// and reduction instructions but should not actually be included in the
98312beec21SEric Biggers	// GHASH.  However, any such blocks are all-zeroes, and the values that
98412beec21SEric Biggers	// they're multiplied with are also all-zeroes.  Therefore they just add
98512beec21SEric Biggers	// 0 * 0 = 0 to the final GHASH result, which makes no difference.
98612beec21SEric Biggers	vmovdqu8	(POWERS_PTR), H_POW1
98712beec21SEric Biggers.if \enc
9884b582e0fSEric Biggers	vmovdqu8	%zmm0, %zmm1{%k1}{z}
98912beec21SEric Biggers.endif
9904b582e0fSEric Biggers	vpshufb		BSWAP_MASK, %zmm1, %zmm0
9914b582e0fSEric Biggers	vpxord		GHASH_ACC, %zmm0, %zmm0
9924b582e0fSEric Biggers	_ghash_mul_noreduce	H_POW1, %zmm0, LO, MI, HI, \
9934b582e0fSEric Biggers				GHASHDATA3, %zmm1, %zmm2, %zmm3
99412beec21SEric Biggers	vpxor		GHASH_ACC_XMM, GHASH_ACC_XMM, GHASH_ACC_XMM
99512beec21SEric Biggers
9964b582e0fSEric Biggers	add		$64, POWERS_PTR
9974b582e0fSEric Biggers	add		$64, SRC
9984b582e0fSEric Biggers	add		$64, DST
9994b582e0fSEric Biggers	sub		$64, DATALEN
100012beec21SEric Biggers	jg		.Lcrypt_loop_1x\@
100112beec21SEric Biggers
100212beec21SEric Biggers	// Finally, do the GHASH reduction.
10034b582e0fSEric Biggers	_ghash_reduce	LO, MI, HI, GFPOLY, %zmm0
100412beec21SEric Biggers	_horizontal_xor	HI, HI_XMM, GHASH_ACC_XMM, %xmm0, %xmm1, %xmm2
100512beec21SEric Biggers
100612beec21SEric Biggers.Ldone\@:
100712beec21SEric Biggers	// Store the updated GHASH accumulator back to memory.
100812beec21SEric Biggers	vmovdqu		GHASH_ACC_XMM, (GHASH_ACC_PTR)
100912beec21SEric Biggers
101012beec21SEric Biggers	vzeroupper	// This is needed after using ymm or zmm registers.
101112beec21SEric Biggers	RET
101212beec21SEric Biggers.endm
101312beec21SEric Biggers
101412beec21SEric Biggers// void aes_gcm_enc_final_vaes_avx512(const struct aes_gcm_key_vaes_avx512 *key,
101512beec21SEric Biggers//				      const u32 le_ctr[4], u8 ghash_acc[16],
101612beec21SEric Biggers//				      u64 total_aadlen, u64 total_datalen);
101712beec21SEric Biggers// bool aes_gcm_dec_final_vaes_avx512(const struct aes_gcm_key_vaes_avx512 *key,
101812beec21SEric Biggers//				      const u32 le_ctr[4],
101912beec21SEric Biggers//				      const u8 ghash_acc[16],
102012beec21SEric Biggers//				      u64 total_aadlen, u64 total_datalen,
102112beec21SEric Biggers//				      const u8 tag[16], int taglen);
102212beec21SEric Biggers//
102312beec21SEric Biggers// This macro generates one of the above two functions (with \enc selecting
102412beec21SEric Biggers// which one).  Both functions finish computing the GCM authentication tag by
102512beec21SEric Biggers// updating GHASH with the lengths block and encrypting the GHASH accumulator.
102612beec21SEric Biggers// |total_aadlen| and |total_datalen| must be the total length of the additional
102712beec21SEric Biggers// authenticated data and the en/decrypted data in bytes, respectively.
102812beec21SEric Biggers//
102912beec21SEric Biggers// The encryption function then stores the full-length (16-byte) computed
103012beec21SEric Biggers// authentication tag to |ghash_acc|.  The decryption function instead loads the
103112beec21SEric Biggers// expected authentication tag (the one that was transmitted) from the 16-byte
103212beec21SEric Biggers// buffer |tag|, compares the first 4 <= |taglen| <= 16 bytes of it to the
103312beec21SEric Biggers// computed tag in constant time, and returns true if and only if they match.
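//
// For example, a caller might finish decryption roughly like this (purely
// illustrative; the real call sites are in the C glue code):
//
//	if (!aes_gcm_dec_final_vaes_avx512(key, le_ctr, ghash_acc,
//					   total_aadlen, total_datalen,
//					   tag, taglen))
//		return -EBADMSG;	// authentication failed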
103412beec21SEric Biggers.macro	_aes_gcm_final	enc
103512beec21SEric Biggers
103612beec21SEric Biggers	// Function arguments
103712beec21SEric Biggers	.set	KEY,		%rdi
103812beec21SEric Biggers	.set	LE_CTR_PTR,	%rsi
103912beec21SEric Biggers	.set	GHASH_ACC_PTR,	%rdx
104012beec21SEric Biggers	.set	TOTAL_AADLEN,	%rcx
104112beec21SEric Biggers	.set	TOTAL_DATALEN,	%r8
104212beec21SEric Biggers	.set	TAG,		%r9
104312beec21SEric Biggers	.set	TAGLEN,		%r10d	// Originally at 8(%rsp)
104412beec21SEric Biggers
104512beec21SEric Biggers	// Additional local variables.
104612beec21SEric Biggers	// %rax, %xmm0-%xmm3, and %k1 are used as temporary registers.
104712beec21SEric Biggers	.set	AESKEYLEN,	%r11d
104812beec21SEric Biggers	.set	AESKEYLEN64,	%r11
104912beec21SEric Biggers	.set	GFPOLY,		%xmm4
105012beec21SEric Biggers	.set	BSWAP_MASK,	%xmm5
105112beec21SEric Biggers	.set	LE_CTR,		%xmm6
105212beec21SEric Biggers	.set	GHASH_ACC,	%xmm7
105312beec21SEric Biggers	.set	H_POW1,		%xmm8
105412beec21SEric Biggers
105512beec21SEric Biggers	// Load some constants.
105612beec21SEric Biggers	vmovdqa		.Lgfpoly(%rip), GFPOLY
105712beec21SEric Biggers	vmovdqa		.Lbswap_mask(%rip), BSWAP_MASK
105812beec21SEric Biggers
105912beec21SEric Biggers	// Load the AES key length in bytes.
106012beec21SEric Biggers	movl		OFFSETOF_AESKEYLEN(KEY), AESKEYLEN
106112beec21SEric Biggers
106212beec21SEric Biggers	// Set up a counter block with 1 in the low 32-bit word.  This is the
106312beec21SEric Biggers	// counter that produces the ciphertext needed to encrypt the auth tag.
106412beec21SEric Biggers	// GFPOLY has 1 in the low word, so grab the 1 from there using a blend.
106512beec21SEric Biggers	vpblendd	$0xe, (LE_CTR_PTR), GFPOLY, LE_CTR
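	// With imm8 = 0xe, dword 0 is taken from GFPOLY (the 1) and dwords 1-3
	// from (LE_CTR_PTR), i.e. LE_CTR = { 1, le_ctr[1], le_ctr[2], le_ctr[3] }.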
106612beec21SEric Biggers
106712beec21SEric Biggers	// Build the lengths block and XOR it with the GHASH accumulator.
106812beec21SEric Biggers	// Although the lengths block is defined as the AAD length followed by
106912beec21SEric Biggers	// the en/decrypted data length, both in big-endian byte order, a byte
107012beec21SEric Biggers	// reflection of the full block is needed because of the way we compute
107112beec21SEric Biggers	// GHASH (see _ghash_mul_step).  By using little-endian values in the
107212beec21SEric Biggers	// opposite order, we avoid having to reflect any bytes here.
107312beec21SEric Biggers	vmovq		TOTAL_DATALEN, %xmm0
107412beec21SEric Biggers	vpinsrq		$1, TOTAL_AADLEN, %xmm0, %xmm0
107512beec21SEric Biggers	vpsllq		$3, %xmm0, %xmm0	// Bytes to bits
107612beec21SEric Biggers	vpxor		(GHASH_ACC_PTR), %xmm0, GHASH_ACC
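	// Roughly, viewing the lengths block as two little-endian 64-bit words:
	//
	//	lengths[0] = total_datalen * 8;	// bit count, low qword
	//	lengths[1] = total_aadlen * 8;	// bit count, high qword
	//	ghash_acc[0] ^= lengths[0];
	//	ghash_acc[1] ^= lengths[1];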
107712beec21SEric Biggers
107812beec21SEric Biggers	// Load the first hash key power (H^1), which is stored last.
107912beec21SEric Biggers	vmovdqu8	OFFSETOFEND_H_POWERS-16(KEY), H_POW1
108012beec21SEric Biggers
108112beec21SEric Biggers.if !\enc
108212beec21SEric Biggers	// Prepare a mask of TAGLEN one bits.
108312beec21SEric Biggers	movl		8(%rsp), TAGLEN
108412beec21SEric Biggers	mov		$-1, %eax
108512beec21SEric Biggers	bzhi		TAGLEN, %eax, %eax
108612beec21SEric Biggers	kmovd		%eax, %k1
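	// (Same bzhi trick as in _aes_gcm_update: %k1 = (1 << TAGLEN) - 1, i.e.
	//  TAGLEN one bits, since 4 <= TAGLEN <= 16.)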
108712beec21SEric Biggers.endif
108812beec21SEric Biggers
108912beec21SEric Biggers	// Make %rax point to the last AES round key for the chosen AES variant.
109012beec21SEric Biggers	lea		6*16(KEY,AESKEYLEN64,4), %rax
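	// (The last round key lives at byte offset 16*nrounds, and since
	//  nrounds = AESKEYLEN/4 + 6, that offset equals 4*AESKEYLEN + 6*16,
	//  which is exactly what the lea computes.)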
109112beec21SEric Biggers
109212beec21SEric Biggers	// Start the AES encryption of the counter block by swapping the counter
109312beec21SEric Biggers	// block to big-endian and XOR-ing it with the zero-th AES round key.
109412beec21SEric Biggers	vpshufb		BSWAP_MASK, LE_CTR, %xmm0
109512beec21SEric Biggers	vpxor		(KEY), %xmm0, %xmm0
109612beec21SEric Biggers
109712beec21SEric Biggers	// Complete the AES encryption and multiply GHASH_ACC by H^1.
109812beec21SEric Biggers	// Interleave the AES and GHASH instructions to improve performance.
109912beec21SEric Biggers	cmp		$24, AESKEYLEN
110012beec21SEric Biggers	jl		128f	// AES-128?
110112beec21SEric Biggers	je		192f	// AES-192?
110212beec21SEric Biggers	// AES-256
110312beec21SEric Biggers	vaesenc		-13*16(%rax), %xmm0, %xmm0
110412beec21SEric Biggers	vaesenc		-12*16(%rax), %xmm0, %xmm0
110512beec21SEric Biggers192:
110612beec21SEric Biggers	vaesenc		-11*16(%rax), %xmm0, %xmm0
110712beec21SEric Biggers	vaesenc		-10*16(%rax), %xmm0, %xmm0
110812beec21SEric Biggers128:
110912beec21SEric Biggers.irp i, 0,1,2,3,4,5,6,7,8
111012beec21SEric Biggers	_ghash_mul_step	\i, H_POW1, GHASH_ACC, GHASH_ACC, GFPOLY, \
111112beec21SEric Biggers			%xmm1, %xmm2, %xmm3
111212beec21SEric Biggers	vaesenc		(\i-9)*16(%rax), %xmm0, %xmm0
111312beec21SEric Biggers.endr
111412beec21SEric Biggers	_ghash_mul_step	9, H_POW1, GHASH_ACC, GHASH_ACC, GFPOLY, \
111512beec21SEric Biggers			%xmm1, %xmm2, %xmm3
111612beec21SEric Biggers
111712beec21SEric Biggers	// Undo the byte reflection of the GHASH accumulator.
111812beec21SEric Biggers	vpshufb		BSWAP_MASK, GHASH_ACC, GHASH_ACC
111912beec21SEric Biggers
112012beec21SEric Biggers	// Do the last AES round and XOR the resulting keystream block with the
112112beec21SEric Biggers	// GHASH accumulator to produce the full computed authentication tag.
112212beec21SEric Biggers	//
112312beec21SEric Biggers	// Reduce latency by taking advantage of the property vaesenclast(key,
112412beec21SEric Biggers	// a) ^ b == vaesenclast(key ^ b, a).  I.e., XOR GHASH_ACC into the last
112512beec21SEric Biggers	// round key, instead of XOR'ing the final AES output with GHASH_ACC.
112612beec21SEric Biggers	//
112712beec21SEric Biggers	// enc_final then returns the computed auth tag, while dec_final
112812beec21SEric Biggers	// compares it with the transmitted one and returns a bool.  To compare
112912beec21SEric Biggers	// the tags, dec_final XORs them together and uses vptest to check
113012beec21SEric Biggers	// whether the result is all-zeroes.  This should be constant-time.
113112beec21SEric Biggers	// dec_final applies the vaesenclast optimization to this additional
113212beec21SEric Biggers	// value XOR'd too, using vpternlogd to XOR the last round key, GHASH
113312beec21SEric Biggers	// accumulator, and transmitted auth tag together in one instruction.
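	//
	// Roughly, the dec_final path does (illustrative pseudocode only):
	//
	//	tmp  = last_round_key ^ ghash_acc ^ transmitted_tag; // vpternlogd $0x96
	//	diff = aesenclast(tmp, aes_state);  // == computed_tag ^ transmitted_tag
	//	return (diff & taglen_mask) == 0;   // masked move + vptest + sete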
113412beec21SEric Biggers.if \enc
113512beec21SEric Biggers	vpxor		(%rax), GHASH_ACC, %xmm1
113612beec21SEric Biggers	vaesenclast	%xmm1, %xmm0, GHASH_ACC
113712beec21SEric Biggers	vmovdqu		GHASH_ACC, (GHASH_ACC_PTR)
113812beec21SEric Biggers.else
113912beec21SEric Biggers	vmovdqu		(TAG), %xmm1
114012beec21SEric Biggers	vpternlogd	$0x96, (%rax), GHASH_ACC, %xmm1
114112beec21SEric Biggers	vaesenclast	%xmm1, %xmm0, %xmm0
114212beec21SEric Biggers	xor		%eax, %eax
114312beec21SEric Biggers	vmovdqu8	%xmm0, %xmm0{%k1}{z}	// Truncate to TAGLEN bytes
114412beec21SEric Biggers	vptest		%xmm0, %xmm0
114512beec21SEric Biggers	sete		%al
114612beec21SEric Biggers.endif
114712beec21SEric Biggers	// No need for vzeroupper here, since only xmm registers were used.
114812beec21SEric Biggers	RET
114912beec21SEric Biggers.endm
115012beec21SEric Biggers
115112beec21SEric BiggersSYM_FUNC_START(aes_gcm_enc_update_vaes_avx512)
115212beec21SEric Biggers	_aes_gcm_update	1
115312beec21SEric BiggersSYM_FUNC_END(aes_gcm_enc_update_vaes_avx512)
115412beec21SEric BiggersSYM_FUNC_START(aes_gcm_dec_update_vaes_avx512)
115512beec21SEric Biggers	_aes_gcm_update	0
115612beec21SEric BiggersSYM_FUNC_END(aes_gcm_dec_update_vaes_avx512)
115712beec21SEric Biggers
115812beec21SEric BiggersSYM_FUNC_START(aes_gcm_enc_final_vaes_avx512)
115912beec21SEric Biggers	_aes_gcm_final	1
116012beec21SEric BiggersSYM_FUNC_END(aes_gcm_enc_final_vaes_avx512)
116112beec21SEric BiggersSYM_FUNC_START(aes_gcm_dec_final_vaes_avx512)
116212beec21SEric Biggers	_aes_gcm_final	0
116312beec21SEric BiggersSYM_FUNC_END(aes_gcm_dec_final_vaes_avx512)
1164