/* SPDX-License-Identifier: Apache-2.0 OR BSD-2-Clause */
//
// AES-GCM implementation for x86_64 CPUs that support the following CPU
// features: VAES && VPCLMULQDQ && AVX512BW && AVX512VL && BMI2
//
// Copyright 2024 Google LLC
//
// Author: Eric Biggers <ebiggers@google.com>
//
//------------------------------------------------------------------------------
//
// This file is dual-licensed, meaning that you can use it under your choice of
// either of the following two licenses:
//
// Licensed under the Apache License 2.0 (the "License").  You may obtain a copy
// of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// or
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
//    this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.

#include <linux/linkage.h>

.section .rodata
.p2align 6

	// A shuffle mask that reflects the bytes of 16-byte blocks
.Lbswap_mask:
	.octa	0x000102030405060708090a0b0c0d0e0f

	// This is the GHASH reducing polynomial without its constant term, i.e.
	// x^128 + x^7 + x^2 + x, represented using the backwards mapping
	// between bits and polynomial coefficients.
	//
	// Alternatively, it can be interpreted as the naturally-ordered
	// representation of the polynomial x^127 + x^126 + x^121 + 1, i.e. the
	// "reversed" GHASH reducing polynomial without its x^128 term.
.Lgfpoly:
	.octa	0xc2000000000000000000000000000001

	// Same as above, but with the (1 << 64) bit set.
.Lgfpoly_and_internal_carrybit:
	.octa	0xc2000000000000010000000000000001

	// Values needed to prepare the initial vector of counter blocks.
.Lctr_pattern:
	.octa	0
	.octa	1
	.octa	2
	.octa	3

	// The number of AES blocks per vector, as a 128-bit value.
.Linc_4blocks:
	.octa	4

// Number of powers of the hash key stored in the key struct.  The powers are
// stored from highest (H^NUM_H_POWERS) to lowest (H^1).
#define NUM_H_POWERS		16

// Offset to AES key length (in bytes) in the key struct
#define OFFSETOF_AESKEYLEN	480

// Offset to start of hash key powers array in the key struct
#define OFFSETOF_H_POWERS	512

// Offset to end of hash key powers array in the key struct.
//
// This is immediately followed by three zeroized padding blocks, which are
// included so that partial vectors can be handled more easily.  E.g. if two
// blocks remain, we load the 4 values [H^2, H^1, 0, 0].  The most padding
// blocks needed is 3, which occurs if [H^1, 0, 0, 0] is loaded.
#define OFFSETOFEND_H_POWERS	(OFFSETOF_H_POWERS + (NUM_H_POWERS * 16))
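
// For reference, these offsets imply a key struct laid out roughly as in the
// following comment-only C sketch.  The sketch is only an illustration
// inferred from the offsets above and the |key->base.aes_key|, |key->h_powers|,
// and |key->padding| references below; the authoritative definition is the
// struct aes_gcm_key_vaes_avx512 in the C glue code.
//
//	struct aes_gcm_key_vaes_avx512 {
//		// Expanded AES key; its length field is at byte offset 480.
//		struct { ... } base;
//		// Hash key powers H^16, ..., H^1, starting at byte offset 512.
//		u8 h_powers[NUM_H_POWERS][16];
//		// Three zeroized padding blocks.
//		u8 padding[3][16];
//	};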

.text

// The _ghash_mul_step macro does one step of GHASH multiplication of the
// 128-bit lanes of \a by the corresponding 128-bit lanes of \b and stores the
// reduced products in \dst.  \t0, \t1, and \t2 are temporary registers of the
// same size as \a and \b.  To complete all steps, this must be invoked with
// \i=0 through \i=9.  The division into steps allows users of this macro to
// optionally interleave the computation with other instructions.  Users of this
// macro must preserve the parameter registers across steps.
//
// The multiplications are done in GHASH's representation of the finite field
// GF(2^128).  Elements of GF(2^128) are represented as binary polynomials
// (i.e. polynomials whose coefficients are bits) modulo a reducing polynomial
// G.  The GCM specification uses G = x^128 + x^7 + x^2 + x + 1.  Addition is
// just XOR, while multiplication is more complex and has two parts: (a) do
// carryless multiplication of two 128-bit input polynomials to get a 256-bit
// intermediate product polynomial, and (b) reduce the intermediate product to
// 128 bits by adding multiples of G that cancel out terms in it.  (Adding
// multiples of G doesn't change which field element the polynomial represents.)
//
// Unfortunately, the GCM specification maps bits to/from polynomial
// coefficients backwards from the natural order.  In each byte it specifies the
// highest bit to be the lowest order polynomial coefficient, *not* the highest!
// This makes it nontrivial to work with the GHASH polynomials.  We could
// reflect the bits, but x86 doesn't have an instruction that does that.
//
// Instead, we operate on the values without bit-reflecting them.  This *mostly*
// just works, since XOR and carryless multiplication are symmetric with respect
// to bit order, but it has some consequences.  First, due to GHASH's byte
// order, by skipping bit reflection, *byte* reflection becomes necessary to
// give the polynomial terms a consistent order.  E.g., considering an N-bit
// value interpreted using the G = x^128 + x^7 + x^2 + x + 1 convention, bits 0
// through N-1 of the byte-reflected value represent the coefficients of x^(N-1)
// through x^0, whereas bits 0 through N-1 of the non-byte-reflected value
// represent x^7...x^0, x^15...x^8, ..., x^(N-1)...x^(N-8), which can't be
// worked with.  Fortunately, x86's vpshufb instruction can do byte reflection.
//
// Second, forgoing the bit reflection causes an extra multiple of x (still
// using the G = x^128 + x^7 + x^2 + x + 1 convention) to be introduced by each
// multiplication.  This is because an M-bit by N-bit carryless multiplication
// really produces an (M+N-1)-bit product, but in practice it's zero-extended to
// M+N bits.  In the G = x^128 + x^7 + x^2 + x + 1 convention, which maps bits
// to polynomial coefficients backwards, this zero-extension actually changes
// the product by introducing an extra factor of x.  Therefore, users of this
// macro must ensure that one of the inputs has an extra factor of x^-1, i.e.
// the multiplicative inverse of x, to cancel out the extra x.
//
// Third, the backwards coefficients convention is just confusing to work with,
// since it makes "low" and "high" in the polynomial math mean the opposite of
// their normal meaning in computer programming.  This can be solved by using an
// alternative interpretation: the polynomial coefficients are understood to be
// in the natural order, and the multiplication is actually \a * \b * x^-128 mod
// x^128 + x^127 + x^126 + x^121 + 1.  This doesn't change the inputs, outputs,
// or the implementation at all; it just changes the mathematical interpretation
// of what each instruction is doing.  Starting from here, we'll use this
// alternative interpretation, as it's easier to understand the code that way.
//
// Moving on to the implementation, the vpclmulqdq instruction does 64 x 64 =>
// 128-bit carryless multiplication, so we break the 128 x 128 multiplication
// into parts as follows (the _L and _H suffixes denote low and high 64 bits):
//
//     LO = a_L * b_L
//     MI = (a_L * b_H) + (a_H * b_L)
//     HI = a_H * b_H
//
// The 256-bit product is x^128*HI + x^64*MI + LO.  LO, MI, and HI are 128-bit.
// Note that MI "overlaps" with LO and HI.  We don't consolidate MI into LO and
// HI right away, since the way the reduction works makes that unnecessary.
//
// For the reduction, we cancel out the low 128 bits by adding multiples of G =
// x^128 + x^127 + x^126 + x^121 + 1.  This is done by two iterations, each of
// which cancels out the next lowest 64 bits.  Consider a value x^64*A + B,
// where A and B are 128-bit.  Adding B_L*G to that value gives:
//
//       x^64*A + B + B_L*G
//     = x^64*A + x^64*B_H + B_L + B_L*(x^128 + x^127 + x^126 + x^121 + 1)
//     = x^64*A + x^64*B_H + B_L + x^128*B_L + x^64*B_L*(x^63 + x^62 + x^57) + B_L
//     = x^64*A + x^64*B_H + x^128*B_L + x^64*B_L*(x^63 + x^62 + x^57) + B_L + B_L
//     = x^64*(A + B_H + x^64*B_L + B_L*(x^63 + x^62 + x^57))
//
// So: if we sum A, B with its halves swapped, and the low half of B times x^63
// + x^62 + x^57, we get a 128-bit value C where x^64*C is congruent to the
// original value x^64*A + B.  I.e., the low 64 bits got canceled out.
//
// We just need to apply this twice: first to fold LO into MI, and second to
// fold the updated MI into HI.
//
// The needed three-argument XORs are done using the vpternlogd instruction with
// immediate 0x96, since this is faster than two vpxord instructions.
//
// A potential optimization, assuming that b is fixed per-key (if a is fixed
// per-key it would work the other way around), is to use one iteration of the
// reduction described above to precompute a value c such that x^64*c = b mod G,
// and then multiply a_L by c (and implicitly by x^64) instead of by b:
//
//     MI = (a_L * c_L) + (a_H * b_L)
//     HI = (a_L * c_H) + (a_H * b_H)
//
// This would eliminate the LO part of the intermediate product, which would
// eliminate the need to fold LO into MI.  This would save two instructions,
// including a vpclmulqdq.  However, we currently don't use this optimization
// because it would require twice as many per-key precomputed values.
//
// Using Karatsuba multiplication instead of "schoolbook" multiplication
// similarly would save a vpclmulqdq but does not seem to be worth it.
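//
// As a comment-only summary (an illustration, not assembled code), the ten
// steps compute the following, where clmul64() denotes a 64 x 64 => 128-bit
// carryless multiplication, .L/.H denote the low/high 64-bit halves, swap()
// swaps the two halves of a 128-bit value, and GFPOLY.H = x^63 + x^62 + x^57:
//
//	LO  = clmul64(a.L, b.L)				// step 0
//	MI  = clmul64(a.L, b.H) ^ clmul64(a.H, b.L)	// steps 0-2
//	MI ^= swap(LO) ^ clmul64(LO.L, GFPOLY.H)	// fold LO into MI (steps 3-5)
//	HI  = clmul64(a.H, b.H)				// step 6
//	HI ^= swap(MI) ^ clmul64(MI.L, GFPOLY.H)	// fold MI into HI (steps 7-9)
//	dst = HI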
.macro	_ghash_mul_step	i, a, b, dst, gfpoly, t0, t1, t2
.if \i == 0
	vpclmulqdq	$0x00, \a, \b, \t0	  // LO = a_L * b_L
	vpclmulqdq	$0x01, \a, \b, \t1	  // MI_0 = a_L * b_H
.elseif \i == 1
	vpclmulqdq	$0x10, \a, \b, \t2	  // MI_1 = a_H * b_L
.elseif \i == 2
	vpxord		\t2, \t1, \t1		  // MI = MI_0 + MI_1
.elseif \i == 3
	vpclmulqdq	$0x01, \t0, \gfpoly, \t2  // LO_L*(x^63 + x^62 + x^57)
.elseif \i == 4
	vpshufd		$0x4e, \t0, \t0		  // Swap halves of LO
.elseif \i == 5
	vpternlogd	$0x96, \t2, \t0, \t1	  // Fold LO into MI
.elseif \i == 6
	vpclmulqdq	$0x11, \a, \b, \dst	  // HI = a_H * b_H
.elseif \i == 7
	vpclmulqdq	$0x01, \t1, \gfpoly, \t0  // MI_L*(x^63 + x^62 + x^57)
.elseif \i == 8
	vpshufd		$0x4e, \t1, \t1		  // Swap halves of MI
.elseif \i == 9
	vpternlogd	$0x96, \t0, \t1, \dst	  // Fold MI into HI
.endif
.endm

// GHASH-multiply the 128-bit lanes of \a by the 128-bit lanes of \b and store
// the reduced products in \dst.  See _ghash_mul_step for full explanation.
.macro	_ghash_mul	a, b, dst, gfpoly, t0, t1, t2
.irp i, 0,1,2,3,4,5,6,7,8,9
	_ghash_mul_step	\i, \a, \b, \dst, \gfpoly, \t0, \t1, \t2
.endr
.endm

// GHASH-multiply the 128-bit lanes of \a by the 128-bit lanes of \b and add the
// *unreduced* products to \lo, \mi, and \hi.
.macro	_ghash_mul_noreduce	a, b, lo, mi, hi, t0, t1, t2, t3
	vpclmulqdq	$0x00, \a, \b, \t0	// a_L * b_L
	vpclmulqdq	$0x01, \a, \b, \t1	// a_L * b_H
	vpclmulqdq	$0x10, \a, \b, \t2	// a_H * b_L
	vpclmulqdq	$0x11, \a, \b, \t3	// a_H * b_H
	vpxord		\t0, \lo, \lo
	vpternlogd	$0x96, \t2, \t1, \mi
	vpxord		\t3, \hi, \hi
.endm

// Reduce the unreduced products from \lo, \mi, and \hi and store the 128-bit
// reduced products in \hi.  See _ghash_mul_step for explanation of reduction.
.macro	_ghash_reduce	lo, mi, hi, gfpoly, t0
	vpclmulqdq	$0x01, \lo, \gfpoly, \t0
	vpshufd		$0x4e, \lo, \lo
	vpternlogd	$0x96, \t0, \lo, \mi
	vpclmulqdq	$0x01, \mi, \gfpoly, \t0
	vpshufd		$0x4e, \mi, \mi
	vpternlogd	$0x96, \t0, \mi, \hi
.endm

// This is a specialized version of _ghash_mul that computes \a * \a, i.e. it
// squares \a.  It skips computing MI = (a_L * a_H) + (a_H * a_L) = 0.
.macro	_ghash_square	a, dst, gfpoly, t0, t1
	vpclmulqdq	$0x00, \a, \a, \t0	  // LO = a_L * a_L
	vpclmulqdq	$0x11, \a, \a, \dst	  // HI = a_H * a_H
	vpclmulqdq	$0x01, \t0, \gfpoly, \t1  // LO_L*(x^63 + x^62 + x^57)
	vpshufd		$0x4e, \t0, \t0		  // Swap halves of LO
	vpxord		\t0, \t1, \t1		  // Fold LO into MI
	vpclmulqdq	$0x01, \t1, \gfpoly, \t0  // MI_L*(x^63 + x^62 + x^57)
	vpshufd		$0x4e, \t1, \t1		  // Swap halves of MI
	vpternlogd	$0x96, \t0, \t1, \dst	  // Fold MI into HI
.endm

// void aes_gcm_precompute_vaes_avx512(struct aes_gcm_key_vaes_avx512 *key);
//
// Given the expanded AES key |key->base.aes_key|, derive the GHASH subkey and
// initialize |key->h_powers| and |key->padding|.
SYM_FUNC_START(aes_gcm_precompute_vaes_avx512)

	// Function arguments
	.set	KEY,		%rdi

	// Additional local variables.
	// %zmm[0-2] and %rax are used as temporaries.
	.set	POWERS_PTR,	%rsi
	.set	RNDKEYLAST_PTR,	%rdx
	.set	H_CUR,		%zmm3
	.set	H_CUR_YMM,	%ymm3
	.set	H_CUR_XMM,	%xmm3
	.set	H_INC,		%zmm4
	.set	H_INC_YMM,	%ymm4
	.set	H_INC_XMM,	%xmm4
	.set	GFPOLY,		%zmm5
	.set	GFPOLY_YMM,	%ymm5
	.set	GFPOLY_XMM,	%xmm5

	// Get pointer to lowest set of key powers (located at end of array).
	lea		OFFSETOFEND_H_POWERS-64(KEY), POWERS_PTR

	// Encrypt an all-zeroes block to get the raw hash subkey.
	movl		OFFSETOF_AESKEYLEN(KEY), %eax
	lea		6*16(KEY,%rax,4), RNDKEYLAST_PTR
	vmovdqu		(KEY), %xmm0  // Zero-th round key XOR all-zeroes block
	add		$16, KEY
1:
	vaesenc		(KEY), %xmm0, %xmm0
	add		$16, KEY
	cmp		KEY, RNDKEYLAST_PTR
	jne		1b
	vaesenclast	(RNDKEYLAST_PTR), %xmm0, %xmm0

	// Reflect the bytes of the raw hash subkey.
	vpshufb		.Lbswap_mask(%rip), %xmm0, H_CUR_XMM

	// Zeroize the padding blocks.
	vpxor		%xmm0, %xmm0, %xmm0
	vmovdqu		%ymm0, 64(POWERS_PTR)
	vmovdqu		%xmm0, 64+2*16(POWERS_PTR)

	// Finish preprocessing the first key power, H^1.  Since this GHASH
	// implementation operates directly on values with the backwards bit
	// order specified by the GCM standard, it's necessary to preprocess the
	// raw key as follows.  First, reflect its bytes.  Second, multiply it
	// by x^-1 mod x^128 + x^7 + x^2 + x + 1 (if using the backwards
	// interpretation of polynomial coefficients), which can also be
	// interpreted as multiplication by x mod x^128 + x^127 + x^126 + x^121
	// + 1 using the alternative, natural interpretation of polynomial
	// coefficients.  For details, see the comment above _ghash_mul_step.
	//
	// Either way, for the multiplication the concrete operation performed
	// is a left shift of the 128-bit value by 1 bit, then an XOR with (0xc2
	// << 120) | 1 if a 1 bit was carried out.  However, there's no 128-bit
	// wide shift instruction, so instead double each of the two 64-bit
	// halves and incorporate the internal carry bit into the value XOR'd.
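	//
	// Comment-only sketch of the net effect of the next four instructions
	// (an illustration of the above, not additional code):
	//
	//	carry     = bit 127 of H_CUR	// carried out of the 128-bit shift
	//	int_carry = bit 63 of H_CUR	// carried out of the low half
	//	H_CUR     = (H_CUR with each 64-bit half doubled)
	//		    ^ (carry     ? ((0xc2 << 120) | 1) : 0)
	//		    ^ (int_carry ? (1 << 64)           : 0)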
	vpshufd		$0xd3, H_CUR_XMM, %xmm0
	vpsrad		$31, %xmm0, %xmm0
	vpaddq		H_CUR_XMM, H_CUR_XMM, H_CUR_XMM
	// H_CUR_XMM ^= xmm0 & gfpoly_and_internal_carrybit
	vpternlogd	$0x78, .Lgfpoly_and_internal_carrybit(%rip), %xmm0, H_CUR_XMM

	// Load the gfpoly constant.
	vbroadcasti32x4	.Lgfpoly(%rip), GFPOLY

	// Square H^1 to get H^2.
	//
	// Note that as with H^1, all higher key powers also need an extra
	// factor of x^-1 (or x using the natural interpretation).  Nothing
	// special needs to be done to make this happen, though: H^1 * H^1 would
	// end up with two factors of x^-1, but the multiplication consumes one.
	// So the product H^2 ends up with the desired one factor of x^-1.
	_ghash_square	H_CUR_XMM, H_INC_XMM, GFPOLY_XMM, %xmm0, %xmm1

	// Create H_CUR_YMM = [H^2, H^1] and H_INC_YMM = [H^2, H^2].
	vinserti128	$1, H_CUR_XMM, H_INC_YMM, H_CUR_YMM
	vinserti128	$1, H_INC_XMM, H_INC_YMM, H_INC_YMM

	// Create H_CUR = [H^4, H^3, H^2, H^1] and H_INC = [H^4, H^4, H^4, H^4].
	_ghash_mul	H_INC_YMM, H_CUR_YMM, H_INC_YMM, GFPOLY_YMM, \
			%ymm0, %ymm1, %ymm2
	vinserti64x4	$1, H_CUR_YMM, H_INC, H_CUR
	vshufi64x2	$0, H_INC, H_INC, H_INC

	// Store the lowest set of key powers.
	vmovdqu8	H_CUR, (POWERS_PTR)

	// Compute and store the remaining key powers.
	// Repeatedly multiply [H^(i+3), H^(i+2), H^(i+1), H^i] by
	// [H^4, H^4, H^4, H^4] to get [H^(i+7), H^(i+6), H^(i+5), H^(i+4)].
	mov		$3, %eax
.Lprecompute_next:
	sub		$64, POWERS_PTR
	_ghash_mul	H_INC, H_CUR, H_CUR, GFPOLY, %zmm0, %zmm1, %zmm2
	vmovdqu8	H_CUR, (POWERS_PTR)
	dec		%eax
	jnz		.Lprecompute_next

	vzeroupper	// This is needed after using ymm or zmm registers.
	RET
SYM_FUNC_END(aes_gcm_precompute_vaes_avx512)

// XOR together the 128-bit lanes of \src (whose low lane is \src_xmm) and store
// the result in \dst_xmm.  This implicitly zeroizes the other lanes of the
// vector register containing \dst_xmm.
.macro	_horizontal_xor	src, src_xmm, dst_xmm, t0_xmm, t1_xmm, t2_xmm
	vextracti32x4	$1, \src, \t0_xmm
	vextracti32x4	$2, \src, \t1_xmm
	vextracti32x4	$3, \src, \t2_xmm
	vpxord		\t0_xmm, \src_xmm, \dst_xmm
	vpternlogd	$0x96, \t1_xmm, \t2_xmm, \dst_xmm
.endm

// Do one step of the GHASH update of the data blocks given in the vector
// registers GHASHDATA[0-3].  \i specifies the step to do, 0 through 9.  The
// division into steps allows users of this macro to optionally interleave the
// computation with other instructions.  This macro uses the vector register
// GHASH_ACC as input/output; GHASHDATA[0-3] as inputs that are clobbered;
// H_POW[4-1], GFPOLY, and BSWAP_MASK as inputs that aren't clobbered; and
// GHASHTMP[0-2] as temporaries.  This macro handles the byte-reflection of the
// data blocks.  The parameter registers must be preserved across steps.
//
// The GHASH update does: GHASH_ACC = H_POW4*(GHASHDATA0 + GHASH_ACC) +
// H_POW3*GHASHDATA1 + H_POW2*GHASHDATA2 + H_POW1*GHASHDATA3, where the
// operations are vectorized operations on 512-bit vectors of 128-bit blocks.
// The vectorized terms correspond to the following non-vectorized terms:
//
//       H_POW4*(GHASHDATA0 + GHASH_ACC) => H^16*(blk0 + GHASH_ACC_XMM),
//              H^15*(blk1 + 0), H^14*(blk2 + 0), and H^13*(blk3 + 0)
//       H_POW3*GHASHDATA1 => H^12*blk4, H^11*blk5, H^10*blk6, and H^9*blk7
//       H_POW2*GHASHDATA2 => H^8*blk8,  H^7*blk9,  H^6*blk10, and H^5*blk11
//       H_POW1*GHASHDATA3 => H^4*blk12, H^3*blk13, H^2*blk14, and H^1*blk15
//
// More concretely, this code does:
//   - Do vectorized "schoolbook" multiplications to compute the intermediate
//     256-bit product of each block and its corresponding hash key power.
//   - Sum (XOR) the intermediate 256-bit products across vectors.
//   - Do a vectorized reduction of these 256-bit intermediate values to
//     128 bits each.
//   - Sum (XOR) these values and store the 128-bit result in GHASH_ACC_XMM.
//
// See _ghash_mul_step for the full explanation of the operations performed for
// each individual finite field multiplication and reduction.
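//
// Comment-only sketch of the data flow (an illustration, not assembled code),
// using the same LO/MI/HI and fold notation as _ghash_mul_step, with all
// operations acting independently on each 128-bit lane:
//
//	byte-reflect GHASHDATA[0-3]; GHASHDATA0 ^= GHASH_ACC		// step 0
//	LO = sum over i=0..3 of H_POW[4-i]_L * GHASHDATA[i]_L
//	MI = sum over i=0..3 of (H_POW[4-i]_L * GHASHDATA[i]_H +
//				 H_POW[4-i]_H * GHASHDATA[i]_L)
//	HI = sum over i=0..3 of H_POW[4-i]_H * GHASHDATA[i]_H
//	fold LO into MI, then fold MI into HI		// as in _ghash_mul_step
//	GHASH_ACC_XMM = XOR of the four 128-bit lanes of HI		// step 9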
.macro	_ghash_step_4x	i
.if \i == 0
	vpshufb		BSWAP_MASK, GHASHDATA0, GHASHDATA0
	vpxord		GHASH_ACC, GHASHDATA0, GHASHDATA0
	vpshufb		BSWAP_MASK, GHASHDATA1, GHASHDATA1
	vpshufb		BSWAP_MASK, GHASHDATA2, GHASHDATA2
.elseif \i == 1
	vpshufb		BSWAP_MASK, GHASHDATA3, GHASHDATA3
	vpclmulqdq	$0x00, H_POW4, GHASHDATA0, GHASH_ACC	// LO_0
	vpclmulqdq	$0x00, H_POW3, GHASHDATA1, GHASHTMP0	// LO_1
	vpclmulqdq	$0x00, H_POW2, GHASHDATA2, GHASHTMP1	// LO_2
.elseif \i == 2
	vpxord		GHASHTMP0, GHASH_ACC, GHASH_ACC		// sum(LO_{1,0})
	vpclmulqdq	$0x00, H_POW1, GHASHDATA3, GHASHTMP2	// LO_3
	vpternlogd	$0x96, GHASHTMP2, GHASHTMP1, GHASH_ACC	// LO = sum(LO_{3,2,1,0})
	vpclmulqdq	$0x01, H_POW4, GHASHDATA0, GHASHTMP0	// MI_0
.elseif \i == 3
	vpclmulqdq	$0x01, H_POW3, GHASHDATA1, GHASHTMP1	// MI_1
	vpclmulqdq	$0x01, H_POW2, GHASHDATA2, GHASHTMP2	// MI_2
	vpternlogd	$0x96, GHASHTMP2, GHASHTMP1, GHASHTMP0	// sum(MI_{2,1,0})
	vpclmulqdq	$0x01, H_POW1, GHASHDATA3, GHASHTMP1	// MI_3
.elseif \i == 4
	vpclmulqdq	$0x10, H_POW4, GHASHDATA0, GHASHTMP2	// MI_4
	vpternlogd	$0x96, GHASHTMP2, GHASHTMP1, GHASHTMP0	// sum(MI_{4,3,2,1,0})
	vpclmulqdq	$0x10, H_POW3, GHASHDATA1, GHASHTMP1	// MI_5
	vpclmulqdq	$0x10, H_POW2, GHASHDATA2, GHASHTMP2	// MI_6
.elseif \i == 5
	vpternlogd	$0x96, GHASHTMP2, GHASHTMP1, GHASHTMP0	// sum(MI_{6,5,4,3,2,1,0})
	vpclmulqdq	$0x01, GHASH_ACC, GFPOLY, GHASHTMP2	// LO_L*(x^63 + x^62 + x^57)
	vpclmulqdq	$0x10, H_POW1, GHASHDATA3, GHASHTMP1	// MI_7
	vpxord		GHASHTMP1, GHASHTMP0, GHASHTMP0		// MI = sum(MI_{7,6,5,4,3,2,1,0})
.elseif \i == 6
	vpshufd		$0x4e, GHASH_ACC, GHASH_ACC		// Swap halves of LO
	vpclmulqdq	$0x11, H_POW4, GHASHDATA0, GHASHDATA0	// HI_0
	vpclmulqdq	$0x11, H_POW3, GHASHDATA1, GHASHDATA1	// HI_1
	vpclmulqdq	$0x11, H_POW2, GHASHDATA2, GHASHDATA2	// HI_2
.elseif \i == 7
	vpternlogd	$0x96, GHASHTMP2, GHASH_ACC, GHASHTMP0	// Fold LO into MI
	vpclmulqdq	$0x11, H_POW1, GHASHDATA3, GHASHDATA3	// HI_3
	vpternlogd	$0x96, GHASHDATA2, GHASHDATA1, GHASHDATA0 // sum(HI_{2,1,0})
	vpclmulqdq	$0x01, GHASHTMP0, GFPOLY, GHASHTMP1	// MI_L*(x^63 + x^62 + x^57)
.elseif \i == 8
	vpxord		GHASHDATA3, GHASHDATA0, GHASH_ACC	// HI = sum(HI_{3,2,1,0})
	vpshufd		$0x4e, GHASHTMP0, GHASHTMP0		// Swap halves of MI
	vpternlogd	$0x96, GHASHTMP1, GHASHTMP0, GHASH_ACC	// Fold MI into HI
.elseif \i == 9
	_horizontal_xor	GHASH_ACC, GHASH_ACC_XMM, GHASH_ACC_XMM, \
			GHASHDATA0_XMM, GHASHDATA1_XMM, GHASHDATA2_XMM
.endif
.endm

// Update GHASH with four vectors of data blocks.  See _ghash_step_4x for full
// explanation.
.macro	_ghash_4x
.irp i, 0,1,2,3,4,5,6,7,8,9
	_ghash_step_4x	\i
.endr
.endm

// void aes_gcm_aad_update_vaes_avx512(const struct aes_gcm_key_vaes_avx512 *key,
//				       u8 ghash_acc[16],
//				       const u8 *aad, int aadlen);
//
// This function processes the AAD (Additional Authenticated Data) in GCM.
// Using the key |key|, it updates the GHASH accumulator |ghash_acc| with the
// data given by |aad| and |aadlen|.  On the first call, |ghash_acc| must be all
// zeroes.  |aadlen| must be a multiple of 16, except on the last call where it
// can be any length.  The caller must do any buffering needed to ensure this.
//
// This handles large amounts of AAD efficiently, while also keeping overhead
// low for small amounts, which is the common case.  TLS and IPsec use less than
// one block of AAD, but (uncommonly) other use cases may use much more.
SYM_FUNC_START(aes_gcm_aad_update_vaes_avx512)

	// Function arguments
	.set	KEY,		%rdi
	.set	GHASH_ACC_PTR,	%rsi
	.set	AAD,		%rdx
	.set	AADLEN,		%ecx
	.set	AADLEN64,	%rcx	// Zero-extend AADLEN before using!

	// Additional local variables.
	// %rax and %k1 are used as temporary registers.
	.set	GHASHDATA0,	%zmm0
	.set	GHASHDATA0_XMM,	%xmm0
	.set	GHASHDATA1,	%zmm1
	.set	GHASHDATA1_XMM,	%xmm1
	.set	GHASHDATA2,	%zmm2
	.set	GHASHDATA2_XMM,	%xmm2
	.set	GHASHDATA3,	%zmm3
	.set	BSWAP_MASK,	%zmm4
	.set	BSWAP_MASK_XMM,	%xmm4
	.set	GHASH_ACC,	%zmm5
	.set	GHASH_ACC_XMM,	%xmm5
	.set	H_POW4,		%zmm6
	.set	H_POW3,		%zmm7
	.set	H_POW2,		%zmm8
	.set	H_POW1,		%zmm9
	.set	H_POW1_XMM,	%xmm9
	.set	GFPOLY,		%zmm10
	.set	GFPOLY_XMM,	%xmm10
	.set	GHASHTMP0,	%zmm11
	.set	GHASHTMP1,	%zmm12
	.set	GHASHTMP2,	%zmm13

	// Load the GHASH accumulator.
	vmovdqu		(GHASH_ACC_PTR), GHASH_ACC_XMM

	// Check for the common case of AADLEN <= 16, as well as AADLEN == 0.
	cmp		$16, AADLEN
	jg		.Laad_more_than_16bytes
	test		AADLEN, AADLEN
	jz		.Laad_done

	// Fast path: update GHASH with 1 <= AADLEN <= 16 bytes of AAD.
	vmovdqu		.Lbswap_mask(%rip), BSWAP_MASK_XMM
	vmovdqu		.Lgfpoly(%rip), GFPOLY_XMM
	mov		$-1, %eax
	bzhi		AADLEN, %eax, %eax
	kmovd		%eax, %k1
	vmovdqu8	(AAD), GHASHDATA0_XMM{%k1}{z}
	vmovdqu		OFFSETOFEND_H_POWERS-16(KEY), H_POW1_XMM
	vpshufb		BSWAP_MASK_XMM, GHASHDATA0_XMM, GHASHDATA0_XMM
	vpxor		GHASHDATA0_XMM, GHASH_ACC_XMM, GHASH_ACC_XMM
	_ghash_mul	H_POW1_XMM, GHASH_ACC_XMM, GHASH_ACC_XMM, GFPOLY_XMM, \
			GHASHDATA0_XMM, GHASHDATA1_XMM, GHASHDATA2_XMM
	jmp		.Laad_done

.Laad_more_than_16bytes:
	vbroadcasti32x4	.Lbswap_mask(%rip), BSWAP_MASK
	vbroadcasti32x4	.Lgfpoly(%rip), GFPOLY

	// If AADLEN >= 256, update GHASH with 256 bytes of AAD at a time.
	sub		$256, AADLEN
	jl		.Laad_loop_4x_done
	vmovdqu8	OFFSETOFEND_H_POWERS-4*64(KEY), H_POW4
	vmovdqu8	OFFSETOFEND_H_POWERS-3*64(KEY), H_POW3
	vmovdqu8	OFFSETOFEND_H_POWERS-2*64(KEY), H_POW2
	vmovdqu8	OFFSETOFEND_H_POWERS-1*64(KEY), H_POW1
.Laad_loop_4x:
	vmovdqu8	0*64(AAD), GHASHDATA0
	vmovdqu8	1*64(AAD), GHASHDATA1
	vmovdqu8	2*64(AAD), GHASHDATA2
	vmovdqu8	3*64(AAD), GHASHDATA3
	_ghash_4x
	add		$256, AAD
	sub		$256, AADLEN
	jge		.Laad_loop_4x
.Laad_loop_4x_done:

	// If AADLEN >= 64, update GHASH with 64 bytes of AAD at a time.
	add		$192, AADLEN
	jl		.Laad_loop_1x_done
	vmovdqu8	OFFSETOFEND_H_POWERS-1*64(KEY), H_POW1
.Laad_loop_1x:
	vmovdqu8	(AAD), GHASHDATA0
	vpshufb		BSWAP_MASK, GHASHDATA0, GHASHDATA0
	vpxord		GHASHDATA0, GHASH_ACC, GHASH_ACC
	_ghash_mul	H_POW1, GHASH_ACC, GHASH_ACC, GFPOLY, \
			GHASHDATA0, GHASHDATA1, GHASHDATA2
	_horizontal_xor GHASH_ACC, GHASH_ACC_XMM, GHASH_ACC_XMM, \
			GHASHDATA0_XMM, GHASHDATA1_XMM, GHASHDATA2_XMM
	add		$64, AAD
	sub		$64, AADLEN
	jge		.Laad_loop_1x
.Laad_loop_1x_done:

	// Update GHASH with the remaining 0 <= AADLEN < 64 bytes of AAD.
	add		$64, AADLEN
	jz		.Laad_done
	mov		$-1, %rax
	bzhi		AADLEN64, %rax, %rax
	kmovq		%rax, %k1
	vmovdqu8	(AAD), GHASHDATA0{%k1}{z}
	neg		AADLEN64
	and		$~15, AADLEN64  // -round_up(AADLEN, 16)
	vmovdqu8	OFFSETOFEND_H_POWERS(KEY,AADLEN64), H_POW1
	vpshufb		BSWAP_MASK, GHASHDATA0, GHASHDATA0
	vpxord		GHASHDATA0, GHASH_ACC, GHASH_ACC
	_ghash_mul	H_POW1, GHASH_ACC, GHASH_ACC, GFPOLY, \
			GHASHDATA0, GHASHDATA1, GHASHDATA2
	_horizontal_xor GHASH_ACC, GHASH_ACC_XMM, GHASH_ACC_XMM, \
			GHASHDATA0_XMM, GHASHDATA1_XMM, GHASHDATA2_XMM

.Laad_done:
	// Store the updated GHASH accumulator back to memory.
	vmovdqu		GHASH_ACC_XMM, (GHASH_ACC_PTR)

	vzeroupper	// This is needed after using ymm or zmm registers.
	RET
SYM_FUNC_END(aes_gcm_aad_update_vaes_avx512)

// Do one non-last round of AES encryption on the blocks in %zmm[0-3] using the
// round key that has been broadcast to all 128-bit lanes of \round_key.
.macro	_vaesenc_4x	round_key
	vaesenc		\round_key, %zmm0, %zmm0
	vaesenc		\round_key, %zmm1, %zmm1
	vaesenc		\round_key, %zmm2, %zmm2
	vaesenc		\round_key, %zmm3, %zmm3
.endm

// Start the AES encryption of four vectors of counter blocks.
.macro	_ctr_begin_4x

	// Increment LE_CTR four times to generate four vectors of little-endian
	// counter blocks, swap each to big-endian, and store them in %zmm[0-3].
	vpshufb		BSWAP_MASK, LE_CTR, %zmm0
	vpaddd		LE_CTR_INC, LE_CTR, LE_CTR
	vpshufb		BSWAP_MASK, LE_CTR, %zmm1
	vpaddd		LE_CTR_INC, LE_CTR, LE_CTR
	vpshufb		BSWAP_MASK, LE_CTR, %zmm2
	vpaddd		LE_CTR_INC, LE_CTR, LE_CTR
	vpshufb		BSWAP_MASK, LE_CTR, %zmm3
	vpaddd		LE_CTR_INC, LE_CTR, LE_CTR

	// AES "round zero": XOR in the zero-th round key.
	vpxord		RNDKEY0, %zmm0, %zmm0
	vpxord		RNDKEY0, %zmm1, %zmm1
	vpxord		RNDKEY0, %zmm2, %zmm2
	vpxord		RNDKEY0, %zmm3, %zmm3
.endm

// Do the last AES round for four vectors of counter blocks %zmm[0-3], XOR
// source data with the resulting keystream, and write the result to DST and
// GHASHDATA[0-3].  (Implementation differs slightly, but has the same effect.)
.macro	_aesenclast_and_xor_4x
	// XOR the source data with the last round key, saving the result in
	// GHASHDATA[0-3].  This reduces latency by taking advantage of the
	// property vaesenclast(key, a) ^ b == vaesenclast(key ^ b, a).
	vpxord		0*64(SRC), RNDKEYLAST, GHASHDATA0
	vpxord		1*64(SRC), RNDKEYLAST, GHASHDATA1
	vpxord		2*64(SRC), RNDKEYLAST, GHASHDATA2
	vpxord		3*64(SRC), RNDKEYLAST, GHASHDATA3

	// Do the last AES round.  This handles the XOR with the source data
	// too, as per the optimization described above.
	vaesenclast	GHASHDATA0, %zmm0, GHASHDATA0
	vaesenclast	GHASHDATA1, %zmm1, GHASHDATA1
	vaesenclast	GHASHDATA2, %zmm2, GHASHDATA2
	vaesenclast	GHASHDATA3, %zmm3, GHASHDATA3

	// Store the en/decrypted data to DST.
	vmovdqu8	GHASHDATA0, 0*64(DST)
	vmovdqu8	GHASHDATA1, 1*64(DST)
	vmovdqu8	GHASHDATA2, 2*64(DST)
	vmovdqu8	GHASHDATA3, 3*64(DST)
.endm

// void aes_gcm_{enc,dec}_update_vaes_avx512(const struct aes_gcm_key_vaes_avx512 *key,
//					     const u32 le_ctr[4], u8 ghash_acc[16],
//					     const u8 *src, u8 *dst, int datalen);
//
// This macro generates a GCM encryption or decryption update function with the
// above prototype (with \enc selecting which one).  The function computes the
// next portion of the CTR keystream, XOR's it with |datalen| bytes from |src|,
// and writes the resulting encrypted or decrypted data to |dst|.  It also
// updates the GHASH accumulator |ghash_acc| using the next |datalen| ciphertext
// bytes.
//
// |datalen| must be a multiple of 16, except on the last call where it can be
// any length.  The caller must do any buffering needed to ensure this.  Both
// in-place and out-of-place en/decryption are supported.
//
// |le_ctr| must give the current counter in little-endian format.  This
// function loads the counter from |le_ctr| and increments the loaded counter as
// needed, but it does *not* store the updated counter back to |le_ctr|.  The
// caller must update |le_ctr| if any more data segments follow.  Internally,
// only the low 32-bit word of the counter is incremented, following the GCM
// standard.
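//
// Comment-only sketch of how a caller might drive the AAD, update, and final
// functions for a one-shot encryption.  This is only an illustration; the real
// callers are the C glue functions, and the initial counter value follows from
// the GCM spec (with a 96-bit IV, the counter for the first data block is 2,
// since counter value 1 is reserved for encrypting the tag):
//
//	u32 le_ctr[4];		// little-endian counter block, low word = 2
//	u8 ghash_acc[16] = {};	// must start as all zeroes
//	aes_gcm_aad_update_vaes_avx512(key, ghash_acc, aad, aadlen);
//	aes_gcm_enc_update_vaes_avx512(key, le_ctr, ghash_acc, src, dst, datalen);
//	aes_gcm_enc_final_vaes_avx512(key, le_ctr, ghash_acc, aadlen, datalen);
//	// ghash_acc now holds the full 16-byte authentication tag.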
.macro	_aes_gcm_update	enc

	// Function arguments
	.set	KEY,		%rdi
	.set	LE_CTR_PTR,	%rsi
	.set	GHASH_ACC_PTR,	%rdx
	.set	SRC,		%rcx
	.set	DST,		%r8
	.set	DATALEN,	%r9d
	.set	DATALEN64,	%r9	// Zero-extend DATALEN before using!

	// Additional local variables

	// %rax and %k1 are used as temporary registers.  LE_CTR_PTR is also
	// available as a temporary register after the counter is loaded.

	// AES key length in bytes
	.set	AESKEYLEN,	%r10d
	.set	AESKEYLEN64,	%r10

	// Pointer to the last AES round key for the chosen AES variant
	.set	RNDKEYLAST_PTR,	%r11

	// In the main loop, %zmm[0-3] are used as AES input and output.
	// Elsewhere they are used as temporary registers.

	// GHASHDATA[0-3] hold the ciphertext blocks and GHASH input data.
	.set	GHASHDATA0,	%zmm4
	.set	GHASHDATA0_XMM,	%xmm4
	.set	GHASHDATA1,	%zmm5
	.set	GHASHDATA1_XMM,	%xmm5
	.set	GHASHDATA2,	%zmm6
	.set	GHASHDATA2_XMM,	%xmm6
	.set	GHASHDATA3,	%zmm7

	// BSWAP_MASK is the shuffle mask for byte-reflecting 128-bit values
	// using vpshufb, copied to all 128-bit lanes.
	.set	BSWAP_MASK,	%zmm8

	// RNDKEY temporarily holds the next AES round key.
	.set	RNDKEY,		%zmm9

	// GHASH_ACC is the accumulator variable for GHASH.  When fully reduced,
	// only the lowest 128-bit lane can be nonzero.  When not fully reduced,
	// more than one lane may be used, and they need to be XOR'd together.
	.set	GHASH_ACC,	%zmm10
	.set	GHASH_ACC_XMM,	%xmm10

	// LE_CTR_INC is the vector of 32-bit words that need to be added to a
	// vector of little-endian counter blocks to advance it forwards.
	.set	LE_CTR_INC,	%zmm11

	// LE_CTR contains the next set of little-endian counter blocks.
	.set	LE_CTR,		%zmm12

	// RNDKEY0, RNDKEYLAST, and RNDKEY_M[9-1] contain cached AES round keys,
	// copied to all 128-bit lanes.  RNDKEY0 is the zero-th round key,
	// RNDKEYLAST the last, and RNDKEY_M\i the one \i-th from the last.
	.set	RNDKEY0,	%zmm13
	.set	RNDKEYLAST,	%zmm14
	.set	RNDKEY_M9,	%zmm15
	.set	RNDKEY_M8,	%zmm16
	.set	RNDKEY_M7,	%zmm17
	.set	RNDKEY_M6,	%zmm18
	.set	RNDKEY_M5,	%zmm19
	.set	RNDKEY_M4,	%zmm20
	.set	RNDKEY_M3,	%zmm21
	.set	RNDKEY_M2,	%zmm22
	.set	RNDKEY_M1,	%zmm23

	// GHASHTMP[0-2] are temporary variables used by _ghash_step_4x.  These
	// cannot coincide with anything used for AES encryption, since for
	// performance reasons GHASH and AES encryption are interleaved.
	.set	GHASHTMP0,	%zmm24
	.set	GHASHTMP1,	%zmm25
	.set	GHASHTMP2,	%zmm26

	// H_POW[4-1] contain the powers of the hash key H^16...H^1.  The
	// descending numbering reflects the order of the key powers.
	.set	H_POW4,		%zmm27
	.set	H_POW3,		%zmm28
	.set	H_POW2,		%zmm29
	.set	H_POW1,		%zmm30

	// GFPOLY contains the .Lgfpoly constant, copied to all 128-bit lanes.
	.set	GFPOLY,		%zmm31

	// Load some constants.
	vbroadcasti32x4	.Lbswap_mask(%rip), BSWAP_MASK
	vbroadcasti32x4	.Lgfpoly(%rip), GFPOLY

	// Load the GHASH accumulator and the starting counter.
	vmovdqu		(GHASH_ACC_PTR), GHASH_ACC_XMM
	vbroadcasti32x4	(LE_CTR_PTR), LE_CTR

	// Load the AES key length in bytes.
	movl		OFFSETOF_AESKEYLEN(KEY), AESKEYLEN

	// Make RNDKEYLAST_PTR point to the last AES round key.  This is the
	// round key with index 10, 12, or 14 for AES-128, AES-192, or AES-256
	// respectively.  Then load the zero-th and last round keys.
	lea		6*16(KEY,AESKEYLEN64,4), RNDKEYLAST_PTR
	vbroadcasti32x4	(KEY), RNDKEY0
	vbroadcasti32x4	(RNDKEYLAST_PTR), RNDKEYLAST

	// Finish initializing LE_CTR by adding [0, 1, ...] to its low words.
	vpaddd		.Lctr_pattern(%rip), LE_CTR, LE_CTR

	// Load 4 into all 128-bit lanes of LE_CTR_INC.
	vbroadcasti32x4	.Linc_4blocks(%rip), LE_CTR_INC

	// If there are at least 256 bytes of data, then continue into the loop
	// that processes 256 bytes of data at a time.  Otherwise skip it.
	//
	// Pre-subtracting 256 from DATALEN saves an instruction from the main
	// loop and also ensures that at least one write always occurs to
	// DATALEN, zero-extending it and allowing DATALEN64 to be used later.
	sub		$256, DATALEN
	jl		.Lcrypt_loop_4x_done\@

	// Load powers of the hash key.
	vmovdqu8	OFFSETOFEND_H_POWERS-4*64(KEY), H_POW4
	vmovdqu8	OFFSETOFEND_H_POWERS-3*64(KEY), H_POW3
	vmovdqu8	OFFSETOFEND_H_POWERS-2*64(KEY), H_POW2
	vmovdqu8	OFFSETOFEND_H_POWERS-1*64(KEY), H_POW1

	// Main loop: en/decrypt and hash 4 vectors at a time.
	//
	// When possible, interleave the AES encryption of the counter blocks
	// with the GHASH update of the ciphertext blocks.  This improves
	// performance on many CPUs because the execution ports used by the VAES
	// instructions often differ from those used by vpclmulqdq and other
	// instructions used in GHASH.  For example, many Intel CPUs dispatch
	// vaesenc to ports 0 and 1 and vpclmulqdq to port 5.
	//
	// The interleaving is easiest to do during decryption, since during
	// decryption the ciphertext blocks are immediately available.  For
	// encryption, instead encrypt the first set of blocks, then hash those
	// blocks while encrypting the next set of blocks, repeat that as
	// needed, and finally hash the last set of blocks.

.if \enc
	// Encrypt the first 4 vectors of plaintext blocks.  Leave the resulting
	// ciphertext in GHASHDATA[0-3] for GHASH.
	_ctr_begin_4x
	lea		16(KEY), %rax
1:
	vbroadcasti32x4	(%rax), RNDKEY
	_vaesenc_4x	RNDKEY
	add		$16, %rax
	cmp		%rax, RNDKEYLAST_PTR
	jne		1b
	_aesenclast_and_xor_4x
	add		$256, SRC
	add		$256, DST
	sub		$256, DATALEN
	jl		.Lghash_last_ciphertext_4x\@
.endif

	// Cache as many additional AES round keys as possible.
.irp i, 9,8,7,6,5,4,3,2,1
	vbroadcasti32x4	-\i*16(RNDKEYLAST_PTR), RNDKEY_M\i
.endr

.Lcrypt_loop_4x\@:

	// If decrypting, load more ciphertext blocks into GHASHDATA[0-3].  If
	// encrypting, GHASHDATA[0-3] already contain the previous ciphertext.
.if !\enc
	vmovdqu8	0*64(SRC), GHASHDATA0
	vmovdqu8	1*64(SRC), GHASHDATA1
	vmovdqu8	2*64(SRC), GHASHDATA2
	vmovdqu8	3*64(SRC), GHASHDATA3
.endif

	// Start the AES encryption of the counter blocks.
	_ctr_begin_4x
	cmp		$24, AESKEYLEN
	jl		128f	// AES-128?
	je		192f	// AES-192?
	// AES-256
	vbroadcasti32x4	-13*16(RNDKEYLAST_PTR), RNDKEY
	_vaesenc_4x	RNDKEY
	vbroadcasti32x4	-12*16(RNDKEYLAST_PTR), RNDKEY
	_vaesenc_4x	RNDKEY
192:
	vbroadcasti32x4	-11*16(RNDKEYLAST_PTR), RNDKEY
	_vaesenc_4x	RNDKEY
	vbroadcasti32x4	-10*16(RNDKEYLAST_PTR), RNDKEY
	_vaesenc_4x	RNDKEY
128:

	// Finish the AES encryption of the counter blocks in %zmm[0-3],
	// interleaved with the GHASH update of the ciphertext blocks in
	// GHASHDATA[0-3].
.irp i, 9,8,7,6,5,4,3,2,1
	_ghash_step_4x  (9 - \i)
	_vaesenc_4x	RNDKEY_M\i
.endr
	_ghash_step_4x	9
	_aesenclast_and_xor_4x
	add		$256, SRC
	add		$256, DST
	sub		$256, DATALEN
	jge		.Lcrypt_loop_4x\@

.if \enc
.Lghash_last_ciphertext_4x\@:
	// Update GHASH with the last set of ciphertext blocks.
	_ghash_4x
.endif

.Lcrypt_loop_4x_done\@:

	// Undo the extra subtraction by 256 and check whether data remains.
	add		$256, DATALEN
	jz		.Ldone\@

	// The data length isn't a multiple of 256 bytes.  Process the remaining
	// data of length 1 <= DATALEN < 256, up to one 64-byte vector at a
	// time.  Going one vector at a time may seem inefficient compared to
	// having separate code paths for each possible number of vectors
	// remaining.  However, using a loop keeps the code size down, and it
	// performs surprisingly well; modern CPUs will start executing the next
	// iteration before the previous one finishes and also predict the
	// number of loop iterations.  For a similar reason, we roll up the AES
	// rounds.
	//
	// On the last iteration, the remaining length may be less than 64
	// bytes.  Handle this using masking.
	//
	// Since there are enough key powers available for all remaining data,
	// there is no need to do a GHASH reduction after each iteration.
	// Instead, multiply each remaining block by its own key power, and only
	// do a GHASH reduction at the very end.

	// Make POWERS_PTR point to the key powers [H^N, H^(N-1), ...] where N
	// is the number of blocks that remain.
	.set		POWERS_PTR, LE_CTR_PTR	// LE_CTR_PTR is free to be reused.
	mov		DATALEN, %eax
	neg		%rax
	and		$~15, %rax  // -round_up(DATALEN, 16)
	lea		OFFSETOFEND_H_POWERS(KEY,%rax), POWERS_PTR

	// Start collecting the unreduced GHASH intermediate value LO, MI, HI.
	.set		LO, GHASHDATA0
	.set		LO_XMM, GHASHDATA0_XMM
	.set		MI, GHASHDATA1
	.set		MI_XMM, GHASHDATA1_XMM
	.set		HI, GHASHDATA2
	.set		HI_XMM, GHASHDATA2_XMM
	vpxor		LO_XMM, LO_XMM, LO_XMM
	vpxor		MI_XMM, MI_XMM, MI_XMM
	vpxor		HI_XMM, HI_XMM, HI_XMM

.Lcrypt_loop_1x\@:

	// Select the appropriate mask for this iteration: all 1's if
	// DATALEN >= 64, otherwise DATALEN 1's.  Do this branchlessly using the
	// bzhi instruction from BMI2.  (This relies on DATALEN <= 255.)
	mov		$-1, %rax
	bzhi		DATALEN64, %rax, %rax
	kmovq		%rax, %k1

	// Encrypt a vector of counter blocks.  This does not need to be masked.
	vpshufb		BSWAP_MASK, LE_CTR, %zmm0
	vpaddd		LE_CTR_INC, LE_CTR, LE_CTR
	vpxord		RNDKEY0, %zmm0, %zmm0
	lea		16(KEY), %rax
1:
	vbroadcasti32x4	(%rax), RNDKEY
	vaesenc		RNDKEY, %zmm0, %zmm0
	add		$16, %rax
	cmp		%rax, RNDKEYLAST_PTR
	jne		1b
	vaesenclast	RNDKEYLAST, %zmm0, %zmm0

	// XOR the data with the appropriate number of keystream bytes.
	vmovdqu8	(SRC), %zmm1{%k1}{z}
	vpxord		%zmm1, %zmm0, %zmm0
	vmovdqu8	%zmm0, (DST){%k1}

	// Update GHASH with the ciphertext block(s), without reducing.
	//
	// In the case of DATALEN < 64, the ciphertext is zero-padded to 64
	// bytes.  (If decrypting, it's done by the above masked load.  If
	// encrypting, it's done by the below masked register-to-register move.)
	// Note that if DATALEN <= 48, there will be additional padding beyond
	// the padding of the last block specified by GHASH itself; i.e., there
	// may be whole block(s) that get processed by the GHASH multiplication
	// and reduction instructions but should not actually be included in the
	// GHASH.  However, any such blocks are all-zeroes, and the values that
	// they're multiplied with are also all-zeroes.  Therefore they just add
	// 0 * 0 = 0 to the final GHASH result, which makes no difference.
	vmovdqu8	(POWERS_PTR), H_POW1
.if \enc
	vmovdqu8	%zmm0, %zmm1{%k1}{z}
.endif
	vpshufb		BSWAP_MASK, %zmm1, %zmm0
	vpxord		GHASH_ACC, %zmm0, %zmm0
	_ghash_mul_noreduce	H_POW1, %zmm0, LO, MI, HI, \
				GHASHDATA3, %zmm1, %zmm2, %zmm3
	vpxor		GHASH_ACC_XMM, GHASH_ACC_XMM, GHASH_ACC_XMM

	add		$64, POWERS_PTR
	add		$64, SRC
	add		$64, DST
	sub		$64, DATALEN
	jg		.Lcrypt_loop_1x\@

	// Finally, do the GHASH reduction.
	_ghash_reduce	LO, MI, HI, GFPOLY, %zmm0
	_horizontal_xor	HI, HI_XMM, GHASH_ACC_XMM, %xmm0, %xmm1, %xmm2

.Ldone\@:
	// Store the updated GHASH accumulator back to memory.
	vmovdqu		GHASH_ACC_XMM, (GHASH_ACC_PTR)

	vzeroupper	// This is needed after using ymm or zmm registers.
	RET
.endm

// void aes_gcm_enc_final_vaes_avx512(const struct aes_gcm_key_vaes_avx512 *key,
//				      const u32 le_ctr[4], u8 ghash_acc[16],
//				      u64 total_aadlen, u64 total_datalen);
// bool aes_gcm_dec_final_vaes_avx512(const struct aes_gcm_key_vaes_avx512 *key,
//				      const u32 le_ctr[4],
//				      const u8 ghash_acc[16],
//				      u64 total_aadlen, u64 total_datalen,
//				      const u8 tag[16], int taglen);
//
// This macro generates one of the above two functions (with \enc selecting
// which one).  Both functions finish computing the GCM authentication tag by
// updating GHASH with the lengths block and encrypting the GHASH accumulator.
// |total_aadlen| and |total_datalen| must be the total length of the additional
// authenticated data and the en/decrypted data in bytes, respectively.
//
// The encryption function then stores the full-length (16-byte) computed
// authentication tag to |ghash_acc|.  The decryption function instead loads the
// expected authentication tag (the one that was transmitted) from the 16-byte
// buffer |tag|, compares the first 4 <= |taglen| <= 16 bytes of it to the
// computed tag in constant time, and returns true if and only if they match.
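//
// Comment-only restatement of the lengths block hashed below (per the GCM
// spec, shown in its natural big-endian form, before the byte reflection
// discussed above _ghash_mul_step):
//
//	lengths_block = be64(8 * total_aadlen) || be64(8 * total_datalen)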
.macro	_aes_gcm_final	enc

	// Function arguments
	.set	KEY,		%rdi
	.set	LE_CTR_PTR,	%rsi
	.set	GHASH_ACC_PTR,	%rdx
	.set	TOTAL_AADLEN,	%rcx
	.set	TOTAL_DATALEN,	%r8
	.set	TAG,		%r9
	.set	TAGLEN,		%r10d	// Originally at 8(%rsp)

	// Additional local variables.
	// %rax, %xmm0-%xmm3, and %k1 are used as temporary registers.
	.set	AESKEYLEN,	%r11d
	.set	AESKEYLEN64,	%r11
	.set	GFPOLY,		%xmm4
	.set	BSWAP_MASK,	%xmm5
	.set	LE_CTR,		%xmm6
	.set	GHASH_ACC,	%xmm7
	.set	H_POW1,		%xmm8

	// Load some constants.
	vmovdqa		.Lgfpoly(%rip), GFPOLY
	vmovdqa		.Lbswap_mask(%rip), BSWAP_MASK

	// Load the AES key length in bytes.
	movl		OFFSETOF_AESKEYLEN(KEY), AESKEYLEN

	// Set up a counter block with 1 in the low 32-bit word.  This is the
	// counter that produces the ciphertext needed to encrypt the auth tag.
	// GFPOLY has 1 in the low word, so grab the 1 from there using a blend.
	vpblendd	$0xe, (LE_CTR_PTR), GFPOLY, LE_CTR

	// Build the lengths block and XOR it with the GHASH accumulator.
	// Although the lengths block is defined as the AAD length followed by
	// the en/decrypted data length, both in big-endian byte order, a byte
	// reflection of the full block is needed because of the way we compute
	// GHASH (see _ghash_mul_step).  By using little-endian values in the
	// opposite order, we avoid having to reflect any bytes here.
	vmovq		TOTAL_DATALEN, %xmm0
	vpinsrq		$1, TOTAL_AADLEN, %xmm0, %xmm0
	vpsllq		$3, %xmm0, %xmm0	// Bytes to bits
	vpxor		(GHASH_ACC_PTR), %xmm0, GHASH_ACC

	// Load the first hash key power (H^1), which is stored last.
	vmovdqu8	OFFSETOFEND_H_POWERS-16(KEY), H_POW1

.if !\enc
	// Prepare a mask of TAGLEN one bits.
	movl		8(%rsp), TAGLEN
	mov		$-1, %eax
	bzhi		TAGLEN, %eax, %eax
	kmovd		%eax, %k1
.endif

	// Make %rax point to the last AES round key for the chosen AES variant.
	lea		6*16(KEY,AESKEYLEN64,4), %rax

	// Start the AES encryption of the counter block by swapping the counter
	// block to big-endian and XOR-ing it with the zero-th AES round key.
	vpshufb		BSWAP_MASK, LE_CTR, %xmm0
	vpxor		(KEY), %xmm0, %xmm0

	// Complete the AES encryption and multiply GHASH_ACC by H^1.
	// Interleave the AES and GHASH instructions to improve performance.
	cmp		$24, AESKEYLEN
	jl		128f	// AES-128?
	je		192f	// AES-192?
	// AES-256
	vaesenc		-13*16(%rax), %xmm0, %xmm0
	vaesenc		-12*16(%rax), %xmm0, %xmm0
192:
	vaesenc		-11*16(%rax), %xmm0, %xmm0
	vaesenc		-10*16(%rax), %xmm0, %xmm0
128:
.irp i, 0,1,2,3,4,5,6,7,8
	_ghash_mul_step	\i, H_POW1, GHASH_ACC, GHASH_ACC, GFPOLY, \
			%xmm1, %xmm2, %xmm3
	vaesenc		(\i-9)*16(%rax), %xmm0, %xmm0
.endr
	_ghash_mul_step	9, H_POW1, GHASH_ACC, GHASH_ACC, GFPOLY, \
			%xmm1, %xmm2, %xmm3

	// Undo the byte reflection of the GHASH accumulator.
	vpshufb		BSWAP_MASK, GHASH_ACC, GHASH_ACC

	// Do the last AES round and XOR the resulting keystream block with the
	// GHASH accumulator to produce the full computed authentication tag.
	//
	// Reduce latency by taking advantage of the property vaesenclast(key,
	// a) ^ b == vaesenclast(key ^ b, a).  I.e., XOR GHASH_ACC into the last
	// round key, instead of XOR'ing the final AES output with GHASH_ACC.
	//
	// enc_final then returns the computed auth tag, while dec_final
	// compares it with the transmitted one and returns a bool.  To compare
	// the tags, dec_final XORs them together and uses vptest to check
	// whether the result is all-zeroes.  This should be constant-time.
	// dec_final applies the vaesenclast optimization to this additional
	// value XOR'd too, using vpternlogd to XOR the last round key, GHASH
	// accumulator, and transmitted auth tag together in one instruction.
.if \enc
	vpxor		(%rax), GHASH_ACC, %xmm1
	vaesenclast	%xmm1, %xmm0, GHASH_ACC
	vmovdqu		GHASH_ACC, (GHASH_ACC_PTR)
.else
	vmovdqu		(TAG), %xmm1
	vpternlogd	$0x96, (%rax), GHASH_ACC, %xmm1
	vaesenclast	%xmm1, %xmm0, %xmm0
	xor		%eax, %eax
	vmovdqu8	%xmm0, %xmm0{%k1}{z}	// Truncate to TAGLEN bytes
	vptest		%xmm0, %xmm0
	sete		%al
.endif
	// No need for vzeroupper here, since only xmm registers were used.
	RET
.endm

SYM_FUNC_START(aes_gcm_enc_update_vaes_avx512)
	_aes_gcm_update	1
SYM_FUNC_END(aes_gcm_enc_update_vaes_avx512)
SYM_FUNC_START(aes_gcm_dec_update_vaes_avx512)
	_aes_gcm_update	0
SYM_FUNC_END(aes_gcm_dec_update_vaes_avx512)

SYM_FUNC_START(aes_gcm_enc_final_vaes_avx512)
	_aes_gcm_final	1
SYM_FUNC_END(aes_gcm_enc_final_vaes_avx512)
SYM_FUNC_START(aes_gcm_dec_final_vaes_avx512)
	_aes_gcm_final	0
SYM_FUNC_END(aes_gcm_dec_final_vaes_avx512)