/* SPDX-License-Identifier: Apache-2.0 OR BSD-2-Clause */
//
// AES-GCM implementation for x86_64 CPUs that support the following CPU
// features: VAES && VPCLMULQDQ && AVX2
//
// Copyright 2025 Google LLC
//
// Author: Eric Biggers <ebiggers@google.com>
//
//------------------------------------------------------------------------------
//
// This file is dual-licensed, meaning that you can use it under your choice of
// either of the following two licenses:
//
// Licensed under the Apache License 2.0 (the "License").  You may obtain a copy
// of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// or
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
//    this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// -----------------------------------------------------------------------------
//
// This is similar to aes-gcm-vaes-avx512.S, but it uses AVX2 instead of AVX512.
// This means it can only use 16 vector registers instead of 32, the maximum
// vector length is 32 bytes, and some instructions such as vpternlogd and
// masked loads/stores are unavailable.  However, it is able to run on CPUs that
// have VAES without AVX512, namely AMD Zen 3 (including "Milan" server CPUs),
// various Intel client CPUs such as Alder Lake, and Intel Sierra Forest.
//
// This implementation also uses Karatsuba multiplication instead of schoolbook
// multiplication for GHASH in its main loop.  This does not help much on Intel,
// but it improves performance by ~5% on AMD Zen 3.  Other factors weighing
// slightly in favor of Karatsuba multiplication in this implementation are the
// lower maximum vector length (which means there are fewer key powers, so we
// can cache the halves of each key power XOR'd together and still use less
// memory than the AVX512 implementation), and the unavailability of the
// vpternlogd instruction (which helped schoolbook a bit more than Karatsuba).

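// To spell out the Karatsuba formulation used below: writing a = a_H*x^64 + a_L
// and b = b_H*x^64 + b_L, the middle term of the 256-bit carryless product is
// computed as
//
//	MI = (a_H ^ a_L)*(b_H ^ b_L) ^ a_H*b_H ^ a_L*b_L
//
// which over GF(2) equals a_H*b_L ^ a_L*b_H, i.e. three carryless
// multiplications are needed instead of four.  Caching (a_H ^ a_L) for each
// key power is what key->h_powers_xored provides; see _ghash_step_4x.
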
#include <linux/linkage.h>

.section .rodata
.p2align 4

	// The below three 16-byte values must be in the order that they are, as
	// they are really two 32-byte tables and a 16-byte value that overlap:
	//
	// - The first 32-byte table begins at .Lselect_high_bytes_table.
	//   For 0 <= len <= 16, the 16-byte value at
	//   '.Lselect_high_bytes_table + len' selects the high 'len' bytes of
	//   another 16-byte value when AND'ed with it.
	//
	// - The second 32-byte table begins at .Lrshift_and_bswap_table.
	//   For 0 <= len <= 16, the 16-byte value at
	//   '.Lrshift_and_bswap_table + len' is a vpshufb mask that does the
	//   following operation: right-shift by '16 - len' bytes (shifting in
	//   zeroes), then reflect all 16 bytes.
	//
	// - The 16-byte value at .Lbswap_mask is a vpshufb mask that reflects
	//   all 16 bytes.
.Lselect_high_bytes_table:
	.octa	0
.Lrshift_and_bswap_table:
	.octa	0xffffffffffffffffffffffffffffffff
.Lbswap_mask:
	.octa	0x000102030405060708090a0b0c0d0e0f

	// Sixteen 0x0f bytes.  By XOR'ing an entry of .Lrshift_and_bswap_table
	// with this, we get a mask that left-shifts by '16 - len' bytes.
.Lfifteens:
	.octa	0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f

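	// Example of the overlap described above: for len = 4, the 16 bytes at
	// '.Lselect_high_bytes_table + 4' are 12 zero bytes followed by 4 0xff
	// bytes, so AND'ing with that value keeps only the high 4 bytes of a
	// block.  The 16 bytes at '.Lrshift_and_bswap_table + 4' are 12 0xff
	// bytes (which vpshufb turns into zeroes, since their high bit is set)
	// followed by 0x0f, 0x0e, 0x0d, 0x0c, i.e. the first 4 bytes of
	// .Lbswap_mask.
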
	// This is the GHASH reducing polynomial without its constant term, i.e.
	// x^128 + x^7 + x^2 + x, represented using the backwards mapping
	// between bits and polynomial coefficients.
	//
	// Alternatively, it can be interpreted as the naturally-ordered
	// representation of the polynomial x^127 + x^126 + x^121 + 1, i.e. the
	// "reversed" GHASH reducing polynomial without its x^128 term.
.Lgfpoly:
	.octa	0xc2000000000000000000000000000001

	// Same as above, but with the (1 << 64) bit set.
.Lgfpoly_and_internal_carrybit:
	.octa	0xc2000000000000010000000000000001

	// Values needed to prepare the initial vector of counter blocks.
.Lctr_pattern:
	.octa	0
	.octa	1

	// The number of AES blocks per vector, as a 128-bit value.
.Linc_2blocks:
	.octa	2

// Offsets in struct aes_gcm_key_vaes_avx2
#define OFFSETOF_AESKEYLEN	480
#define OFFSETOF_H_POWERS	512
#define NUM_H_POWERS		8
#define OFFSETOFEND_H_POWERS    (OFFSETOF_H_POWERS + (NUM_H_POWERS * 16))
#define OFFSETOF_H_POWERS_XORED	OFFSETOFEND_H_POWERS

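// The offsets above correspond to a layout along the following lines
// (illustrative sketch only; the authoritative definition of
// struct aes_gcm_key_vaes_avx2 lives in the C glue code, and the exact types
// shown here are assumptions):
//
//	struct aes_gcm_key_vaes_avx2 {
//		... /* base: expanded AES key; key length at byte offset 480 */
//		u8 h_powers[8][16];	/* H^8..H^1, at byte offset 512 */
//		u64 h_powers_xored[8];	/* XOR'd 64-bit halves, at offset 640 */
//	};
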
.text

// Do one step of GHASH-multiplying the 128-bit lanes of \a by the 128-bit lanes
// of \b and storing the reduced products in \dst.  Uses schoolbook
// multiplication.
.macro	_ghash_mul_step	i, a, b, dst, gfpoly, t0, t1, t2
.if \i == 0
	vpclmulqdq	$0x00, \a, \b, \t0	  // LO = a_L * b_L
	vpclmulqdq	$0x01, \a, \b, \t1	  // MI_0 = a_L * b_H
.elseif \i == 1
	vpclmulqdq	$0x10, \a, \b, \t2	  // MI_1 = a_H * b_L
.elseif \i == 2
	vpxor		\t2, \t1, \t1		  // MI = MI_0 + MI_1
.elseif \i == 3
	vpclmulqdq	$0x01, \t0, \gfpoly, \t2  // LO_L*(x^63 + x^62 + x^57)
.elseif \i == 4
	vpshufd		$0x4e, \t0, \t0		  // Swap halves of LO
.elseif \i == 5
	vpxor		\t0, \t1, \t1		  // Fold LO into MI (part 1)
	vpxor		\t2, \t1, \t1		  // Fold LO into MI (part 2)
.elseif \i == 6
	vpclmulqdq	$0x11, \a, \b, \dst	  // HI = a_H * b_H
.elseif \i == 7
	vpclmulqdq	$0x01, \t1, \gfpoly, \t0  // MI_L*(x^63 + x^62 + x^57)
.elseif \i == 8
	vpshufd		$0x4e, \t1, \t1		  // Swap halves of MI
.elseif \i == 9
	vpxor		\t1, \dst, \dst		  // Fold MI into HI (part 1)
	vpxor		\t0, \dst, \dst		  // Fold MI into HI (part 2)
.endif
.endm

// GHASH-multiply the 128-bit lanes of \a by the 128-bit lanes of \b and store
// the reduced products in \dst.  See _ghash_mul_step for full explanation.
.macro	_ghash_mul	a, b, dst, gfpoly, t0, t1, t2
.irp i, 0,1,2,3,4,5,6,7,8,9
	_ghash_mul_step	\i, \a, \b, \dst, \gfpoly, \t0, \t1, \t2
.endr
.endm

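// For reference, the underlying field is GF(2^128) as defined by GCM.  A
// plain-C, bit-at-a-time model of that multiplication, written in the GCM
// specification's own bit order rather than the byte-reflected representation
// the code above uses, looks like the following (illustrative only, not part
// of the build; u8 is the kernel byte type):
//
//	static void ghash_mul_ref(const u8 x[16], const u8 y[16], u8 z[16])
//	{
//		u8 v[16];
//		int i, j, lsb;
//
//		memcpy(v, y, 16);
//		memset(z, 0, 16);
//		for (i = 0; i < 128; i++) {
//			/* bit i of x, where bit 0 is the MSB of x[0] */
//			if ((x[i / 8] >> (7 - (i % 8))) & 1)
//				for (j = 0; j < 16; j++)
//					z[j] ^= v[j];
//			/* v = v*x mod x^128 + x^7 + x^2 + x + 1 */
//			lsb = v[15] & 1;
//			for (j = 15; j > 0; j--)
//				v[j] = (v[j] >> 1) | (v[j - 1] << 7);
//			v[0] >>= 1;
//			if (lsb)
//				v[0] ^= 0xe1;
//		}
//	}
//
// _ghash_mul computes the equivalent product (on operands kept in the
// byte-reflected representation described above .Lgfpoly) using four carryless
// multiplications and the two folding steps of the reduction.
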
// GHASH-multiply the 128-bit lanes of \a by the 128-bit lanes of \b and add the
// *unreduced* products to \lo, \mi, and \hi.
.macro	_ghash_mul_noreduce	a, b, lo, mi, hi, t0
	vpclmulqdq	$0x00, \a, \b, \t0	// a_L * b_L
	vpxor		\t0, \lo, \lo
	vpclmulqdq	$0x01, \a, \b, \t0	// a_L * b_H
	vpxor		\t0, \mi, \mi
	vpclmulqdq	$0x10, \a, \b, \t0	// a_H * b_L
	vpxor		\t0, \mi, \mi
	vpclmulqdq	$0x11, \a, \b, \t0	// a_H * b_H
	vpxor		\t0, \hi, \hi
.endm

// Reduce the unreduced products from \lo, \mi, and \hi and store the 128-bit
// reduced products in \hi.  See _ghash_mul_step for explanation of reduction.
.macro	_ghash_reduce	lo, mi, hi, gfpoly, t0
	vpclmulqdq	$0x01, \lo, \gfpoly, \t0
	vpshufd		$0x4e, \lo, \lo
	vpxor		\lo, \mi, \mi
	vpxor		\t0, \mi, \mi
	vpclmulqdq	$0x01, \mi, \gfpoly, \t0
	vpshufd		$0x4e, \mi, \mi
	vpxor		\mi, \hi, \hi
	vpxor		\t0, \hi, \hi
.endm

// This is a specialized version of _ghash_mul that computes \a * \a, i.e. it
// squares \a.  It skips computing MI = (a_L * a_H) + (a_H * a_L) = 0.
.macro	_ghash_square	a, dst, gfpoly, t0, t1
	vpclmulqdq	$0x00, \a, \a, \t0	  // LO = a_L * a_L
	vpclmulqdq	$0x11, \a, \a, \dst	  // HI = a_H * a_H
	vpclmulqdq	$0x01, \t0, \gfpoly, \t1  // LO_L*(x^63 + x^62 + x^57)
	vpshufd		$0x4e, \t0, \t0		  // Swap halves of LO
	vpxor		\t0, \t1, \t1		  // Fold LO into MI
	vpclmulqdq	$0x01, \t1, \gfpoly, \t0  // MI_L*(x^63 + x^62 + x^57)
	vpshufd		$0x4e, \t1, \t1		  // Swap halves of MI
	vpxor		\t1, \dst, \dst		  // Fold MI into HI (part 1)
	vpxor		\t0, \dst, \dst		  // Fold MI into HI (part 2)
.endm

// void aes_gcm_precompute_vaes_avx2(struct aes_gcm_key_vaes_avx2 *key);
//
// Given the expanded AES key |key->base.aes_key|, derive the GHASH subkey and
// initialize |key->h_powers| and |key->h_powers_xored|.
//
// We use h_powers[0..7] to store H^8 through H^1, and h_powers_xored[0..7] to
// store the 64-bit halves of the key powers XOR'd together (for Karatsuba
// multiplication) in the order 8,6,7,5,4,2,3,1.
SYM_FUNC_START(aes_gcm_precompute_vaes_avx2)

	// Function arguments
	.set	KEY,		%rdi

	// Additional local variables
	.set	POWERS_PTR,	%rsi
	.set	RNDKEYLAST_PTR,	%rdx
	.set	TMP0,		%ymm0
	.set	TMP0_XMM,	%xmm0
	.set	TMP1,		%ymm1
	.set	TMP1_XMM,	%xmm1
	.set	TMP2,		%ymm2
	.set	TMP2_XMM,	%xmm2
	.set	H_CUR,		%ymm3
	.set	H_CUR_XMM,	%xmm3
	.set	H_CUR2,		%ymm4
	.set	H_INC,		%ymm5
	.set	H_INC_XMM,	%xmm5
	.set	GFPOLY,		%ymm6
	.set	GFPOLY_XMM,	%xmm6

	// Encrypt an all-zeroes block to get the raw hash subkey.
	movl		OFFSETOF_AESKEYLEN(KEY), %eax
	lea		6*16(KEY,%rax,4), RNDKEYLAST_PTR
	vmovdqu		(KEY), H_CUR_XMM  // Zero-th round key XOR all-zeroes block
	lea		16(KEY), %rax
1:
	vaesenc		(%rax), H_CUR_XMM, H_CUR_XMM
	add		$16, %rax
	cmp		%rax, RNDKEYLAST_PTR
	jne		1b
	vaesenclast	(RNDKEYLAST_PTR), H_CUR_XMM, H_CUR_XMM

	// Reflect the bytes of the raw hash subkey.
	vpshufb		.Lbswap_mask(%rip), H_CUR_XMM, H_CUR_XMM

	// Finish preprocessing the byte-reflected hash subkey by multiplying it
	// by x^-1 ("standard" interpretation of polynomial coefficients) or
	// equivalently x^1 (natural interpretation).  This gets the key into a
	// format that avoids having to bit-reflect the data blocks later.
	vpshufd		$0xd3, H_CUR_XMM, TMP0_XMM
	vpsrad		$31, TMP0_XMM, TMP0_XMM
	vpaddq		H_CUR_XMM, H_CUR_XMM, H_CUR_XMM
	vpand		.Lgfpoly_and_internal_carrybit(%rip), TMP0_XMM, TMP0_XMM
	vpxor		TMP0_XMM, H_CUR_XMM, H_CUR_XMM

	// Load the gfpoly constant.
	vbroadcasti128	.Lgfpoly(%rip), GFPOLY

	// Square H^1 to get H^2.
	_ghash_square	H_CUR_XMM, H_INC_XMM, GFPOLY_XMM, TMP0_XMM, TMP1_XMM

	// Create H_CUR = [H^2, H^1] and H_INC = [H^2, H^2].
	vinserti128	$1, H_CUR_XMM, H_INC, H_CUR
	vinserti128	$1, H_INC_XMM, H_INC, H_INC

	// Compute H_CUR2 = [H^4, H^3].
	_ghash_mul	H_INC, H_CUR, H_CUR2, GFPOLY, TMP0, TMP1, TMP2

	// Store [H^2, H^1] and [H^4, H^3].
	vmovdqu		H_CUR, OFFSETOF_H_POWERS+3*32(KEY)
	vmovdqu		H_CUR2, OFFSETOF_H_POWERS+2*32(KEY)

	// For Karatsuba multiplication: compute and store the two 64-bit halves
	// of each key power XOR'd together.  Order is 4,2,3,1.
	vpunpcklqdq	H_CUR, H_CUR2, TMP0
	vpunpckhqdq	H_CUR, H_CUR2, TMP1
	vpxor		TMP1, TMP0, TMP0
	vmovdqu		TMP0, OFFSETOF_H_POWERS_XORED+32(KEY)

	// Compute and store H_CUR = [H^6, H^5] and H_CUR2 = [H^8, H^7].
	_ghash_mul	H_INC, H_CUR2, H_CUR, GFPOLY, TMP0, TMP1, TMP2
	_ghash_mul	H_INC, H_CUR, H_CUR2, GFPOLY, TMP0, TMP1, TMP2
	vmovdqu		H_CUR, OFFSETOF_H_POWERS+1*32(KEY)
	vmovdqu		H_CUR2, OFFSETOF_H_POWERS+0*32(KEY)

	// Again, compute and store the two 64-bit halves of each key power
	// XOR'd together.  Order is 8,6,7,5.
	vpunpcklqdq	H_CUR, H_CUR2, TMP0
	vpunpckhqdq	H_CUR, H_CUR2, TMP1
	vpxor		TMP1, TMP0, TMP0
	vmovdqu		TMP0, OFFSETOF_H_POWERS_XORED(KEY)

	vzeroupper
	RET
SYM_FUNC_END(aes_gcm_precompute_vaes_avx2)

// Do one step of the GHASH update of four vectors of data blocks.
//   \i: the step to do, 0 through 9
//   \ghashdata_ptr: pointer to the data blocks (ciphertext or AAD)
//   KEY: pointer to struct aes_gcm_key_vaes_avx2
//   BSWAP_MASK: mask for reflecting the bytes of blocks
//   H_POW[2-1]_XORED: cached values from KEY->h_powers_xored
//   TMP[0-2]: temporary registers.  TMP[1-2] must be preserved across steps.
//   LO, MI: working state for this macro that must be preserved across steps
//   GHASH_ACC: the GHASH accumulator (input/output)
.macro	_ghash_step_4x	i, ghashdata_ptr
	.set		HI, GHASH_ACC # alias
	.set		HI_XMM, GHASH_ACC_XMM
.if \i == 0
	// First vector
	vmovdqu		0*32(\ghashdata_ptr), TMP1
	vpshufb		BSWAP_MASK, TMP1, TMP1
	vmovdqu		OFFSETOF_H_POWERS+0*32(KEY), TMP2
	vpxor		GHASH_ACC, TMP1, TMP1
	vpclmulqdq	$0x00, TMP2, TMP1, LO
	vpclmulqdq	$0x11, TMP2, TMP1, HI
	vpunpckhqdq	TMP1, TMP1, TMP0
	vpxor		TMP1, TMP0, TMP0
	vpclmulqdq	$0x00, H_POW2_XORED, TMP0, MI
.elseif \i == 1
.elseif \i == 2
	// Second vector
	vmovdqu		1*32(\ghashdata_ptr), TMP1
	vpshufb		BSWAP_MASK, TMP1, TMP1
	vmovdqu		OFFSETOF_H_POWERS+1*32(KEY), TMP2
	vpclmulqdq	$0x00, TMP2, TMP1, TMP0
	vpxor		TMP0, LO, LO
	vpclmulqdq	$0x11, TMP2, TMP1, TMP0
	vpxor		TMP0, HI, HI
	vpunpckhqdq	TMP1, TMP1, TMP0
	vpxor		TMP1, TMP0, TMP0
	vpclmulqdq	$0x10, H_POW2_XORED, TMP0, TMP0
	vpxor		TMP0, MI, MI
.elseif \i == 3
	// Third vector
	vmovdqu		2*32(\ghashdata_ptr), TMP1
	vpshufb		BSWAP_MASK, TMP1, TMP1
	vmovdqu		OFFSETOF_H_POWERS+2*32(KEY), TMP2
.elseif \i == 4
	vpclmulqdq	$0x00, TMP2, TMP1, TMP0
	vpxor		TMP0, LO, LO
	vpclmulqdq	$0x11, TMP2, TMP1, TMP0
	vpxor		TMP0, HI, HI
.elseif \i == 5
	vpunpckhqdq	TMP1, TMP1, TMP0
	vpxor		TMP1, TMP0, TMP0
	vpclmulqdq	$0x00, H_POW1_XORED, TMP0, TMP0
	vpxor		TMP0, MI, MI

	// Fourth vector
	vmovdqu		3*32(\ghashdata_ptr), TMP1
	vpshufb		BSWAP_MASK, TMP1, TMP1
.elseif \i == 6
	vmovdqu		OFFSETOF_H_POWERS+3*32(KEY), TMP2
	vpclmulqdq	$0x00, TMP2, TMP1, TMP0
	vpxor		TMP0, LO, LO
	vpclmulqdq	$0x11, TMP2, TMP1, TMP0
	vpxor		TMP0, HI, HI
	vpunpckhqdq	TMP1, TMP1, TMP0
	vpxor		TMP1, TMP0, TMP0
	vpclmulqdq	$0x10, H_POW1_XORED, TMP0, TMP0
	vpxor		TMP0, MI, MI
.elseif \i == 7
	// Finalize 'mi' following Karatsuba multiplication.
	vpxor		LO, MI, MI
	vpxor		HI, MI, MI

	// Fold lo into mi.
	vbroadcasti128	.Lgfpoly(%rip), TMP2
	vpclmulqdq	$0x01, LO, TMP2, TMP0
	vpshufd		$0x4e, LO, LO
	vpxor		LO, MI, MI
	vpxor		TMP0, MI, MI
.elseif \i == 8
	// Fold mi into hi.
	vpclmulqdq	$0x01, MI, TMP2, TMP0
	vpshufd		$0x4e, MI, MI
	vpxor		MI, HI, HI
	vpxor		TMP0, HI, HI
.elseif \i == 9
	vextracti128	$1, HI, TMP0_XMM
	vpxor		TMP0_XMM, HI_XMM, GHASH_ACC_XMM
.endif
.endm

// Update GHASH with four vectors of data blocks.  See _ghash_step_4x for full
// explanation.
.macro	_ghash_4x	ghashdata_ptr
.irp i, 0,1,2,3,4,5,6,7,8,9
	_ghash_step_4x	\i, \ghashdata_ptr
.endr
.endm

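// In terms of 16-byte blocks C0..C7 taken from the 128 bytes of data, one
// _ghash_4x pass computes the standard aggregated GHASH update
//
//	GHASH_ACC = (GHASH_ACC ^ C0)*H^8 ^ C1*H^7 ^ C2*H^6 ^ ... ^ C7*H^1
//
// computing the Karatsuba middle terms with one carryless multiplication per
// 32-byte vector and doing a single reduction at the end (steps 7-9).
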
// Load 1 <= %ecx <= 16 bytes from the pointer \src into the xmm register \dst
// and zeroize any remaining bytes.  Clobbers %rax, %rcx, and \tmp{64,32}.
.macro	_load_partial_block	src, dst, tmp64, tmp32
	sub		$8, %ecx		// LEN - 8
	jle		.Lle8\@

	// Load 9 <= LEN <= 16 bytes.
	vmovq		(\src), \dst		// Load first 8 bytes
	mov		(\src, %rcx), %rax	// Load last 8 bytes
	neg		%ecx
	shl		$3, %ecx
	shr		%cl, %rax		// Discard overlapping bytes
	vpinsrq		$1, %rax, \dst, \dst
	jmp		.Ldone\@

.Lle8\@:
	add		$4, %ecx		// LEN - 4
	jl		.Llt4\@

	// Load 4 <= LEN <= 8 bytes.
	mov		(\src), %eax		// Load first 4 bytes
	mov		(\src, %rcx), \tmp32	// Load last 4 bytes
	jmp		.Lcombine\@

.Llt4\@:
	// Load 1 <= LEN <= 3 bytes.
	add		$2, %ecx		// LEN - 2
	movzbl		(\src), %eax		// Load first byte
	jl		.Lmovq\@
	movzwl		(\src, %rcx), \tmp32	// Load last 2 bytes
.Lcombine\@:
	shl		$3, %ecx
	shl		%cl, \tmp64
	or		\tmp64, %rax		// Combine the two parts
.Lmovq\@:
	vmovq		%rax, \dst
.Ldone\@:
.endm

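// The strategy above avoids both out-of-bounds reads and byte-at-a-time loops:
// it reads the first and last naturally-sized pieces of the buffer, which may
// overlap, and shifts the overlapping bytes away.  A rough C equivalent
// (illustrative only; assumes a little-endian CPU, as the assembly does):
//
//	static void load_partial_block_ref(const u8 *src, unsigned int len,
//					   u8 dst[16])
//	{
//		u64 lo = 0, hi = 0;
//		u32 a, b;
//
//		if (len >= 9) {
//			memcpy(&lo, src, 8);
//			memcpy(&hi, src + len - 8, 8);
//			hi >>= 8 * (16 - len);	/* discard overlapping bytes */
//		} else if (len >= 4) {
//			memcpy(&a, src, 4);
//			memcpy(&b, src + len - 4, 4);
//			lo = a | ((u64)b << (8 * (len - 4)));
//		} else {
//			lo = src[0];
//			if (len >= 2)
//				lo |= (u64)(src[len - 2] | (src[len - 1] << 8))
//					<< (8 * (len - 2));
//		}
//		memcpy(dst, &lo, 8);
//		memcpy(dst + 8, &hi, 8);
//	}
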
// Store 1 <= %ecx <= 16 bytes from the xmm register \src to the pointer \dst.
// Clobbers %rax, %rcx, and \tmp{64,32}.
.macro	_store_partial_block	src, dst, tmp64, tmp32
	sub		$8, %ecx		// LEN - 8
	jl		.Llt8\@

	// Store 8 <= LEN <= 16 bytes.
	vpextrq		$1, \src, %rax
	mov		%ecx, \tmp32
	shl		$3, %ecx
	ror		%cl, %rax
	mov		%rax, (\dst, \tmp64)	// Store last LEN - 8 bytes
	vmovq		\src, (\dst)		// Store first 8 bytes
	jmp		.Ldone\@

.Llt8\@:
	add		$4, %ecx		// LEN - 4
	jl		.Llt4\@

	// Store 4 <= LEN <= 7 bytes.
	vpextrd		$1, \src, %eax
	mov		%ecx, \tmp32
	shl		$3, %ecx
	ror		%cl, %eax
	mov		%eax, (\dst, \tmp64)	// Store last LEN - 4 bytes
	vmovd		\src, (\dst)		// Store first 4 bytes
	jmp		.Ldone\@

.Llt4\@:
	// Store 1 <= LEN <= 3 bytes.
	vpextrb		$0, \src, 0(\dst)
	cmp		$-2, %ecx		// LEN - 4 == -2, i.e. LEN == 2?
	jl		.Ldone\@
	vpextrb		$1, \src, 1(\dst)
	je		.Ldone\@
	vpextrb		$2, \src, 2(\dst)
.Ldone\@:
.endm

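// Like _load_partial_block, this avoids a byte-at-a-time loop: it stores the
// first and last naturally-sized pieces, with the extracted qword/dword rotated
// so that the tail bytes land at \dst + LEN - 8 (or LEN - 4), and it stores the
// first piece last so that it overwrites whatever the rotation swept into the
// overlap.  Nothing past \dst + LEN - 1 is written; the net effect is that of
// copying exactly LEN bytes of \src to \dst.
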
// void aes_gcm_aad_update_vaes_avx2(const struct aes_gcm_key_vaes_avx2 *key,
//				     u8 ghash_acc[16],
//				     const u8 *aad, int aadlen);
//
// This function processes the AAD (Additional Authenticated Data) in GCM.
// Using the key |key|, it updates the GHASH accumulator |ghash_acc| with the
// data given by |aad| and |aadlen|.  On the first call, |ghash_acc| must be all
// zeroes.  |aadlen| must be a multiple of 16, except on the last call where it
// can be any length.  The caller must do any buffering needed to ensure this.
//
// This handles large amounts of AAD efficiently, while also keeping overhead
// low for small amounts which is the common case.  TLS and IPsec use less than
// one block of AAD, but (uncommonly) other use cases may use much more.
SYM_FUNC_START(aes_gcm_aad_update_vaes_avx2)

	// Function arguments
	.set	KEY,		%rdi
	.set	GHASH_ACC_PTR,	%rsi
	.set	AAD,		%rdx
	.set	AADLEN,		%ecx	// Must be %ecx for _load_partial_block
	.set	AADLEN64,	%rcx	// Zero-extend AADLEN before using!

	// Additional local variables.
	// %rax and %r8 are used as temporary registers.
	.set	TMP0,		%ymm0
	.set	TMP0_XMM,	%xmm0
	.set	TMP1,		%ymm1
	.set	TMP1_XMM,	%xmm1
	.set	TMP2,		%ymm2
	.set	TMP2_XMM,	%xmm2
	.set	LO,		%ymm3
	.set	LO_XMM,		%xmm3
	.set	MI,		%ymm4
	.set	MI_XMM,		%xmm4
	.set	GHASH_ACC,	%ymm5
	.set	GHASH_ACC_XMM,	%xmm5
	.set	BSWAP_MASK,	%ymm6
	.set	BSWAP_MASK_XMM,	%xmm6
	.set	GFPOLY,		%ymm7
	.set	GFPOLY_XMM,	%xmm7
	.set	H_POW2_XORED,	%ymm8
	.set	H_POW1_XORED,	%ymm9

	// Load the bswap_mask and gfpoly constants.  Since AADLEN is usually
	// small, typically only 128-bit vectors will be used.  So as an
	// optimization, don't broadcast these constants to both 128-bit lanes
	// quite yet.
	vmovdqu		.Lbswap_mask(%rip), BSWAP_MASK_XMM
	vmovdqu		.Lgfpoly(%rip), GFPOLY_XMM

	// Load the GHASH accumulator.
	vmovdqu		(GHASH_ACC_PTR), GHASH_ACC_XMM

	// Check for the common case of AADLEN <= 16, as well as AADLEN == 0.
	test		AADLEN, AADLEN
	jz		.Laad_done
	cmp		$16, AADLEN
	jle		.Laad_lastblock

	// AADLEN > 16, so we'll operate on full vectors.  Broadcast bswap_mask
	// and gfpoly to both 128-bit lanes.
	vinserti128	$1, BSWAP_MASK_XMM, BSWAP_MASK, BSWAP_MASK
	vinserti128	$1, GFPOLY_XMM, GFPOLY, GFPOLY

	// If AADLEN >= 128, update GHASH with 128 bytes of AAD at a time.
	add		$-128, AADLEN	// 128 is 4 bytes, -128 is 1 byte
	jl		.Laad_loop_4x_done
	vmovdqu		OFFSETOF_H_POWERS_XORED(KEY), H_POW2_XORED
	vmovdqu		OFFSETOF_H_POWERS_XORED+32(KEY), H_POW1_XORED
.Laad_loop_4x:
	_ghash_4x	AAD
	sub		$-128, AAD
	add		$-128, AADLEN
	jge		.Laad_loop_4x
.Laad_loop_4x_done:

	// If AADLEN >= 32, update GHASH with 32 bytes of AAD at a time.
	add		$96, AADLEN
	jl		.Laad_loop_1x_done
.Laad_loop_1x:
	vmovdqu		(AAD), TMP0
	vpshufb		BSWAP_MASK, TMP0, TMP0
	vpxor		TMP0, GHASH_ACC, GHASH_ACC
	vmovdqu		OFFSETOFEND_H_POWERS-32(KEY), TMP0
	_ghash_mul	TMP0, GHASH_ACC, GHASH_ACC, GFPOLY, TMP1, TMP2, LO
	vextracti128	$1, GHASH_ACC, TMP0_XMM
	vpxor		TMP0_XMM, GHASH_ACC_XMM, GHASH_ACC_XMM
	add		$32, AAD
	sub		$32, AADLEN
	jge		.Laad_loop_1x
.Laad_loop_1x_done:
	add		$32, AADLEN
	// Now 0 <= AADLEN < 32.

	jz		.Laad_done
	cmp		$16, AADLEN
	jle		.Laad_lastblock

	// Update GHASH with the remaining 17 <= AADLEN <= 31 bytes of AAD.
	mov		AADLEN, AADLEN	// Zero-extend AADLEN to AADLEN64.
	vmovdqu		(AAD), TMP0_XMM
	vmovdqu		-16(AAD, AADLEN64), TMP1_XMM
	vpshufb		BSWAP_MASK_XMM, TMP0_XMM, TMP0_XMM
	vpxor		TMP0_XMM, GHASH_ACC_XMM, GHASH_ACC_XMM
	lea		.Lrshift_and_bswap_table(%rip), %rax
	vpshufb		-16(%rax, AADLEN64), TMP1_XMM, TMP1_XMM
	vinserti128	$1, TMP1_XMM, GHASH_ACC, GHASH_ACC
	vmovdqu		OFFSETOFEND_H_POWERS-32(KEY), TMP0
	_ghash_mul	TMP0, GHASH_ACC, GHASH_ACC, GFPOLY, TMP1, TMP2, LO
	vextracti128	$1, GHASH_ACC, TMP0_XMM
	vpxor		TMP0_XMM, GHASH_ACC_XMM, GHASH_ACC_XMM
	jmp		.Laad_done

.Laad_lastblock:
	// Update GHASH with the remaining 1 <= AADLEN <= 16 bytes of AAD.
	_load_partial_block	AAD, TMP0_XMM, %r8, %r8d
	vpshufb		BSWAP_MASK_XMM, TMP0_XMM, TMP0_XMM
	vpxor		TMP0_XMM, GHASH_ACC_XMM, GHASH_ACC_XMM
	vmovdqu		OFFSETOFEND_H_POWERS-16(KEY), TMP0_XMM
	_ghash_mul	TMP0_XMM, GHASH_ACC_XMM, GHASH_ACC_XMM, GFPOLY_XMM, \
			TMP1_XMM, TMP2_XMM, LO_XMM

.Laad_done:
	// Store the updated GHASH accumulator back to memory.
	vmovdqu		GHASH_ACC_XMM, (GHASH_ACC_PTR)

	vzeroupper
	RET
SYM_FUNC_END(aes_gcm_aad_update_vaes_avx2)

// Do one non-last round of AES encryption on the blocks in the given AESDATA
// vectors using the round key that has been broadcast to all 128-bit lanes of
// \round_key.
.macro	_vaesenc	round_key, vecs:vararg
.irp i, \vecs
	vaesenc		\round_key, AESDATA\i, AESDATA\i
.endr
.endm

// Generate counter blocks in the given AESDATA vectors, then do the zero-th AES
// round on them.  Clobbers TMP0.
.macro	_ctr_begin	vecs:vararg
	vbroadcasti128	.Linc_2blocks(%rip), TMP0
.irp i, \vecs
	vpshufb		BSWAP_MASK, LE_CTR, AESDATA\i
	vpaddd		TMP0, LE_CTR, LE_CTR
.endr
.irp i, \vecs
	vpxor		RNDKEY0, AESDATA\i, AESDATA\i
.endr
.endm

// Generate and encrypt counter blocks in the given AESDATA vectors, excluding
// the last AES round.  Clobbers %rax and TMP0.
.macro	_aesenc_loop	vecs:vararg
	_ctr_begin	\vecs
	lea		16(KEY), %rax
.Laesenc_loop\@:
	vbroadcasti128	(%rax), TMP0
	_vaesenc	TMP0, \vecs
	add		$16, %rax
	cmp		%rax, RNDKEYLAST_PTR
	jne		.Laesenc_loop\@
.endm

// Finalize the keystream blocks in the given AESDATA vectors by doing the last
// AES round, then XOR those keystream blocks with the corresponding data.
// Reduce latency by doing the XOR before the vaesenclast, utilizing the
// property vaesenclast(key, a) ^ b == vaesenclast(key ^ b, a).  Clobbers TMP0.
.macro	_aesenclast_and_xor	vecs:vararg
.irp i, \vecs
	vpxor		\i*32(SRC), RNDKEYLAST, TMP0
	vaesenclast	TMP0, AESDATA\i, AESDATA\i
.endr
.irp i, \vecs
	vmovdqu		AESDATA\i, \i*32(DST)
.endr
.endm

// void aes_gcm_{enc,dec}_update_vaes_avx2(const struct aes_gcm_key_vaes_avx2 *key,
//					   const u32 le_ctr[4], u8 ghash_acc[16],
//					   const u8 *src, u8 *dst, int datalen);
//
// This macro generates a GCM encryption or decryption update function with the
// above prototype (with \enc selecting which one).  The function computes the
// next portion of the CTR keystream, XOR's it with |datalen| bytes from |src|,
// and writes the resulting encrypted or decrypted data to |dst|.  It also
// updates the GHASH accumulator |ghash_acc| using the next |datalen| ciphertext
// bytes.
//
// |datalen| must be a multiple of 16, except on the last call where it can be
// any length.  The caller must do any buffering needed to ensure this.  Both
// in-place and out-of-place en/decryption are supported.
//
// |le_ctr| must give the current counter in little-endian format.  This
// function loads the counter from |le_ctr| and increments the loaded counter as
// needed, but it does *not* store the updated counter back to |le_ctr|.  The
// caller must update |le_ctr| if any more data segments follow.  Internally,
// only the low 32-bit word of the counter is incremented, following the GCM
// standard.
.macro	_aes_gcm_update	enc

	// Function arguments
	.set	KEY,		%rdi
	.set	LE_CTR_PTR,	%rsi
	.set	LE_CTR_PTR32,	%esi
	.set	GHASH_ACC_PTR,	%rdx
	.set	SRC,		%rcx	// Assumed to be %rcx.
					// See .Ltail_xor_and_ghash_1to16bytes
	.set	DST,		%r8
	.set	DATALEN,	%r9d
	.set	DATALEN64,	%r9	// Zero-extend DATALEN before using!

	// Additional local variables

	// %rax is used as a temporary register.  LE_CTR_PTR is also available
	// as a temporary register after the counter is loaded.

	// AES key length in bytes
	.set	AESKEYLEN,	%r10d
	.set	AESKEYLEN64,	%r10

	// Pointer to the last AES round key for the chosen AES variant
	.set	RNDKEYLAST_PTR,	%r11

	// BSWAP_MASK is the shuffle mask for byte-reflecting 128-bit values
	// using vpshufb, copied to all 128-bit lanes.
	.set	BSWAP_MASK,	%ymm0
	.set	BSWAP_MASK_XMM,	%xmm0

	// GHASH_ACC is the accumulator variable for GHASH.  When fully reduced,
	// only the lowest 128-bit lane can be nonzero.  When not fully reduced,
	// more than one lane may be used, and they need to be XOR'd together.
	.set	GHASH_ACC,	%ymm1
	.set	GHASH_ACC_XMM,	%xmm1

	// TMP[0-2] are temporary registers.
	.set	TMP0,		%ymm2
	.set	TMP0_XMM,	%xmm2
	.set	TMP1,		%ymm3
	.set	TMP1_XMM,	%xmm3
	.set	TMP2,		%ymm4
	.set	TMP2_XMM,	%xmm4

	// LO and MI are used to accumulate unreduced GHASH products.
	.set	LO,		%ymm5
	.set	LO_XMM,		%xmm5
	.set	MI,		%ymm6
	.set	MI_XMM,		%xmm6

	// H_POW[2-1]_XORED contain cached values from KEY->h_powers_xored.  The
	// descending numbering reflects the order of the key powers.
	.set	H_POW2_XORED,	%ymm7
	.set	H_POW2_XORED_XMM, %xmm7
	.set	H_POW1_XORED,	%ymm8

	// RNDKEY0 caches the zero-th round key, and RNDKEYLAST the last one.
	.set	RNDKEY0,	%ymm9
	.set	RNDKEYLAST,	%ymm10

	// LE_CTR contains the next set of little-endian counter blocks.
	.set	LE_CTR,		%ymm11

	// AESDATA[0-3] hold the counter blocks that are being encrypted by AES.
	.set	AESDATA0,	%ymm12
	.set	AESDATA0_XMM,	%xmm12
	.set	AESDATA1,	%ymm13
	.set	AESDATA1_XMM,	%xmm13
	.set	AESDATA2,	%ymm14
	.set	AESDATA3,	%ymm15

.if \enc
	.set	GHASHDATA_PTR,	DST
.else
	.set	GHASHDATA_PTR,	SRC
.endif

	vbroadcasti128	.Lbswap_mask(%rip), BSWAP_MASK

	// Load the GHASH accumulator and the starting counter.
	vmovdqu		(GHASH_ACC_PTR), GHASH_ACC_XMM
	vbroadcasti128	(LE_CTR_PTR), LE_CTR

	// Load the AES key length in bytes.
	movl		OFFSETOF_AESKEYLEN(KEY), AESKEYLEN

	// Make RNDKEYLAST_PTR point to the last AES round key.  This is the
	// round key with index 10, 12, or 14 for AES-128, AES-192, or AES-256
	// respectively.  Then load the zero-th and last round keys.
	lea		6*16(KEY,AESKEYLEN64,4), RNDKEYLAST_PTR
	vbroadcasti128	(KEY), RNDKEY0
	vbroadcasti128	(RNDKEYLAST_PTR), RNDKEYLAST

	// Finish initializing LE_CTR by adding 1 to the second block.
	vpaddd		.Lctr_pattern(%rip), LE_CTR, LE_CTR

	// If there are at least 128 bytes of data, then continue into the loop
	// that processes 128 bytes of data at a time.  Otherwise skip it.
	add		$-128, DATALEN	// 128 is 4 bytes, -128 is 1 byte
	jl		.Lcrypt_loop_4x_done\@

	vmovdqu		OFFSETOF_H_POWERS_XORED(KEY), H_POW2_XORED
	vmovdqu		OFFSETOF_H_POWERS_XORED+32(KEY), H_POW1_XORED

	// Main loop: en/decrypt and hash 4 vectors (128 bytes) at a time.

.if \enc
	// Encrypt the first 4 vectors of plaintext blocks.
	_aesenc_loop	0,1,2,3
	_aesenclast_and_xor	0,1,2,3
	sub		$-128, SRC	// 128 is 4 bytes, -128 is 1 byte
	add		$-128, DATALEN
	jl		.Lghash_last_ciphertext_4x\@
.endif

.align 16
.Lcrypt_loop_4x\@:

	// Start the AES encryption of the counter blocks.
	_ctr_begin	0,1,2,3
	cmp		$24, AESKEYLEN
	jl		128f	// AES-128?
	je		192f	// AES-192?
	// AES-256
	vbroadcasti128	-13*16(RNDKEYLAST_PTR), TMP0
	_vaesenc	TMP0, 0,1,2,3
	vbroadcasti128	-12*16(RNDKEYLAST_PTR), TMP0
	_vaesenc	TMP0, 0,1,2,3
192:
	vbroadcasti128	-11*16(RNDKEYLAST_PTR), TMP0
	_vaesenc	TMP0, 0,1,2,3
	vbroadcasti128	-10*16(RNDKEYLAST_PTR), TMP0
	_vaesenc	TMP0, 0,1,2,3
128:

	// Finish the AES encryption of the counter blocks in AESDATA[0-3],
	// interleaved with the GHASH update of the ciphertext blocks.
.irp i, 9,8,7,6,5,4,3,2,1
	_ghash_step_4x  (9 - \i), GHASHDATA_PTR
	vbroadcasti128	-\i*16(RNDKEYLAST_PTR), TMP0
	_vaesenc	TMP0, 0,1,2,3
.endr
	_ghash_step_4x	9, GHASHDATA_PTR
.if \enc
	sub		$-128, DST	// 128 is 4 bytes, -128 is 1 byte
.endif
	_aesenclast_and_xor	0,1,2,3
	sub		$-128, SRC
.if !\enc
	sub		$-128, DST
.endif
	add		$-128, DATALEN
	jge		.Lcrypt_loop_4x\@

.if \enc
.Lghash_last_ciphertext_4x\@:
	// Update GHASH with the last set of ciphertext blocks.
	_ghash_4x	DST
	sub		$-128, DST
.endif

.Lcrypt_loop_4x_done\@:

	// Undo the extra subtraction by 128 and check whether data remains.
	sub		$-128, DATALEN	// 128 is 4 bytes, -128 is 1 byte
	jz		.Ldone\@

	// The data length isn't a multiple of 128 bytes.  Process the remaining
	// data of length 1 <= DATALEN < 128.
	//
	// Since there are enough key powers available for all remaining data,
	// there is no need to do a GHASH reduction after each iteration.
	// Instead, multiply each remaining block by its own key power, and only
	// do a GHASH reduction at the very end.

	// Make POWERS_PTR point to the key powers [H^N, H^(N-1), ...] where N
	// is the number of blocks that remain.
	.set		POWERS_PTR, LE_CTR_PTR	// LE_CTR_PTR is free to be reused.
	.set		POWERS_PTR32, LE_CTR_PTR32
	mov		DATALEN, %eax
	neg		%rax
	and		$~15, %rax  // -round_up(DATALEN, 16)
	lea		OFFSETOFEND_H_POWERS(KEY,%rax), POWERS_PTR

	// Start collecting the unreduced GHASH intermediate value LO, MI, HI.
	.set		HI, H_POW2_XORED	// H_POW2_XORED is free to be reused.
	.set		HI_XMM, H_POW2_XORED_XMM
	vpxor		LO_XMM, LO_XMM, LO_XMM
	vpxor		MI_XMM, MI_XMM, MI_XMM
	vpxor		HI_XMM, HI_XMM, HI_XMM

	// 1 <= DATALEN < 128.  Generate 2 or 4 more vectors of keystream blocks
	// excluding the last AES round, depending on the remaining DATALEN.
	cmp		$64, DATALEN
	jg		.Ltail_gen_4_keystream_vecs\@
	_aesenc_loop	0,1
	cmp		$32, DATALEN
	jge		.Ltail_xor_and_ghash_full_vec_loop\@
	jmp		.Ltail_xor_and_ghash_partial_vec\@
.Ltail_gen_4_keystream_vecs\@:
	_aesenc_loop	0,1,2,3

	// XOR the remaining data and accumulate the unreduced GHASH products
	// for DATALEN >= 32, starting with one full 32-byte vector at a time.
.Ltail_xor_and_ghash_full_vec_loop\@:
.if \enc
	_aesenclast_and_xor	0
	vpshufb		BSWAP_MASK, AESDATA0, AESDATA0
.else
	vmovdqu		(SRC), TMP1
	vpxor		TMP1, RNDKEYLAST, TMP0
	vaesenclast	TMP0, AESDATA0, AESDATA0
	vmovdqu		AESDATA0, (DST)
	vpshufb		BSWAP_MASK, TMP1, AESDATA0
.endif
	// The ciphertext blocks (i.e. GHASH input data) are now in AESDATA0.
	vpxor		GHASH_ACC, AESDATA0, AESDATA0
	vmovdqu		(POWERS_PTR), TMP2
	_ghash_mul_noreduce	TMP2, AESDATA0, LO, MI, HI, TMP0
	vmovdqa		AESDATA1, AESDATA0
	vmovdqa		AESDATA2, AESDATA1
	vmovdqa		AESDATA3, AESDATA2
	vpxor		GHASH_ACC_XMM, GHASH_ACC_XMM, GHASH_ACC_XMM
	add		$32, SRC
	add		$32, DST
	add		$32, POWERS_PTR
	sub		$32, DATALEN
	cmp		$32, DATALEN
	jge		.Ltail_xor_and_ghash_full_vec_loop\@
	test		DATALEN, DATALEN
	jz		.Ltail_ghash_reduce\@

.Ltail_xor_and_ghash_partial_vec\@:
	// XOR the remaining data and accumulate the unreduced GHASH products,
	// for 1 <= DATALEN < 32.
	vaesenclast	RNDKEYLAST, AESDATA0, AESDATA0
	cmp		$16, DATALEN
	jle		.Ltail_xor_and_ghash_1to16bytes\@

	// Handle 17 <= DATALEN < 32.

	// Load a vpshufb mask that will right-shift by '32 - DATALEN' bytes
	// (shifting in zeroes), then reflect all 16 bytes.
	lea		.Lrshift_and_bswap_table(%rip), %rax
	vmovdqu		-16(%rax, DATALEN64), TMP2_XMM

	// Move the second keystream block to its own register and left-align it
	vextracti128	$1, AESDATA0, AESDATA1_XMM
	vpxor		.Lfifteens(%rip), TMP2_XMM, TMP0_XMM
	vpshufb		TMP0_XMM, AESDATA1_XMM, AESDATA1_XMM

	// Using overlapping loads and stores, XOR the source data with the
	// keystream and write the destination data.  Then prepare the GHASH
	// input data: the full ciphertext block and the zero-padded partial
	// ciphertext block, both byte-reflected, in AESDATA0.
.if \enc
	vpxor		-16(SRC, DATALEN64), AESDATA1_XMM, AESDATA1_XMM
	vpxor		(SRC), AESDATA0_XMM, AESDATA0_XMM
	vmovdqu		AESDATA1_XMM, -16(DST, DATALEN64)
	vmovdqu		AESDATA0_XMM, (DST)
	vpshufb		TMP2_XMM, AESDATA1_XMM, AESDATA1_XMM
	vpshufb		BSWAP_MASK_XMM, AESDATA0_XMM, AESDATA0_XMM
.else
	vmovdqu		-16(SRC, DATALEN64), TMP1_XMM
	vmovdqu		(SRC), TMP0_XMM
	vpxor		TMP1_XMM, AESDATA1_XMM, AESDATA1_XMM
	vpxor		TMP0_XMM, AESDATA0_XMM, AESDATA0_XMM
	vmovdqu		AESDATA1_XMM, -16(DST, DATALEN64)
	vmovdqu		AESDATA0_XMM, (DST)
	vpshufb		TMP2_XMM, TMP1_XMM, AESDATA1_XMM
	vpshufb		BSWAP_MASK_XMM, TMP0_XMM, AESDATA0_XMM
.endif
	vpxor		GHASH_ACC_XMM, AESDATA0_XMM, AESDATA0_XMM
	vinserti128	$1, AESDATA1_XMM, AESDATA0, AESDATA0
	vmovdqu		(POWERS_PTR), TMP2
	jmp		.Ltail_ghash_last_vec\@

.Ltail_xor_and_ghash_1to16bytes\@:
	// Handle 1 <= DATALEN <= 16.  Carefully load and store the
	// possibly-partial block, which we mustn't access out of bounds.
	vmovdqu		(POWERS_PTR), TMP2_XMM
	mov		SRC, KEY	// Free up %rcx, assuming SRC == %rcx
	mov		DATALEN, %ecx
	_load_partial_block	KEY, TMP0_XMM, POWERS_PTR, POWERS_PTR32
	vpxor		TMP0_XMM, AESDATA0_XMM, AESDATA0_XMM
	mov		DATALEN, %ecx
	_store_partial_block	AESDATA0_XMM, DST, POWERS_PTR, POWERS_PTR32
.if \enc
	lea		.Lselect_high_bytes_table(%rip), %rax
	vpshufb		BSWAP_MASK_XMM, AESDATA0_XMM, AESDATA0_XMM
	vpand		(%rax, DATALEN64), AESDATA0_XMM, AESDATA0_XMM
.else
	vpshufb		BSWAP_MASK_XMM, TMP0_XMM, AESDATA0_XMM
.endif
	vpxor		GHASH_ACC_XMM, AESDATA0_XMM, AESDATA0_XMM

.Ltail_ghash_last_vec\@:
	// Accumulate the unreduced GHASH products for the last 1-2 blocks.  The
	// GHASH input data is in AESDATA0.  If only one block remains, then the
	// second block in AESDATA0 is zero and does not affect the result.
	_ghash_mul_noreduce	TMP2, AESDATA0, LO, MI, HI, TMP0

.Ltail_ghash_reduce\@:
	// Finally, do the GHASH reduction.
	vbroadcasti128	.Lgfpoly(%rip), TMP0
	_ghash_reduce	LO, MI, HI, TMP0, TMP1
	vextracti128	$1, HI, GHASH_ACC_XMM
	vpxor		HI_XMM, GHASH_ACC_XMM, GHASH_ACC_XMM

.Ldone\@:
	// Store the updated GHASH accumulator back to memory.
	vmovdqu		GHASH_ACC_XMM, (GHASH_ACC_PTR)

	vzeroupper
	RET
.endm
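
// A note on the scheduling in _aes_gcm_update: when encrypting, the ciphertext
// that GHASH needs is produced by this same function, so the first 128 bytes
// are encrypted before the main loop and each iteration then hashes the
// previous iteration's output (GHASHDATA_PTR == DST) while encrypting the next
// counter blocks, with a final _ghash_4x catching up after the loop.  When
// decrypting, the ciphertext is the input (GHASHDATA_PTR == SRC), so each
// iteration hashes the same 128 bytes it is decrypting, with the GHASH steps
// interleaved between the AES rounds.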

// void aes_gcm_enc_final_vaes_avx2(const struct aes_gcm_key_vaes_avx2 *key,
//				    const u32 le_ctr[4], u8 ghash_acc[16],
//				    u64 total_aadlen, u64 total_datalen);
// bool aes_gcm_dec_final_vaes_avx2(const struct aes_gcm_key_vaes_avx2 *key,
//				    const u32 le_ctr[4], const u8 ghash_acc[16],
//				    u64 total_aadlen, u64 total_datalen,
//				    const u8 tag[16], int taglen);
//
// This macro generates one of the above two functions (with \enc selecting
// which one).  Both functions finish computing the GCM authentication tag by
// updating GHASH with the lengths block and encrypting the GHASH accumulator.
// |total_aadlen| and |total_datalen| must be the total length of the additional
// authenticated data and the en/decrypted data in bytes, respectively.
//
// The encryption function then stores the full-length (16-byte) computed
// authentication tag to |ghash_acc|.  The decryption function instead loads the
// expected authentication tag (the one that was transmitted) from the 16-byte
// buffer |tag|, compares the first 4 <= |taglen| <= 16 bytes of it to the
// computed tag in constant time, and returns true if and only if they match.
.macro	_aes_gcm_final	enc

	// Function arguments
	.set	KEY,		%rdi
	.set	LE_CTR_PTR,	%rsi
	.set	GHASH_ACC_PTR,	%rdx
	.set	TOTAL_AADLEN,	%rcx
	.set	TOTAL_DATALEN,	%r8
	.set	TAG,		%r9
	.set	TAGLEN,		%r10d	// Originally at 8(%rsp)
	.set	TAGLEN64,	%r10

	// Additional local variables.
	// %rax and %xmm0-%xmm3 are used as temporary registers.
	.set	AESKEYLEN,	%r11d
	.set	AESKEYLEN64,	%r11
	.set	GFPOLY,		%xmm4
	.set	BSWAP_MASK,	%xmm5
	.set	LE_CTR,		%xmm6
	.set	GHASH_ACC,	%xmm7
	.set	H_POW1,		%xmm8

	// Load some constants.
	vmovdqa		.Lgfpoly(%rip), GFPOLY
	vmovdqa		.Lbswap_mask(%rip), BSWAP_MASK

	// Load the AES key length in bytes.
	movl		OFFSETOF_AESKEYLEN(KEY), AESKEYLEN

	// Set up a counter block with 1 in the low 32-bit word.  This is the
	// counter that produces the ciphertext needed to encrypt the auth tag.
	// GFPOLY has 1 in the low word, so grab the 1 from there using a blend.
	vpblendd	$0xe, (LE_CTR_PTR), GFPOLY, LE_CTR

	// Build the lengths block and XOR it with the GHASH accumulator.
	// Although the lengths block is defined as the AAD length followed by
	// the en/decrypted data length, both in big-endian byte order, a byte
	// reflection of the full block is needed because of the way we compute
	// GHASH (see _ghash_mul_step).  By using little-endian values in the
	// opposite order, we avoid having to reflect any bytes here.
	vmovq		TOTAL_DATALEN, %xmm0
	vpinsrq		$1, TOTAL_AADLEN, %xmm0, %xmm0
	vpsllq		$3, %xmm0, %xmm0	// Bytes to bits
	vpxor		(GHASH_ACC_PTR), %xmm0, GHASH_ACC

	// Load the first hash key power (H^1), which is stored last.
	vmovdqu		OFFSETOFEND_H_POWERS-16(KEY), H_POW1

	// Load TAGLEN if decrypting.
.if !\enc
	movl		8(%rsp), TAGLEN
.endif

	// Make %rax point to the last AES round key for the chosen AES variant.
	lea		6*16(KEY,AESKEYLEN64,4), %rax

	// Start the AES encryption of the counter block by swapping the counter
	// block to big-endian and XOR-ing it with the zero-th AES round key.
	vpshufb		BSWAP_MASK, LE_CTR, %xmm0
	vpxor		(KEY), %xmm0, %xmm0

	// Complete the AES encryption and multiply GHASH_ACC by H^1.
	// Interleave the AES and GHASH instructions to improve performance.
	cmp		$24, AESKEYLEN
	jl		128f	// AES-128?
	je		192f	// AES-192?
	// AES-256
	vaesenc		-13*16(%rax), %xmm0, %xmm0
	vaesenc		-12*16(%rax), %xmm0, %xmm0
192:
	vaesenc		-11*16(%rax), %xmm0, %xmm0
	vaesenc		-10*16(%rax), %xmm0, %xmm0
128:
.irp i, 0,1,2,3,4,5,6,7,8
	_ghash_mul_step	\i, H_POW1, GHASH_ACC, GHASH_ACC, GFPOLY, \
			%xmm1, %xmm2, %xmm3
	vaesenc		(\i-9)*16(%rax), %xmm0, %xmm0
.endr
	_ghash_mul_step	9, H_POW1, GHASH_ACC, GHASH_ACC, GFPOLY, \
			%xmm1, %xmm2, %xmm3

	// Undo the byte reflection of the GHASH accumulator.
	vpshufb		BSWAP_MASK, GHASH_ACC, GHASH_ACC

	// Do the last AES round and XOR the resulting keystream block with the
	// GHASH accumulator to produce the full computed authentication tag.
	//
	// Reduce latency by taking advantage of the property vaesenclast(key,
	// a) ^ b == vaesenclast(key ^ b, a).  I.e., XOR GHASH_ACC into the last
	// round key, instead of XOR'ing the final AES output with GHASH_ACC.
	//
	// enc_final then returns the computed auth tag, while dec_final
	// compares it with the transmitted one and returns a bool.  To compare
	// the tags, dec_final XORs them together and uses vptest to check
	// whether the result is all-zeroes.  This should be constant-time.
	// dec_final applies the vaesenclast optimization to this additional
	// value XOR'd too.
.if \enc
	vpxor		(%rax), GHASH_ACC, %xmm1
	vaesenclast	%xmm1, %xmm0, GHASH_ACC
	vmovdqu		GHASH_ACC, (GHASH_ACC_PTR)
.else
	vpxor		(TAG), GHASH_ACC, GHASH_ACC
	vpxor		(%rax), GHASH_ACC, GHASH_ACC
	vaesenclast	GHASH_ACC, %xmm0, %xmm0
	lea		.Lselect_high_bytes_table(%rip), %rax
	vmovdqu		(%rax, TAGLEN64), %xmm1
	vpshufb		BSWAP_MASK, %xmm1, %xmm1 // select low bytes, not high
	xor		%eax, %eax
	vptest		%xmm1, %xmm0
	sete		%al
.endif
	// No need for vzeroupper here, since only xmm registers were used.
	RET
.endm
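
// For reference, the tag check done by dec_final is equivalent to the
// following constant-time C comparison of the first |taglen| bytes
// (illustrative only, not part of the build):
//
//	static bool gcm_tags_equal(const u8 computed[16], const u8 expected[16],
//				   int taglen)
//	{
//		u8 diff = 0;
//		int i;
//
//		for (i = 0; i < taglen; i++)
//			diff |= computed[i] ^ expected[i];
//		return diff == 0;
//	}
//
// except that the assembly folds the XOR with |tag| into the last AES round
// and uses a byte-reflected .Lselect_high_bytes_table entry with vptest
// instead of a loop.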

SYM_FUNC_START(aes_gcm_enc_update_vaes_avx2)
	_aes_gcm_update	1
SYM_FUNC_END(aes_gcm_enc_update_vaes_avx2)
SYM_FUNC_START(aes_gcm_dec_update_vaes_avx2)
	_aes_gcm_update	0
SYM_FUNC_END(aes_gcm_dec_update_vaes_avx2)

SYM_FUNC_START(aes_gcm_enc_final_vaes_avx2)
	_aes_gcm_final	1
SYM_FUNC_END(aes_gcm_enc_final_vaes_avx2)
SYM_FUNC_START(aes_gcm_dec_final_vaes_avx2)
	_aes_gcm_final	0
SYM_FUNC_END(aes_gcm_dec_final_vaes_avx2)