xref: /linux/arch/x86/crypto/sm4-aesni-avx2-asm_64.S (revision 34f7c6e7d4396090692a09789db231e12cb4762b)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SM4 Cipher Algorithm, AES-NI/AVX2 optimized,
 * as specified in
 * https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html
 *
 * Copyright (C) 2018 Markku-Juhani O. Saarinen <mjos@iki.fi>
 * Copyright (C) 2020 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 * Copyright (c) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
 */

/* Based on SM4 AES-NI work by libgcrypt and Markku-Juhani O. Saarinen at:
 *  https://github.com/mjosaarinen/sm4ni
 */

#include <linux/linkage.h>
#include <asm/frame.h>

#define rRIP         (%rip)

/* vector registers */
#define RX0          %ymm0
#define RX1          %ymm1
#define MASK_4BIT    %ymm2
#define RTMP0        %ymm3
#define RTMP1        %ymm4
#define RTMP2        %ymm5
#define RTMP3        %ymm6
#define RTMP4        %ymm7

#define RA0          %ymm8
#define RA1          %ymm9
#define RA2          %ymm10
#define RA3          %ymm11

#define RB0          %ymm12
#define RB1          %ymm13
#define RB2          %ymm14
#define RB3          %ymm15

#define RNOT         %ymm0
#define RBSWAP       %ymm1

#define RX0x         %xmm0
#define RX1x         %xmm1
#define MASK_4BITx   %xmm2

#define RNOTx        %xmm0
#define RBSWAPx      %xmm1

#define RTMP0x       %xmm3
#define RTMP1x       %xmm4
#define RTMP2x       %xmm5
#define RTMP3x       %xmm6
#define RTMP4x       %xmm7


/* helper macros */

/* Transpose a 4x4 matrix of 32-bit words (each 128-bit lane is transposed
 * independently). */
#define transpose_4x4(x0, x1, x2, x3, t1, t2) \
	vpunpckhdq x1, x0, t2;                \
	vpunpckldq x1, x0, x0;                \
	                                      \
	vpunpckldq x3, x2, t1;                \
	vpunpckhdq x3, x2, x2;                \
	                                      \
	vpunpckhqdq t1, x0, x1;               \
	vpunpcklqdq t1, x0, x0;               \
	                                      \
	vpunpckhqdq x2, t2, x3;               \
	vpunpcklqdq x2, t2, x2;

/* pre-SubByte transform. */
#define transform_pre(x, lo_t, hi_t, mask4bit, tmp0) \
	vpand x, mask4bit, tmp0;                     \
	vpandn x, mask4bit, x;                       \
	vpsrld $4, x, x;                             \
	                                             \
	vpshufb tmp0, lo_t, tmp0;                    \
	vpshufb x, hi_t, x;                          \
	vpxor tmp0, x, x;

/* post-SubByte transform. Note: x has already been XOR'ed with mask4bit by
 * the 'vaesenclast' instruction, which uses MASK_4BIT as its round key. */
#define transform_post(x, lo_t, hi_t, mask4bit, tmp0) \
	vpandn mask4bit, x, tmp0;                     \
	vpsrld $4, x, x;                              \
	vpand x, mask4bit, x;                         \
	                                              \
	vpshufb tmp0, lo_t, tmp0;                     \
	vpshufb x, hi_t, x;                           \
	vpxor tmp0, x, x;


.section	.rodata.cst16, "aM", @progbits, 16
.align 16

/*
 * The following four affine transform look-up tables are from the work of
 * Markku-Juhani O. Saarinen, at https://github.com/mjosaarinen/sm4ni
 *
 * They allow computing the SM4 S-box via the AES SubBytes operation.
 */
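
/*
 * Rough scalar sketch of the technique (illustrative only; the helper names
 * below are made up and nothing in this file calls them): the SM4 S-box is
 * affine-equivalent to the AES S-box, so a byte is first mapped into the AES
 * domain with the pre-SubByte tables, pushed through AES SubBytes (the
 * ShiftRows done by AESENCLAST is undone later with the .Linv_shift_row
 * shuffles, and its AddRoundKey with MASK_4BIT is compensated in
 * transform_post), then mapped back with the post-SubByte tables.  Each
 * affine step is done as two 16-entry nibble lookups (vpshufb) XORed
 * together, see transform_pre()/transform_post() above:
 *
 *	u8 sm4_sbox(u8 b)
 *	{
 *		b = affine_pre(b);	// .Lpre_tf_lo_s / .Lpre_tf_hi_s
 *		b = aes_sbox(b);	// AES SubBytes via vaesenclast
 *		return affine_post(b);	// .Lpost_tf_lo_s / .Lpost_tf_hi_s
 *	}
 */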

/* pre-SubByte affine transform, from SM4 field to AES field. */
.Lpre_tf_lo_s:
	.quad 0x9197E2E474720701, 0xC7C1B4B222245157
.Lpre_tf_hi_s:
	.quad 0xE240AB09EB49A200, 0xF052B91BF95BB012

/* post-SubByte affine transform, from AES field to SM4 field. */
.Lpost_tf_lo_s:
	.quad 0x5B67F2CEA19D0834, 0xEDD14478172BBE82
.Lpost_tf_hi_s:
	.quad 0xAE7201DD73AFDC00, 0x11CDBE62CC1063BF

/* For isolating SubBytes from AESENCLAST, inverse shift row */
.Linv_shift_row:
	.byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b
	.byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03

/* Inverse shift row + Rotate left by 8 bits on 32-bit words with vpshufb */
.Linv_shift_row_rol_8:
	.byte 0x07, 0x00, 0x0d, 0x0a, 0x0b, 0x04, 0x01, 0x0e
	.byte 0x0f, 0x08, 0x05, 0x02, 0x03, 0x0c, 0x09, 0x06

/* Inverse shift row + Rotate left by 16 bits on 32-bit words with vpshufb */
.Linv_shift_row_rol_16:
	.byte 0x0a, 0x07, 0x00, 0x0d, 0x0e, 0x0b, 0x04, 0x01
	.byte 0x02, 0x0f, 0x08, 0x05, 0x06, 0x03, 0x0c, 0x09

/* Inverse shift row + Rotate left by 24 bits on 32-bit words with vpshufb */
.Linv_shift_row_rol_24:
	.byte 0x0d, 0x0a, 0x07, 0x00, 0x01, 0x0e, 0x0b, 0x04
	.byte 0x05, 0x02, 0x0f, 0x08, 0x09, 0x06, 0x03, 0x0c

/* For CTR-mode IV byteswap */
.Lbswap128_mask:
	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0

/* For input word byte-swap */
.Lbswap32_mask:
	.byte 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12

.align 4
/* 4-bit mask */
.L0f0f0f0f:
	.long 0x0f0f0f0f

/* 12 bytes, only for padding */
.Lpadding_deadbeef:
	.long 0xdeadbeef, 0xdeadbeef, 0xdeadbeef

.text
.align 16

.align 8
SYM_FUNC_START_LOCAL(__sm4_crypt_blk16)
	/* input:
	 *	%rdi: round key array, CTX
	 *	RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: sixteen parallel
	 *						plaintext blocks
	 * output:
	 *	RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3: sixteen parallel
	 *						ciphertext blocks
	 */
	FRAME_BEGIN

	vbroadcasti128 .Lbswap32_mask rRIP, RTMP2;
	vpshufb RTMP2, RA0, RA0;
	vpshufb RTMP2, RA1, RA1;
	vpshufb RTMP2, RA2, RA2;
	vpshufb RTMP2, RA3, RA3;
	vpshufb RTMP2, RB0, RB0;
	vpshufb RTMP2, RB1, RB1;
	vpshufb RTMP2, RB2, RB2;
	vpshufb RTMP2, RB3, RB3;

	vpbroadcastd .L0f0f0f0f rRIP, MASK_4BIT;
	transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
	transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1);
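
	/*
	 * After the byte-swap and the two per-lane transposes, each register
	 * holds one 32-bit word position of eight blocks: RA0..RA3 carry
	 * words s0..s3 of the first eight blocks and RB0..RB3 carry r0..r3
	 * of the remaining eight, so each ROUND below advances all sixteen
	 * blocks at once.
	 */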

#define ROUND(round, s0, s1, s2, s3, r0, r1, r2, r3)                \
	vpbroadcastd (4*(round))(%rdi), RX0;                        \
	vbroadcasti128 .Lpre_tf_lo_s rRIP, RTMP4;                   \
	vbroadcasti128 .Lpre_tf_hi_s rRIP, RTMP1;                   \
	vmovdqa RX0, RX1;                                           \
	vpxor s1, RX0, RX0;                                         \
	vpxor s2, RX0, RX0;                                         \
	vpxor s3, RX0, RX0; /* s1 ^ s2 ^ s3 ^ rk */                 \
	vbroadcasti128 .Lpost_tf_lo_s rRIP, RTMP2;                  \
	vbroadcasti128 .Lpost_tf_hi_s rRIP, RTMP3;                  \
	vpxor r1, RX1, RX1;                                         \
	vpxor r2, RX1, RX1;                                         \
	vpxor r3, RX1, RX1; /* r1 ^ r2 ^ r3 ^ rk */                 \
	                                                            \
	/* sbox, non-linear part */                                 \
	transform_pre(RX0, RTMP4, RTMP1, MASK_4BIT, RTMP0);         \
	transform_pre(RX1, RTMP4, RTMP1, MASK_4BIT, RTMP0);         \
	vextracti128 $1, RX0, RTMP4x;                               \
	vextracti128 $1, RX1, RTMP0x;                               \
	vaesenclast MASK_4BITx, RX0x, RX0x;                         \
	vaesenclast MASK_4BITx, RTMP4x, RTMP4x;                     \
	vaesenclast MASK_4BITx, RX1x, RX1x;                         \
	vaesenclast MASK_4BITx, RTMP0x, RTMP0x;                     \
	vinserti128 $1, RTMP4x, RX0, RX0;                           \
	vbroadcasti128 .Linv_shift_row rRIP, RTMP4;                 \
	vinserti128 $1, RTMP0x, RX1, RX1;                           \
	transform_post(RX0, RTMP2, RTMP3, MASK_4BIT, RTMP0);        \
	transform_post(RX1, RTMP2, RTMP3, MASK_4BIT, RTMP0);        \
	                                                            \
	/* linear part */                                           \
	vpshufb RTMP4, RX0, RTMP0;                                  \
	vpxor RTMP0, s0, s0; /* s0 ^ x */                           \
	vpshufb RTMP4, RX1, RTMP2;                                  \
	vbroadcasti128 .Linv_shift_row_rol_8 rRIP, RTMP4;           \
	vpxor RTMP2, r0, r0; /* r0 ^ x */                           \
	vpshufb RTMP4, RX0, RTMP1;                                  \
	vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) */               \
	vpshufb RTMP4, RX1, RTMP3;                                  \
	vbroadcasti128 .Linv_shift_row_rol_16 rRIP, RTMP4;          \
	vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) */               \
	vpshufb RTMP4, RX0, RTMP1;                                  \
	vpxor RTMP1, RTMP0, RTMP0; /* x ^ rol(x,8) ^ rol(x,16) */   \
	vpshufb RTMP4, RX1, RTMP3;                                  \
	vbroadcasti128 .Linv_shift_row_rol_24 rRIP, RTMP4;          \
	vpxor RTMP3, RTMP2, RTMP2; /* x ^ rol(x,8) ^ rol(x,16) */   \
	vpshufb RTMP4, RX0, RTMP1;                                  \
	vpxor RTMP1, s0, s0; /* s0 ^ x ^ rol(x,24) */               \
	vpslld $2, RTMP0, RTMP1;                                    \
	vpsrld $30, RTMP0, RTMP0;                                   \
	vpxor RTMP0, s0, s0;                                        \
	/* s0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
	vpxor RTMP1, s0, s0;                                        \
	vpshufb RTMP4, RX1, RTMP3;                                  \
	vpxor RTMP3, r0, r0; /* r0 ^ x ^ rol(x,24) */               \
	vpslld $2, RTMP2, RTMP3;                                    \
	vpsrld $30, RTMP2, RTMP2;                                   \
	vpxor RTMP2, r0, r0;                                        \
	/* r0 ^ x ^ rol(x,2) ^ rol(x,10) ^ rol(x,18) ^ rol(x,24) */ \
	vpxor RTMP3, r0, r0;
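
	/*
	 * Scalar reference of one SM4 round as vectorized above (illustrative
	 * only; rol32() and sm4_sbox() are made-up helpers):
	 *
	 *	x  = s1 ^ s2 ^ s3 ^ rk;			// key mixing
	 *	x  = sm4_sbox(x);			// per byte, via AES-NI
	 *	s0 ^= x ^ rol32(x, 2) ^ rol32(x, 10)	// linear transform L
	 *	      ^ rol32(x, 18) ^ rol32(x, 24);
	 *
	 * The same round is applied in parallel to the second block group
	 * (r0..r3) via RX1.
	 */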

	leaq (32*4)(%rdi), %rax;
.align 16
.Lroundloop_blk8:
	ROUND(0, RA0, RA1, RA2, RA3, RB0, RB1, RB2, RB3);
	ROUND(1, RA1, RA2, RA3, RA0, RB1, RB2, RB3, RB0);
	ROUND(2, RA2, RA3, RA0, RA1, RB2, RB3, RB0, RB1);
	ROUND(3, RA3, RA0, RA1, RA2, RB3, RB0, RB1, RB2);
	leaq (4*4)(%rdi), %rdi;
	cmpq %rax, %rdi;
	jne .Lroundloop_blk8;

#undef ROUND

	vbroadcasti128 .Lbswap128_mask rRIP, RTMP2;

	transpose_4x4(RA0, RA1, RA2, RA3, RTMP0, RTMP1);
	transpose_4x4(RB0, RB1, RB2, RB3, RTMP0, RTMP1);
	vpshufb RTMP2, RA0, RA0;
	vpshufb RTMP2, RA1, RA1;
	vpshufb RTMP2, RA2, RA2;
	vpshufb RTMP2, RA3, RA3;
	vpshufb RTMP2, RB0, RB0;
	vpshufb RTMP2, RB1, RB1;
	vpshufb RTMP2, RB2, RB2;
	vpshufb RTMP2, RB3, RB3;

	FRAME_END
	RET;
SYM_FUNC_END(__sm4_crypt_blk16)

#define inc_le128(x, minus_one, tmp) \
	vpcmpeqq minus_one, x, tmp;  \
	vpsubq minus_one, x, x;      \
	vpslldq $8, tmp, tmp;        \
	vpsubq tmp, x, x;
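
/*
 * Scalar reference (illustrative only): inc_le128 performs a 128-bit
 * little-endian increment in each 128-bit lane, propagating the carry from
 * the low to the high 64-bit half:
 *
 *	lo += 1;
 *	if (lo == 0)	// low half was all-ones and wrapped
 *		hi += 1;
 *
 * The compare against minus_one builds the all-ones carry mask, which
 * vpslldq moves into the high qword before it is subtracted (i.e. added).
 */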

/*
 * void sm4_aesni_avx2_ctr_enc_blk16(const u32 *rk, u8 *dst,
 *                                   const u8 *src, u8 *iv)
 */
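
/*
 * Rough C equivalent (illustrative only; the helpers and the 16-byte block
 * type are made up): sixteen consecutive big-endian counter values starting
 * at *iv are encrypted and XORed into the source, and *iv is advanced by 16
 * for the next call:
 *
 *	for (i = 0; i < 16; i++) {
 *		be128 ctr = *iv;
 *		sm4_encrypt_block(rk, &ctr);	// keystream block
 *		dst[i] = src[i] ^ ctr;		// 16-byte XOR
 *		be128_inc(iv);
 *	}
 */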
.align 8
SYM_FUNC_START(sm4_aesni_avx2_ctr_enc_blk16)
	/* input:
	 *	%rdi: round key array, CTX
	 *	%rsi: dst (16 blocks)
	 *	%rdx: src (16 blocks)
	 *	%rcx: iv (big endian, 128bit)
	 */
	FRAME_BEGIN

	movq 8(%rcx), %rax;
	bswapq %rax;

	vzeroupper;

	vbroadcasti128 .Lbswap128_mask rRIP, RTMP3;
	vpcmpeqd RNOT, RNOT, RNOT;
	vpsrldq $8, RNOT, RNOT;   /* ab: -1:0 ; cd: -1:0 */
	vpaddq RNOT, RNOT, RTMP2; /* ab: -2:0 ; cd: -2:0 */

	/* load IV and byteswap */
	vmovdqu (%rcx), RTMP4x;
	vpshufb RTMP3x, RTMP4x, RTMP4x;
	vmovdqa RTMP4x, RTMP0x;
	inc_le128(RTMP4x, RNOTx, RTMP1x);
	vinserti128 $1, RTMP4x, RTMP0, RTMP0;
	vpshufb RTMP3, RTMP0, RA0; /* +1 ; +0 */

	/* check need for handling 64-bit overflow and carry */
	cmpq $(0xffffffffffffffff - 16), %rax;
	ja .Lhandle_ctr_carry;

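	/*
	 * Fast path: adding up to 16 cannot wrap the low 64 bits of the
	 * counter (checked above), so the remaining IVs are built with plain
	 * 64-bit arithmetic: each vpsubq of the -2:0 constant adds two to the
	 * low qword of both counter lanes.  The .Lhandle_ctr_carry path below
	 * does the same with full 128-bit carry handling instead.
	 */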
	/* construct IVs */
	vpsubq RTMP2, RTMP0, RTMP0; /* +3 ; +2 */
	vpshufb RTMP3, RTMP0, RA1;
	vpsubq RTMP2, RTMP0, RTMP0; /* +5 ; +4 */
	vpshufb RTMP3, RTMP0, RA2;
	vpsubq RTMP2, RTMP0, RTMP0; /* +7 ; +6 */
	vpshufb RTMP3, RTMP0, RA3;
	vpsubq RTMP2, RTMP0, RTMP0; /* +9 ; +8 */
	vpshufb RTMP3, RTMP0, RB0;
	vpsubq RTMP2, RTMP0, RTMP0; /* +11 ; +10 */
	vpshufb RTMP3, RTMP0, RB1;
	vpsubq RTMP2, RTMP0, RTMP0; /* +13 ; +12 */
	vpshufb RTMP3, RTMP0, RB2;
	vpsubq RTMP2, RTMP0, RTMP0; /* +15 ; +14 */
	vpshufb RTMP3, RTMP0, RB3;
	vpsubq RTMP2, RTMP0, RTMP0; /* +16 */
	vpshufb RTMP3x, RTMP0x, RTMP0x;

	jmp .Lctr_carry_done;

.Lhandle_ctr_carry:
	/* construct IVs */
	inc_le128(RTMP0, RNOT, RTMP1);
	inc_le128(RTMP0, RNOT, RTMP1);
	vpshufb RTMP3, RTMP0, RA1; /* +3 ; +2 */
	inc_le128(RTMP0, RNOT, RTMP1);
	inc_le128(RTMP0, RNOT, RTMP1);
	vpshufb RTMP3, RTMP0, RA2; /* +5 ; +4 */
	inc_le128(RTMP0, RNOT, RTMP1);
	inc_le128(RTMP0, RNOT, RTMP1);
	vpshufb RTMP3, RTMP0, RA3; /* +7 ; +6 */
	inc_le128(RTMP0, RNOT, RTMP1);
	inc_le128(RTMP0, RNOT, RTMP1);
	vpshufb RTMP3, RTMP0, RB0; /* +9 ; +8 */
	inc_le128(RTMP0, RNOT, RTMP1);
	inc_le128(RTMP0, RNOT, RTMP1);
	vpshufb RTMP3, RTMP0, RB1; /* +11 ; +10 */
	inc_le128(RTMP0, RNOT, RTMP1);
	inc_le128(RTMP0, RNOT, RTMP1);
	vpshufb RTMP3, RTMP0, RB2; /* +13 ; +12 */
	inc_le128(RTMP0, RNOT, RTMP1);
	inc_le128(RTMP0, RNOT, RTMP1);
	vpshufb RTMP3, RTMP0, RB3; /* +15 ; +14 */
	inc_le128(RTMP0, RNOT, RTMP1);
	vextracti128 $1, RTMP0, RTMP0x;
	vpshufb RTMP3x, RTMP0x, RTMP0x; /* +16 */

.align 4
.Lctr_carry_done:
	/* store new IV */
	vmovdqu RTMP0x, (%rcx);

	call __sm4_crypt_blk16;

	vpxor (0 * 32)(%rdx), RA0, RA0;
	vpxor (1 * 32)(%rdx), RA1, RA1;
	vpxor (2 * 32)(%rdx), RA2, RA2;
	vpxor (3 * 32)(%rdx), RA3, RA3;
	vpxor (4 * 32)(%rdx), RB0, RB0;
	vpxor (5 * 32)(%rdx), RB1, RB1;
	vpxor (6 * 32)(%rdx), RB2, RB2;
	vpxor (7 * 32)(%rdx), RB3, RB3;

	vmovdqu RA0, (0 * 32)(%rsi);
	vmovdqu RA1, (1 * 32)(%rsi);
	vmovdqu RA2, (2 * 32)(%rsi);
	vmovdqu RA3, (3 * 32)(%rsi);
	vmovdqu RB0, (4 * 32)(%rsi);
	vmovdqu RB1, (5 * 32)(%rsi);
	vmovdqu RB2, (6 * 32)(%rsi);
	vmovdqu RB3, (7 * 32)(%rsi);

	vzeroall;
	FRAME_END
	RET;
SYM_FUNC_END(sm4_aesni_avx2_ctr_enc_blk16)

/*
 * void sm4_aesni_avx2_cbc_dec_blk16(const u32 *rk, u8 *dst,
 *                                   const u8 *src, u8 *iv)
 */
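
/*
 * Rough C equivalent (illustrative only; block helpers are made up and
 * blocks are 16-byte units).  rk is expected to be the decryption round key
 * schedule, so the shared 32-round core performs block decryption; each
 * plaintext block is the decrypted block XORed with the previous ciphertext
 * block (the IV for the first one), and *iv is set to the last ciphertext
 * block for the next call:
 *
 *	for (i = 0; i < 16; i++)
 *		dst[i] = sm4_decrypt_block(rk, src[i]) ^ (i ? src[i - 1] : *iv);
 *	*iv = src[15];
 */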
.align 8
SYM_FUNC_START(sm4_aesni_avx2_cbc_dec_blk16)
	/* input:
	 *	%rdi: round key array, CTX
	 *	%rsi: dst (16 blocks)
	 *	%rdx: src (16 blocks)
	 *	%rcx: iv
	 */
	FRAME_BEGIN

	vzeroupper;

	vmovdqu (0 * 32)(%rdx), RA0;
	vmovdqu (1 * 32)(%rdx), RA1;
	vmovdqu (2 * 32)(%rdx), RA2;
	vmovdqu (3 * 32)(%rdx), RA3;
	vmovdqu (4 * 32)(%rdx), RB0;
	vmovdqu (5 * 32)(%rdx), RB1;
	vmovdqu (6 * 32)(%rdx), RB2;
	vmovdqu (7 * 32)(%rdx), RB3;

	call __sm4_crypt_blk16;

	vmovdqu (%rcx), RNOTx;
	vinserti128 $1, (%rdx), RNOT, RNOT;
	vpxor RNOT, RA0, RA0;
	vpxor (0 * 32 + 16)(%rdx), RA1, RA1;
	vpxor (1 * 32 + 16)(%rdx), RA2, RA2;
	vpxor (2 * 32 + 16)(%rdx), RA3, RA3;
	vpxor (3 * 32 + 16)(%rdx), RB0, RB0;
	vpxor (4 * 32 + 16)(%rdx), RB1, RB1;
	vpxor (5 * 32 + 16)(%rdx), RB2, RB2;
	vpxor (6 * 32 + 16)(%rdx), RB3, RB3;
	vmovdqu (7 * 32 + 16)(%rdx), RNOTx;
	vmovdqu RNOTx, (%rcx); /* store new IV */

	vmovdqu RA0, (0 * 32)(%rsi);
	vmovdqu RA1, (1 * 32)(%rsi);
	vmovdqu RA2, (2 * 32)(%rsi);
	vmovdqu RA3, (3 * 32)(%rsi);
	vmovdqu RB0, (4 * 32)(%rsi);
	vmovdqu RB1, (5 * 32)(%rsi);
	vmovdqu RB2, (6 * 32)(%rsi);
	vmovdqu RB3, (7 * 32)(%rsi);

	vzeroall;
	FRAME_END
	RET;
SYM_FUNC_END(sm4_aesni_avx2_cbc_dec_blk16)

/*
 * void sm4_aesni_avx2_cfb_dec_blk16(const u32 *rk, u8 *dst,
 *                                   const u8 *src, u8 *iv)
 */
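
/*
 * Rough C equivalent (illustrative only; block helpers are made up and
 * blocks are 16-byte units): CFB decryption encrypts the previous ciphertext
 * block (the IV for the first one) and XORs the result with the current
 * ciphertext block, which is why the encryption core is reused here; *iv
 * becomes the last ciphertext block:
 *
 *	for (i = 0; i < 16; i++)
 *		dst[i] = sm4_encrypt_block(rk, i ? src[i - 1] : *iv) ^ src[i];
 *	*iv = src[15];
 */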
.align 8
SYM_FUNC_START(sm4_aesni_avx2_cfb_dec_blk16)
	/* input:
	 *	%rdi: round key array, CTX
	 *	%rsi: dst (16 blocks)
	 *	%rdx: src (16 blocks)
	 *	%rcx: iv
	 */
	FRAME_BEGIN

	vzeroupper;

	/* Load input */
	vmovdqu (%rcx), RNOTx;
	vinserti128 $1, (%rdx), RNOT, RA0;
	vmovdqu (0 * 32 + 16)(%rdx), RA1;
	vmovdqu (1 * 32 + 16)(%rdx), RA2;
	vmovdqu (2 * 32 + 16)(%rdx), RA3;
	vmovdqu (3 * 32 + 16)(%rdx), RB0;
	vmovdqu (4 * 32 + 16)(%rdx), RB1;
	vmovdqu (5 * 32 + 16)(%rdx), RB2;
	vmovdqu (6 * 32 + 16)(%rdx), RB3;

	/* Update IV */
	vmovdqu (7 * 32 + 16)(%rdx), RNOTx;
	vmovdqu RNOTx, (%rcx);

	call __sm4_crypt_blk16;

	vpxor (0 * 32)(%rdx), RA0, RA0;
	vpxor (1 * 32)(%rdx), RA1, RA1;
	vpxor (2 * 32)(%rdx), RA2, RA2;
	vpxor (3 * 32)(%rdx), RA3, RA3;
	vpxor (4 * 32)(%rdx), RB0, RB0;
	vpxor (5 * 32)(%rdx), RB1, RB1;
	vpxor (6 * 32)(%rdx), RB2, RB2;
	vpxor (7 * 32)(%rdx), RB3, RB3;

	vmovdqu RA0, (0 * 32)(%rsi);
	vmovdqu RA1, (1 * 32)(%rsi);
	vmovdqu RA2, (2 * 32)(%rsi);
	vmovdqu RA3, (3 * 32)(%rsi);
	vmovdqu RB0, (4 * 32)(%rsi);
	vmovdqu RB1, (5 * 32)(%rsi);
	vmovdqu RB2, (6 * 32)(%rsi);
	vmovdqu RB3, (7 * 32)(%rsi);

	vzeroall;
	FRAME_END
	RET;
SYM_FUNC_END(sm4_aesni_avx2_cfb_dec_blk16)
