xref: /linux/arch/s390/crypto/aes_s390.c (revision fc4bd01d9ff592f620c499686245c093440db0e8)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Cryptographic API.
4  *
5  * s390 implementation of the AES Cipher Algorithm.
6  *
7  * s390 Version:
8  *   Copyright IBM Corp. 2005, 2017
9  *   Author(s): Jan Glauber (jang@de.ibm.com)
10  *		Sebastian Siewior <sebastian@breakpoint.cc> SW-Fallback
11  *		Patrick Steuer <patrick.steuer@de.ibm.com>
12  *		Harald Freudenberger <freude@de.ibm.com>
13  *
14  * Derived from "crypto/aes_generic.c"
15  */
16 
17 #define KMSG_COMPONENT "aes_s390"
18 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
19 
20 #include <crypto/aes.h>
21 #include <crypto/algapi.h>
22 #include <crypto/ghash.h>
23 #include <crypto/internal/aead.h>
24 #include <crypto/internal/cipher.h>
25 #include <crypto/internal/skcipher.h>
26 #include <crypto/scatterwalk.h>
27 #include <linux/err.h>
28 #include <linux/module.h>
29 #include <linux/cpufeature.h>
30 #include <linux/init.h>
31 #include <linux/mutex.h>
32 #include <linux/fips.h>
33 #include <linux/string.h>
34 #include <crypto/xts.h>
35 #include <asm/cpacf.h>
36 
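/*
 * Shared page used to pre-compute a series of counter blocks for KMCTR.
 * Serialized by ctrblk_lock; ctr_aes_crypt falls back to single-block
 * operation when the lock is contended.
 */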
37 static u8 *ctrblk;
38 static DEFINE_MUTEX(ctrblk_lock);
39 
40 static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
41 		    kma_functions;
42 
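/*
 * Per-tfm context: raw key, its length, the resolved CPACF function code
 * (0 if the machine lacks it) and the software fallback tfm used in that
 * case.
 */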
43 struct s390_aes_ctx {
44 	u8 key[AES_MAX_KEY_SIZE];
45 	int key_len;
46 	unsigned long fc;
47 	union {
48 		struct crypto_skcipher *skcipher;
49 		struct crypto_cipher *cip;
50 	} fallback;
51 };
52 
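/*
 * XTS context: keys[] holds the complete double-length key for the
 * full-XTS function codes, while the struct view holds the two split
 * halves (data key and PCC/tweak key) for the KM-XTS path.
 */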
53 struct s390_xts_ctx {
54 	union {
55 		u8 keys[64];
56 		struct {
57 			u8 key[32];
58 			u8 pcc_key[32];
59 		};
60 	};
61 	int key_len;
62 	unsigned long fc;
63 	struct crypto_skcipher *fallback;
64 };
65 
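/*
 * State for walking the GCM scatterlists: data from entries too small to
 * feed to KMA directly is collected into buf (one AES block); ptr/nbytes
 * describe the chunk currently offered to the caller.
 */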
66 struct gcm_sg_walk {
67 	struct scatter_walk walk;
68 	unsigned int walk_bytes;
69 	u8 *walk_ptr;
70 	unsigned int walk_bytes_remain;
71 	u8 buf[AES_BLOCK_SIZE];
72 	unsigned int buf_bytes;
73 	u8 *ptr;
74 	unsigned int nbytes;
75 };
76 
77 static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
78 		unsigned int key_len)
79 {
80 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
81 
82 	sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
83 	sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
84 			CRYPTO_TFM_REQ_MASK);
85 
86 	return crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
87 }
88 
89 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
90 		       unsigned int key_len)
91 {
92 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
93 	unsigned long fc;
94 
95 	/* Pick the correct function code based on the key length */
96 	fc = (key_len == 16) ? CPACF_KM_AES_128 :
97 	     (key_len == 24) ? CPACF_KM_AES_192 :
98 	     (key_len == 32) ? CPACF_KM_AES_256 : 0;
99 
100 	/* Check if the function code is available */
101 	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
102 	if (!sctx->fc)
103 		return setkey_fallback_cip(tfm, in_key, key_len);
104 
105 	sctx->key_len = key_len;
106 	memcpy(sctx->key, in_key, key_len);
107 	return 0;
108 }
109 
110 static void crypto_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
111 {
112 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
113 
114 	if (unlikely(!sctx->fc)) {
115 		crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
116 		return;
117 	}
118 	cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
119 }
120 
121 static void crypto_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
122 {
123 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
124 
125 	if (unlikely(!sctx->fc)) {
126 		crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
127 		return;
128 	}
129 	cpacf_km(sctx->fc | CPACF_DECRYPT,
130 		 &sctx->key, out, in, AES_BLOCK_SIZE);
131 }
132 
133 static int fallback_init_cip(struct crypto_tfm *tfm)
134 {
135 	const char *name = tfm->__crt_alg->cra_name;
136 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
137 
138 	sctx->fallback.cip = crypto_alloc_cipher(name, 0,
139 						 CRYPTO_ALG_NEED_FALLBACK);
140 
141 	if (IS_ERR(sctx->fallback.cip)) {
142 		pr_err("Allocating AES fallback algorithm %s failed\n",
143 		       name);
144 		return PTR_ERR(sctx->fallback.cip);
145 	}
146 
147 	return 0;
148 }
149 
150 static void fallback_exit_cip(struct crypto_tfm *tfm)
151 {
152 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
153 
154 	crypto_free_cipher(sctx->fallback.cip);
155 	sctx->fallback.cip = NULL;
156 }
157 
158 static struct crypto_alg aes_alg = {
159 	.cra_name		=	"aes",
160 	.cra_driver_name	=	"aes-s390",
161 	.cra_priority		=	300,
162 	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER |
163 					CRYPTO_ALG_NEED_FALLBACK,
164 	.cra_blocksize		=	AES_BLOCK_SIZE,
165 	.cra_ctxsize		=	sizeof(struct s390_aes_ctx),
166 	.cra_module		=	THIS_MODULE,
167 	.cra_init               =       fallback_init_cip,
168 	.cra_exit               =       fallback_exit_cip,
169 	.cra_u			=	{
170 		.cipher = {
171 			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
172 			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
173 			.cia_setkey		=	aes_set_key,
174 			.cia_encrypt		=	crypto_aes_encrypt,
175 			.cia_decrypt		=	crypto_aes_decrypt,
176 		}
177 	}
178 };
179 
180 static int setkey_fallback_skcipher(struct crypto_skcipher *tfm, const u8 *key,
181 				    unsigned int len)
182 {
183 	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
184 
185 	crypto_skcipher_clear_flags(sctx->fallback.skcipher,
186 				    CRYPTO_TFM_REQ_MASK);
187 	crypto_skcipher_set_flags(sctx->fallback.skcipher,
188 				  crypto_skcipher_get_flags(tfm) &
189 				  CRYPTO_TFM_REQ_MASK);
190 	return crypto_skcipher_setkey(sctx->fallback.skcipher, key, len);
191 }
192 
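/*
 * Redirect a request to the software fallback: the subrequest lives in the
 * request context (sized in fallback_init_skcipher), so no extra allocation
 * is needed here.
 */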
193 static int fallback_skcipher_crypt(struct s390_aes_ctx *sctx,
194 				   struct skcipher_request *req,
195 				   unsigned long modifier)
196 {
197 	struct skcipher_request *subreq = skcipher_request_ctx(req);
198 
199 	*subreq = *req;
200 	skcipher_request_set_tfm(subreq, sctx->fallback.skcipher);
201 	return (modifier & CPACF_DECRYPT) ?
202 		crypto_skcipher_decrypt(subreq) :
203 		crypto_skcipher_encrypt(subreq);
204 }
205 
206 static int ecb_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
207 			   unsigned int key_len)
208 {
209 	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
210 	unsigned long fc;
211 
212 	/* Pick the correct function code based on the key length */
213 	fc = (key_len == 16) ? CPACF_KM_AES_128 :
214 	     (key_len == 24) ? CPACF_KM_AES_192 :
215 	     (key_len == 32) ? CPACF_KM_AES_256 : 0;
216 
217 	/* Check if the function code is available */
218 	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
219 	if (!sctx->fc)
220 		return setkey_fallback_skcipher(tfm, in_key, key_len);
221 
222 	sctx->key_len = key_len;
223 	memcpy(sctx->key, in_key, key_len);
224 	return 0;
225 }
226 
227 static int ecb_aes_crypt(struct skcipher_request *req, unsigned long modifier)
228 {
229 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
230 	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
231 	struct skcipher_walk walk;
232 	unsigned int nbytes, n;
233 	int ret;
234 
235 	if (unlikely(!sctx->fc))
236 		return fallback_skcipher_crypt(sctx, req, modifier);
237 
238 	ret = skcipher_walk_virt(&walk, req, false);
239 	while ((nbytes = walk.nbytes) != 0) {
240 		/* only use complete blocks */
241 		n = nbytes & ~(AES_BLOCK_SIZE - 1);
242 		cpacf_km(sctx->fc | modifier, sctx->key,
243 			 walk.dst.virt.addr, walk.src.virt.addr, n);
244 		ret = skcipher_walk_done(&walk, nbytes - n);
245 	}
246 	return ret;
247 }
248 
249 static int ecb_aes_encrypt(struct skcipher_request *req)
250 {
251 	return ecb_aes_crypt(req, 0);
252 }
253 
254 static int ecb_aes_decrypt(struct skcipher_request *req)
255 {
256 	return ecb_aes_crypt(req, CPACF_DECRYPT);
257 }
258 
259 static int fallback_init_skcipher(struct crypto_skcipher *tfm)
260 {
261 	const char *name = crypto_tfm_alg_name(&tfm->base);
262 	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
263 
264 	sctx->fallback.skcipher = crypto_alloc_skcipher(name, 0,
265 				CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
266 
267 	if (IS_ERR(sctx->fallback.skcipher)) {
268 		pr_err("Allocating AES fallback algorithm %s failed\n",
269 		       name);
270 		return PTR_ERR(sctx->fallback.skcipher);
271 	}
272 
273 	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
274 				    crypto_skcipher_reqsize(sctx->fallback.skcipher));
275 	return 0;
276 }
277 
278 static void fallback_exit_skcipher(struct crypto_skcipher *tfm)
279 {
280 	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
281 
282 	crypto_free_skcipher(sctx->fallback.skcipher);
283 }
284 
285 static struct skcipher_alg ecb_aes_alg = {
286 	.base.cra_name		=	"ecb(aes)",
287 	.base.cra_driver_name	=	"ecb-aes-s390",
288 	.base.cra_priority	=	401,	/* combo: aes + ecb + 1 */
289 	.base.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
290 	.base.cra_blocksize	=	AES_BLOCK_SIZE,
291 	.base.cra_ctxsize	=	sizeof(struct s390_aes_ctx),
292 	.base.cra_module	=	THIS_MODULE,
293 	.init			=	fallback_init_skcipher,
294 	.exit			=	fallback_exit_skcipher,
295 	.min_keysize		=	AES_MIN_KEY_SIZE,
296 	.max_keysize		=	AES_MAX_KEY_SIZE,
297 	.setkey			=	ecb_aes_set_key,
298 	.encrypt		=	ecb_aes_encrypt,
299 	.decrypt		=	ecb_aes_decrypt,
300 };
301 
302 static int cbc_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
303 			   unsigned int key_len)
304 {
305 	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
306 	unsigned long fc;
307 
308 	/* Pick the correct function code based on the key length */
309 	fc = (key_len == 16) ? CPACF_KMC_AES_128 :
310 	     (key_len == 24) ? CPACF_KMC_AES_192 :
311 	     (key_len == 32) ? CPACF_KMC_AES_256 : 0;
312 
313 	/* Check if the function code is available */
314 	sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
315 	if (!sctx->fc)
316 		return setkey_fallback_skcipher(tfm, in_key, key_len);
317 
318 	sctx->key_len = key_len;
319 	memcpy(sctx->key, in_key, key_len);
320 	return 0;
321 }
322 
323 static int cbc_aes_crypt(struct skcipher_request *req, unsigned long modifier)
324 {
325 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
326 	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
327 	struct skcipher_walk walk;
328 	unsigned int nbytes, n;
329 	int ret;
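	/* Parameter block as consumed by KMC: chaining value (IV), then key */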
330 	struct {
331 		u8 iv[AES_BLOCK_SIZE];
332 		u8 key[AES_MAX_KEY_SIZE];
333 	} param;
334 
335 	if (unlikely(!sctx->fc))
336 		return fallback_skcipher_crypt(sctx, req, modifier);
337 
338 	ret = skcipher_walk_virt(&walk, req, false);
339 	if (ret)
340 		return ret;
341 	memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
342 	memcpy(param.key, sctx->key, sctx->key_len);
343 	while ((nbytes = walk.nbytes) != 0) {
344 		/* only use complete blocks */
345 		n = nbytes & ~(AES_BLOCK_SIZE - 1);
346 		cpacf_kmc(sctx->fc | modifier, &param,
347 			  walk.dst.virt.addr, walk.src.virt.addr, n);
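		/* KMC updated the chaining value in the param block; carry it over */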
348 		memcpy(walk.iv, param.iv, AES_BLOCK_SIZE);
349 		ret = skcipher_walk_done(&walk, nbytes - n);
350 	}
351 	memzero_explicit(&param, sizeof(param));
352 	return ret;
353 }
354 
355 static int cbc_aes_encrypt(struct skcipher_request *req)
356 {
357 	return cbc_aes_crypt(req, 0);
358 }
359 
360 static int cbc_aes_decrypt(struct skcipher_request *req)
361 {
362 	return cbc_aes_crypt(req, CPACF_DECRYPT);
363 }
364 
365 static struct skcipher_alg cbc_aes_alg = {
366 	.base.cra_name		=	"cbc(aes)",
367 	.base.cra_driver_name	=	"cbc-aes-s390",
368 	.base.cra_priority	=	402,	/* ecb-aes-s390 + 1 */
369 	.base.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
370 	.base.cra_blocksize	=	AES_BLOCK_SIZE,
371 	.base.cra_ctxsize	=	sizeof(struct s390_aes_ctx),
372 	.base.cra_module	=	THIS_MODULE,
373 	.init			=	fallback_init_skcipher,
374 	.exit			=	fallback_exit_skcipher,
375 	.min_keysize		=	AES_MIN_KEY_SIZE,
376 	.max_keysize		=	AES_MAX_KEY_SIZE,
377 	.ivsize			=	AES_BLOCK_SIZE,
378 	.setkey			=	cbc_aes_set_key,
379 	.encrypt		=	cbc_aes_encrypt,
380 	.decrypt		=	cbc_aes_decrypt,
381 };
382 
383 static int xts_fallback_setkey(struct crypto_skcipher *tfm, const u8 *key,
384 			       unsigned int len)
385 {
386 	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
387 
388 	crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK);
389 	crypto_skcipher_set_flags(xts_ctx->fallback,
390 				  crypto_skcipher_get_flags(tfm) &
391 				  CRYPTO_TFM_REQ_MASK);
392 	return crypto_skcipher_setkey(xts_ctx->fallback, key, len);
393 }
394 
395 static int xts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
396 			   unsigned int key_len)
397 {
398 	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
399 	unsigned long fc;
400 	int err;
401 
402 	err = xts_fallback_setkey(tfm, in_key, key_len);
403 	if (err)
404 		return err;
405 
406 	/* Pick the correct function code based on the key length */
407 	fc = (key_len == 32) ? CPACF_KM_XTS_128 :
408 	     (key_len == 64) ? CPACF_KM_XTS_256 : 0;
409 
410 	/* Check if the function code is available */
411 	xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
412 	if (!xts_ctx->fc)
413 		return 0;
414 
415 	/* Split the XTS key into the two subkeys */
416 	key_len = key_len / 2;
417 	xts_ctx->key_len = key_len;
418 	memcpy(xts_ctx->key, in_key, key_len);
419 	memcpy(xts_ctx->pcc_key, in_key + key_len, key_len);
420 	return 0;
421 }
422 
423 static int xts_aes_crypt(struct skcipher_request *req, unsigned long modifier)
424 {
425 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
426 	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
427 	struct skcipher_walk walk;
428 	unsigned int offset, nbytes, n;
429 	int ret;
430 	struct {
431 		u8 key[32];
432 		u8 tweak[16];
433 		u8 block[16];
434 		u8 bit[16];
435 		u8 xts[16];
436 	} pcc_param;
437 	struct {
438 		u8 key[32];
439 		u8 init[16];
440 	} xts_param;
441 
442 	if (req->cryptlen < AES_BLOCK_SIZE)
443 		return -EINVAL;
444 
445 	if (unlikely(!xts_ctx->fc || (req->cryptlen % AES_BLOCK_SIZE) != 0)) {
446 		struct skcipher_request *subreq = skcipher_request_ctx(req);
447 
448 		*subreq = *req;
449 		skcipher_request_set_tfm(subreq, xts_ctx->fallback);
450 		return (modifier & CPACF_DECRYPT) ?
451 			crypto_skcipher_decrypt(subreq) :
452 			crypto_skcipher_encrypt(subreq);
453 	}
454 
455 	ret = skcipher_walk_virt(&walk, req, false);
456 	if (ret)
457 		return ret;
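	/*
	 * The parameter blocks below are laid out for the 256-bit key case.
	 * For 128-bit keys (key_len == 16) the key occupies the rightmost
	 * 16 bytes of the 32-byte key field, so (key + offset) gives PCC/KM
	 * the shorter block they expect. PCC derives the initial XTS
	 * parameter from the tweak; KM consumes it via xts_param.init.
	 */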
458 	offset = xts_ctx->key_len & 0x10;
459 	memset(pcc_param.block, 0, sizeof(pcc_param.block));
460 	memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
461 	memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
462 	memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak));
463 	memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
464 	cpacf_pcc(xts_ctx->fc, pcc_param.key + offset);
465 
466 	memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
467 	memcpy(xts_param.init, pcc_param.xts, 16);
468 
469 	while ((nbytes = walk.nbytes) != 0) {
470 		/* only use complete blocks */
471 		n = nbytes & ~(AES_BLOCK_SIZE - 1);
472 		cpacf_km(xts_ctx->fc | modifier, xts_param.key + offset,
473 			 walk.dst.virt.addr, walk.src.virt.addr, n);
474 		ret = skcipher_walk_done(&walk, nbytes - n);
475 	}
476 	memzero_explicit(&pcc_param, sizeof(pcc_param));
477 	memzero_explicit(&xts_param, sizeof(xts_param));
478 	return ret;
479 }
480 
481 static int xts_aes_encrypt(struct skcipher_request *req)
482 {
483 	return xts_aes_crypt(req, 0);
484 }
485 
486 static int xts_aes_decrypt(struct skcipher_request *req)
487 {
488 	return xts_aes_crypt(req, CPACF_DECRYPT);
489 }
490 
491 static int xts_fallback_init(struct crypto_skcipher *tfm)
492 {
493 	const char *name = crypto_tfm_alg_name(&tfm->base);
494 	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
495 
496 	xts_ctx->fallback = crypto_alloc_skcipher(name, 0,
497 				CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
498 
499 	if (IS_ERR(xts_ctx->fallback)) {
500 		pr_err("Allocating XTS fallback algorithm %s failed\n",
501 		       name);
502 		return PTR_ERR(xts_ctx->fallback);
503 	}
504 	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
505 				    crypto_skcipher_reqsize(xts_ctx->fallback));
506 	return 0;
507 }
508 
509 static void xts_fallback_exit(struct crypto_skcipher *tfm)
510 {
511 	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
512 
513 	crypto_free_skcipher(xts_ctx->fallback);
514 }
515 
516 static struct skcipher_alg xts_aes_alg = {
517 	.base.cra_name		=	"xts(aes)",
518 	.base.cra_driver_name	=	"xts-aes-s390",
519 	.base.cra_priority	=	402,	/* ecb-aes-s390 + 1 */
520 	.base.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
521 	.base.cra_blocksize	=	AES_BLOCK_SIZE,
522 	.base.cra_ctxsize	=	sizeof(struct s390_xts_ctx),
523 	.base.cra_module	=	THIS_MODULE,
524 	.init			=	xts_fallback_init,
525 	.exit			=	xts_fallback_exit,
526 	.min_keysize		=	2 * AES_MIN_KEY_SIZE,
527 	.max_keysize		=	2 * AES_MAX_KEY_SIZE,
528 	.ivsize			=	AES_BLOCK_SIZE,
529 	.setkey			=	xts_aes_set_key,
530 	.encrypt		=	xts_aes_encrypt,
531 	.decrypt		=	xts_aes_decrypt,
532 };
533 
534 static int fullxts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
535 			       unsigned int key_len)
536 {
537 	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
538 	unsigned long fc;
539 	int err;
540 
541 	err = xts_fallback_setkey(tfm, in_key, key_len);
542 	if (err)
543 		return err;
544 
545 	/* Pick the correct function code based on the key length */
546 	fc = (key_len == 32) ? CPACF_KM_XTS_128_FULL :
547 	     (key_len == 64) ? CPACF_KM_XTS_256_FULL : 0;
548 
549 	/* Check if the function code is available */
550 	xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
551 	if (!xts_ctx->fc)
552 		return 0;
553 
554 	/* Store double-key */
555 	memcpy(xts_ctx->keys, in_key, key_len);
556 	xts_ctx->key_len = key_len;
557 	return 0;
558 }
559 
560 static int fullxts_aes_crypt(struct skcipher_request *req,  unsigned long modifier)
561 {
562 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
563 	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
564 	unsigned int offset, nbytes, n;
565 	struct skcipher_walk walk;
566 	int ret;
567 	struct {
568 		__u8 key[64];
569 		__u8 tweak[16];
570 		__u8 nap[16];
571 	} fxts_param = {
572 		.nap = {0},
573 	};
574 
575 	if (req->cryptlen < AES_BLOCK_SIZE)
576 		return -EINVAL;
577 
578 	if (unlikely(!xts_ctx->fc || (req->cryptlen % AES_BLOCK_SIZE) != 0)) {
579 		struct skcipher_request *subreq = skcipher_request_ctx(req);
580 
581 		*subreq = *req;
582 		skcipher_request_set_tfm(subreq, xts_ctx->fallback);
583 		return (modifier & CPACF_DECRYPT) ?
584 			crypto_skcipher_decrypt(subreq) :
585 			crypto_skcipher_encrypt(subreq);
586 	}
587 
588 	ret = skcipher_walk_virt(&walk, req, false);
589 	if (ret)
590 		return ret;
591 
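	/*
	 * Same layout trick as in xts_aes_crypt: the key field is sized for
	 * a 2 x 256-bit key, so a 2 x 128-bit key goes into its rightmost
	 * 32 bytes.
	 */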
592 	offset = xts_ctx->key_len & 0x20;
593 	memcpy(fxts_param.key + offset, xts_ctx->keys, xts_ctx->key_len);
594 	memcpy(fxts_param.tweak, req->iv, AES_BLOCK_SIZE);
595 	fxts_param.nap[0] = 0x01; /* initial alpha power (1, little-endian) */
596 
597 	while ((nbytes = walk.nbytes) != 0) {
598 		/* only use complete blocks */
599 		n = nbytes & ~(AES_BLOCK_SIZE - 1);
600 		cpacf_km(xts_ctx->fc | modifier, fxts_param.key + offset,
601 			 walk.dst.virt.addr, walk.src.virt.addr, n);
602 		ret = skcipher_walk_done(&walk, nbytes - n);
603 	}
604 	memzero_explicit(&fxts_param, sizeof(fxts_param));
605 	return ret;
606 }
607 
608 static int fullxts_aes_encrypt(struct skcipher_request *req)
609 {
610 	return fullxts_aes_crypt(req, 0);
611 }
612 
613 static int fullxts_aes_decrypt(struct skcipher_request *req)
614 {
615 	return fullxts_aes_crypt(req, CPACF_DECRYPT);
616 }
617 
618 static struct skcipher_alg fullxts_aes_alg = {
619 	.base.cra_name		=	"xts(aes)",
620 	.base.cra_driver_name	=	"full-xts-aes-s390",
621 	.base.cra_priority	=	403,	/* xts-aes-s390 + 1 */
622 	.base.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
623 	.base.cra_blocksize	=	AES_BLOCK_SIZE,
624 	.base.cra_ctxsize	=	sizeof(struct s390_xts_ctx),
625 	.base.cra_module	=	THIS_MODULE,
626 	.init			=	xts_fallback_init,
627 	.exit			=	xts_fallback_exit,
628 	.min_keysize		=	2 * AES_MIN_KEY_SIZE,
629 	.max_keysize		=	2 * AES_MAX_KEY_SIZE,
630 	.ivsize			=	AES_BLOCK_SIZE,
631 	.setkey			=	fullxts_aes_set_key,
632 	.encrypt		=	fullxts_aes_encrypt,
633 	.decrypt		=	fullxts_aes_decrypt,
634 };
635 
636 static int ctr_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
637 			   unsigned int key_len)
638 {
639 	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
640 	unsigned long fc;
641 
642 	/* Pick the correct function code based on the key length */
643 	fc = (key_len == 16) ? CPACF_KMCTR_AES_128 :
644 	     (key_len == 24) ? CPACF_KMCTR_AES_192 :
645 	     (key_len == 32) ? CPACF_KMCTR_AES_256 : 0;
646 
647 	/* Check if the function code is available */
648 	sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
649 	if (!sctx->fc)
650 		return setkey_fallback_skcipher(tfm, in_key, key_len);
651 
652 	sctx->key_len = key_len;
653 	memcpy(sctx->key, in_key, key_len);
654 	return 0;
655 }
656 
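/*
 * Fill ctrptr with consecutive counter blocks derived from iv. Returns the
 * number of bytes (a multiple of AES_BLOCK_SIZE, at most PAGE_SIZE) that a
 * single KMCTR invocation can then process.
 */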
657 static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
658 {
659 	unsigned int i, n;
660 
661 	/* only use complete blocks, max. PAGE_SIZE */
662 	memcpy(ctrptr, iv, AES_BLOCK_SIZE);
663 	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
664 	for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
665 		memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
666 		crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
667 		ctrptr += AES_BLOCK_SIZE;
668 	}
669 	return n;
670 }
671 
672 static int ctr_aes_crypt(struct skcipher_request *req)
673 {
674 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
675 	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
676 	u8 buf[AES_BLOCK_SIZE], *ctrptr;
677 	struct skcipher_walk walk;
678 	unsigned int n, nbytes;
679 	int ret, locked;
680 
681 	if (unlikely(!sctx->fc))
682 		return fallback_skcipher_crypt(sctx, req, 0);
683 
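	/*
	 * Use the shared counter-block page only if the mutex is free;
	 * otherwise process one block per KMCTR call with walk.iv instead
	 * of sleeping here.
	 */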
684 	locked = mutex_trylock(&ctrblk_lock);
685 
686 	ret = skcipher_walk_virt(&walk, req, false);
687 	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
688 		n = AES_BLOCK_SIZE;
689 
690 		if (nbytes >= 2*AES_BLOCK_SIZE && locked)
691 			n = __ctrblk_init(ctrblk, walk.iv, nbytes);
692 		ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv;
693 		cpacf_kmctr(sctx->fc, sctx->key, walk.dst.virt.addr,
694 			    walk.src.virt.addr, n, ctrptr);
695 		if (ctrptr == ctrblk)
696 			memcpy(walk.iv, ctrptr + n - AES_BLOCK_SIZE,
697 			       AES_BLOCK_SIZE);
698 		crypto_inc(walk.iv, AES_BLOCK_SIZE);
699 		ret = skcipher_walk_done(&walk, nbytes - n);
700 	}
701 	if (locked)
702 		mutex_unlock(&ctrblk_lock);
703 	/*
704 	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
705 	 */
706 	if (nbytes) {
707 		memset(buf, 0, AES_BLOCK_SIZE);
708 		memcpy(buf, walk.src.virt.addr, nbytes);
709 		cpacf_kmctr(sctx->fc, sctx->key, buf, buf,
710 			    AES_BLOCK_SIZE, walk.iv);
711 		memcpy(walk.dst.virt.addr, buf, nbytes);
712 		crypto_inc(walk.iv, AES_BLOCK_SIZE);
713 		ret = skcipher_walk_done(&walk, 0);
714 	}
715 
716 	return ret;
717 }
718 
719 static struct skcipher_alg ctr_aes_alg = {
720 	.base.cra_name		=	"ctr(aes)",
721 	.base.cra_driver_name	=	"ctr-aes-s390",
722 	.base.cra_priority	=	402,	/* ecb-aes-s390 + 1 */
723 	.base.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
724 	.base.cra_blocksize	=	1,
725 	.base.cra_ctxsize	=	sizeof(struct s390_aes_ctx),
726 	.base.cra_module	=	THIS_MODULE,
727 	.init			=	fallback_init_skcipher,
728 	.exit			=	fallback_exit_skcipher,
729 	.min_keysize		=	AES_MIN_KEY_SIZE,
730 	.max_keysize		=	AES_MAX_KEY_SIZE,
731 	.ivsize			=	AES_BLOCK_SIZE,
732 	.setkey			=	ctr_aes_set_key,
733 	.encrypt		=	ctr_aes_crypt,
734 	.decrypt		=	ctr_aes_crypt,
735 	.chunksize		=	AES_BLOCK_SIZE,
736 };
737 
738 static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key,
739 			  unsigned int keylen)
740 {
741 	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
742 
743 	switch (keylen) {
744 	case AES_KEYSIZE_128:
745 		ctx->fc = CPACF_KMA_GCM_AES_128;
746 		break;
747 	case AES_KEYSIZE_192:
748 		ctx->fc = CPACF_KMA_GCM_AES_192;
749 		break;
750 	case AES_KEYSIZE_256:
751 		ctx->fc = CPACF_KMA_GCM_AES_256;
752 		break;
753 	default:
754 		return -EINVAL;
755 	}
756 
757 	memcpy(ctx->key, key, keylen);
758 	ctx->key_len = keylen;
759 	return 0;
760 }
761 
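/* Accept only the tag lengths GCM permits: 32, 64 and 96 to 128 bits */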
762 static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
763 {
764 	switch (authsize) {
765 	case 4:
766 	case 8:
767 	case 12:
768 	case 13:
769 	case 14:
770 	case 15:
771 	case 16:
772 		break;
773 	default:
774 		return -EINVAL;
775 	}
776 
777 	return 0;
778 }
779 
780 static void gcm_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
781 			   unsigned int len)
782 {
783 	memset(gw, 0, sizeof(*gw));
784 	gw->walk_bytes_remain = len;
785 	scatterwalk_start(&gw->walk, sg);
786 }
787 
788 static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw)
789 {
790 	if (gw->walk_bytes_remain == 0)
791 		return 0;
792 	gw->walk_ptr = scatterwalk_next(&gw->walk, gw->walk_bytes_remain,
793 					&gw->walk_bytes);
794 	return gw->walk_bytes;
795 }
796 
797 static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
798 					     unsigned int nbytes, bool out)
799 {
800 	gw->walk_bytes_remain -= nbytes;
801 	if (out)
802 		scatterwalk_done_dst(&gw->walk, gw->walk_ptr, nbytes);
803 	else
804 		scatterwalk_done_src(&gw->walk, gw->walk_ptr, nbytes);
805 	gw->walk_ptr = NULL;
806 }
807 
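/*
 * Provide at least minbytesneeded contiguous input bytes, either directly
 * from the mapped scatterlist entry or collected into gw->buf. Returns the
 * number of bytes available at gw->ptr (0 once the input is exhausted).
 */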
808 static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
809 {
810 	int n;
811 
812 	if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
813 		gw->ptr = gw->buf;
814 		gw->nbytes = gw->buf_bytes;
815 		goto out;
816 	}
817 
818 	if (gw->walk_bytes_remain == 0) {
819 		gw->ptr = NULL;
820 		gw->nbytes = 0;
821 		goto out;
822 	}
823 
824 	if (!_gcm_sg_clamp_and_map(gw)) {
825 		gw->ptr = NULL;
826 		gw->nbytes = 0;
827 		goto out;
828 	}
829 
830 	if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
831 		gw->ptr = gw->walk_ptr;
832 		gw->nbytes = gw->walk_bytes;
833 		goto out;
834 	}
835 
836 	while (1) {
837 		n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
838 		memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
839 		gw->buf_bytes += n;
840 		_gcm_sg_unmap_and_advance(gw, n, false);
841 		if (gw->buf_bytes >= minbytesneeded) {
842 			gw->ptr = gw->buf;
843 			gw->nbytes = gw->buf_bytes;
844 			goto out;
845 		}
846 		if (!_gcm_sg_clamp_and_map(gw)) {
847 			gw->ptr = NULL;
848 			gw->nbytes = 0;
849 			goto out;
850 		}
851 	}
852 
853 out:
854 	return gw->nbytes;
855 }
856 
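/*
 * Provide a contiguous output area of at least minbytesneeded bytes. If the
 * current scatterlist entry is too small, hand out gw->buf instead;
 * gcm_out_walk_done later copies the buffered output back to the
 * scatterlist.
 */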
857 static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
858 {
859 	if (gw->walk_bytes_remain == 0) {
860 		gw->ptr = NULL;
861 		gw->nbytes = 0;
862 		goto out;
863 	}
864 
865 	if (!_gcm_sg_clamp_and_map(gw)) {
866 		gw->ptr = NULL;
867 		gw->nbytes = 0;
868 		goto out;
869 	}
870 
871 	if (gw->walk_bytes >= minbytesneeded) {
872 		gw->ptr = gw->walk_ptr;
873 		gw->nbytes = gw->walk_bytes;
874 		goto out;
875 	}
876 
877 	scatterwalk_unmap(gw->walk_ptr);
878 	gw->walk_ptr = NULL;
879 
880 	gw->ptr = gw->buf;
881 	gw->nbytes = sizeof(gw->buf);
882 
883 out:
884 	return gw->nbytes;
885 }
886 
887 static int gcm_in_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
888 {
889 	if (gw->ptr == NULL)
890 		return 0;
891 
892 	if (gw->ptr == gw->buf) {
893 		int n = gw->buf_bytes - bytesdone;
894 		if (n > 0) {
895 			memmove(gw->buf, gw->buf + bytesdone, n);
896 			gw->buf_bytes = n;
897 		} else
898 			gw->buf_bytes = 0;
899 	} else
900 		_gcm_sg_unmap_and_advance(gw, bytesdone, false);
901 
902 	return bytesdone;
903 }
904 
905 static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
906 {
907 	int i, n;
908 
909 	if (gw->ptr == NULL)
910 		return 0;
911 
912 	if (gw->ptr == gw->buf) {
913 		for (i = 0; i < bytesdone; i += n) {
914 			if (!_gcm_sg_clamp_and_map(gw))
915 				return i;
916 			n = min(gw->walk_bytes, bytesdone - i);
917 			memcpy(gw->walk_ptr, gw->buf + i, n);
918 			_gcm_sg_unmap_and_advance(gw, n, true);
919 		}
920 	} else
921 		_gcm_sg_unmap_and_advance(gw, bytesdone, true);
922 
923 	return bytesdone;
924 }
925 
926 static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
927 {
928 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
929 	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
930 	unsigned int ivsize = crypto_aead_ivsize(tfm);
931 	unsigned int taglen = crypto_aead_authsize(tfm);
932 	unsigned int aadlen = req->assoclen;
933 	unsigned int pclen = req->cryptlen;
934 	int ret = 0;
935 
936 	unsigned int n, len, in_bytes, out_bytes,
937 		     min_bytes, bytes, aad_bytes, pc_bytes;
938 	struct gcm_sg_walk gw_in, gw_out;
939 	u8 tag[GHASH_DIGEST_SIZE];
940 
941 	struct {
942 		u32 _[3];		/* reserved */
943 		u32 cv;			/* Counter Value */
944 		u8 t[GHASH_DIGEST_SIZE];/* Tag */
945 		u8 h[AES_BLOCK_SIZE];	/* Hash-subkey */
946 		u64 taadl;		/* Total AAD Length */
947 		u64 tpcl;		/* Total Plain-/Cipher-text Length */
948 		u8 j0[GHASH_BLOCK_SIZE];/* initial counter value */
949 		u8 k[AES_MAX_KEY_SIZE];	/* Key */
950 	} param;
951 
952 	/*
953 	 * encrypt
954 	 *   req->src: aad||plaintext
955 	 *   req->dst: aad||ciphertext||tag
956 	 * decrypt
957 	 *   req->src: aad||ciphertext||tag
958 	 *   req->dst: aad||plaintext, return 0 or -EBADMSG
959 	 * aad, plaintext and ciphertext may be empty.
960 	 */
961 	if (flags & CPACF_DECRYPT)
962 		pclen -= taglen;
963 	len = aadlen + pclen;
964 
965 	memset(&param, 0, sizeof(param));
966 	param.cv = 1;
967 	param.taadl = aadlen * 8;
968 	param.tpcl = pclen * 8;
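	/*
	 * Build J0 = IV || 0^31 || 1 (96-bit IV case); s390 is big-endian,
	 * so storing the u32 value 1 sets the trailing counter field to
	 * 00 00 00 01.
	 */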
969 	memcpy(param.j0, req->iv, ivsize);
970 	*(u32 *)(param.j0 + ivsize) = 1;
971 	memcpy(param.k, ctx->key, ctx->key_len);
972 
973 	gcm_walk_start(&gw_in, req->src, len);
974 	gcm_walk_start(&gw_out, req->dst, len);
975 
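	/*
	 * Walk source and destination in lockstep: all AAD is fed to KMA
	 * before any plain/ciphertext, intermediate chunks are block-size
	 * multiples, and the final AAD and text chunks are flagged with
	 * LAAD/LPC so the hardware handles the short tail and the tag.
	 */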
976 	do {
977 		min_bytes = min_t(unsigned int,
978 				  aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
979 		in_bytes = gcm_in_walk_go(&gw_in, min_bytes);
980 		out_bytes = gcm_out_walk_go(&gw_out, min_bytes);
981 		bytes = min(in_bytes, out_bytes);
982 
983 		if (aadlen + pclen <= bytes) {
984 			aad_bytes = aadlen;
985 			pc_bytes = pclen;
986 			flags |= CPACF_KMA_LAAD | CPACF_KMA_LPC;
987 		} else {
988 			if (aadlen <= bytes) {
989 				aad_bytes = aadlen;
990 				pc_bytes = (bytes - aadlen) &
991 					   ~(AES_BLOCK_SIZE - 1);
992 				flags |= CPACF_KMA_LAAD;
993 			} else {
994 				aad_bytes = bytes & ~(AES_BLOCK_SIZE - 1);
995 				pc_bytes = 0;
996 			}
997 		}
998 
999 		if (aad_bytes > 0)
1000 			memcpy(gw_out.ptr, gw_in.ptr, aad_bytes);
1001 
1002 		cpacf_kma(ctx->fc | flags, &param,
1003 			  gw_out.ptr + aad_bytes,
1004 			  gw_in.ptr + aad_bytes, pc_bytes,
1005 			  gw_in.ptr, aad_bytes);
1006 
1007 		n = aad_bytes + pc_bytes;
1008 		if (gcm_in_walk_done(&gw_in, n) != n)
1009 			return -ENOMEM;
1010 		if (gcm_out_walk_done(&gw_out, n) != n)
1011 			return -ENOMEM;
1012 		aadlen -= aad_bytes;
1013 		pclen -= pc_bytes;
1014 	} while (aadlen + pclen > 0);
1015 
1016 	if (flags & CPACF_DECRYPT) {
1017 		scatterwalk_map_and_copy(tag, req->src, len, taglen, 0);
1018 		if (crypto_memneq(tag, param.t, taglen))
1019 			ret = -EBADMSG;
1020 	} else
1021 		scatterwalk_map_and_copy(param.t, req->dst, len, taglen, 1);
1022 
1023 	memzero_explicit(&param, sizeof(param));
1024 	return ret;
1025 }
1026 
1027 static int gcm_aes_encrypt(struct aead_request *req)
1028 {
1029 	return gcm_aes_crypt(req, CPACF_ENCRYPT);
1030 }
1031 
1032 static int gcm_aes_decrypt(struct aead_request *req)
1033 {
1034 	return gcm_aes_crypt(req, CPACF_DECRYPT);
1035 }
1036 
1037 static struct aead_alg gcm_aes_aead = {
1038 	.setkey			= gcm_aes_setkey,
1039 	.setauthsize		= gcm_aes_setauthsize,
1040 	.encrypt		= gcm_aes_encrypt,
1041 	.decrypt		= gcm_aes_decrypt,
1042 
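	/* 96-bit IV; gcm_aes_crypt builds J0 = IV || 1 from it */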
1043 	.ivsize			= GHASH_BLOCK_SIZE - sizeof(u32),
1044 	.maxauthsize		= GHASH_DIGEST_SIZE,
1045 	.chunksize		= AES_BLOCK_SIZE,
1046 
1047 	.base			= {
1048 		.cra_blocksize		= 1,
1049 		.cra_ctxsize		= sizeof(struct s390_aes_ctx),
1050 		.cra_priority		= 900,
1051 		.cra_name		= "gcm(aes)",
1052 		.cra_driver_name	= "gcm-aes-s390",
1053 		.cra_module		= THIS_MODULE,
1054 	},
1055 };
1056 
1057 static struct crypto_alg *aes_s390_alg;
1058 static struct skcipher_alg *aes_s390_skcipher_algs[5];
1059 static int aes_s390_skciphers_num;
1060 static struct aead_alg *aes_s390_aead_alg;
1061 
1062 static int aes_s390_register_skcipher(struct skcipher_alg *alg)
1063 {
1064 	int ret;
1065 
1066 	ret = crypto_register_skcipher(alg);
1067 	if (!ret)
1068 		aes_s390_skcipher_algs[aes_s390_skciphers_num++] = alg;
1069 	return ret;
1070 }
1071 
1072 static void aes_s390_fini(void)
1073 {
1074 	if (aes_s390_alg)
1075 		crypto_unregister_alg(aes_s390_alg);
1076 	while (aes_s390_skciphers_num--)
1077 		crypto_unregister_skcipher(aes_s390_skcipher_algs[aes_s390_skciphers_num]);
1078 	if (ctrblk)
1079 		free_page((unsigned long) ctrblk);
1080 
1081 	if (aes_s390_aead_alg)
1082 		crypto_unregister_aead(aes_s390_aead_alg);
1083 }
1084 
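/*
 * Register each algorithm only if at least one matching CPACF function
 * code is available. The full-XTS variant (priority 403) outranks the
 * split-key XTS (402), so it is preferred on machines that support it.
 */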
1085 static int __init aes_s390_init(void)
1086 {
1087 	int ret;
1088 
1089 	/* Query available functions for KM, KMC, KMCTR and KMA */
1090 	cpacf_query(CPACF_KM, &km_functions);
1091 	cpacf_query(CPACF_KMC, &kmc_functions);
1092 	cpacf_query(CPACF_KMCTR, &kmctr_functions);
1093 	cpacf_query(CPACF_KMA, &kma_functions);
1094 
1095 	if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
1096 	    cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
1097 	    cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
1098 		ret = crypto_register_alg(&aes_alg);
1099 		if (ret)
1100 			goto out_err;
1101 		aes_s390_alg = &aes_alg;
1102 		ret = aes_s390_register_skcipher(&ecb_aes_alg);
1103 		if (ret)
1104 			goto out_err;
1105 	}
1106 
1107 	if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
1108 	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
1109 	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
1110 		ret = aes_s390_register_skcipher(&cbc_aes_alg);
1111 		if (ret)
1112 			goto out_err;
1113 	}
1114 
1115 	if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128_FULL) ||
1116 	    cpacf_test_func(&km_functions, CPACF_KM_XTS_256_FULL)) {
1117 		ret = aes_s390_register_skcipher(&fullxts_aes_alg);
1118 		if (ret)
1119 			goto out_err;
1120 	}
1121 
1122 	if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
1123 	    cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
1124 		ret = aes_s390_register_skcipher(&xts_aes_alg);
1125 		if (ret)
1126 			goto out_err;
1127 	}
1128 
1129 	if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_128) ||
1130 	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_192) ||
1131 	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_256)) {
1132 		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
1133 		if (!ctrblk) {
1134 			ret = -ENOMEM;
1135 			goto out_err;
1136 		}
1137 		ret = aes_s390_register_skcipher(&ctr_aes_alg);
1138 		if (ret)
1139 			goto out_err;
1140 	}
1141 
1142 	if (cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_128) ||
1143 	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_192) ||
1144 	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_256)) {
1145 		ret = crypto_register_aead(&gcm_aes_aead);
1146 		if (ret)
1147 			goto out_err;
1148 		aes_s390_aead_alg = &gcm_aes_aead;
1149 	}
1150 
1151 	return 0;
1152 out_err:
1153 	aes_s390_fini();
1154 	return ret;
1155 }
1156 
1157 module_cpu_feature_match(S390_CPU_FEATURE_MSA, aes_s390_init);
1158 module_exit(aes_s390_fini);
1159 
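/*
 * Usage note (illustrative): consumers reach these implementations via the
 * generic crypto API, e.g. crypto_alloc_skcipher("xts(aes)", 0, 0), which
 * picks the highest-priority registered provider for that algorithm name.
 */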
1160 MODULE_ALIAS_CRYPTO("aes-all");
1161 
1162 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
1163 MODULE_LICENSE("GPL");
1164 MODULE_IMPORT_NS("CRYPTO_INTERNAL");
1165