xref: /linux/arch/s390/crypto/aes_s390.c (revision 3a39d672e7f48b8d6b91a09afa4b55352773b4b5)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Cryptographic API.
4  *
5  * s390 implementation of the AES Cipher Algorithm.
6  *
7  * s390 Version:
8  *   Copyright IBM Corp. 2005, 2017
9  *   Author(s): Jan Glauber (jang@de.ibm.com)
10  *		Sebastian Siewior <sebastian@breakpoint.cc> SW-Fallback
11  *		Patrick Steuer <patrick.steuer@de.ibm.com>
12  *		Harald Freudenberger <freude@de.ibm.com>
13  *
14  * Derived from "crypto/aes_generic.c"
15  */
16 
17 #define KMSG_COMPONENT "aes_s390"
18 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
19 
20 #include <crypto/aes.h>
21 #include <crypto/algapi.h>
22 #include <crypto/ghash.h>
23 #include <crypto/internal/aead.h>
24 #include <crypto/internal/cipher.h>
25 #include <crypto/internal/skcipher.h>
26 #include <crypto/scatterwalk.h>
27 #include <linux/err.h>
28 #include <linux/module.h>
29 #include <linux/cpufeature.h>
30 #include <linux/init.h>
31 #include <linux/mutex.h>
32 #include <linux/fips.h>
33 #include <linux/string.h>
34 #include <crypto/xts.h>
35 #include <asm/cpacf.h>
36 
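/*
 * ctrblk is a single page, allocated at module init, used to pre-compute a
 * run of consecutive counter blocks so that one CPACF KMCTR invocation can
 * process many blocks at once.  It is shared between all ctr(aes) tfms and
 * therefore protected by ctrblk_lock; if the mutex is contended,
 * ctr_aes_crypt() simply falls back to processing one block at a time with
 * the per-request IV.
 */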
37 static u8 *ctrblk;
38 static DEFINE_MUTEX(ctrblk_lock);
39 
40 static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
41 		    kma_functions;
42 
43 struct s390_aes_ctx {
44 	u8 key[AES_MAX_KEY_SIZE];
45 	int key_len;
46 	unsigned long fc;
47 	union {
48 		struct crypto_skcipher *skcipher;
49 		struct crypto_cipher *cip;
50 	} fallback;
51 };
52 
53 struct s390_xts_ctx {
54 	union {
55 		u8 keys[64];
56 		struct {
57 			u8 key[32];
58 			u8 pcc_key[32];
59 		};
60 	};
61 	int key_len;
62 	unsigned long fc;
63 	struct crypto_skcipher *fallback;
64 };
65 
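/*
 * gcm_sg_walk tracks a scatterlist walk for the GCM code below.  Whenever a
 * single scatterlist entry yields fewer bytes than a KMA invocation needs,
 * data is staged block-wise in buf[] (and copied back out again on the
 * output side) so that cpacf_kma() always operates on contiguous memory.
 */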
66 struct gcm_sg_walk {
67 	struct scatter_walk walk;
68 	unsigned int walk_bytes;
69 	u8 *walk_ptr;
70 	unsigned int walk_bytes_remain;
71 	u8 buf[AES_BLOCK_SIZE];
72 	unsigned int buf_bytes;
73 	u8 *ptr;
74 	unsigned int nbytes;
75 };
76 
77 static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
78 		unsigned int key_len)
79 {
80 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
81 
82 	sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
83 	sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
84 			CRYPTO_TFM_REQ_MASK);
85 
86 	return crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
87 }
88 
89 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
90 		       unsigned int key_len)
91 {
92 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
93 	unsigned long fc;
94 
95 	/* Pick the correct function code based on the key length */
96 	fc = (key_len == 16) ? CPACF_KM_AES_128 :
97 	     (key_len == 24) ? CPACF_KM_AES_192 :
98 	     (key_len == 32) ? CPACF_KM_AES_256 : 0;
99 
100 	/* Check if the function code is available */
101 	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
102 	if (!sctx->fc)
103 		return setkey_fallback_cip(tfm, in_key, key_len);
104 
105 	sctx->key_len = key_len;
106 	memcpy(sctx->key, in_key, key_len);
107 	return 0;
108 }
109 
110 static void crypto_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
111 {
112 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
113 
114 	if (unlikely(!sctx->fc)) {
115 		crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
116 		return;
117 	}
118 	cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
119 }
120 
121 static void crypto_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
122 {
123 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
124 
125 	if (unlikely(!sctx->fc)) {
126 		crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
127 		return;
128 	}
129 	cpacf_km(sctx->fc | CPACF_DECRYPT,
130 		 &sctx->key, out, in, AES_BLOCK_SIZE);
131 }
132 
133 static int fallback_init_cip(struct crypto_tfm *tfm)
134 {
135 	const char *name = tfm->__crt_alg->cra_name;
136 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
137 
138 	sctx->fallback.cip = crypto_alloc_cipher(name, 0,
139 						 CRYPTO_ALG_NEED_FALLBACK);
140 
141 	if (IS_ERR(sctx->fallback.cip)) {
142 		pr_err("Allocating AES fallback algorithm %s failed\n",
143 		       name);
144 		return PTR_ERR(sctx->fallback.cip);
145 	}
146 
147 	return 0;
148 }
149 
150 static void fallback_exit_cip(struct crypto_tfm *tfm)
151 {
152 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
153 
154 	crypto_free_cipher(sctx->fallback.cip);
155 	sctx->fallback.cip = NULL;
156 }
157 
158 static struct crypto_alg aes_alg = {
159 	.cra_name		=	"aes",
160 	.cra_driver_name	=	"aes-s390",
161 	.cra_priority		=	300,
162 	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER |
163 					CRYPTO_ALG_NEED_FALLBACK,
164 	.cra_blocksize		=	AES_BLOCK_SIZE,
165 	.cra_ctxsize		=	sizeof(struct s390_aes_ctx),
166 	.cra_module		=	THIS_MODULE,
167 	.cra_init               =       fallback_init_cip,
168 	.cra_exit               =       fallback_exit_cip,
169 	.cra_u			=	{
170 		.cipher = {
171 			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
172 			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
173 			.cia_setkey		=	aes_set_key,
174 			.cia_encrypt		=	crypto_aes_encrypt,
175 			.cia_decrypt		=	crypto_aes_decrypt,
176 		}
177 	}
178 };
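/*
 * Illustrative sketch (not taken from this driver): the single-block "aes"
 * cipher registered above is normally consumed by templates such as
 * "ecb(aes)" or "cbc(aes)" rather than used directly, and the crypto_cipher
 * interface is internal to the crypto subsystem (hence the CRYPTO_INTERNAL
 * namespace import at the end of this file).  A minimal internal user might
 * look roughly like this; error handling is trimmed and all buffers are
 * placeholders:
 *
 *	struct crypto_cipher *tfm;
 *	u8 key[AES_KEYSIZE_128] = {}, in[AES_BLOCK_SIZE] = {};
 *	u8 out[AES_BLOCK_SIZE];
 *
 *	tfm = crypto_alloc_cipher("aes", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_cipher_setkey(tfm, key, sizeof(key));
 *	crypto_cipher_encrypt_one(tfm, out, in);
 *	crypto_free_cipher(tfm);
 */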
179 
180 static int setkey_fallback_skcipher(struct crypto_skcipher *tfm, const u8 *key,
181 				    unsigned int len)
182 {
183 	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
184 
185 	crypto_skcipher_clear_flags(sctx->fallback.skcipher,
186 				    CRYPTO_TFM_REQ_MASK);
187 	crypto_skcipher_set_flags(sctx->fallback.skcipher,
188 				  crypto_skcipher_get_flags(tfm) &
189 				  CRYPTO_TFM_REQ_MASK);
190 	return crypto_skcipher_setkey(sctx->fallback.skcipher, key, len);
191 }
192 
193 static int fallback_skcipher_crypt(struct s390_aes_ctx *sctx,
194 				   struct skcipher_request *req,
195 				   unsigned long modifier)
196 {
197 	struct skcipher_request *subreq = skcipher_request_ctx(req);
198 
199 	*subreq = *req;
200 	skcipher_request_set_tfm(subreq, sctx->fallback.skcipher);
201 	return (modifier & CPACF_DECRYPT) ?
202 		crypto_skcipher_decrypt(subreq) :
203 		crypto_skcipher_encrypt(subreq);
204 }
205 
206 static int ecb_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
207 			   unsigned int key_len)
208 {
209 	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
210 	unsigned long fc;
211 
212 	/* Pick the correct function code based on the key length */
213 	fc = (key_len == 16) ? CPACF_KM_AES_128 :
214 	     (key_len == 24) ? CPACF_KM_AES_192 :
215 	     (key_len == 32) ? CPACF_KM_AES_256 : 0;
216 
217 	/* Check if the function code is available */
218 	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
219 	if (!sctx->fc)
220 		return setkey_fallback_skcipher(tfm, in_key, key_len);
221 
222 	sctx->key_len = key_len;
223 	memcpy(sctx->key, in_key, key_len);
224 	return 0;
225 }
226 
227 static int ecb_aes_crypt(struct skcipher_request *req, unsigned long modifier)
228 {
229 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
230 	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
231 	struct skcipher_walk walk;
232 	unsigned int nbytes, n;
233 	int ret;
234 
235 	if (unlikely(!sctx->fc))
236 		return fallback_skcipher_crypt(sctx, req, modifier);
237 
238 	ret = skcipher_walk_virt(&walk, req, false);
239 	while ((nbytes = walk.nbytes) != 0) {
240 		/* only use complete blocks */
241 		n = nbytes & ~(AES_BLOCK_SIZE - 1);
242 		cpacf_km(sctx->fc | modifier, sctx->key,
243 			 walk.dst.virt.addr, walk.src.virt.addr, n);
244 		ret = skcipher_walk_done(&walk, nbytes - n);
245 	}
246 	return ret;
247 }
248 
249 static int ecb_aes_encrypt(struct skcipher_request *req)
250 {
251 	return ecb_aes_crypt(req, 0);
252 }
253 
254 static int ecb_aes_decrypt(struct skcipher_request *req)
255 {
256 	return ecb_aes_crypt(req, CPACF_DECRYPT);
257 }
258 
259 static int fallback_init_skcipher(struct crypto_skcipher *tfm)
260 {
261 	const char *name = crypto_tfm_alg_name(&tfm->base);
262 	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
263 
264 	sctx->fallback.skcipher = crypto_alloc_skcipher(name, 0,
265 				CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
266 
267 	if (IS_ERR(sctx->fallback.skcipher)) {
268 		pr_err("Allocating AES fallback algorithm %s failed\n",
269 		       name);
270 		return PTR_ERR(sctx->fallback.skcipher);
271 	}
272 
273 	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
274 				    crypto_skcipher_reqsize(sctx->fallback.skcipher));
275 	return 0;
276 }
277 
278 static void fallback_exit_skcipher(struct crypto_skcipher *tfm)
279 {
280 	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
281 
282 	crypto_free_skcipher(sctx->fallback.skcipher);
283 }
284 
285 static struct skcipher_alg ecb_aes_alg = {
286 	.base.cra_name		=	"ecb(aes)",
287 	.base.cra_driver_name	=	"ecb-aes-s390",
288 	.base.cra_priority	=	401,	/* combo: aes + ecb + 1 */
289 	.base.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
290 	.base.cra_blocksize	=	AES_BLOCK_SIZE,
291 	.base.cra_ctxsize	=	sizeof(struct s390_aes_ctx),
292 	.base.cra_module	=	THIS_MODULE,
293 	.init			=	fallback_init_skcipher,
294 	.exit			=	fallback_exit_skcipher,
295 	.min_keysize		=	AES_MIN_KEY_SIZE,
296 	.max_keysize		=	AES_MAX_KEY_SIZE,
297 	.setkey			=	ecb_aes_set_key,
298 	.encrypt		=	ecb_aes_encrypt,
299 	.decrypt		=	ecb_aes_decrypt,
300 };
301 
302 static int cbc_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
303 			   unsigned int key_len)
304 {
305 	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
306 	unsigned long fc;
307 
308 	/* Pick the correct function code based on the key length */
309 	fc = (key_len == 16) ? CPACF_KMC_AES_128 :
310 	     (key_len == 24) ? CPACF_KMC_AES_192 :
311 	     (key_len == 32) ? CPACF_KMC_AES_256 : 0;
312 
313 	/* Check if the function code is available */
314 	sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
315 	if (!sctx->fc)
316 		return setkey_fallback_skcipher(tfm, in_key, key_len);
317 
318 	sctx->key_len = key_len;
319 	memcpy(sctx->key, in_key, key_len);
320 	return 0;
321 }
322 
323 static int cbc_aes_crypt(struct skcipher_request *req, unsigned long modifier)
324 {
325 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
326 	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
327 	struct skcipher_walk walk;
328 	unsigned int nbytes, n;
329 	int ret;
330 	struct {
331 		u8 iv[AES_BLOCK_SIZE];
332 		u8 key[AES_MAX_KEY_SIZE];
333 	} param;
334 
335 	if (unlikely(!sctx->fc))
336 		return fallback_skcipher_crypt(sctx, req, modifier);
337 
338 	ret = skcipher_walk_virt(&walk, req, false);
339 	if (ret)
340 		return ret;
341 	memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
342 	memcpy(param.key, sctx->key, sctx->key_len);
343 	while ((nbytes = walk.nbytes) != 0) {
344 		/* only use complete blocks */
345 		n = nbytes & ~(AES_BLOCK_SIZE - 1);
346 		cpacf_kmc(sctx->fc | modifier, &param,
347 			  walk.dst.virt.addr, walk.src.virt.addr, n);
348 		memcpy(walk.iv, param.iv, AES_BLOCK_SIZE);
349 		ret = skcipher_walk_done(&walk, nbytes - n);
350 	}
351 	memzero_explicit(&param, sizeof(param));
352 	return ret;
353 }
354 
355 static int cbc_aes_encrypt(struct skcipher_request *req)
356 {
357 	return cbc_aes_crypt(req, 0);
358 }
359 
360 static int cbc_aes_decrypt(struct skcipher_request *req)
361 {
362 	return cbc_aes_crypt(req, CPACF_DECRYPT);
363 }
364 
365 static struct skcipher_alg cbc_aes_alg = {
366 	.base.cra_name		=	"cbc(aes)",
367 	.base.cra_driver_name	=	"cbc-aes-s390",
368 	.base.cra_priority	=	402,	/* ecb-aes-s390 + 1 */
369 	.base.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
370 	.base.cra_blocksize	=	AES_BLOCK_SIZE,
371 	.base.cra_ctxsize	=	sizeof(struct s390_aes_ctx),
372 	.base.cra_module	=	THIS_MODULE,
373 	.init			=	fallback_init_skcipher,
374 	.exit			=	fallback_exit_skcipher,
375 	.min_keysize		=	AES_MIN_KEY_SIZE,
376 	.max_keysize		=	AES_MAX_KEY_SIZE,
377 	.ivsize			=	AES_BLOCK_SIZE,
378 	.setkey			=	cbc_aes_set_key,
379 	.encrypt		=	cbc_aes_encrypt,
380 	.decrypt		=	cbc_aes_decrypt,
381 };
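/*
 * Illustrative sketch (not taken from this driver): a kernel user reaches
 * "cbc-aes-s390" simply by asking for "cbc(aes)"; the higher cra_priority
 * lets it win over the generic software implementation when CPACF KMC is
 * available.  A minimal synchronous caller might look roughly like the
 * following, where key, iv and the kmalloc'ed data buffer are placeholders
 * and error handling is trimmed:
 *
 *	struct crypto_sync_skcipher *tfm;
 *	struct scatterlist sg;
 *	u8 key[AES_KEYSIZE_256] = {}, iv[AES_BLOCK_SIZE] = {};
 *	u8 *buf = kzalloc(4 * AES_BLOCK_SIZE, GFP_KERNEL);
 *
 *	tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
 *	crypto_sync_skcipher_setkey(tfm, key, sizeof(key));
 *	sg_init_one(&sg, buf, 4 * AES_BLOCK_SIZE);
 *	{
 *		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
 *
 *		skcipher_request_set_sync_tfm(req, tfm);
 *		skcipher_request_set_callback(req, 0, NULL, NULL);
 *		skcipher_request_set_crypt(req, &sg, &sg,
 *					   4 * AES_BLOCK_SIZE, iv);
 *		crypto_skcipher_encrypt(req);
 *		skcipher_request_zero(req);
 *	}
 *	crypto_free_sync_skcipher(tfm);
 *	kfree(buf);
 */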
382 
383 static int xts_fallback_setkey(struct crypto_skcipher *tfm, const u8 *key,
384 			       unsigned int len)
385 {
386 	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
387 
388 	crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK);
389 	crypto_skcipher_set_flags(xts_ctx->fallback,
390 				  crypto_skcipher_get_flags(tfm) &
391 				  CRYPTO_TFM_REQ_MASK);
392 	return crypto_skcipher_setkey(xts_ctx->fallback, key, len);
393 }
394 
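/*
 * For xts(aes) the supplied key is the concatenation of two equally sized
 * AES keys, K1 || K2.  xts_aes_set_key() below stores K1 in xts_ctx->key
 * (used for the data encryption) and K2 in xts_ctx->pcc_key, which is fed
 * to the PCC instruction to derive the initial XTS tweak.
 */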
395 static int xts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
396 			   unsigned int key_len)
397 {
398 	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
399 	unsigned long fc;
400 	int err;
401 
402 	err = xts_fallback_setkey(tfm, in_key, key_len);
403 	if (err)
404 		return err;
405 
406 	/* Pick the correct function code based on the key length */
407 	fc = (key_len == 32) ? CPACF_KM_XTS_128 :
408 	     (key_len == 64) ? CPACF_KM_XTS_256 : 0;
409 
410 	/* Check if the function code is available */
411 	xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
412 	if (!xts_ctx->fc)
413 		return 0;
414 
415 	/* Split the XTS key into the two subkeys */
416 	key_len = key_len / 2;
417 	xts_ctx->key_len = key_len;
418 	memcpy(xts_ctx->key, in_key, key_len);
419 	memcpy(xts_ctx->pcc_key, in_key + key_len, key_len);
420 	return 0;
421 }
422 
423 static int xts_aes_crypt(struct skcipher_request *req, unsigned long modifier)
424 {
425 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
426 	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
427 	struct skcipher_walk walk;
428 	unsigned int offset, nbytes, n;
429 	int ret;
430 	struct {
431 		u8 key[32];
432 		u8 tweak[16];
433 		u8 block[16];
434 		u8 bit[16];
435 		u8 xts[16];
436 	} pcc_param;
437 	struct {
438 		u8 key[32];
439 		u8 init[16];
440 	} xts_param;
441 
442 	if (req->cryptlen < AES_BLOCK_SIZE)
443 		return -EINVAL;
444 
445 	if (unlikely(!xts_ctx->fc || (req->cryptlen % AES_BLOCK_SIZE) != 0)) {
446 		struct skcipher_request *subreq = skcipher_request_ctx(req);
447 
448 		*subreq = *req;
449 		skcipher_request_set_tfm(subreq, xts_ctx->fallback);
450 		return (modifier & CPACF_DECRYPT) ?
451 			crypto_skcipher_decrypt(subreq) :
452 			crypto_skcipher_encrypt(subreq);
453 	}
454 
455 	ret = skcipher_walk_virt(&walk, req, false);
456 	if (ret)
457 		return ret;
458 	offset = xts_ctx->key_len & 0x10;
459 	memset(pcc_param.block, 0, sizeof(pcc_param.block));
460 	memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
461 	memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
462 	memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak));
463 	memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
464 	cpacf_pcc(xts_ctx->fc, pcc_param.key + offset);
465 
466 	memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
467 	memcpy(xts_param.init, pcc_param.xts, 16);
468 
469 	while ((nbytes = walk.nbytes) != 0) {
470 		/* only use complete blocks */
471 		n = nbytes & ~(AES_BLOCK_SIZE - 1);
472 		cpacf_km(xts_ctx->fc | modifier, xts_param.key + offset,
473 			 walk.dst.virt.addr, walk.src.virt.addr, n);
474 		ret = skcipher_walk_done(&walk, nbytes - n);
475 	}
476 	memzero_explicit(&pcc_param, sizeof(pcc_param));
477 	memzero_explicit(&xts_param, sizeof(xts_param));
478 	return ret;
479 }
480 
481 static int xts_aes_encrypt(struct skcipher_request *req)
482 {
483 	return xts_aes_crypt(req, 0);
484 }
485 
486 static int xts_aes_decrypt(struct skcipher_request *req)
487 {
488 	return xts_aes_crypt(req, CPACF_DECRYPT);
489 }
490 
491 static int xts_fallback_init(struct crypto_skcipher *tfm)
492 {
493 	const char *name = crypto_tfm_alg_name(&tfm->base);
494 	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
495 
496 	xts_ctx->fallback = crypto_alloc_skcipher(name, 0,
497 				CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
498 
499 	if (IS_ERR(xts_ctx->fallback)) {
500 		pr_err("Allocating XTS fallback algorithm %s failed\n",
501 		       name);
502 		return PTR_ERR(xts_ctx->fallback);
503 	}
504 	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
505 				    crypto_skcipher_reqsize(xts_ctx->fallback));
506 	return 0;
507 }
508 
509 static void xts_fallback_exit(struct crypto_skcipher *tfm)
510 {
511 	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
512 
513 	crypto_free_skcipher(xts_ctx->fallback);
514 }
515 
516 static struct skcipher_alg xts_aes_alg = {
517 	.base.cra_name		=	"xts(aes)",
518 	.base.cra_driver_name	=	"xts-aes-s390",
519 	.base.cra_priority	=	402,	/* ecb-aes-s390 + 1 */
520 	.base.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
521 	.base.cra_blocksize	=	AES_BLOCK_SIZE,
522 	.base.cra_ctxsize	=	sizeof(struct s390_xts_ctx),
523 	.base.cra_module	=	THIS_MODULE,
524 	.init			=	xts_fallback_init,
525 	.exit			=	xts_fallback_exit,
526 	.min_keysize		=	2 * AES_MIN_KEY_SIZE,
527 	.max_keysize		=	2 * AES_MAX_KEY_SIZE,
528 	.ivsize			=	AES_BLOCK_SIZE,
529 	.setkey			=	xts_aes_set_key,
530 	.encrypt		=	xts_aes_encrypt,
531 	.decrypt		=	xts_aes_decrypt,
532 };
533 
534 static int fullxts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
535 			       unsigned int key_len)
536 {
537 	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
538 	unsigned long fc;
539 	int err;
540 
541 	err = xts_fallback_setkey(tfm, in_key, key_len);
542 	if (err)
543 		return err;
544 
545 	/* Pick the correct function code based on the key length */
546 	fc = (key_len == 32) ? CPACF_KM_XTS_128_FULL :
547 	     (key_len == 64) ? CPACF_KM_XTS_256_FULL : 0;
548 
549 	/* Check if the function code is available */
550 	xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
551 	if (!xts_ctx->fc)
552 		return 0;
553 
554 	/* Store double-key */
555 	memcpy(xts_ctx->keys, in_key, key_len);
556 	xts_ctx->key_len = key_len;
557 	return 0;
558 }
559 
560 static int fullxts_aes_crypt(struct skcipher_request *req, unsigned long modifier)
561 {
562 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
563 	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
564 	unsigned int offset, nbytes, n;
565 	struct skcipher_walk walk;
566 	int ret;
567 	struct {
568 		__u8 key[64];
569 		__u8 tweak[16];
570 		__u8 nap[16];
571 	} fxts_param = {
572 		.nap = {0},
573 	};
574 
575 	if (req->cryptlen < AES_BLOCK_SIZE)
576 		return -EINVAL;
577 
578 	if (unlikely(!xts_ctx->fc || (req->cryptlen % AES_BLOCK_SIZE) != 0)) {
579 		struct skcipher_request *subreq = skcipher_request_ctx(req);
580 
581 		*subreq = *req;
582 		skcipher_request_set_tfm(subreq, xts_ctx->fallback);
583 		return (modifier & CPACF_DECRYPT) ?
584 			crypto_skcipher_decrypt(subreq) :
585 			crypto_skcipher_encrypt(subreq);
586 	}
587 
588 	ret = skcipher_walk_virt(&walk, req, false);
589 	if (ret)
590 		return ret;
591 
592 	offset = xts_ctx->key_len & 0x20;
593 	memcpy(fxts_param.key + offset, xts_ctx->keys, xts_ctx->key_len);
594 	memcpy(fxts_param.tweak, req->iv, AES_BLOCK_SIZE);
595 	fxts_param.nap[0] = 0x01; /* initial alpha power (1, little-endian) */
596 
597 	while ((nbytes = walk.nbytes) != 0) {
598 		/* only use complete blocks */
599 		n = nbytes & ~(AES_BLOCK_SIZE - 1);
600 		cpacf_km(xts_ctx->fc | modifier, fxts_param.key + offset,
601 			 walk.dst.virt.addr, walk.src.virt.addr, n);
602 		ret = skcipher_walk_done(&walk, nbytes - n);
603 	}
604 	memzero_explicit(&fxts_param, sizeof(fxts_param));
605 	return ret;
606 }
607 
608 static int fullxts_aes_encrypt(struct skcipher_request *req)
609 {
610 	return fullxts_aes_crypt(req, 0);
611 }
612 
613 static int fullxts_aes_decrypt(struct skcipher_request *req)
614 {
615 	return fullxts_aes_crypt(req, CPACF_DECRYPT);
616 }
617 
618 static struct skcipher_alg fullxts_aes_alg = {
619 	.base.cra_name		=	"xts(aes)",
620 	.base.cra_driver_name	=	"full-xts-aes-s390",
621 	.base.cra_priority	=	403,	/* aes-xts-s390 + 1 */
622 	.base.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
623 	.base.cra_blocksize	=	AES_BLOCK_SIZE,
624 	.base.cra_ctxsize	=	sizeof(struct s390_xts_ctx),
625 	.base.cra_module	=	THIS_MODULE,
626 	.init			=	xts_fallback_init,
627 	.exit			=	xts_fallback_exit,
628 	.min_keysize		=	2 * AES_MIN_KEY_SIZE,
629 	.max_keysize		=	2 * AES_MAX_KEY_SIZE,
630 	.ivsize			=	AES_BLOCK_SIZE,
631 	.setkey			=	fullxts_aes_set_key,
632 	.encrypt		=	fullxts_aes_encrypt,
633 	.decrypt		=	fullxts_aes_decrypt,
634 };
635 
636 static int ctr_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
637 			   unsigned int key_len)
638 {
639 	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
640 	unsigned long fc;
641 
642 	/* Pick the correct function code based on the key length */
643 	fc = (key_len == 16) ? CPACF_KMCTR_AES_128 :
644 	     (key_len == 24) ? CPACF_KMCTR_AES_192 :
645 	     (key_len == 32) ? CPACF_KMCTR_AES_256 : 0;
646 
647 	/* Check if the function code is available */
648 	sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
649 	if (!sctx->fc)
650 		return setkey_fallback_skcipher(tfm, in_key, key_len);
651 
652 	sctx->key_len = key_len;
653 	memcpy(sctx->key, in_key, key_len);
654 	return 0;
655 }
656 
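/*
 * Worked example (illustrative): with an IV ending in ...01 and nbytes
 * covering three complete blocks, __ctrblk_init() fills the first three
 * 16-byte slots of the ctrblk page with counters ending in ...01, ...02
 * and ...03 and returns 48, so a single KMCTR call can encrypt all three
 * blocks in one go.
 */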
657 static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
658 {
659 	unsigned int i, n;
660 
661 	/* only use complete blocks, max. PAGE_SIZE */
662 	memcpy(ctrptr, iv, AES_BLOCK_SIZE);
663 	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
664 	for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
665 		memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
666 		crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
667 		ctrptr += AES_BLOCK_SIZE;
668 	}
669 	return n;
670 }
671 
672 static int ctr_aes_crypt(struct skcipher_request *req)
673 {
674 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
675 	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
676 	u8 buf[AES_BLOCK_SIZE], *ctrptr;
677 	struct skcipher_walk walk;
678 	unsigned int n, nbytes;
679 	int ret, locked;
680 
681 	if (unlikely(!sctx->fc))
682 		return fallback_skcipher_crypt(sctx, req, 0);
683 
684 	locked = mutex_trylock(&ctrblk_lock);
685 
686 	ret = skcipher_walk_virt(&walk, req, false);
687 	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
688 		n = AES_BLOCK_SIZE;
689 
690 		if (nbytes >= 2*AES_BLOCK_SIZE && locked)
691 			n = __ctrblk_init(ctrblk, walk.iv, nbytes);
692 		ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv;
693 		cpacf_kmctr(sctx->fc, sctx->key, walk.dst.virt.addr,
694 			    walk.src.virt.addr, n, ctrptr);
695 		if (ctrptr == ctrblk)
696 			memcpy(walk.iv, ctrptr + n - AES_BLOCK_SIZE,
697 			       AES_BLOCK_SIZE);
698 		crypto_inc(walk.iv, AES_BLOCK_SIZE);
699 		ret = skcipher_walk_done(&walk, nbytes - n);
700 	}
701 	if (locked)
702 		mutex_unlock(&ctrblk_lock);
703 	/*
704 	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
705 	 */
706 	if (nbytes) {
707 		memset(buf, 0, AES_BLOCK_SIZE);
708 		memcpy(buf, walk.src.virt.addr, nbytes);
709 		cpacf_kmctr(sctx->fc, sctx->key, buf, buf,
710 			    AES_BLOCK_SIZE, walk.iv);
711 		memcpy(walk.dst.virt.addr, buf, nbytes);
712 		crypto_inc(walk.iv, AES_BLOCK_SIZE);
713 		ret = skcipher_walk_done(&walk, 0);
714 	}
715 
716 	return ret;
717 }
718 
719 static struct skcipher_alg ctr_aes_alg = {
720 	.base.cra_name		=	"ctr(aes)",
721 	.base.cra_driver_name	=	"ctr-aes-s390",
722 	.base.cra_priority	=	402,	/* ecb-aes-s390 + 1 */
723 	.base.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
724 	.base.cra_blocksize	=	1,
725 	.base.cra_ctxsize	=	sizeof(struct s390_aes_ctx),
726 	.base.cra_module	=	THIS_MODULE,
727 	.init			=	fallback_init_skcipher,
728 	.exit			=	fallback_exit_skcipher,
729 	.min_keysize		=	AES_MIN_KEY_SIZE,
730 	.max_keysize		=	AES_MAX_KEY_SIZE,
731 	.ivsize			=	AES_BLOCK_SIZE,
732 	.setkey			=	ctr_aes_set_key,
733 	.encrypt		=	ctr_aes_crypt,
734 	.decrypt		=	ctr_aes_crypt,
735 	.chunksize		=	AES_BLOCK_SIZE,
736 };
737 
738 static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key,
739 			  unsigned int keylen)
740 {
741 	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
742 
743 	switch (keylen) {
744 	case AES_KEYSIZE_128:
745 		ctx->fc = CPACF_KMA_GCM_AES_128;
746 		break;
747 	case AES_KEYSIZE_192:
748 		ctx->fc = CPACF_KMA_GCM_AES_192;
749 		break;
750 	case AES_KEYSIZE_256:
751 		ctx->fc = CPACF_KMA_GCM_AES_256;
752 		break;
753 	default:
754 		return -EINVAL;
755 	}
756 
757 	memcpy(ctx->key, key, keylen);
758 	ctx->key_len = keylen;
759 	return 0;
760 }
761 
762 static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
763 {
764 	switch (authsize) {
765 	case 4:
766 	case 8:
767 	case 12:
768 	case 13:
769 	case 14:
770 	case 15:
771 	case 16:
772 		break;
773 	default:
774 		return -EINVAL;
775 	}
776 
777 	return 0;
778 }
779 
780 static void gcm_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
781 			   unsigned int len)
782 {
783 	memset(gw, 0, sizeof(*gw));
784 	gw->walk_bytes_remain = len;
785 	scatterwalk_start(&gw->walk, sg);
786 }
787 
788 static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw)
789 {
790 	struct scatterlist *nextsg;
791 
792 	gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
793 	while (!gw->walk_bytes) {
794 		nextsg = sg_next(gw->walk.sg);
795 		if (!nextsg)
796 			return 0;
797 		scatterwalk_start(&gw->walk, nextsg);
798 		gw->walk_bytes = scatterwalk_clamp(&gw->walk,
799 						   gw->walk_bytes_remain);
800 	}
801 	gw->walk_ptr = scatterwalk_map(&gw->walk);
802 	return gw->walk_bytes;
803 }
804 
805 static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
806 					     unsigned int nbytes)
807 {
808 	gw->walk_bytes_remain -= nbytes;
809 	scatterwalk_unmap(gw->walk_ptr);
810 	scatterwalk_advance(&gw->walk, nbytes);
811 	scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
812 	gw->walk_ptr = NULL;
813 }
814 
815 static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
816 {
817 	int n;
818 
819 	if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
820 		gw->ptr = gw->buf;
821 		gw->nbytes = gw->buf_bytes;
822 		goto out;
823 	}
824 
825 	if (gw->walk_bytes_remain == 0) {
826 		gw->ptr = NULL;
827 		gw->nbytes = 0;
828 		goto out;
829 	}
830 
831 	if (!_gcm_sg_clamp_and_map(gw)) {
832 		gw->ptr = NULL;
833 		gw->nbytes = 0;
834 		goto out;
835 	}
836 
837 	if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
838 		gw->ptr = gw->walk_ptr;
839 		gw->nbytes = gw->walk_bytes;
840 		goto out;
841 	}
842 
843 	while (1) {
844 		n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
845 		memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
846 		gw->buf_bytes += n;
847 		_gcm_sg_unmap_and_advance(gw, n);
848 		if (gw->buf_bytes >= minbytesneeded) {
849 			gw->ptr = gw->buf;
850 			gw->nbytes = gw->buf_bytes;
851 			goto out;
852 		}
853 		if (!_gcm_sg_clamp_and_map(gw)) {
854 			gw->ptr = NULL;
855 			gw->nbytes = 0;
856 			goto out;
857 		}
858 	}
859 
860 out:
861 	return gw->nbytes;
862 }
863 
864 static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
865 {
866 	if (gw->walk_bytes_remain == 0) {
867 		gw->ptr = NULL;
868 		gw->nbytes = 0;
869 		goto out;
870 	}
871 
872 	if (!_gcm_sg_clamp_and_map(gw)) {
873 		gw->ptr = NULL;
874 		gw->nbytes = 0;
875 		goto out;
876 	}
877 
878 	if (gw->walk_bytes >= minbytesneeded) {
879 		gw->ptr = gw->walk_ptr;
880 		gw->nbytes = gw->walk_bytes;
881 		goto out;
882 	}
883 
884 	scatterwalk_unmap(gw->walk_ptr);
885 	gw->walk_ptr = NULL;
886 
887 	gw->ptr = gw->buf;
888 	gw->nbytes = sizeof(gw->buf);
889 
890 out:
891 	return gw->nbytes;
892 }
893 
894 static int gcm_in_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
895 {
896 	if (gw->ptr == NULL)
897 		return 0;
898 
899 	if (gw->ptr == gw->buf) {
900 		int n = gw->buf_bytes - bytesdone;
901 		if (n > 0) {
902 			memmove(gw->buf, gw->buf + bytesdone, n);
903 			gw->buf_bytes = n;
904 		} else
905 			gw->buf_bytes = 0;
906 	} else
907 		_gcm_sg_unmap_and_advance(gw, bytesdone);
908 
909 	return bytesdone;
910 }
911 
912 static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
913 {
914 	int i, n;
915 
916 	if (gw->ptr == NULL)
917 		return 0;
918 
919 	if (gw->ptr == gw->buf) {
920 		for (i = 0; i < bytesdone; i += n) {
921 			if (!_gcm_sg_clamp_and_map(gw))
922 				return i;
923 			n = min(gw->walk_bytes, bytesdone - i);
924 			memcpy(gw->walk_ptr, gw->buf + i, n);
925 			_gcm_sg_unmap_and_advance(gw, n);
926 		}
927 	} else
928 		_gcm_sg_unmap_and_advance(gw, bytesdone);
929 
930 	return bytesdone;
931 }
932 
933 static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
934 {
935 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
936 	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
937 	unsigned int ivsize = crypto_aead_ivsize(tfm);
938 	unsigned int taglen = crypto_aead_authsize(tfm);
939 	unsigned int aadlen = req->assoclen;
940 	unsigned int pclen = req->cryptlen;
941 	int ret = 0;
942 
943 	unsigned int n, len, in_bytes, out_bytes,
944 		     min_bytes, bytes, aad_bytes, pc_bytes;
945 	struct gcm_sg_walk gw_in, gw_out;
946 	u8 tag[GHASH_DIGEST_SIZE];
947 
948 	struct {
949 		u32 _[3];		/* reserved */
950 		u32 cv;			/* Counter Value */
951 		u8 t[GHASH_DIGEST_SIZE];/* Tag */
952 		u8 h[AES_BLOCK_SIZE];	/* Hash-subkey */
953 		u64 taadl;		/* Total AAD Length */
954 		u64 tpcl;		/* Total Plain-/Cipher-text Length */
955 		u8 j0[GHASH_BLOCK_SIZE];/* initial counter value */
956 		u8 k[AES_MAX_KEY_SIZE];	/* Key */
957 	} param;
958 
959 	/*
960 	 * encrypt
961 	 *   req->src: aad||plaintext
962 	 *   req->dst: aad||ciphertext||tag
963 	 * decrypt
964 	 *   req->src: aad||ciphertext||tag
965 	 *   req->dst: aad||plaintext, return 0 or -EBADMSG
966 	 * aad, plaintext and ciphertext may be empty.
967 	 */
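	/*
	 * Worked example (illustrative, decrypt): with req->assoclen = 16,
	 * req->cryptlen = 80 and a 16-byte auth tag, pclen becomes 64 and
	 * len = aadlen + pclen = 80; the KMA loop below consumes those 80
	 * bytes of aad||ciphertext and the tag is then read from offset
	 * len of req->src and compared against the computed tag.
	 */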
968 	if (flags & CPACF_DECRYPT)
969 		pclen -= taglen;
970 	len = aadlen + pclen;
971 
972 	memset(&param, 0, sizeof(param));
973 	param.cv = 1;
974 	param.taadl = aadlen * 8;
975 	param.tpcl = pclen * 8;
976 	memcpy(param.j0, req->iv, ivsize);
977 	*(u32 *)(param.j0 + ivsize) = 1;
978 	memcpy(param.k, ctx->key, ctx->key_len);
979 
980 	gcm_walk_start(&gw_in, req->src, len);
981 	gcm_walk_start(&gw_out, req->dst, len);
982 
983 	do {
984 		min_bytes = min_t(unsigned int,
985 				  aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
986 		in_bytes = gcm_in_walk_go(&gw_in, min_bytes);
987 		out_bytes = gcm_out_walk_go(&gw_out, min_bytes);
988 		bytes = min(in_bytes, out_bytes);
989 
990 		if (aadlen + pclen <= bytes) {
991 			aad_bytes = aadlen;
992 			pc_bytes = pclen;
993 			flags |= CPACF_KMA_LAAD | CPACF_KMA_LPC;
994 		} else {
995 			if (aadlen <= bytes) {
996 				aad_bytes = aadlen;
997 				pc_bytes = (bytes - aadlen) &
998 					   ~(AES_BLOCK_SIZE - 1);
999 				flags |= CPACF_KMA_LAAD;
1000 			} else {
1001 				aad_bytes = bytes & ~(AES_BLOCK_SIZE - 1);
1002 				pc_bytes = 0;
1003 			}
1004 		}
1005 
1006 		if (aad_bytes > 0)
1007 			memcpy(gw_out.ptr, gw_in.ptr, aad_bytes);
1008 
1009 		cpacf_kma(ctx->fc | flags, &param,
1010 			  gw_out.ptr + aad_bytes,
1011 			  gw_in.ptr + aad_bytes, pc_bytes,
1012 			  gw_in.ptr, aad_bytes);
1013 
1014 		n = aad_bytes + pc_bytes;
1015 		if (gcm_in_walk_done(&gw_in, n) != n)
1016 			return -ENOMEM;
1017 		if (gcm_out_walk_done(&gw_out, n) != n)
1018 			return -ENOMEM;
1019 		aadlen -= aad_bytes;
1020 		pclen -= pc_bytes;
1021 	} while (aadlen + pclen > 0);
1022 
1023 	if (flags & CPACF_DECRYPT) {
1024 		scatterwalk_map_and_copy(tag, req->src, len, taglen, 0);
1025 		if (crypto_memneq(tag, param.t, taglen))
1026 			ret = -EBADMSG;
1027 	} else
1028 		scatterwalk_map_and_copy(param.t, req->dst, len, taglen, 1);
1029 
1030 	memzero_explicit(&param, sizeof(param));
1031 	return ret;
1032 }
1033 
1034 static int gcm_aes_encrypt(struct aead_request *req)
1035 {
1036 	return gcm_aes_crypt(req, CPACF_ENCRYPT);
1037 }
1038 
1039 static int gcm_aes_decrypt(struct aead_request *req)
1040 {
1041 	return gcm_aes_crypt(req, CPACF_DECRYPT);
1042 }
1043 
1044 static struct aead_alg gcm_aes_aead = {
1045 	.setkey			= gcm_aes_setkey,
1046 	.setauthsize		= gcm_aes_setauthsize,
1047 	.encrypt		= gcm_aes_encrypt,
1048 	.decrypt		= gcm_aes_decrypt,
1049 
1050 	.ivsize			= GHASH_BLOCK_SIZE - sizeof(u32),
1051 	.maxauthsize		= GHASH_DIGEST_SIZE,
1052 	.chunksize		= AES_BLOCK_SIZE,
1053 
1054 	.base			= {
1055 		.cra_blocksize		= 1,
1056 		.cra_ctxsize		= sizeof(struct s390_aes_ctx),
1057 		.cra_priority		= 900,
1058 		.cra_name		= "gcm(aes)",
1059 		.cra_driver_name	= "gcm-aes-s390",
1060 		.cra_module		= THIS_MODULE,
1061 	},
1062 };
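/*
 * Illustrative sketch (not taken from this driver): a typical in-kernel
 * consumer of "gcm(aes)".  Names such as key, iv (12 bytes here, matching
 * the ivsize above), src_sg, dst_sg, assoclen and ptlen are placeholders;
 * req->src must carry assoclen bytes of AAD followed by the plaintext, and
 * on encryption the ciphertext plus auth tag land in req->dst after the
 * AAD, as described in gcm_aes_crypt() above.
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *	struct aead_request *req = aead_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_aead_setkey(tfm, key, AES_KEYSIZE_128);
 *	crypto_aead_setauthsize(tfm, 16);
 *	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				  crypto_req_done, &wait);
 *	aead_request_set_ad(req, assoclen);
 *	aead_request_set_crypt(req, src_sg, dst_sg, ptlen, iv);
 *	crypto_wait_req(crypto_aead_encrypt(req), &wait);
 *	aead_request_free(req);
 *	crypto_free_aead(tfm);
 */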
1063 
1064 static struct crypto_alg *aes_s390_alg;
1065 static struct skcipher_alg *aes_s390_skcipher_algs[5];
1066 static int aes_s390_skciphers_num;
1067 static struct aead_alg *aes_s390_aead_alg;
1068 
1069 static int aes_s390_register_skcipher(struct skcipher_alg *alg)
1070 {
1071 	int ret;
1072 
1073 	ret = crypto_register_skcipher(alg);
1074 	if (!ret)
1075 		aes_s390_skcipher_algs[aes_s390_skciphers_num++] = alg;
1076 	return ret;
1077 }
1078 
1079 static void aes_s390_fini(void)
1080 {
1081 	if (aes_s390_alg)
1082 		crypto_unregister_alg(aes_s390_alg);
1083 	while (aes_s390_skciphers_num--)
1084 		crypto_unregister_skcipher(aes_s390_skcipher_algs[aes_s390_skciphers_num]);
1085 	if (ctrblk)
1086 		free_page((unsigned long) ctrblk);
1087 
1088 	if (aes_s390_aead_alg)
1089 		crypto_unregister_aead(aes_s390_aead_alg);
1090 }
1091 
1092 static int __init aes_s390_init(void)
1093 {
1094 	int ret;
1095 
1096 	/* Query available functions for KM, KMC, KMCTR and KMA */
1097 	cpacf_query(CPACF_KM, &km_functions);
1098 	cpacf_query(CPACF_KMC, &kmc_functions);
1099 	cpacf_query(CPACF_KMCTR, &kmctr_functions);
1100 	cpacf_query(CPACF_KMA, &kma_functions);
1101 
1102 	if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
1103 	    cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
1104 	    cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
1105 		ret = crypto_register_alg(&aes_alg);
1106 		if (ret)
1107 			goto out_err;
1108 		aes_s390_alg = &aes_alg;
1109 		ret = aes_s390_register_skcipher(&ecb_aes_alg);
1110 		if (ret)
1111 			goto out_err;
1112 	}
1113 
1114 	if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
1115 	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
1116 	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
1117 		ret = aes_s390_register_skcipher(&cbc_aes_alg);
1118 		if (ret)
1119 			goto out_err;
1120 	}
1121 
1122 	if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128_FULL) ||
1123 	    cpacf_test_func(&km_functions, CPACF_KM_XTS_256_FULL)) {
1124 		ret = aes_s390_register_skcipher(&fullxts_aes_alg);
1125 		if (ret)
1126 			goto out_err;
1127 	}
1128 
1129 	if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
1130 	    cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
1131 		ret = aes_s390_register_skcipher(&xts_aes_alg);
1132 		if (ret)
1133 			goto out_err;
1134 	}
1135 
1136 	if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_128) ||
1137 	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_192) ||
1138 	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_256)) {
1139 		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
1140 		if (!ctrblk) {
1141 			ret = -ENOMEM;
1142 			goto out_err;
1143 		}
1144 		ret = aes_s390_register_skcipher(&ctr_aes_alg);
1145 		if (ret)
1146 			goto out_err;
1147 	}
1148 
1149 	if (cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_128) ||
1150 	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_192) ||
1151 	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_256)) {
1152 		ret = crypto_register_aead(&gcm_aes_aead);
1153 		if (ret)
1154 			goto out_err;
1155 		aes_s390_aead_alg = &gcm_aes_aead;
1156 	}
1157 
1158 	return 0;
1159 out_err:
1160 	aes_s390_fini();
1161 	return ret;
1162 }
1163 
1164 module_cpu_feature_match(S390_CPU_FEATURE_MSA, aes_s390_init);
1165 module_exit(aes_s390_fini);
1166 
1167 MODULE_ALIAS_CRYPTO("aes-all");
1168 
1169 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
1170 MODULE_LICENSE("GPL");
1171 MODULE_IMPORT_NS(CRYPTO_INTERNAL);
1172