xref: /linux/arch/s390/crypto/aes_s390.c (revision d91517839e5d95adc0cf4b28caa7af62a71de526)
1 /*
2  * Cryptographic API.
3  *
4  * s390 implementation of the AES Cipher Algorithm.
5  *
6  * s390 Version:
7  *   Copyright IBM Corp. 2005, 2007
8  *   Author(s): Jan Glauber (jang@de.ibm.com)
9  *		Sebastian Siewior <sebastian@breakpoint.cc> SW-Fallback
10  *
11  * Derived from "crypto/aes_generic.c"
12  *
13  * This program is free software; you can redistribute it and/or modify it
14  * under the terms of the GNU General Public License as published by the Free
15  * Software Foundation; either version 2 of the License, or (at your option)
16  * any later version.
17  *
18  */
19 
20 #define KMSG_COMPONENT "aes_s390"
21 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
22 
23 #include <crypto/aes.h>
24 #include <crypto/algapi.h>
25 #include <linux/err.h>
26 #include <linux/module.h>
27 #include <linux/init.h>
28 #include "crypt_s390.h"
29 
30 #define AES_KEYLEN_128		1
31 #define AES_KEYLEN_192		2
32 #define AES_KEYLEN_256		4
33 
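/*
 * ctrblk: one page, allocated at module init, used to stage consecutive
 * counter blocks for the KMCTR operation.
 * keylen_flag: AES_KEYLEN_* bits for the key lengths the CPU supports,
 * probed at module init.
 */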
34 static u8 *ctrblk;
35 static char keylen_flag;
36 
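/*
 * Per-tfm context: the raw AES key, the crypt_s390 function codes chosen
 * at setkey time for encryption and decryption, and a software fallback
 * tfm for key lengths the hardware does not support.
 */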
37 struct s390_aes_ctx {
38 	u8 key[AES_MAX_KEY_SIZE];
39 	long enc;
40 	long dec;
41 	int key_len;
42 	union {
43 		struct crypto_blkcipher *blk;
44 		struct crypto_cipher *cip;
45 	} fallback;
46 };
47 
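/*
 * Parameter block for the PCC operation used below to compute the initial
 * XTS tweak: block and bit are cleared, tweak holds the IV, key holds the
 * tweak key, and xts receives the computed value.
 */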
48 struct pcc_param {
49 	u8 key[32];
50 	u8 tweak[16];
51 	u8 block[16];
52 	u8 bit[16];
53 	u8 xts[16];
54 };
55 
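/*
 * XTS context: the data-encryption key, the tweak (PCC) key, the KM
 * function codes for encryption and decryption, and a software fallback
 * tfm for 48-byte (AES-192) keys, which the hardware does not handle.
 */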
56 struct s390_xts_ctx {
57 	u8 key[32];
58 	u8 pcc_key[32];
59 	long enc;
60 	long dec;
61 	int key_len;
62 	struct crypto_blkcipher *fallback;
63 };
64 
65 /*
66  * Check if the key_len is supported by the HW.
67  * Returns 0 if it is, a positive number if it is not and the software
68  * fallback is required, or a negative number if the key size is not valid.
69  */
70 static int need_fallback(unsigned int key_len)
71 {
72 	switch (key_len) {
73 	case 16:
74 		if (!(keylen_flag & AES_KEYLEN_128))
75 			return 1;
76 		break;
77 	case 24:
78 		if (!(keylen_flag & AES_KEYLEN_192))
79 			return 1;
80 		break;
81 	case 32:
82 		if (!(keylen_flag & AES_KEYLEN_256))
83 			return 1;
84 		break;
85 	default:
86 		return -1;
88 	}
89 	return 0;
90 }
91 
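/*
 * Forward the request flags to the fallback cipher, set the key on it,
 * and copy any result flags back so the caller sees the real cause of
 * a setkey failure.
 */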
92 static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
93 		unsigned int key_len)
94 {
95 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
96 	int ret;
97 
98 	sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
99 	sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
100 			CRYPTO_TFM_REQ_MASK);
101 
102 	ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
103 	if (ret) {
104 		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
105 		tfm->crt_flags |= (sctx->fallback.cip->base.crt_flags &
106 				CRYPTO_TFM_RES_MASK);
107 	}
108 	return ret;
109 }
110 
111 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
112 		       unsigned int key_len)
113 {
114 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
115 	u32 *flags = &tfm->crt_flags;
116 	int ret;
117 
118 	ret = need_fallback(key_len);
119 	if (ret < 0) {
120 		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
121 		return -EINVAL;
122 	}
123 
124 	sctx->key_len = key_len;
125 	if (!ret) {
126 		memcpy(sctx->key, in_key, key_len);
127 		return 0;
128 	}
129 
130 	return setkey_fallback_cip(tfm, in_key, key_len);
131 }
132 
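/*
 * Single-block encryption/decryption: use the KM function matching the
 * key length, or the cipher fallback if the hardware lacks support.
 */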
133 static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
134 {
135 	const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
136 
137 	if (unlikely(need_fallback(sctx->key_len))) {
138 		crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
139 		return;
140 	}
141 
142 	switch (sctx->key_len) {
143 	case 16:
144 		crypt_s390_km(KM_AES_128_ENCRYPT, &sctx->key, out, in,
145 			      AES_BLOCK_SIZE);
146 		break;
147 	case 24:
148 		crypt_s390_km(KM_AES_192_ENCRYPT, &sctx->key, out, in,
149 			      AES_BLOCK_SIZE);
150 		break;
151 	case 32:
152 		crypt_s390_km(KM_AES_256_ENCRYPT, &sctx->key, out, in,
153 			      AES_BLOCK_SIZE);
154 		break;
155 	}
156 }
157 
158 static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
159 {
160 	const struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
161 
162 	if (unlikely(need_fallback(sctx->key_len))) {
163 		crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
164 		return;
165 	}
166 
167 	switch (sctx->key_len) {
168 	case 16:
169 		crypt_s390_km(KM_AES_128_DECRYPT, &sctx->key, out, in,
170 			      AES_BLOCK_SIZE);
171 		break;
172 	case 24:
173 		crypt_s390_km(KM_AES_192_DECRYPT, &sctx->key, out, in,
174 			      AES_BLOCK_SIZE);
175 		break;
176 	case 32:
177 		crypt_s390_km(KM_AES_256_DECRYPT, &sctx->key, out, in,
178 			      AES_BLOCK_SIZE);
179 		break;
180 	}
181 }
182 
183 static int fallback_init_cip(struct crypto_tfm *tfm)
184 {
185 	const char *name = tfm->__crt_alg->cra_name;
186 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
187 
188 	sctx->fallback.cip = crypto_alloc_cipher(name, 0,
189 			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
190 
191 	if (IS_ERR(sctx->fallback.cip)) {
192 		pr_err("Allocating AES fallback algorithm %s failed\n",
193 		       name);
194 		return PTR_ERR(sctx->fallback.cip);
195 	}
196 
197 	return 0;
198 }
199 
200 static void fallback_exit_cip(struct crypto_tfm *tfm)
201 {
202 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
203 
204 	crypto_free_cipher(sctx->fallback.cip);
205 	sctx->fallback.cip = NULL;
206 }
207 
208 static struct crypto_alg aes_alg = {
209 	.cra_name		=	"aes",
210 	.cra_driver_name	=	"aes-s390",
211 	.cra_priority		=	CRYPT_S390_PRIORITY,
212 	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER |
213 					CRYPTO_ALG_NEED_FALLBACK,
214 	.cra_blocksize		=	AES_BLOCK_SIZE,
215 	.cra_ctxsize		=	sizeof(struct s390_aes_ctx),
216 	.cra_module		=	THIS_MODULE,
217 	.cra_init               =       fallback_init_cip,
218 	.cra_exit               =       fallback_exit_cip,
219 	.cra_u			=	{
220 		.cipher = {
221 			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
222 			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
223 			.cia_setkey		=	aes_set_key,
224 			.cia_encrypt		=	aes_encrypt,
225 			.cia_decrypt		=	aes_decrypt,
226 		}
227 	}
228 };
229 
230 static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key,
231 		unsigned int len)
232 {
233 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
234 	int ret;
235 
236 	sctx->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
237 	sctx->fallback.blk->base.crt_flags |= (tfm->crt_flags &
238 			CRYPTO_TFM_REQ_MASK);
239 
240 	ret = crypto_blkcipher_setkey(sctx->fallback.blk, key, len);
241 	if (ret) {
242 		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
243 		tfm->crt_flags |= (sctx->fallback.blk->base.crt_flags &
244 				CRYPTO_TFM_RES_MASK);
245 	}
246 	return ret;
247 }
248 
249 static int fallback_blk_dec(struct blkcipher_desc *desc,
250 		struct scatterlist *dst, struct scatterlist *src,
251 		unsigned int nbytes)
252 {
253 	int ret;
254 	struct crypto_blkcipher *tfm;
255 	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
256 
257 	tfm = desc->tfm;
258 	desc->tfm = sctx->fallback.blk;
259 
260 	ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);
261 
262 	desc->tfm = tfm;
263 	return ret;
264 }
265 
266 static int fallback_blk_enc(struct blkcipher_desc *desc,
267 		struct scatterlist *dst, struct scatterlist *src,
268 		unsigned int nbytes)
269 {
270 	int ret;
271 	struct crypto_blkcipher *tfm;
272 	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
273 
274 	tfm = desc->tfm;
275 	desc->tfm = sctx->fallback.blk;
276 
277 	ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
278 
279 	desc->tfm = tfm;
280 	return ret;
281 }
282 
283 static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
284 			   unsigned int key_len)
285 {
286 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
287 	int ret;
288 
289 	ret = need_fallback(key_len);
290 	if (ret > 0) {
291 		sctx->key_len = key_len;
292 		return setkey_fallback_blk(tfm, in_key, key_len);
293 	}
294 
295 	switch (key_len) {
296 	case 16:
297 		sctx->enc = KM_AES_128_ENCRYPT;
298 		sctx->dec = KM_AES_128_DECRYPT;
299 		break;
300 	case 24:
301 		sctx->enc = KM_AES_192_ENCRYPT;
302 		sctx->dec = KM_AES_192_DECRYPT;
303 		break;
304 	case 32:
305 		sctx->enc = KM_AES_256_ENCRYPT;
306 		sctx->dec = KM_AES_256_DECRYPT;
307 		break;
308 	}
309 
310 	return aes_set_key(tfm, in_key, key_len);
311 }
312 
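/*
 * Walk the scatterlists and feed only complete AES blocks to the KM
 * operation; a short or failed return from the hardware is reported
 * as -EIO.
 */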
313 static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param,
314 			 struct blkcipher_walk *walk)
315 {
316 	int ret = blkcipher_walk_virt(desc, walk);
317 	unsigned int nbytes;
318 
319 	while ((nbytes = walk->nbytes)) {
320 		/* only use complete blocks */
321 		unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
322 		u8 *out = walk->dst.virt.addr;
323 		u8 *in = walk->src.virt.addr;
324 
325 		ret = crypt_s390_km(func, param, out, in, n);
326 		if (ret < 0 || ret != n)
327 			return -EIO;
328 
329 		nbytes &= AES_BLOCK_SIZE - 1;
330 		ret = blkcipher_walk_done(desc, walk, nbytes);
331 	}
332 
333 	return ret;
334 }
335 
336 static int ecb_aes_encrypt(struct blkcipher_desc *desc,
337 			   struct scatterlist *dst, struct scatterlist *src,
338 			   unsigned int nbytes)
339 {
340 	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
341 	struct blkcipher_walk walk;
342 
343 	if (unlikely(need_fallback(sctx->key_len)))
344 		return fallback_blk_enc(desc, dst, src, nbytes);
345 
346 	blkcipher_walk_init(&walk, dst, src, nbytes);
347 	return ecb_aes_crypt(desc, sctx->enc, sctx->key, &walk);
348 }
349 
350 static int ecb_aes_decrypt(struct blkcipher_desc *desc,
351 			   struct scatterlist *dst, struct scatterlist *src,
352 			   unsigned int nbytes)
353 {
354 	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
355 	struct blkcipher_walk walk;
356 
357 	if (unlikely(need_fallback(sctx->key_len)))
358 		return fallback_blk_dec(desc, dst, src, nbytes);
359 
360 	blkcipher_walk_init(&walk, dst, src, nbytes);
361 	return ecb_aes_crypt(desc, sctx->dec, sctx->key, &walk);
362 }
363 
364 static int fallback_init_blk(struct crypto_tfm *tfm)
365 {
366 	const char *name = tfm->__crt_alg->cra_name;
367 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
368 
369 	sctx->fallback.blk = crypto_alloc_blkcipher(name, 0,
370 			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
371 
372 	if (IS_ERR(sctx->fallback.blk)) {
373 		pr_err("Allocating AES fallback algorithm %s failed\n",
374 		       name);
375 		return PTR_ERR(sctx->fallback.blk);
376 	}
377 
378 	return 0;
379 }
380 
381 static void fallback_exit_blk(struct crypto_tfm *tfm)
382 {
383 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
384 
385 	crypto_free_blkcipher(sctx->fallback.blk);
386 	sctx->fallback.blk = NULL;
387 }
388 
389 static struct crypto_alg ecb_aes_alg = {
390 	.cra_name		=	"ecb(aes)",
391 	.cra_driver_name	=	"ecb-aes-s390",
392 	.cra_priority		=	CRYPT_S390_COMPOSITE_PRIORITY,
393 	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER |
394 					CRYPTO_ALG_NEED_FALLBACK,
395 	.cra_blocksize		=	AES_BLOCK_SIZE,
396 	.cra_ctxsize		=	sizeof(struct s390_aes_ctx),
397 	.cra_type		=	&crypto_blkcipher_type,
398 	.cra_module		=	THIS_MODULE,
399 	.cra_init		=	fallback_init_blk,
400 	.cra_exit		=	fallback_exit_blk,
401 	.cra_u			=	{
402 		.blkcipher = {
403 			.min_keysize		=	AES_MIN_KEY_SIZE,
404 			.max_keysize		=	AES_MAX_KEY_SIZE,
405 			.setkey			=	ecb_aes_set_key,
406 			.encrypt		=	ecb_aes_encrypt,
407 			.decrypt		=	ecb_aes_decrypt,
408 		}
409 	}
410 };
411 
412 static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
413 			   unsigned int key_len)
414 {
415 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
416 	int ret;
417 
418 	ret = need_fallback(key_len);
419 	if (ret > 0) {
420 		sctx->key_len = key_len;
421 		return setkey_fallback_blk(tfm, in_key, key_len);
422 	}
423 
424 	switch (key_len) {
425 	case 16:
426 		sctx->enc = KMC_AES_128_ENCRYPT;
427 		sctx->dec = KMC_AES_128_DECRYPT;
428 		break;
429 	case 24:
430 		sctx->enc = KMC_AES_192_ENCRYPT;
431 		sctx->dec = KMC_AES_192_DECRYPT;
432 		break;
433 	case 32:
434 		sctx->enc = KMC_AES_256_ENCRYPT;
435 		sctx->dec = KMC_AES_256_DECRYPT;
436 		break;
437 	}
438 
439 	return aes_set_key(tfm, in_key, key_len);
440 }
441 
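/*
 * KMC takes the chaining value (IV) and the key in a single parameter
 * block: copy both in, process complete blocks, then copy the updated
 * chaining value back to walk->iv.
 */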
442 static int cbc_aes_crypt(struct blkcipher_desc *desc, long func,
443 			 struct blkcipher_walk *walk)
444 {
445 	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
446 	int ret = blkcipher_walk_virt(desc, walk);
447 	unsigned int nbytes = walk->nbytes;
448 	struct {
449 		u8 iv[AES_BLOCK_SIZE];
450 		u8 key[AES_MAX_KEY_SIZE];
451 	} param;
452 
453 	if (!nbytes)
454 		goto out;
455 
456 	memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
457 	memcpy(param.key, sctx->key, sctx->key_len);
458 	do {
459 		/* only use complete blocks */
460 		unsigned int n = nbytes & ~(AES_BLOCK_SIZE - 1);
461 		u8 *out = walk->dst.virt.addr;
462 		u8 *in = walk->src.virt.addr;
463 
464 		ret = crypt_s390_kmc(func, &param, out, in, n);
465 		if (ret < 0 || ret != n)
466 			return -EIO;
467 
468 		nbytes &= AES_BLOCK_SIZE - 1;
469 		ret = blkcipher_walk_done(desc, walk, nbytes);
470 	} while ((nbytes = walk->nbytes));
471 	memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
472 
473 out:
474 	return ret;
475 }
476 
477 static int cbc_aes_encrypt(struct blkcipher_desc *desc,
478 			   struct scatterlist *dst, struct scatterlist *src,
479 			   unsigned int nbytes)
480 {
481 	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
482 	struct blkcipher_walk walk;
483 
484 	if (unlikely(need_fallback(sctx->key_len)))
485 		return fallback_blk_enc(desc, dst, src, nbytes);
486 
487 	blkcipher_walk_init(&walk, dst, src, nbytes);
488 	return cbc_aes_crypt(desc, sctx->enc, &walk);
489 }
490 
491 static int cbc_aes_decrypt(struct blkcipher_desc *desc,
492 			   struct scatterlist *dst, struct scatterlist *src,
493 			   unsigned int nbytes)
494 {
495 	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
496 	struct blkcipher_walk walk;
497 
498 	if (unlikely(need_fallback(sctx->key_len)))
499 		return fallback_blk_dec(desc, dst, src, nbytes);
500 
501 	blkcipher_walk_init(&walk, dst, src, nbytes);
502 	return cbc_aes_crypt(desc, sctx->dec, &walk);
503 }
504 
505 static struct crypto_alg cbc_aes_alg = {
506 	.cra_name		=	"cbc(aes)",
507 	.cra_driver_name	=	"cbc-aes-s390",
508 	.cra_priority		=	CRYPT_S390_COMPOSITE_PRIORITY,
509 	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER |
510 					CRYPTO_ALG_NEED_FALLBACK,
511 	.cra_blocksize		=	AES_BLOCK_SIZE,
512 	.cra_ctxsize		=	sizeof(struct s390_aes_ctx),
513 	.cra_type		=	&crypto_blkcipher_type,
514 	.cra_module		=	THIS_MODULE,
515 	.cra_init		=	fallback_init_blk,
516 	.cra_exit		=	fallback_exit_blk,
517 	.cra_u			=	{
518 		.blkcipher = {
519 			.min_keysize		=	AES_MIN_KEY_SIZE,
520 			.max_keysize		=	AES_MAX_KEY_SIZE,
521 			.ivsize			=	AES_BLOCK_SIZE,
522 			.setkey			=	cbc_aes_set_key,
523 			.encrypt		=	cbc_aes_encrypt,
524 			.decrypt		=	cbc_aes_decrypt,
525 		}
526 	}
527 };
528 
529 static int xts_fallback_setkey(struct crypto_tfm *tfm, const u8 *key,
530 				   unsigned int len)
531 {
532 	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
533 	int ret;
534 
535 	xts_ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
536 	xts_ctx->fallback->base.crt_flags |= (tfm->crt_flags &
537 			CRYPTO_TFM_REQ_MASK);
538 
539 	ret = crypto_blkcipher_setkey(xts_ctx->fallback, key, len);
540 	if (ret) {
541 		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
542 		tfm->crt_flags |= (xts_ctx->fallback->base.crt_flags &
543 				CRYPTO_TFM_RES_MASK);
544 	}
545 	return ret;
546 }
547 
548 static int xts_fallback_decrypt(struct blkcipher_desc *desc,
549 		struct scatterlist *dst, struct scatterlist *src,
550 		unsigned int nbytes)
551 {
552 	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
553 	struct crypto_blkcipher *tfm;
554 	int ret;
555 
556 	tfm = desc->tfm;
557 	desc->tfm = xts_ctx->fallback;
558 
559 	ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);
560 
561 	desc->tfm = tfm;
562 	return ret;
563 }
564 
565 static int xts_fallback_encrypt(struct blkcipher_desc *desc,
566 		struct scatterlist *dst, struct scatterlist *src,
567 		unsigned int nbytes)
568 {
569 	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
570 	struct crypto_blkcipher *tfm;
571 	int ret;
572 
573 	tfm = desc->tfm;
574 	desc->tfm = xts_ctx->fallback;
575 
576 	ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
577 
578 	desc->tfm = tfm;
579 	return ret;
580 }
581 
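/*
 * XTS keys are twice the AES key size: the first half is the data key,
 * the second half the tweak key. 32- and 64-byte keys map to the
 * XTS-128/XTS-256 KM functions; 48-byte (AES-192) keys are handed to
 * the software fallback.
 */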
582 static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
583 			   unsigned int key_len)
584 {
585 	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
586 	u32 *flags = &tfm->crt_flags;
587 
588 	switch (key_len) {
589 	case 32:
590 		xts_ctx->enc = KM_XTS_128_ENCRYPT;
591 		xts_ctx->dec = KM_XTS_128_DECRYPT;
592 		memcpy(xts_ctx->key + 16, in_key, 16);
593 		memcpy(xts_ctx->pcc_key + 16, in_key + 16, 16);
594 		break;
595 	case 48:
596 		xts_ctx->enc = 0;
597 		xts_ctx->dec = 0;
598 		xts_ctx->key_len = key_len;
599 		return xts_fallback_setkey(tfm, in_key, key_len);
600 	case 64:
601 		xts_ctx->enc = KM_XTS_256_ENCRYPT;
602 		xts_ctx->dec = KM_XTS_256_DECRYPT;
603 		memcpy(xts_ctx->key, in_key, 32);
604 		memcpy(xts_ctx->pcc_key, in_key + 32, 32);
605 		break;
606 	default:
607 		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
608 		return -EINVAL;
609 	}
610 	xts_ctx->key_len = key_len;
611 	return 0;
612 }
613 
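/*
 * Run PCC with the tweak key and the IV to compute the initial XTS
 * parameter, then pass it together with the data key to KM for the
 * actual block processing.
 */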
614 static int xts_aes_crypt(struct blkcipher_desc *desc, long func,
615 			 struct s390_xts_ctx *xts_ctx,
616 			 struct blkcipher_walk *walk)
617 {
618 	unsigned int offset = (xts_ctx->key_len >> 1) & 0x10;
619 	int ret = blkcipher_walk_virt(desc, walk);
620 	unsigned int nbytes = walk->nbytes;
621 	unsigned int n;
622 	u8 *in, *out;
623 	struct pcc_param pcc_param;
624 	struct {
625 		u8 key[32];
626 		u8 init[16];
627 	} xts_param;
628 
629 	if (!nbytes)
630 		goto out;
631 
632 	memset(pcc_param.block, 0, sizeof(pcc_param.block));
633 	memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
634 	memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
635 	memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
636 	memcpy(pcc_param.key, xts_ctx->pcc_key, 32);
637 	ret = crypt_s390_pcc(func, &pcc_param.key[offset]);
638 	if (ret < 0)
639 		return -EIO;
640 
641 	memcpy(xts_param.key, xts_ctx->key, 32);
642 	memcpy(xts_param.init, pcc_param.xts, 16);
643 	do {
644 		/* only use complete blocks */
645 		n = nbytes & ~(AES_BLOCK_SIZE - 1);
646 		out = walk->dst.virt.addr;
647 		in = walk->src.virt.addr;
648 
649 		ret = crypt_s390_km(func, &xts_param.key[offset], out, in, n);
650 		if (ret < 0 || ret != n)
651 			return -EIO;
652 
653 		nbytes &= AES_BLOCK_SIZE - 1;
654 		ret = blkcipher_walk_done(desc, walk, nbytes);
655 	} while ((nbytes = walk->nbytes));
656 out:
657 	return ret;
658 }
659 
660 static int xts_aes_encrypt(struct blkcipher_desc *desc,
661 			   struct scatterlist *dst, struct scatterlist *src,
662 			   unsigned int nbytes)
663 {
664 	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
665 	struct blkcipher_walk walk;
666 
667 	if (unlikely(xts_ctx->key_len == 48))
668 		return xts_fallback_encrypt(desc, dst, src, nbytes);
669 
670 	blkcipher_walk_init(&walk, dst, src, nbytes);
671 	return xts_aes_crypt(desc, xts_ctx->enc, xts_ctx, &walk);
672 }
673 
674 static int xts_aes_decrypt(struct blkcipher_desc *desc,
675 			   struct scatterlist *dst, struct scatterlist *src,
676 			   unsigned int nbytes)
677 {
678 	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
679 	struct blkcipher_walk walk;
680 
681 	if (unlikely(xts_ctx->key_len == 48))
682 		return xts_fallback_decrypt(desc, dst, src, nbytes);
683 
684 	blkcipher_walk_init(&walk, dst, src, nbytes);
685 	return xts_aes_crypt(desc, xts_ctx->dec, xts_ctx, &walk);
686 }
687 
688 static int xts_fallback_init(struct crypto_tfm *tfm)
689 {
690 	const char *name = tfm->__crt_alg->cra_name;
691 	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
692 
693 	xts_ctx->fallback = crypto_alloc_blkcipher(name, 0,
694 			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
695 
696 	if (IS_ERR(xts_ctx->fallback)) {
697 		pr_err("Allocating XTS fallback algorithm %s failed\n",
698 		       name);
699 		return PTR_ERR(xts_ctx->fallback);
700 	}
701 	return 0;
702 }
703 
704 static void xts_fallback_exit(struct crypto_tfm *tfm)
705 {
706 	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
707 
708 	crypto_free_blkcipher(xts_ctx->fallback);
709 	xts_ctx->fallback = NULL;
710 }
711 
712 static struct crypto_alg xts_aes_alg = {
713 	.cra_name		=	"xts(aes)",
714 	.cra_driver_name	=	"xts-aes-s390",
715 	.cra_priority		=	CRYPT_S390_COMPOSITE_PRIORITY,
716 	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER |
717 					CRYPTO_ALG_NEED_FALLBACK,
718 	.cra_blocksize		=	AES_BLOCK_SIZE,
719 	.cra_ctxsize		=	sizeof(struct s390_xts_ctx),
720 	.cra_type		=	&crypto_blkcipher_type,
721 	.cra_module		=	THIS_MODULE,
722 	.cra_init		=	xts_fallback_init,
723 	.cra_exit		=	xts_fallback_exit,
724 	.cra_u			=	{
725 		.blkcipher = {
726 			.min_keysize		=	2 * AES_MIN_KEY_SIZE,
727 			.max_keysize		=	2 * AES_MAX_KEY_SIZE,
728 			.ivsize			=	AES_BLOCK_SIZE,
729 			.setkey			=	xts_aes_set_key,
730 			.encrypt		=	xts_aes_encrypt,
731 			.decrypt		=	xts_aes_decrypt,
732 		}
733 	}
734 };
735 
736 static int xts_aes_alg_reg;
737 
738 static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
739 			   unsigned int key_len)
740 {
741 	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
742 
743 	switch (key_len) {
744 	case 16:
745 		sctx->enc = KMCTR_AES_128_ENCRYPT;
746 		sctx->dec = KMCTR_AES_128_DECRYPT;
747 		break;
748 	case 24:
749 		sctx->enc = KMCTR_AES_192_ENCRYPT;
750 		sctx->dec = KMCTR_AES_192_DECRYPT;
751 		break;
752 	case 32:
753 		sctx->enc = KMCTR_AES_256_ENCRYPT;
754 		sctx->dec = KMCTR_AES_256_DECRYPT;
755 		break;
756 	}
757 
758 	return aes_set_key(tfm, in_key, key_len);
759 }
760 
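/*
 * Pre-generate consecutive counter blocks in the preallocated ctrblk
 * page so that KMCTR can process up to PAGE_SIZE of data per call;
 * a trailing partial block is encrypted into a stack buffer and only
 * nbytes of it are copied out.
 */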
761 static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
762 			 struct s390_aes_ctx *sctx, struct blkcipher_walk *walk)
763 {
764 	int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
765 	unsigned int i, n, nbytes;
766 	u8 buf[AES_BLOCK_SIZE];
767 	u8 *out, *in;
768 
769 	if (!walk->nbytes)
770 		return ret;
771 
772 	memcpy(ctrblk, walk->iv, AES_BLOCK_SIZE);
773 	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
774 		out = walk->dst.virt.addr;
775 		in = walk->src.virt.addr;
776 		while (nbytes >= AES_BLOCK_SIZE) {
777 			/* only use complete blocks, max. PAGE_SIZE */
778 			n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
779 						 nbytes & ~(AES_BLOCK_SIZE - 1);
780 			for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
781 				memcpy(ctrblk + i, ctrblk + i - AES_BLOCK_SIZE,
782 				       AES_BLOCK_SIZE);
783 				crypto_inc(ctrblk + i, AES_BLOCK_SIZE);
784 			}
785 			ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk);
786 			if (ret < 0 || ret != n)
787 				return -EIO;
788 			if (n > AES_BLOCK_SIZE)
789 				memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE,
790 				       AES_BLOCK_SIZE);
791 			crypto_inc(ctrblk, AES_BLOCK_SIZE);
792 			out += n;
793 			in += n;
794 			nbytes -= n;
795 		}
796 		ret = blkcipher_walk_done(desc, walk, nbytes);
797 	}
798 	/*
799 	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
800 	 */
801 	if (nbytes) {
802 		out = walk->dst.virt.addr;
803 		in = walk->src.virt.addr;
804 		ret = crypt_s390_kmctr(func, sctx->key, buf, in,
805 				       AES_BLOCK_SIZE, ctrblk);
806 		if (ret < 0 || ret != AES_BLOCK_SIZE)
807 			return -EIO;
808 		memcpy(out, buf, nbytes);
809 		crypto_inc(ctrblk, AES_BLOCK_SIZE);
810 		ret = blkcipher_walk_done(desc, walk, 0);
811 	}
812 	memcpy(walk->iv, ctrblk, AES_BLOCK_SIZE);
813 	return ret;
814 }
815 
816 static int ctr_aes_encrypt(struct blkcipher_desc *desc,
817 			   struct scatterlist *dst, struct scatterlist *src,
818 			   unsigned int nbytes)
819 {
820 	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
821 	struct blkcipher_walk walk;
822 
823 	blkcipher_walk_init(&walk, dst, src, nbytes);
824 	return ctr_aes_crypt(desc, sctx->enc, sctx, &walk);
825 }
826 
827 static int ctr_aes_decrypt(struct blkcipher_desc *desc,
828 			   struct scatterlist *dst, struct scatterlist *src,
829 			   unsigned int nbytes)
830 {
831 	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
832 	struct blkcipher_walk walk;
833 
834 	blkcipher_walk_init(&walk, dst, src, nbytes);
835 	return ctr_aes_crypt(desc, sctx->dec, sctx, &walk);
836 }
837 
838 static struct crypto_alg ctr_aes_alg = {
839 	.cra_name		=	"ctr(aes)",
840 	.cra_driver_name	=	"ctr-aes-s390",
841 	.cra_priority		=	CRYPT_S390_COMPOSITE_PRIORITY,
842 	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
843 	.cra_blocksize		=	1,
844 	.cra_ctxsize		=	sizeof(struct s390_aes_ctx),
845 	.cra_type		=	&crypto_blkcipher_type,
846 	.cra_module		=	THIS_MODULE,
847 	.cra_u			=	{
848 		.blkcipher = {
849 			.min_keysize		=	AES_MIN_KEY_SIZE,
850 			.max_keysize		=	AES_MAX_KEY_SIZE,
851 			.ivsize			=	AES_BLOCK_SIZE,
852 			.setkey			=	ctr_aes_set_key,
853 			.encrypt		=	ctr_aes_encrypt,
854 			.decrypt		=	ctr_aes_decrypt,
855 		}
856 	}
857 };
858 
859 static int ctr_aes_alg_reg;
860 
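/*
 * Probe the MSA facilities: if at least one AES key length is supported,
 * register the cipher and the ECB/CBC modes (unsupported lengths use the
 * fallback); register XTS and CTR only when the corresponding KM/KMCTR
 * functions are available.
 */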
861 static int __init aes_s390_init(void)
862 {
863 	int ret;
864 
865 	if (crypt_s390_func_available(KM_AES_128_ENCRYPT, CRYPT_S390_MSA))
866 		keylen_flag |= AES_KEYLEN_128;
867 	if (crypt_s390_func_available(KM_AES_192_ENCRYPT, CRYPT_S390_MSA))
868 		keylen_flag |= AES_KEYLEN_192;
869 	if (crypt_s390_func_available(KM_AES_256_ENCRYPT, CRYPT_S390_MSA))
870 		keylen_flag |= AES_KEYLEN_256;
871 
872 	if (!keylen_flag)
873 		return -EOPNOTSUPP;
874 
875 	/* z9 109 and z9 BC/EC only support 128 bit key length */
876 	if (keylen_flag == AES_KEYLEN_128)
877 		pr_info("AES hardware acceleration is only available for"
878 			" 128-bit keys\n");
879 
880 	ret = crypto_register_alg(&aes_alg);
881 	if (ret)
882 		goto aes_err;
883 
884 	ret = crypto_register_alg(&ecb_aes_alg);
885 	if (ret)
886 		goto ecb_aes_err;
887 
888 	ret = crypto_register_alg(&cbc_aes_alg);
889 	if (ret)
890 		goto cbc_aes_err;
891 
892 	if (crypt_s390_func_available(KM_XTS_128_ENCRYPT,
893 			CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
894 	    crypt_s390_func_available(KM_XTS_256_ENCRYPT,
895 			CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
896 		ret = crypto_register_alg(&xts_aes_alg);
897 		if (ret)
898 			goto xts_aes_err;
899 		xts_aes_alg_reg = 1;
900 	}
901 
902 	if (crypt_s390_func_available(KMCTR_AES_128_ENCRYPT,
903 				CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
904 	    crypt_s390_func_available(KMCTR_AES_192_ENCRYPT,
905 				CRYPT_S390_MSA | CRYPT_S390_MSA4) &&
906 	    crypt_s390_func_available(KMCTR_AES_256_ENCRYPT,
907 				CRYPT_S390_MSA | CRYPT_S390_MSA4)) {
908 		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
909 		if (!ctrblk) {
910 			ret = -ENOMEM;
911 			goto ctr_aes_err;
912 		}
913 		ret = crypto_register_alg(&ctr_aes_alg);
914 		if (ret) {
915 			free_page((unsigned long) ctrblk);
916 			goto ctr_aes_err;
917 		}
918 		ctr_aes_alg_reg = 1;
919 	}
920 
921 out:
922 	return ret;
923 
924 ctr_aes_err:
925 	crypto_unregister_alg(&xts_aes_alg);
926 xts_aes_err:
927 	crypto_unregister_alg(&cbc_aes_alg);
928 cbc_aes_err:
929 	crypto_unregister_alg(&ecb_aes_alg);
930 ecb_aes_err:
931 	crypto_unregister_alg(&aes_alg);
932 aes_err:
933 	goto out;
934 }
935 
936 static void __exit aes_s390_fini(void)
937 {
938 	if (ctr_aes_alg_reg) {
939 		crypto_unregister_alg(&ctr_aes_alg);
940 		free_page((unsigned long) ctrblk);
941 	}
942 	if (xts_aes_alg_reg)
943 		crypto_unregister_alg(&xts_aes_alg);
944 	crypto_unregister_alg(&cbc_aes_alg);
945 	crypto_unregister_alg(&ecb_aes_alg);
946 	crypto_unregister_alg(&aes_alg);
947 }
948 
949 module_init(aes_s390_init);
950 module_exit(aes_s390_fini);
951 
952 MODULE_ALIAS("aes-all");
953 
954 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
955 MODULE_LICENSE("GPL");
956