/*
 * Cryptographic API.
 *
 * s390 implementation of the AES Cipher Algorithm.
 *
 * s390 Version:
 *   Copyright IBM Corp. 2005, 2007
 *   Author(s): Jan Glauber (jang@de.ibm.com)
 *              Sebastian Siewior <sebastian@breakpoint.cc> SW-Fallback
 *
 * Derived from "crypto/aes_generic.c"
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#define KMSG_COMPONENT "aes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fips.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>

static u8 *ctrblk;
static DEFINE_SPINLOCK(ctrblk_lock);

static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;

struct s390_aes_ctx {
        u8 key[AES_MAX_KEY_SIZE];
        int key_len;
        unsigned long fc;
        union {
                struct crypto_skcipher *blk;
                struct crypto_cipher *cip;
        } fallback;
};

struct s390_xts_ctx {
        u8 key[32];
        u8 pcc_key[32];
        int key_len;
        unsigned long fc;
        struct crypto_skcipher *fallback;
};

static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
                               unsigned int key_len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
        int ret;

        sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
        sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
                                               CRYPTO_TFM_REQ_MASK);

        ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
        if (ret) {
                tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
                tfm->crt_flags |= (sctx->fallback.cip->base.crt_flags &
                                   CRYPTO_TFM_RES_MASK);
        }
        return ret;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                       unsigned int key_len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
        unsigned long fc;

        /* Pick the correct function code based on the key length */
        fc = (key_len == 16) ? CPACF_KM_AES_128 :
             (key_len == 24) ? CPACF_KM_AES_192 :
             (key_len == 32) ? CPACF_KM_AES_256 : 0;

        /* Check if the function code is available */
        sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
        if (!sctx->fc)
                return setkey_fallback_cip(tfm, in_key, key_len);

        sctx->key_len = key_len;
        memcpy(sctx->key, in_key, key_len);
        return 0;
}
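/*
 * Single block en-/decryption: if no CPACF function code was selected at
 * setkey time (unsupported key length or missing facility), the request
 * is handled by the allocated software fallback cipher instead.
 */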
static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        if (unlikely(!sctx->fc)) {
                crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
                return;
        }
        cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        if (unlikely(!sctx->fc)) {
                crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
                return;
        }
        cpacf_km(sctx->fc | CPACF_DECRYPT,
                 &sctx->key, out, in, AES_BLOCK_SIZE);
}

static int fallback_init_cip(struct crypto_tfm *tfm)
{
        const char *name = tfm->__crt_alg->cra_name;
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        sctx->fallback.cip = crypto_alloc_cipher(name, 0,
                                                 CRYPTO_ALG_ASYNC |
                                                 CRYPTO_ALG_NEED_FALLBACK);

        if (IS_ERR(sctx->fallback.cip)) {
                pr_err("Allocating AES fallback algorithm %s failed\n",
                       name);
                return PTR_ERR(sctx->fallback.cip);
        }

        return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        crypto_free_cipher(sctx->fallback.cip);
        sctx->fallback.cip = NULL;
}

static struct crypto_alg aes_alg = {
        .cra_name = "aes",
        .cra_driver_name = "aes-s390",
        .cra_priority = 300,
        .cra_flags = CRYPTO_ALG_TYPE_CIPHER |
                     CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct s390_aes_ctx),
        .cra_module = THIS_MODULE,
        .cra_init = fallback_init_cip,
        .cra_exit = fallback_exit_cip,
        .cra_u = {
                .cipher = {
                        .cia_min_keysize = AES_MIN_KEY_SIZE,
                        .cia_max_keysize = AES_MAX_KEY_SIZE,
                        .cia_setkey = aes_set_key,
                        .cia_encrypt = aes_encrypt,
                        .cia_decrypt = aes_decrypt,
                }
        }
};

static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key,
                               unsigned int len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
        unsigned int ret;

        crypto_skcipher_clear_flags(sctx->fallback.blk, CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(sctx->fallback.blk, tfm->crt_flags &
                                                      CRYPTO_TFM_REQ_MASK);

        ret = crypto_skcipher_setkey(sctx->fallback.blk, key, len);

        tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
        tfm->crt_flags |= crypto_skcipher_get_flags(sctx->fallback.blk) &
                          CRYPTO_TFM_RES_MASK;

        return ret;
}

static int fallback_blk_dec(struct blkcipher_desc *desc,
                            struct scatterlist *dst, struct scatterlist *src,
                            unsigned int nbytes)
{
        unsigned int ret;
        struct crypto_blkcipher *tfm = desc->tfm;
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
        SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);

        skcipher_request_set_tfm(req, sctx->fallback.blk);
        skcipher_request_set_callback(req, desc->flags, NULL, NULL);
        skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

        ret = crypto_skcipher_decrypt(req);

        skcipher_request_zero(req);
        return ret;
}

static int fallback_blk_enc(struct blkcipher_desc *desc,
                            struct scatterlist *dst, struct scatterlist *src,
                            unsigned int nbytes)
{
        unsigned int ret;
        struct crypto_blkcipher *tfm = desc->tfm;
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
        SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);

        skcipher_request_set_tfm(req, sctx->fallback.blk);
        skcipher_request_set_callback(req, desc->flags, NULL, NULL);
        skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

        ret = crypto_skcipher_encrypt(req);
        return ret;
}
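/*
 * ECB: the KM instruction takes the raw AES key as its parameter block
 * and processes only complete blocks; there is no IV or chaining state.
 */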
static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                           unsigned int key_len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
        unsigned long fc;

        /* Pick the correct function code based on the key length */
        fc = (key_len == 16) ? CPACF_KM_AES_128 :
             (key_len == 24) ? CPACF_KM_AES_192 :
             (key_len == 32) ? CPACF_KM_AES_256 : 0;

        /* Check if the function code is available */
        sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
        if (!sctx->fc)
                return setkey_fallback_blk(tfm, in_key, key_len);

        sctx->key_len = key_len;
        memcpy(sctx->key, in_key, key_len);
        return 0;
}

static int ecb_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
                         struct blkcipher_walk *walk)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        unsigned int nbytes, n;
        int ret;

        ret = blkcipher_walk_virt(desc, walk);
        while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
                /* only use complete blocks */
                n = nbytes & ~(AES_BLOCK_SIZE - 1);
                cpacf_km(sctx->fc | modifier, sctx->key,
                         walk->dst.virt.addr, walk->src.virt.addr, n);
                ret = blkcipher_walk_done(desc, walk, nbytes - n);
        }

        return ret;
}

static int ecb_aes_encrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(!sctx->fc))
                return fallback_blk_enc(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return ecb_aes_crypt(desc, 0, &walk);
}

static int ecb_aes_decrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(!sctx->fc))
                return fallback_blk_dec(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return ecb_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static int fallback_init_blk(struct crypto_tfm *tfm)
{
        const char *name = tfm->__crt_alg->cra_name;
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        sctx->fallback.blk = crypto_alloc_skcipher(name, 0,
                                                   CRYPTO_ALG_ASYNC |
                                                   CRYPTO_ALG_NEED_FALLBACK);

        if (IS_ERR(sctx->fallback.blk)) {
                pr_err("Allocating AES fallback algorithm %s failed\n",
                       name);
                return PTR_ERR(sctx->fallback.blk);
        }

        return 0;
}

static void fallback_exit_blk(struct crypto_tfm *tfm)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

        crypto_free_skcipher(sctx->fallback.blk);
}

static struct crypto_alg ecb_aes_alg = {
        .cra_name = "ecb(aes)",
        .cra_driver_name = "ecb-aes-s390",
        .cra_priority = 400,    /* combo: aes + ecb */
        .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
                     CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct s390_aes_ctx),
        .cra_type = &crypto_blkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = fallback_init_blk,
        .cra_exit = fallback_exit_blk,
        .cra_u = {
                .blkcipher = {
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .setkey = ecb_aes_set_key,
                        .encrypt = ecb_aes_encrypt,
                        .decrypt = ecb_aes_decrypt,
                }
        }
};
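/*
 * CBC: the KMC instruction expects the IV and the key together in one
 * parameter block and updates the chaining value in place, so the IV is
 * copied back into the walk after processing.
 */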
static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                           unsigned int key_len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
        unsigned long fc;

        /* Pick the correct function code based on the key length */
        fc = (key_len == 16) ? CPACF_KMC_AES_128 :
             (key_len == 24) ? CPACF_KMC_AES_192 :
             (key_len == 32) ? CPACF_KMC_AES_256 : 0;

        /* Check if the function code is available */
        sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
        if (!sctx->fc)
                return setkey_fallback_blk(tfm, in_key, key_len);

        sctx->key_len = key_len;
        memcpy(sctx->key, in_key, key_len);
        return 0;
}

static int cbc_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
                         struct blkcipher_walk *walk)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        unsigned int nbytes, n;
        int ret;
        struct {
                u8 iv[AES_BLOCK_SIZE];
                u8 key[AES_MAX_KEY_SIZE];
        } param;

        ret = blkcipher_walk_virt(desc, walk);
        memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
        memcpy(param.key, sctx->key, sctx->key_len);
        while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
                /* only use complete blocks */
                n = nbytes & ~(AES_BLOCK_SIZE - 1);
                cpacf_kmc(sctx->fc | modifier, &param,
                          walk->dst.virt.addr, walk->src.virt.addr, n);
                ret = blkcipher_walk_done(desc, walk, nbytes - n);
        }
        memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
        return ret;
}

static int cbc_aes_encrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(!sctx->fc))
                return fallback_blk_enc(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return cbc_aes_crypt(desc, 0, &walk);
}

static int cbc_aes_decrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(!sctx->fc))
                return fallback_blk_dec(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return cbc_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static struct crypto_alg cbc_aes_alg = {
        .cra_name = "cbc(aes)",
        .cra_driver_name = "cbc-aes-s390",
        .cra_priority = 400,    /* combo: aes + cbc */
        .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
                     CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct s390_aes_ctx),
        .cra_type = &crypto_blkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = fallback_init_blk,
        .cra_exit = fallback_exit_blk,
        .cra_u = {
                .blkcipher = {
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                        .setkey = cbc_aes_set_key,
                        .encrypt = cbc_aes_encrypt,
                        .decrypt = cbc_aes_decrypt,
                }
        }
};
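/*
 * XTS: the supplied key is twice the AES key length and is split into
 * the data key and the tweak (PCC) key. Without KM-XTS support the
 * whole request is delegated to the xts(aes) software fallback.
 */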
static int xts_fallback_setkey(struct crypto_tfm *tfm, const u8 *key,
                               unsigned int len)
{
        struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
        unsigned int ret;

        crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(xts_ctx->fallback, tfm->crt_flags &
                                                     CRYPTO_TFM_REQ_MASK);

        ret = crypto_skcipher_setkey(xts_ctx->fallback, key, len);

        tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
        tfm->crt_flags |= crypto_skcipher_get_flags(xts_ctx->fallback) &
                          CRYPTO_TFM_RES_MASK;

        return ret;
}

static int xts_fallback_decrypt(struct blkcipher_desc *desc,
                                struct scatterlist *dst,
                                struct scatterlist *src,
                                unsigned int nbytes)
{
        struct crypto_blkcipher *tfm = desc->tfm;
        struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
        SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
        unsigned int ret;

        skcipher_request_set_tfm(req, xts_ctx->fallback);
        skcipher_request_set_callback(req, desc->flags, NULL, NULL);
        skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

        ret = crypto_skcipher_decrypt(req);

        skcipher_request_zero(req);
        return ret;
}

static int xts_fallback_encrypt(struct blkcipher_desc *desc,
                                struct scatterlist *dst,
                                struct scatterlist *src,
                                unsigned int nbytes)
{
        struct crypto_blkcipher *tfm = desc->tfm;
        struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
        SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
        unsigned int ret;

        skcipher_request_set_tfm(req, xts_ctx->fallback);
        skcipher_request_set_callback(req, desc->flags, NULL, NULL);
        skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

        ret = crypto_skcipher_encrypt(req);

        skcipher_request_zero(req);
        return ret;
}

static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                           unsigned int key_len)
{
        struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
        unsigned long fc;
        int err;

        err = xts_check_key(tfm, in_key, key_len);
        if (err)
                return err;

        /* In fips mode only 128 bit or 256 bit keys are valid */
        if (fips_enabled && key_len != 32 && key_len != 64) {
                tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        /* Pick the correct function code based on the key length */
        fc = (key_len == 32) ? CPACF_KM_XTS_128 :
             (key_len == 64) ? CPACF_KM_XTS_256 : 0;

        /* Check if the function code is available */
        xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
        if (!xts_ctx->fc)
                return xts_fallback_setkey(tfm, in_key, key_len);

        /* Split the XTS key into the two subkeys */
        key_len = key_len / 2;
        xts_ctx->key_len = key_len;
        memcpy(xts_ctx->key, in_key, key_len);
        memcpy(xts_ctx->pcc_key, in_key + key_len, key_len);
        return 0;
}
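/*
 * The PCC instruction derives the initial tweak from pcc_param.tweak and
 * the tweak key; the result in pcc_param.xts seeds xts_param.init for the
 * KM-XTS calls. For a 128-bit key the key material is placed in the upper
 * half of the 32-byte key field (offset 16), so the parameter block passed
 * to the instruction starts at the right position for either key size.
 */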
static int xts_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
                         struct blkcipher_walk *walk)
{
        struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
        unsigned int offset, nbytes, n;
        int ret;
        struct {
                u8 key[32];
                u8 tweak[16];
                u8 block[16];
                u8 bit[16];
                u8 xts[16];
        } pcc_param;
        struct {
                u8 key[32];
                u8 init[16];
        } xts_param;

        ret = blkcipher_walk_virt(desc, walk);
        offset = xts_ctx->key_len & 0x10;
        memset(pcc_param.block, 0, sizeof(pcc_param.block));
        memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
        memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
        memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
        memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
        cpacf_pcc(xts_ctx->fc, pcc_param.key + offset);

        memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
        memcpy(xts_param.init, pcc_param.xts, 16);

        while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
                /* only use complete blocks */
                n = nbytes & ~(AES_BLOCK_SIZE - 1);
                cpacf_km(xts_ctx->fc | modifier, xts_param.key + offset,
                         walk->dst.virt.addr, walk->src.virt.addr, n);
                ret = blkcipher_walk_done(desc, walk, nbytes - n);
        }
        return ret;
}

static int xts_aes_encrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(!xts_ctx->fc))
                return xts_fallback_encrypt(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return xts_aes_crypt(desc, 0, &walk);
}

static int xts_aes_decrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(!xts_ctx->fc))
                return xts_fallback_decrypt(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return xts_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static int xts_fallback_init(struct crypto_tfm *tfm)
{
        const char *name = tfm->__crt_alg->cra_name;
        struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);

        xts_ctx->fallback = crypto_alloc_skcipher(name, 0,
                                                  CRYPTO_ALG_ASYNC |
                                                  CRYPTO_ALG_NEED_FALLBACK);

        if (IS_ERR(xts_ctx->fallback)) {
                pr_err("Allocating XTS fallback algorithm %s failed\n",
                       name);
                return PTR_ERR(xts_ctx->fallback);
        }
        return 0;
}

static void xts_fallback_exit(struct crypto_tfm *tfm)
{
        struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);

        crypto_free_skcipher(xts_ctx->fallback);
}

static struct crypto_alg xts_aes_alg = {
        .cra_name = "xts(aes)",
        .cra_driver_name = "xts-aes-s390",
        .cra_priority = 400,    /* combo: aes + xts */
        .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
                     CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize = AES_BLOCK_SIZE,
        .cra_ctxsize = sizeof(struct s390_xts_ctx),
        .cra_type = &crypto_blkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = xts_fallback_init,
        .cra_exit = xts_fallback_exit,
        .cra_u = {
                .blkcipher = {
                        .min_keysize = 2 * AES_MIN_KEY_SIZE,
                        .max_keysize = 2 * AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                        .setkey = xts_aes_set_key,
                        .encrypt = xts_aes_encrypt,
                        .decrypt = xts_aes_decrypt,
                }
        }
};
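/*
 * CTR: KMCTR consumes a list of counter blocks. For multi-block requests
 * a page-sized counter list is pre-built under ctrblk_lock; if the lock
 * is contended, blocks are processed one at a time using walk->iv.
 */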
static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
                           unsigned int key_len)
{
        struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
        unsigned long fc;

        /* Pick the correct function code based on the key length */
        fc = (key_len == 16) ? CPACF_KMCTR_AES_128 :
             (key_len == 24) ? CPACF_KMCTR_AES_192 :
             (key_len == 32) ? CPACF_KMCTR_AES_256 : 0;

        /* Check if the function code is available */
        sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
        if (!sctx->fc)
                return setkey_fallback_blk(tfm, in_key, key_len);

        sctx->key_len = key_len;
        memcpy(sctx->key, in_key, key_len);
        return 0;
}

static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
        unsigned int i, n;

        /* only use complete blocks, max. PAGE_SIZE */
        memcpy(ctrptr, iv, AES_BLOCK_SIZE);
        n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
        for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
                memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
                crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
                ctrptr += AES_BLOCK_SIZE;
        }
        return n;
}
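/*
 * Full counter-block runs are handled by a single KMCTR call; a trailing
 * partial block is encrypted into a stack buffer and only nbytes of the
 * output are copied to the destination.
 */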
static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
                         struct blkcipher_walk *walk)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        u8 buf[AES_BLOCK_SIZE], *ctrptr;
        unsigned int n, nbytes;
        int ret, locked;

        locked = spin_trylock(&ctrblk_lock);

        ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
        while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
                n = AES_BLOCK_SIZE;
                if (nbytes >= 2*AES_BLOCK_SIZE && locked)
                        n = __ctrblk_init(ctrblk, walk->iv, nbytes);
                ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk->iv;
                cpacf_kmctr(sctx->fc | modifier, sctx->key,
                            walk->dst.virt.addr, walk->src.virt.addr,
                            n, ctrptr);
                if (ctrptr == ctrblk)
                        memcpy(walk->iv, ctrptr + n - AES_BLOCK_SIZE,
                               AES_BLOCK_SIZE);
                crypto_inc(walk->iv, AES_BLOCK_SIZE);
                ret = blkcipher_walk_done(desc, walk, nbytes - n);
        }
        if (locked)
                spin_unlock(&ctrblk_lock);
        /*
         * final block may be < AES_BLOCK_SIZE, copy only nbytes
         */
        if (nbytes) {
                cpacf_kmctr(sctx->fc | modifier, sctx->key,
                            buf, walk->src.virt.addr,
                            AES_BLOCK_SIZE, walk->iv);
                memcpy(walk->dst.virt.addr, buf, nbytes);
                crypto_inc(walk->iv, AES_BLOCK_SIZE);
                ret = blkcipher_walk_done(desc, walk, 0);
        }

        return ret;
}

static int ctr_aes_encrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(!sctx->fc))
                return fallback_blk_enc(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return ctr_aes_crypt(desc, 0, &walk);
}

static int ctr_aes_decrypt(struct blkcipher_desc *desc,
                           struct scatterlist *dst, struct scatterlist *src,
                           unsigned int nbytes)
{
        struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;

        if (unlikely(!sctx->fc))
                return fallback_blk_dec(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return ctr_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static struct crypto_alg ctr_aes_alg = {
        .cra_name = "ctr(aes)",
        .cra_driver_name = "ctr-aes-s390",
        .cra_priority = 400,    /* combo: aes + ctr */
        .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
                     CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize = 1,
        .cra_ctxsize = sizeof(struct s390_aes_ctx),
        .cra_type = &crypto_blkcipher_type,
        .cra_module = THIS_MODULE,
        .cra_init = fallback_init_blk,
        .cra_exit = fallback_exit_blk,
        .cra_u = {
                .blkcipher = {
                        .min_keysize = AES_MIN_KEY_SIZE,
                        .max_keysize = AES_MAX_KEY_SIZE,
                        .ivsize = AES_BLOCK_SIZE,
                        .setkey = ctr_aes_set_key,
                        .encrypt = ctr_aes_encrypt,
                        .decrypt = ctr_aes_decrypt,
                }
        }
};

static struct crypto_alg *aes_s390_algs_ptr[5];
static int aes_s390_algs_num;

static int aes_s390_register_alg(struct crypto_alg *alg)
{
        int ret;

        ret = crypto_register_alg(alg);
        if (!ret)
                aes_s390_algs_ptr[aes_s390_algs_num++] = alg;
        return ret;
}

static void aes_s390_fini(void)
{
        while (aes_s390_algs_num--)
                crypto_unregister_alg(aes_s390_algs_ptr[aes_s390_algs_num]);
        if (ctrblk)
                free_page((unsigned long) ctrblk);
}
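/*
 * Query the CPACF facility masks once at module load and register only
 * those algorithms for which at least one AES function code is present;
 * any registration failure rolls back everything registered so far.
 */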
static int __init aes_s390_init(void)
{
        int ret;

        /* Query available functions for KM, KMC and KMCTR */
        cpacf_query(CPACF_KM, &km_functions);
        cpacf_query(CPACF_KMC, &kmc_functions);
        cpacf_query(CPACF_KMCTR, &kmctr_functions);

        if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
            cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
            cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
                ret = aes_s390_register_alg(&aes_alg);
                if (ret)
                        goto out_err;
                ret = aes_s390_register_alg(&ecb_aes_alg);
                if (ret)
                        goto out_err;
        }

        if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
            cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
            cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
                ret = aes_s390_register_alg(&cbc_aes_alg);
                if (ret)
                        goto out_err;
        }

        if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
            cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
                ret = aes_s390_register_alg(&xts_aes_alg);
                if (ret)
                        goto out_err;
        }

        if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_128) ||
            cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_192) ||
            cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_256)) {
                ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
                if (!ctrblk) {
                        ret = -ENOMEM;
                        goto out_err;
                }
                ret = aes_s390_register_alg(&ctr_aes_alg);
                if (ret)
                        goto out_err;
        }

        return 0;
out_err:
        aes_s390_fini();
        return ret;
}

module_cpu_feature_match(MSA, aes_s390_init);
module_exit(aes_s390_fini);

MODULE_ALIAS_CRYPTO("aes-all");

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("GPL");