// SPDX-License-Identifier: GPL-2.0
/*
 * Cryptographic API.
 *
 * s390 implementation of the AES Cipher Algorithm with protected keys.
 *
 * s390 Version:
 *   Copyright IBM Corp. 2017, 2023
 *   Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *		Harald Freudenberger <freude@de.ibm.com>
 */

#define KMSG_COMPONENT "paes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <crypto/internal/skcipher.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>
#include <asm/pkey.h>

/*
 * Key blobs smaller/bigger than these defines are rejected
 * by the common code even before the individual setkey function
 * is called. As paes can handle different kinds of key blobs
 * and padding is also possible, the limits need to be generous.
 */
#define PAES_MIN_KEYSIZE	16
#define PAES_MAX_KEYSIZE	MAXEP11AESKEYBLOBSIZE

static u8 *ctrblk;
static DEFINE_MUTEX(ctrblk_lock);

static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;

struct key_blob {
	/*
	 * Small keys will be stored in the keybuf. Larger keys are
	 * stored in extra allocated memory. In both cases, key points
	 * to the memory where the key is stored.
	 * The code distinguishes by checking keylen against
	 * sizeof(keybuf). See the two following helper functions.
	 */
	u8 *key;
	u8 keybuf[128];
	unsigned int keylen;
};

static inline int _key_to_kb(struct key_blob *kb,
			     const u8 *key,
			     unsigned int keylen)
{
	struct clearkey_header {
		u8  type;
		u8  res0[3];
		u8  version;
		u8  res1[3];
		u32 keytype;
		u32 len;
	} __packed * h;

	switch (keylen) {
	case 16:
	case 24:
	case 32:
		/* clear key value, prepare pkey clear key token in keybuf */
		memset(kb->keybuf, 0, sizeof(kb->keybuf));
		h = (struct clearkey_header *) kb->keybuf;
		h->version = 0x02; /* TOKVER_CLEAR_KEY */
		h->keytype = (keylen - 8) >> 3;
		h->len = keylen;
		memcpy(kb->keybuf + sizeof(*h), key, keylen);
		kb->keylen = sizeof(*h) + keylen;
		kb->key = kb->keybuf;
		break;
	default:
		/* other key material, let pkey handle this */
		if (keylen <= sizeof(kb->keybuf))
			kb->key = kb->keybuf;
		else {
			kb->key = kmalloc(keylen, GFP_KERNEL);
			if (!kb->key)
				return -ENOMEM;
		}
		memcpy(kb->key, key, keylen);
		kb->keylen = keylen;
		break;
	}

	return 0;
}

static inline void _free_kb_keybuf(struct key_blob *kb)
{
	if (kb->key && kb->key != kb->keybuf
	    && kb->keylen > sizeof(kb->keybuf)) {
		kfree_sensitive(kb->key);
		kb->key = NULL;
	}
}

struct s390_paes_ctx {
	struct key_blob kb;
	struct pkey_protkey pk;
	spinlock_t pk_lock;
	unsigned long fc;
};

struct s390_pxts_ctx {
	struct key_blob kb[2];
	struct pkey_protkey pk[2];
	spinlock_t pk_lock;
	unsigned long fc;
};

static inline int __paes_keyblob2pkey(struct key_blob *kb,
				      struct pkey_protkey *pk)
{
	int i, ret = -EIO;

	/* try three times in case of busy card */
	for (i = 0; ret && i < 3; i++) {
		if (ret == -EBUSY && in_task()) {
			if (msleep_interruptible(1000))
				return -EINTR;
		}
		ret = pkey_key2protkey(kb->key, kb->keylen,
				       pk->protkey, &pk->len,
				       &pk->type);
	}

	return ret;
}

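/*
 * Convert the key blob stored in the tfm context into a protected key
 * and cache the result in ctx->pk. The cached copy is updated under
 * pk_lock so that the crypt paths can re-read a fresh protected key
 * whenever the CPACF instruction indicates that the current one is no
 * longer usable.
 */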
static inline int __paes_convert_key(struct s390_paes_ctx *ctx)
{
	int ret;
	struct pkey_protkey pkey;

	pkey.len = sizeof(pkey.protkey);
	ret = __paes_keyblob2pkey(&ctx->kb, &pkey);
	if (ret)
		return ret;

	spin_lock_bh(&ctx->pk_lock);
	memcpy(&ctx->pk, &pkey, sizeof(pkey));
	spin_unlock_bh(&ctx->pk_lock);

	return 0;
}

static int ecb_paes_init(struct crypto_skcipher *tfm)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->kb.key = NULL;
	spin_lock_init(&ctx->pk_lock);

	return 0;
}

static void ecb_paes_exit(struct crypto_skcipher *tfm)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	_free_kb_keybuf(&ctx->kb);
}

static inline int __ecb_paes_set_key(struct s390_paes_ctx *ctx)
{
	int rc;
	unsigned long fc;

	rc = __paes_convert_key(ctx);
	if (rc)
		return rc;

	/* Pick the correct function code based on the protected key type */
	fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KM_PAES_128 :
		(ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KM_PAES_192 :
		(ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KM_PAES_256 : 0;

	/* Check if the function code is available */
	ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;

	return ctx->fc ? 0 : -EINVAL;
}

static int ecb_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			    unsigned int key_len)
{
	int rc;
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	_free_kb_keybuf(&ctx->kb);
	rc = _key_to_kb(&ctx->kb, in_key, key_len);
	if (rc)
		return rc;

	return __ecb_paes_set_key(ctx);
}

static int ecb_paes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes, n, k;
	int ret;
	struct {
		u8 key[MAXPROTKEYSIZE];
	} param;

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;

	spin_lock_bh(&ctx->pk_lock);
	memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
	spin_unlock_bh(&ctx->pk_lock);

	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		k = cpacf_km(ctx->fc | modifier, &param,
			     walk.dst.virt.addr, walk.src.virt.addr, n);
		if (k)
			ret = skcipher_walk_done(&walk, nbytes - k);
		if (k < n) {
			if (__paes_convert_key(ctx))
				return skcipher_walk_done(&walk, -EIO);
			spin_lock_bh(&ctx->pk_lock);
			memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
			spin_unlock_bh(&ctx->pk_lock);
		}
	}
	return ret;
}

static int ecb_paes_encrypt(struct skcipher_request *req)
{
	return ecb_paes_crypt(req, 0);
}

static int ecb_paes_decrypt(struct skcipher_request *req)
{
	return ecb_paes_crypt(req, CPACF_DECRYPT);
}

static struct skcipher_alg ecb_paes_alg = {
	.base.cra_name		= "ecb(paes)",
	.base.cra_driver_name	= "ecb-paes-s390",
	.base.cra_priority	= 401,	/* combo: aes + ecb + 1 */
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct s390_paes_ctx),
	.base.cra_module	= THIS_MODULE,
	.base.cra_list		= LIST_HEAD_INIT(ecb_paes_alg.base.cra_list),
	.init			= ecb_paes_init,
	.exit			= ecb_paes_exit,
	.min_keysize		= PAES_MIN_KEYSIZE,
	.max_keysize		= PAES_MAX_KEYSIZE,
	.setkey			= ecb_paes_set_key,
	.encrypt		= ecb_paes_encrypt,
	.decrypt		= ecb_paes_decrypt,
};

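/*
 * CBC works like ECB above, but the CPACF parameter block also carries
 * the chaining value: the IV is copied into the parameter block before
 * the walk and written back to walk.iv after each kmc call so that the
 * request can be continued with the correct chaining state.
 */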
static int cbc_paes_init(struct crypto_skcipher *tfm)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->kb.key = NULL;
	spin_lock_init(&ctx->pk_lock);

	return 0;
}

static void cbc_paes_exit(struct crypto_skcipher *tfm)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	_free_kb_keybuf(&ctx->kb);
}

static inline int __cbc_paes_set_key(struct s390_paes_ctx *ctx)
{
	int rc;
	unsigned long fc;

	rc = __paes_convert_key(ctx);
	if (rc)
		return rc;

	/* Pick the correct function code based on the protected key type */
	fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KMC_PAES_128 :
		(ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KMC_PAES_192 :
		(ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KMC_PAES_256 : 0;

	/* Check if the function code is available */
	ctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;

	return ctx->fc ? 0 : -EINVAL;
}

static int cbc_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			    unsigned int key_len)
{
	int rc;
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	_free_kb_keybuf(&ctx->kb);
	rc = _key_to_kb(&ctx->kb, in_key, key_len);
	if (rc)
		return rc;

	return __cbc_paes_set_key(ctx);
}

static int cbc_paes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes, n, k;
	int ret;
	struct {
		u8 iv[AES_BLOCK_SIZE];
		u8 key[MAXPROTKEYSIZE];
	} param;

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;

	memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
	spin_lock_bh(&ctx->pk_lock);
	memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
	spin_unlock_bh(&ctx->pk_lock);

	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		k = cpacf_kmc(ctx->fc | modifier, &param,
			      walk.dst.virt.addr, walk.src.virt.addr, n);
		if (k) {
			memcpy(walk.iv, param.iv, AES_BLOCK_SIZE);
			ret = skcipher_walk_done(&walk, nbytes - k);
		}
		if (k < n) {
			if (__paes_convert_key(ctx))
				return skcipher_walk_done(&walk, -EIO);
			spin_lock_bh(&ctx->pk_lock);
			memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
			spin_unlock_bh(&ctx->pk_lock);
		}
	}
	return ret;
}

static int cbc_paes_encrypt(struct skcipher_request *req)
{
	return cbc_paes_crypt(req, 0);
}

static int cbc_paes_decrypt(struct skcipher_request *req)
{
	return cbc_paes_crypt(req, CPACF_DECRYPT);
}

static struct skcipher_alg cbc_paes_alg = {
	.base.cra_name		= "cbc(paes)",
	.base.cra_driver_name	= "cbc-paes-s390",
	.base.cra_priority	= 402,	/* ecb-paes-s390 + 1 */
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct s390_paes_ctx),
	.base.cra_module	= THIS_MODULE,
	.base.cra_list		= LIST_HEAD_INIT(cbc_paes_alg.base.cra_list),
	.init			= cbc_paes_init,
	.exit			= cbc_paes_exit,
	.min_keysize		= PAES_MIN_KEYSIZE,
	.max_keysize		= PAES_MAX_KEYSIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= cbc_paes_set_key,
	.encrypt		= cbc_paes_encrypt,
	.decrypt		= cbc_paes_decrypt,
};

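/*
 * XTS uses two key blobs: kb[0]/pk[0] for the data encryption and
 * kb[1]/pk[1] for the tweak. Both blobs are converted to protected
 * keys, which must be of the same AES type before a PXTS function
 * code can be selected.
 */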
static int xts_paes_init(struct crypto_skcipher *tfm)
{
	struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->kb[0].key = NULL;
	ctx->kb[1].key = NULL;
	spin_lock_init(&ctx->pk_lock);

	return 0;
}

static void xts_paes_exit(struct crypto_skcipher *tfm)
{
	struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);

	_free_kb_keybuf(&ctx->kb[0]);
	_free_kb_keybuf(&ctx->kb[1]);
}

static inline int __xts_paes_convert_key(struct s390_pxts_ctx *ctx)
{
	struct pkey_protkey pkey0, pkey1;

	pkey0.len = sizeof(pkey0.protkey);
	pkey1.len = sizeof(pkey1.protkey);

	if (__paes_keyblob2pkey(&ctx->kb[0], &pkey0) ||
	    __paes_keyblob2pkey(&ctx->kb[1], &pkey1))
		return -EINVAL;

	spin_lock_bh(&ctx->pk_lock);
	memcpy(&ctx->pk[0], &pkey0, sizeof(pkey0));
	memcpy(&ctx->pk[1], &pkey1, sizeof(pkey1));
	spin_unlock_bh(&ctx->pk_lock);

	return 0;
}

static inline int __xts_paes_set_key(struct s390_pxts_ctx *ctx)
{
	unsigned long fc;

	if (__xts_paes_convert_key(ctx))
		return -EINVAL;

	if (ctx->pk[0].type != ctx->pk[1].type)
		return -EINVAL;

	/* Pick the correct function code based on the protected key type */
	fc = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? CPACF_KM_PXTS_128 :
		(ctx->pk[0].type == PKEY_KEYTYPE_AES_256) ?
		CPACF_KM_PXTS_256 : 0;

	/* Check if the function code is available */
	ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;

	return ctx->fc ? 0 : -EINVAL;
}

static int xts_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			    unsigned int xts_key_len)
{
	int rc;
	struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
	u8 ckey[2 * AES_MAX_KEY_SIZE];
	unsigned int ckey_len, key_len;

	if (xts_key_len % 2)
		return -EINVAL;

	key_len = xts_key_len / 2;

	_free_kb_keybuf(&ctx->kb[0]);
	_free_kb_keybuf(&ctx->kb[1]);
	rc = _key_to_kb(&ctx->kb[0], in_key, key_len);
	if (rc)
		return rc;
	rc = _key_to_kb(&ctx->kb[1], in_key + key_len, key_len);
	if (rc)
		return rc;

	rc = __xts_paes_set_key(ctx);
	if (rc)
		return rc;

	/*
	 * xts_verify_key verifies the key length is not odd and makes
	 * sure that the two keys are not the same. This can be done
	 * on the two protected keys as well.
	 */
	ckey_len = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ?
		AES_KEYSIZE_128 : AES_KEYSIZE_256;
	memcpy(ckey, ctx->pk[0].protkey, ckey_len);
	memcpy(ckey + ckey_len, ctx->pk[1].protkey, ckey_len);
	return xts_verify_key(tfm, ckey, 2 * ckey_len);
}

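/*
 * xts_paes_crypt first lets cpacf_pcc derive the initial XTS tweak from
 * walk.iv and the tweak protected key (pk[1]), then pushes the data
 * through cpacf_km with the data protected key (pk[0]) and that tweak as
 * the starting value. For AES-128 the 48-byte protected key is copied at
 * an offset of 16 so that the shorter parameter block still lines up
 * with the fields that follow the key.
 */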
static int xts_paes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int keylen, offset, nbytes, n, k;
	int ret;
	struct {
		u8 key[MAXPROTKEYSIZE];	/* key + verification pattern */
		u8 tweak[16];
		u8 block[16];
		u8 bit[16];
		u8 xts[16];
	} pcc_param;
	struct {
		u8 key[MAXPROTKEYSIZE];	/* key + verification pattern */
		u8 init[16];
	} xts_param;

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;

	keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 48 : 64;
	offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 16 : 0;

	memset(&pcc_param, 0, sizeof(pcc_param));
	memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak));
	spin_lock_bh(&ctx->pk_lock);
	memcpy(pcc_param.key + offset, ctx->pk[1].protkey, keylen);
	memcpy(xts_param.key + offset, ctx->pk[0].protkey, keylen);
	spin_unlock_bh(&ctx->pk_lock);
	cpacf_pcc(ctx->fc, pcc_param.key + offset);
	memcpy(xts_param.init, pcc_param.xts, 16);

	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		k = cpacf_km(ctx->fc | modifier, xts_param.key + offset,
			     walk.dst.virt.addr, walk.src.virt.addr, n);
		if (k)
			ret = skcipher_walk_done(&walk, nbytes - k);
		if (k < n) {
			if (__xts_paes_convert_key(ctx))
				return skcipher_walk_done(&walk, -EIO);
			spin_lock_bh(&ctx->pk_lock);
			memcpy(xts_param.key + offset,
			       ctx->pk[0].protkey, keylen);
			spin_unlock_bh(&ctx->pk_lock);
		}
	}

	return ret;
}

static int xts_paes_encrypt(struct skcipher_request *req)
{
	return xts_paes_crypt(req, 0);
}

static int xts_paes_decrypt(struct skcipher_request *req)
{
	return xts_paes_crypt(req, CPACF_DECRYPT);
}

static struct skcipher_alg xts_paes_alg = {
	.base.cra_name		= "xts(paes)",
	.base.cra_driver_name	= "xts-paes-s390",
	.base.cra_priority	= 402,	/* ecb-paes-s390 + 1 */
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct s390_pxts_ctx),
	.base.cra_module	= THIS_MODULE,
	.base.cra_list		= LIST_HEAD_INIT(xts_paes_alg.base.cra_list),
	.init			= xts_paes_init,
	.exit			= xts_paes_exit,
	.min_keysize		= 2 * PAES_MIN_KEYSIZE,
	.max_keysize		= 2 * PAES_MAX_KEYSIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= xts_paes_set_key,
	.encrypt		= xts_paes_encrypt,
	.decrypt		= xts_paes_decrypt,
};

static int ctr_paes_init(struct crypto_skcipher *tfm)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->kb.key = NULL;
	spin_lock_init(&ctx->pk_lock);

	return 0;
}

static void ctr_paes_exit(struct crypto_skcipher *tfm)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	_free_kb_keybuf(&ctx->kb);
}

static inline int __ctr_paes_set_key(struct s390_paes_ctx *ctx)
{
	int rc;
	unsigned long fc;

	rc = __paes_convert_key(ctx);
	if (rc)
		return rc;

	/* Pick the correct function code based on the protected key type */
	fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KMCTR_PAES_128 :
		(ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KMCTR_PAES_192 :
		(ctx->pk.type == PKEY_KEYTYPE_AES_256) ?
		CPACF_KMCTR_PAES_256 : 0;

	/* Check if the function code is available */
	ctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;

	return ctx->fc ? 0 : -EINVAL;
}

static int ctr_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			    unsigned int key_len)
{
	int rc;
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	_free_kb_keybuf(&ctx->kb);
	rc = _key_to_kb(&ctx->kb, in_key, key_len);
	if (rc)
		return rc;

	return __ctr_paes_set_key(ctx);
}

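/*
 * __ctrblk_init fills the shared ctrblk page with consecutive counter
 * values derived from the IV, so that a single kmctr call can process
 * up to PAGE_SIZE worth of full blocks. The page is shared by all tfms
 * and is only used by the caller that currently holds ctrblk_lock.
 */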
static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
	unsigned int i, n;

	/* only use complete blocks, max. PAGE_SIZE */
	memcpy(ctrptr, iv, AES_BLOCK_SIZE);
	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
	for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
		memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
		crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
		ctrptr += AES_BLOCK_SIZE;
	}
	return n;
}

static int ctr_paes_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
	u8 buf[AES_BLOCK_SIZE], *ctrptr;
	struct skcipher_walk walk;
	unsigned int nbytes, n, k;
	int ret, locked;
	struct {
		u8 key[MAXPROTKEYSIZE];
	} param;

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;

	spin_lock_bh(&ctx->pk_lock);
	memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
	spin_unlock_bh(&ctx->pk_lock);

	locked = mutex_trylock(&ctrblk_lock);

	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		n = AES_BLOCK_SIZE;
		if (nbytes >= 2*AES_BLOCK_SIZE && locked)
			n = __ctrblk_init(ctrblk, walk.iv, nbytes);
		ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv;
		k = cpacf_kmctr(ctx->fc, &param, walk.dst.virt.addr,
				walk.src.virt.addr, n, ctrptr);
		if (k) {
			if (ctrptr == ctrblk)
				memcpy(walk.iv, ctrptr + k - AES_BLOCK_SIZE,
				       AES_BLOCK_SIZE);
			crypto_inc(walk.iv, AES_BLOCK_SIZE);
			ret = skcipher_walk_done(&walk, nbytes - k);
		}
		if (k < n) {
			if (__paes_convert_key(ctx)) {
				if (locked)
					mutex_unlock(&ctrblk_lock);
				return skcipher_walk_done(&walk, -EIO);
			}
			spin_lock_bh(&ctx->pk_lock);
			memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
			spin_unlock_bh(&ctx->pk_lock);
		}
	}
	if (locked)
		mutex_unlock(&ctrblk_lock);
	/*
	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
	 */
	if (nbytes) {
		memset(buf, 0, AES_BLOCK_SIZE);
		memcpy(buf, walk.src.virt.addr, nbytes);
		while (1) {
			if (cpacf_kmctr(ctx->fc, &param, buf,
					buf, AES_BLOCK_SIZE,
					walk.iv) == AES_BLOCK_SIZE)
				break;
			if (__paes_convert_key(ctx))
				return skcipher_walk_done(&walk, -EIO);
			spin_lock_bh(&ctx->pk_lock);
			memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
			spin_unlock_bh(&ctx->pk_lock);
		}
		memcpy(walk.dst.virt.addr, buf, nbytes);
		crypto_inc(walk.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, nbytes);
	}

	return ret;
}

static struct skcipher_alg ctr_paes_alg = {
	.base.cra_name		= "ctr(paes)",
	.base.cra_driver_name	= "ctr-paes-s390",
	.base.cra_priority	= 402,	/* ecb-paes-s390 + 1 */
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct s390_paes_ctx),
	.base.cra_module	= THIS_MODULE,
	.base.cra_list		= LIST_HEAD_INIT(ctr_paes_alg.base.cra_list),
	.init			= ctr_paes_init,
	.exit			= ctr_paes_exit,
	.min_keysize		= PAES_MIN_KEYSIZE,
	.max_keysize		= PAES_MAX_KEYSIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= ctr_paes_set_key,
	.encrypt		= ctr_paes_crypt,
	.decrypt		= ctr_paes_crypt,
	.chunksize		= AES_BLOCK_SIZE,
};

static inline void __crypto_unregister_skcipher(struct skcipher_alg *alg)
{
	if (!list_empty(&alg->base.cra_list))
		crypto_unregister_skcipher(alg);
}

static void paes_s390_fini(void)
{
	__crypto_unregister_skcipher(&ctr_paes_alg);
	__crypto_unregister_skcipher(&xts_paes_alg);
	__crypto_unregister_skcipher(&cbc_paes_alg);
	__crypto_unregister_skcipher(&ecb_paes_alg);
	if (ctrblk)
		free_page((unsigned long) ctrblk);
}

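/*
 * Module init registers only those skcipher algorithms whose
 * protected-key CPACF function codes are available on this machine;
 * the ctrblk page is allocated only if a KMCTR protected-key function
 * is present.
 */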
static int __init paes_s390_init(void)
{
	int ret;

	/* Query available functions for KM, KMC and KMCTR */
	cpacf_query(CPACF_KM, &km_functions);
	cpacf_query(CPACF_KMC, &kmc_functions);
	cpacf_query(CPACF_KMCTR, &kmctr_functions);

	if (cpacf_test_func(&km_functions, CPACF_KM_PAES_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_PAES_192) ||
	    cpacf_test_func(&km_functions, CPACF_KM_PAES_256)) {
		ret = crypto_register_skcipher(&ecb_paes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_128) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_192) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_256)) {
		ret = crypto_register_skcipher(&cbc_paes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&km_functions, CPACF_KM_PXTS_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_PXTS_256)) {
		ret = crypto_register_skcipher(&xts_paes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_128) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_192) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_256)) {
		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
		if (!ctrblk) {
			ret = -ENOMEM;
			goto out_err;
		}
		ret = crypto_register_skcipher(&ctr_paes_alg);
		if (ret)
			goto out_err;
	}

	return 0;
out_err:
	paes_s390_fini();
	return ret;
}

module_init(paes_s390_init);
module_exit(paes_s390_fini);

MODULE_ALIAS_CRYPTO("ecb(paes)");
MODULE_ALIAS_CRYPTO("cbc(paes)");
MODULE_ALIAS_CRYPTO("ctr(paes)");
MODULE_ALIAS_CRYPTO("xts(paes)");

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm with protected keys");
MODULE_LICENSE("GPL");