// SPDX-License-Identifier: GPL-2.0
/*
 * Cryptographic API.
 *
 * s390 implementation of the AES Cipher Algorithm with protected keys.
 *
 * s390 Version:
 *   Copyright IBM Corp. 2017, 2025
 *   Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *		Harald Freudenberger <freude@de.ibm.com>
 */

#define pr_fmt(fmt) "paes_s390: " fmt

#include <linux/atomic.h>
#include <linux/cpufeature.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>
#include <asm/pkey.h>

/*
 * Key blobs smaller/bigger than these defines are rejected
 * by the common code even before the individual setkey function
 * is called. As paes can handle different kinds of key blobs
 * and padding is also possible, the limits need to be generous.
 */
#define PAES_MIN_KEYSIZE	16
#define PAES_MAX_KEYSIZE	MAXEP11AESKEYBLOBSIZE
#define PAES_256_PROTKEY_SIZE	(32 + 32)	/* key + verification pattern */
#define PXTS_256_PROTKEY_SIZE	(32 + 32 + 32)	/* k1 + k2 + verification pattern */

static u8 *ctrblk;
static DEFINE_MUTEX(ctrblk_lock);

static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;

static struct crypto_engine *paes_crypto_engine;
#define MAX_QLEN 10

/*
 * protected key specific stuff
 */

struct paes_protkey {
	u32 type;
	u32 len;
	u8 protkey[PXTS_256_PROTKEY_SIZE];
};

#define PK_STATE_NO_KEY			0
#define PK_STATE_CONVERT_IN_PROGRESS	1
#define PK_STATE_VALID			2

struct s390_paes_ctx {
	/* source key material used to derive a protected key from */
	u8 keybuf[PAES_MAX_KEYSIZE];
	unsigned int keylen;

	/* cpacf function code to use with this protected key type */
	long fc;

	/* nr of requests enqueued via crypto engine which use this tfm ctx */
	atomic_t via_engine_ctr;

	/* spinlock to atomically read/update all the following fields */
	spinlock_t pk_lock;

	/* see PK_STATE* defines above, < 0 holds convert failure rc */
	int pk_state;
	/* if state is valid, pk holds the protected key */
	struct paes_protkey pk;
};

struct s390_pxts_ctx {
	/* source key material used to derive a protected key from */
	u8 keybuf[2 * PAES_MAX_KEYSIZE];
	unsigned int keylen;

	/* cpacf function code to use with this protected key type */
	long fc;

	/* nr of requests enqueued via crypto engine which use this tfm ctx */
	atomic_t via_engine_ctr;

	/* spinlock to atomically read/update all the following fields */
	spinlock_t pk_lock;

	/* see PK_STATE* defines above, < 0 holds convert failure rc */
	int pk_state;
	/* if state is valid, pk[] hold(s) the protected key(s) */
	struct paes_protkey pk[2];
};

/*
 * make_clrkey_token() - wrap the raw key ck with pkey clearkey token
 * information.
 * @returns the size of the clearkey token
 */
static inline u32 make_clrkey_token(const u8 *ck, size_t cklen, u8 *dest)
{
	struct clrkey_token {
		u8  type;
		u8  res0[3];
		u8  version;
		u8  res1[3];
		u32 keytype;
		u32 len;
		u8  key[];
	} __packed *token = (struct clrkey_token *)dest;

	token->type = 0x00;
	token->version = 0x02;
	token->keytype = (cklen - 8) >> 3;
	token->len = cklen;
	memcpy(token->key, ck, cklen);

	return sizeof(*token) + cklen;
}

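/*
 * For illustration, derived from the code above: a 32-byte AES-256
 * clear key results in token->keytype = (32 - 8) >> 3 = 3 (which
 * matches PKEY_KEYTYPE_AES_256) and a total token size of
 * sizeof(struct clrkey_token) + 32 = 16 + 32 = 48 bytes.
 */
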
/*
 * paes_ctx_setkey() - Set key value into context, maybe construct
 * a clear key token digestible by pkey from a clear key value.
 */
static inline int paes_ctx_setkey(struct s390_paes_ctx *ctx,
				  const u8 *key, unsigned int keylen)
{
	if (keylen > sizeof(ctx->keybuf))
		return -EINVAL;

	switch (keylen) {
	case 16:
	case 24:
	case 32:
		/* clear key value, prepare pkey clear key token in keybuf */
		memset(ctx->keybuf, 0, sizeof(ctx->keybuf));
		ctx->keylen = make_clrkey_token(key, keylen, ctx->keybuf);
		break;
	default:
		/* other key material, let pkey handle this */
		memcpy(ctx->keybuf, key, keylen);
		ctx->keylen = keylen;
		break;
	}

	return 0;
}

/*
 * pxts_ctx_setkey() - Set key value into context, maybe construct
 * a clear key token digestible by pkey from a clear key value.
 */
static inline int pxts_ctx_setkey(struct s390_pxts_ctx *ctx,
				  const u8 *key, unsigned int keylen)
{
	size_t cklen = keylen / 2;

	if (keylen > sizeof(ctx->keybuf))
		return -EINVAL;

	switch (keylen) {
	case 32:
	case 64:
		/* clear key value, prepare pkey clear key tokens in keybuf */
		memset(ctx->keybuf, 0, sizeof(ctx->keybuf));
		ctx->keylen = make_clrkey_token(key, cklen, ctx->keybuf);
		ctx->keylen += make_clrkey_token(key + cklen, cklen,
						 ctx->keybuf + ctx->keylen);
		break;
	default:
		/* other key material, let pkey handle this */
		memcpy(ctx->keybuf, key, keylen);
		ctx->keylen = keylen;
		break;
	}

	return 0;
}

/*
 * Convert the raw key material into a protected key via PKEY api.
 * This function may sleep - don't call in non-sleeping context.
 */
static inline int convert_key(const u8 *key, unsigned int keylen,
			      struct paes_protkey *pk)
{
	int rc, i;

	pk->len = sizeof(pk->protkey);

	/*
	 * In case of a busy card retry with increasing delay
	 * of 200, 400, 800 and 1600 ms - in total 3 s.
	 */
	for (rc = -EIO, i = 0; rc && i < 5; i++) {
		if (rc == -EBUSY && msleep_interruptible((1 << i) * 100)) {
			rc = -EINTR;
			goto out;
		}
		rc = pkey_key2protkey(key, keylen,
				      pk->protkey, &pk->len, &pk->type,
				      PKEY_XFLAG_NOMEMALLOC);
	}

out:
	pr_debug("rc=%d\n", rc);
	return rc;
}

/*
 * (Re-)Convert the raw key material from the ctx into a protected key
 * via convert_key() function. Update the pk_state, pk_type, pk_len
 * and the protected key in the tfm context.
 * Please note this function may be invoked concurrently with the very
 * same tfm context. The pk_lock spinlock in the context ensures an
 * atomic update of the pk and the pk state but does not guarantee any
 * order of update. So a fresh converted valid protected key may get
 * updated with an 'old' expired key value.
 * As the cpacf instructions detect such an invalid key and refuse to
 * operate with it, and the calling code then triggers a (re-)conversion,
 * this does no harm. It may lead to an unnecessary additional conversion
 * but never to invalid data on en- or decrypt operations.
 */
static int paes_convert_key(struct s390_paes_ctx *ctx)
{
	struct paes_protkey pk;
	int rc;

	spin_lock_bh(&ctx->pk_lock);
	ctx->pk_state = PK_STATE_CONVERT_IN_PROGRESS;
	spin_unlock_bh(&ctx->pk_lock);

	rc = convert_key(ctx->keybuf, ctx->keylen, &pk);

	/* update context */
	spin_lock_bh(&ctx->pk_lock);
	if (rc) {
		ctx->pk_state = rc;
	} else {
		ctx->pk_state = PK_STATE_VALID;
		ctx->pk = pk;
	}
	spin_unlock_bh(&ctx->pk_lock);

	memzero_explicit(&pk, sizeof(pk));
	pr_debug("rc=%d\n", rc);
	return rc;
}

/*
 * (Re-)Convert the raw xts key material from the ctx into a
 * protected key via convert_key() function. Update the pk_state,
 * pk_type, pk_len and the protected key in the tfm context.
 * See also comments on function paes_convert_key.
 */
static int pxts_convert_key(struct s390_pxts_ctx *ctx)
{
	struct paes_protkey pk0, pk1;
	size_t split_keylen;
	int rc;

	spin_lock_bh(&ctx->pk_lock);
	ctx->pk_state = PK_STATE_CONVERT_IN_PROGRESS;
	spin_unlock_bh(&ctx->pk_lock);

	rc = convert_key(ctx->keybuf, ctx->keylen, &pk0);
	if (rc)
		goto out;

	switch (pk0.type) {
	case PKEY_KEYTYPE_AES_128:
	case PKEY_KEYTYPE_AES_256:
		/* second keytoken required */
		if (ctx->keylen % 2) {
			rc = -EINVAL;
			goto out;
		}
		split_keylen = ctx->keylen / 2;
		rc = convert_key(ctx->keybuf + split_keylen,
				 split_keylen, &pk1);
		if (rc)
			goto out;
		if (pk0.type != pk1.type) {
			rc = -EINVAL;
			goto out;
		}
		break;
	case PKEY_KEYTYPE_AES_XTS_128:
	case PKEY_KEYTYPE_AES_XTS_256:
		/* single key */
		pk1.type = 0;
		break;
	default:
		/* unsupported protected keytype */
		rc = -EINVAL;
		goto out;
	}

out:
	/* update context */
	spin_lock_bh(&ctx->pk_lock);
	if (rc) {
		ctx->pk_state = rc;
	} else {
		ctx->pk_state = PK_STATE_VALID;
		ctx->pk[0] = pk0;
		ctx->pk[1] = pk1;
	}
	spin_unlock_bh(&ctx->pk_lock);

	memzero_explicit(&pk0, sizeof(pk0));
	memzero_explicit(&pk1, sizeof(pk1));
	pr_debug("rc=%d\n", rc);
	return rc;
}

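/*
 * Minimal usage sketch (not part of this driver; the calls below are
 * the generic kernel skcipher API and keyblob/src_sg/dst_sg/iv are
 * placeholders): a kernel consumer could drive one of the paes
 * algorithms roughly like this, passing either a clear key or a
 * secure/protected key blob to setkey:
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *
 *	tfm = crypto_alloc_skcipher("cbc(paes)", 0, 0);
 *	crypto_skcipher_setkey(tfm, keyblob, keybloblen);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *				      CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      crypto_req_done, &wait);
 *	skcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *
 * Error handling and freeing of tfm/req are omitted for brevity.
 */
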
/*
 * PAES ECB implementation
 */

struct ecb_param {
	u8 key[PAES_256_PROTKEY_SIZE];
} __packed;

struct s390_pecb_req_ctx {
	unsigned long modifier;
	struct skcipher_walk walk;
	bool param_init_done;
	struct ecb_param param;
};

static int ecb_paes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
	long fc;
	int rc;

	/* set raw key into context */
	rc = paes_ctx_setkey(ctx, in_key, key_len);
	if (rc)
		goto out;

	/* convert key into protected key */
	rc = paes_convert_key(ctx);
	if (rc)
		goto out;

	/* Pick the correct function code based on the protected key type */
	switch (ctx->pk.type) {
	case PKEY_KEYTYPE_AES_128:
		fc = CPACF_KM_PAES_128;
		break;
	case PKEY_KEYTYPE_AES_192:
		fc = CPACF_KM_PAES_192;
		break;
	case PKEY_KEYTYPE_AES_256:
		fc = CPACF_KM_PAES_256;
		break;
	default:
		fc = 0;
		break;
	}
	ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;

	rc = fc ? 0 : -EINVAL;

out:
	pr_debug("rc=%d\n", rc);
	return rc;
}

static int ecb_paes_do_crypt(struct s390_paes_ctx *ctx,
			     struct s390_pecb_req_ctx *req_ctx,
			     bool maysleep)
{
	struct ecb_param *param = &req_ctx->param;
	struct skcipher_walk *walk = &req_ctx->walk;
	unsigned int nbytes, n, k;
	int pk_state, rc = 0;

	if (!req_ctx->param_init_done) {
		/* fetch and check protected key state */
		spin_lock_bh(&ctx->pk_lock);
		pk_state = ctx->pk_state;
		switch (pk_state) {
		case PK_STATE_NO_KEY:
			rc = -ENOKEY;
			break;
		case PK_STATE_CONVERT_IN_PROGRESS:
			rc = -EKEYEXPIRED;
			break;
		case PK_STATE_VALID:
			memcpy(param->key, ctx->pk.protkey, sizeof(param->key));
			req_ctx->param_init_done = true;
			break;
		default:
			rc = pk_state < 0 ? pk_state : -EIO;
			break;
		}
		spin_unlock_bh(&ctx->pk_lock);
	}
	if (rc)
		goto out;

	/*
	 * Note that in case of partial processing or failure the walk
	 * is NOT unmapped here. So a follow up task may reuse the walk
	 * or in case of unrecoverable failure needs to unmap it.
	 */
	while ((nbytes = walk->nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		k = cpacf_km(ctx->fc | req_ctx->modifier, param,
			     walk->dst.virt.addr, walk->src.virt.addr, n);
		if (k)
			rc = skcipher_walk_done(walk, nbytes - k);
		if (k < n) {
			if (!maysleep) {
				rc = -EKEYEXPIRED;
				goto out;
			}
			rc = paes_convert_key(ctx);
			if (rc)
				goto out;
			spin_lock_bh(&ctx->pk_lock);
			memcpy(param->key, ctx->pk.protkey, sizeof(param->key));
			spin_unlock_bh(&ctx->pk_lock);
		}
	}

out:
	pr_debug("rc=%d\n", rc);
	return rc;
}

static int ecb_paes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct s390_pecb_req_ctx *req_ctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk *walk = &req_ctx->walk;
	int rc;

	/*
	 * Attempt synchronous encryption first. If it fails, schedule the request
	 * asynchronously via the crypto engine. To preserve execution order,
	 * once a request is queued to the engine, further requests using the same
	 * tfm will also be routed through the engine.
	 */

	rc = skcipher_walk_virt(walk, req, false);
	if (rc)
		goto out;

	req_ctx->modifier = modifier;
	req_ctx->param_init_done = false;

	/* Try synchronous operation if no active engine usage */
	if (!atomic_read(&ctx->via_engine_ctr)) {
		rc = ecb_paes_do_crypt(ctx, req_ctx, false);
		if (rc == 0)
			goto out;
	}

	/*
	 * If the sync operation failed or the key expired or there are already
	 * requests enqueued via the engine, fall back to async. Mark the tfm as
	 * using the engine to serialize requests.
	 */
	if (rc == 0 || rc == -EKEYEXPIRED) {
		atomic_inc(&ctx->via_engine_ctr);
		rc = crypto_transfer_skcipher_request_to_engine(paes_crypto_engine, req);
		if (rc != -EINPROGRESS)
			atomic_dec(&ctx->via_engine_ctr);
	}

	if (rc != -EINPROGRESS)
		skcipher_walk_done(walk, rc);

out:
	if (rc != -EINPROGRESS)
		memzero_explicit(&req_ctx->param, sizeof(req_ctx->param));
	pr_debug("rc=%d\n", rc);
	return rc;
}

static int ecb_paes_encrypt(struct skcipher_request *req)
{
	return ecb_paes_crypt(req, 0);
}

static int ecb_paes_decrypt(struct skcipher_request *req)
{
	return ecb_paes_crypt(req, CPACF_DECRYPT);
}

static int ecb_paes_init(struct crypto_skcipher *tfm)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	memset(ctx, 0, sizeof(*ctx));
	spin_lock_init(&ctx->pk_lock);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct s390_pecb_req_ctx));

	return 0;
}

static void ecb_paes_exit(struct crypto_skcipher *tfm)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	memzero_explicit(ctx, sizeof(*ctx));
}

static int ecb_paes_do_one_request(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *req = skcipher_request_cast(areq);
	struct s390_pecb_req_ctx *req_ctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk *walk = &req_ctx->walk;
	int rc;

	/* walk has already been prepared */

	rc = ecb_paes_do_crypt(ctx, req_ctx, true);
	if (rc == -EKEYEXPIRED) {
		/*
		 * The protected key expired, a conversion is in progress.
		 * Trigger a re-schedule of this request by returning
		 * -ENOSPC ("hardware queue is full") to the crypto engine.
		 * To avoid immediate re-invocation of this callback,
		 * tell the scheduler to voluntarily give up the CPU here.
		 */
		cond_resched();
		pr_debug("rescheduling request\n");
		return -ENOSPC;
	} else if (rc) {
		skcipher_walk_done(walk, rc);
	}

	memzero_explicit(&req_ctx->param, sizeof(req_ctx->param));
	pr_debug("request complete with rc=%d\n", rc);
	local_bh_disable();
	atomic_dec(&ctx->via_engine_ctr);
	crypto_finalize_skcipher_request(engine, req, rc);
	local_bh_enable();
	return rc;
}

static struct skcipher_engine_alg ecb_paes_alg = {
	.base = {
		.base.cra_name = "ecb(paes)",
		.base.cra_driver_name = "ecb-paes-s390",
		.base.cra_priority = 401,	/* combo: aes + ecb + 1 */
		.base.cra_blocksize = AES_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct s390_paes_ctx),
		.base.cra_module = THIS_MODULE,
		.base.cra_list = LIST_HEAD_INIT(ecb_paes_alg.base.base.cra_list),
		.init = ecb_paes_init,
		.exit = ecb_paes_exit,
		.min_keysize = PAES_MIN_KEYSIZE,
		.max_keysize = PAES_MAX_KEYSIZE,
		.setkey = ecb_paes_setkey,
		.encrypt = ecb_paes_encrypt,
		.decrypt = ecb_paes_decrypt,
	},
	.op = {
		.do_one_request = ecb_paes_do_one_request,
	},
};

/*
 * PAES CBC implementation
 */

struct cbc_param {
	u8 iv[AES_BLOCK_SIZE];
	u8 key[PAES_256_PROTKEY_SIZE];
} __packed;

struct s390_pcbc_req_ctx {
	unsigned long modifier;
	struct skcipher_walk walk;
	bool param_init_done;
	struct cbc_param param;
};

static int cbc_paes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
	long fc;
	int rc;

	/* set raw key into context */
	rc = paes_ctx_setkey(ctx, in_key, key_len);
	if (rc)
		goto out;

	/* convert raw key into protected key */
	rc = paes_convert_key(ctx);
	if (rc)
		goto out;

	/* Pick the correct function code based on the protected key type */
	switch (ctx->pk.type) {
	case PKEY_KEYTYPE_AES_128:
		fc = CPACF_KMC_PAES_128;
		break;
	case PKEY_KEYTYPE_AES_192:
		fc = CPACF_KMC_PAES_192;
		break;
	case PKEY_KEYTYPE_AES_256:
		fc = CPACF_KMC_PAES_256;
		break;
	default:
		fc = 0;
		break;
	}
	ctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;

	rc = fc ? 0 : -EINVAL;

out:
	pr_debug("rc=%d\n", rc);
	return rc;
}

static int cbc_paes_do_crypt(struct s390_paes_ctx *ctx,
			     struct s390_pcbc_req_ctx *req_ctx,
			     bool maysleep)
{
	struct cbc_param *param = &req_ctx->param;
	struct skcipher_walk *walk = &req_ctx->walk;
	unsigned int nbytes, n, k;
	int pk_state, rc = 0;

	if (!req_ctx->param_init_done) {
		/* fetch and check protected key state */
		spin_lock_bh(&ctx->pk_lock);
		pk_state = ctx->pk_state;
		switch (pk_state) {
		case PK_STATE_NO_KEY:
			rc = -ENOKEY;
			break;
		case PK_STATE_CONVERT_IN_PROGRESS:
			rc = -EKEYEXPIRED;
			break;
		case PK_STATE_VALID:
			memcpy(param->key, ctx->pk.protkey, sizeof(param->key));
			req_ctx->param_init_done = true;
			break;
		default:
			rc = pk_state < 0 ? pk_state : -EIO;
			break;
		}
		spin_unlock_bh(&ctx->pk_lock);
	}
	if (rc)
		goto out;

	memcpy(param->iv, walk->iv, AES_BLOCK_SIZE);

	/*
	 * Note that in case of partial processing or failure the walk
	 * is NOT unmapped here. So a follow up task may reuse the walk
	 * or in case of unrecoverable failure needs to unmap it.
	 */
	while ((nbytes = walk->nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		k = cpacf_kmc(ctx->fc | req_ctx->modifier, param,
			      walk->dst.virt.addr, walk->src.virt.addr, n);
		if (k) {
			memcpy(walk->iv, param->iv, AES_BLOCK_SIZE);
			rc = skcipher_walk_done(walk, nbytes - k);
		}
		if (k < n) {
			if (!maysleep) {
				rc = -EKEYEXPIRED;
				goto out;
			}
			rc = paes_convert_key(ctx);
			if (rc)
				goto out;
			spin_lock_bh(&ctx->pk_lock);
			memcpy(param->key, ctx->pk.protkey, sizeof(param->key));
			spin_unlock_bh(&ctx->pk_lock);
		}
	}

out:
	pr_debug("rc=%d\n", rc);
	return rc;
}

static int cbc_paes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct s390_pcbc_req_ctx *req_ctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk *walk = &req_ctx->walk;
	int rc;

	/*
	 * Attempt synchronous encryption first. If it fails, schedule the request
	 * asynchronously via the crypto engine. To preserve execution order,
	 * once a request is queued to the engine, further requests using the same
	 * tfm will also be routed through the engine.
	 */

	rc = skcipher_walk_virt(walk, req, false);
	if (rc)
		goto out;

	req_ctx->modifier = modifier;
	req_ctx->param_init_done = false;

	/* Try synchronous operation if no active engine usage */
	if (!atomic_read(&ctx->via_engine_ctr)) {
		rc = cbc_paes_do_crypt(ctx, req_ctx, false);
		if (rc == 0)
			goto out;
	}

	/*
	 * If the sync operation failed or the key expired or there are already
	 * requests enqueued via the engine, fall back to async. Mark the tfm as
	 * using the engine to serialize requests.
	 */
	if (rc == 0 || rc == -EKEYEXPIRED) {
		atomic_inc(&ctx->via_engine_ctr);
		rc = crypto_transfer_skcipher_request_to_engine(paes_crypto_engine, req);
		if (rc != -EINPROGRESS)
			atomic_dec(&ctx->via_engine_ctr);
	}

	if (rc != -EINPROGRESS)
		skcipher_walk_done(walk, rc);

out:
	if (rc != -EINPROGRESS)
		memzero_explicit(&req_ctx->param, sizeof(req_ctx->param));
	pr_debug("rc=%d\n", rc);
	return rc;
}

static int cbc_paes_encrypt(struct skcipher_request *req)
{
	return cbc_paes_crypt(req, 0);
}

static int cbc_paes_decrypt(struct skcipher_request *req)
{
	return cbc_paes_crypt(req, CPACF_DECRYPT);
}

static int cbc_paes_init(struct crypto_skcipher *tfm)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	memset(ctx, 0, sizeof(*ctx));
	spin_lock_init(&ctx->pk_lock);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct s390_pcbc_req_ctx));

	return 0;
}

static void cbc_paes_exit(struct crypto_skcipher *tfm)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	memzero_explicit(ctx, sizeof(*ctx));
}

static int cbc_paes_do_one_request(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *req = skcipher_request_cast(areq);
	struct s390_pcbc_req_ctx *req_ctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk *walk = &req_ctx->walk;
	int rc;

	/* walk has already been prepared */

	rc = cbc_paes_do_crypt(ctx, req_ctx, true);
	if (rc == -EKEYEXPIRED) {
		/*
		 * The protected key expired, a conversion is in progress.
		 * Trigger a re-schedule of this request by returning
		 * -ENOSPC ("hardware queue is full") to the crypto engine.
		 * To avoid immediate re-invocation of this callback,
		 * tell the scheduler to voluntarily give up the CPU here.
		 */
		cond_resched();
		pr_debug("rescheduling request\n");
		return -ENOSPC;
	} else if (rc) {
		skcipher_walk_done(walk, rc);
	}

	memzero_explicit(&req_ctx->param, sizeof(req_ctx->param));
	pr_debug("request complete with rc=%d\n", rc);
	local_bh_disable();
	atomic_dec(&ctx->via_engine_ctr);
	crypto_finalize_skcipher_request(engine, req, rc);
	local_bh_enable();
	return rc;
}

static struct skcipher_engine_alg cbc_paes_alg = {
	.base = {
		.base.cra_name = "cbc(paes)",
		.base.cra_driver_name = "cbc-paes-s390",
		.base.cra_priority = 402,	/* ecb-paes-s390 + 1 */
		.base.cra_blocksize = AES_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct s390_paes_ctx),
		.base.cra_module = THIS_MODULE,
		.base.cra_list = LIST_HEAD_INIT(cbc_paes_alg.base.base.cra_list),
		.init = cbc_paes_init,
		.exit = cbc_paes_exit,
		.min_keysize = PAES_MIN_KEYSIZE,
		.max_keysize = PAES_MAX_KEYSIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = cbc_paes_setkey,
		.encrypt = cbc_paes_encrypt,
		.decrypt = cbc_paes_decrypt,
	},
	.op = {
		.do_one_request = cbc_paes_do_one_request,
	},
};

/*
 * PAES CTR implementation
 */

struct ctr_param {
	u8 key[PAES_256_PROTKEY_SIZE];
} __packed;

struct s390_pctr_req_ctx {
	unsigned long modifier;
	struct skcipher_walk walk;
	bool param_init_done;
	struct ctr_param param;
};

static int ctr_paes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
	long fc;
	int rc;

	/* set raw key into context */
	rc = paes_ctx_setkey(ctx, in_key, key_len);
	if (rc)
		goto out;

	/* convert raw key into protected key */
	rc = paes_convert_key(ctx);
	if (rc)
		goto out;

	/* Pick the correct function code based on the protected key type */
	switch (ctx->pk.type) {
	case PKEY_KEYTYPE_AES_128:
		fc = CPACF_KMCTR_PAES_128;
		break;
	case PKEY_KEYTYPE_AES_192:
		fc = CPACF_KMCTR_PAES_192;
		break;
	case PKEY_KEYTYPE_AES_256:
		fc = CPACF_KMCTR_PAES_256;
		break;
	default:
		fc = 0;
		break;
	}
	ctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;

	rc = fc ? 0 : -EINVAL;

out:
	pr_debug("rc=%d\n", rc);
	return rc;
}

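/*
 * Fill the shared ctrblk page with consecutive counter values derived
 * from iv, so that a single KMCTR invocation below can process several
 * complete blocks (up to PAGE_SIZE bytes) at once. Returns the number
 * of bytes covered by the prepared counter area.
 */
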
static inline unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
	unsigned int i, n;

	/* only use complete blocks, max. PAGE_SIZE */
	memcpy(ctrptr, iv, AES_BLOCK_SIZE);
	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
	for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
		memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
		crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
		ctrptr += AES_BLOCK_SIZE;
	}
	return n;
}

static int ctr_paes_do_crypt(struct s390_paes_ctx *ctx,
			     struct s390_pctr_req_ctx *req_ctx,
			     bool maysleep)
{
	struct ctr_param *param = &req_ctx->param;
	struct skcipher_walk *walk = &req_ctx->walk;
	u8 buf[AES_BLOCK_SIZE], *ctrptr;
	unsigned int nbytes, n, k;
	int pk_state, locked, rc = 0;

	if (!req_ctx->param_init_done) {
		/* fetch and check protected key state */
		spin_lock_bh(&ctx->pk_lock);
		pk_state = ctx->pk_state;
		switch (pk_state) {
		case PK_STATE_NO_KEY:
			rc = -ENOKEY;
			break;
		case PK_STATE_CONVERT_IN_PROGRESS:
			rc = -EKEYEXPIRED;
			break;
		case PK_STATE_VALID:
			memcpy(param->key, ctx->pk.protkey, sizeof(param->key));
			req_ctx->param_init_done = true;
			break;
		default:
			rc = pk_state < 0 ? pk_state : -EIO;
			break;
		}
		spin_unlock_bh(&ctx->pk_lock);
	}
	if (rc)
		goto out;

	locked = mutex_trylock(&ctrblk_lock);

	/*
	 * Note that in case of partial processing or failure the walk
	 * is NOT unmapped here. So a follow up task may reuse the walk
	 * or in case of unrecoverable failure needs to unmap it.
	 */
	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		n = AES_BLOCK_SIZE;
		if (nbytes >= 2 * AES_BLOCK_SIZE && locked)
			n = __ctrblk_init(ctrblk, walk->iv, nbytes);
		ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk->iv;
		k = cpacf_kmctr(ctx->fc, param, walk->dst.virt.addr,
				walk->src.virt.addr, n, ctrptr);
		if (k) {
			if (ctrptr == ctrblk)
				memcpy(walk->iv, ctrptr + k - AES_BLOCK_SIZE,
				       AES_BLOCK_SIZE);
			crypto_inc(walk->iv, AES_BLOCK_SIZE);
			rc = skcipher_walk_done(walk, nbytes - k);
		}
		if (k < n) {
			if (!maysleep) {
				if (locked)
					mutex_unlock(&ctrblk_lock);
				rc = -EKEYEXPIRED;
				goto out;
			}
			rc = paes_convert_key(ctx);
			if (rc) {
				if (locked)
					mutex_unlock(&ctrblk_lock);
				goto out;
			}
			spin_lock_bh(&ctx->pk_lock);
			memcpy(param->key, ctx->pk.protkey, sizeof(param->key));
			spin_unlock_bh(&ctx->pk_lock);
		}
	}
	if (locked)
		mutex_unlock(&ctrblk_lock);

	/* final block may be < AES_BLOCK_SIZE, copy only nbytes */
	if (nbytes) {
		memset(buf, 0, AES_BLOCK_SIZE);
		memcpy(buf, walk->src.virt.addr, nbytes);
		while (1) {
			if (cpacf_kmctr(ctx->fc, param, buf,
					buf, AES_BLOCK_SIZE,
					walk->iv) == AES_BLOCK_SIZE)
				break;
			if (!maysleep) {
				rc = -EKEYEXPIRED;
				goto out;
			}
			rc = paes_convert_key(ctx);
			if (rc)
				goto out;
			spin_lock_bh(&ctx->pk_lock);
			memcpy(param->key, ctx->pk.protkey, sizeof(param->key));
			spin_unlock_bh(&ctx->pk_lock);
		}
		memcpy(walk->dst.virt.addr, buf, nbytes);
		crypto_inc(walk->iv, AES_BLOCK_SIZE);
		rc = skcipher_walk_done(walk, 0);
	}

out:
	pr_debug("rc=%d\n", rc);
	return rc;
}

static int ctr_paes_crypt(struct skcipher_request *req)
{
	struct s390_pctr_req_ctx *req_ctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk *walk = &req_ctx->walk;
	int rc;

	/*
	 * Attempt synchronous encryption first. If it fails, schedule the request
	 * asynchronously via the crypto engine. To preserve execution order,
	 * once a request is queued to the engine, further requests using the same
	 * tfm will also be routed through the engine.
	 */

	rc = skcipher_walk_virt(walk, req, false);
	if (rc)
		goto out;

	req_ctx->param_init_done = false;

	/* Try synchronous operation if no active engine usage */
	if (!atomic_read(&ctx->via_engine_ctr)) {
		rc = ctr_paes_do_crypt(ctx, req_ctx, false);
		if (rc == 0)
			goto out;
	}

	/*
	 * If the sync operation failed or the key expired or there are already
	 * requests enqueued via the engine, fall back to async. Mark the tfm as
	 * using the engine to serialize requests.
	 */
	if (rc == 0 || rc == -EKEYEXPIRED) {
		atomic_inc(&ctx->via_engine_ctr);
		rc = crypto_transfer_skcipher_request_to_engine(paes_crypto_engine, req);
		if (rc != -EINPROGRESS)
			atomic_dec(&ctx->via_engine_ctr);
	}

	if (rc != -EINPROGRESS)
		skcipher_walk_done(walk, rc);

out:
	if (rc != -EINPROGRESS)
		memzero_explicit(&req_ctx->param, sizeof(req_ctx->param));
	pr_debug("rc=%d\n", rc);
	return rc;
}

static int ctr_paes_init(struct crypto_skcipher *tfm)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	memset(ctx, 0, sizeof(*ctx));
	spin_lock_init(&ctx->pk_lock);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct s390_pctr_req_ctx));

	return 0;
}

static void ctr_paes_exit(struct crypto_skcipher *tfm)
{
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);

	memzero_explicit(ctx, sizeof(*ctx));
}

static int ctr_paes_do_one_request(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *req = skcipher_request_cast(areq);
	struct s390_pctr_req_ctx *req_ctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk *walk = &req_ctx->walk;
	int rc;

	/* walk has already been prepared */

	rc = ctr_paes_do_crypt(ctx, req_ctx, true);
	if (rc == -EKEYEXPIRED) {
		/*
		 * The protected key expired, a conversion is in progress.
		 * Trigger a re-schedule of this request by returning
		 * -ENOSPC ("hardware queue is full") to the crypto engine.
		 * To avoid immediate re-invocation of this callback,
		 * tell the scheduler to voluntarily give up the CPU here.
		 */
		cond_resched();
		pr_debug("rescheduling request\n");
		return -ENOSPC;
	} else if (rc) {
		skcipher_walk_done(walk, rc);
	}

	memzero_explicit(&req_ctx->param, sizeof(req_ctx->param));
	pr_debug("request complete with rc=%d\n", rc);
	local_bh_disable();
	atomic_dec(&ctx->via_engine_ctr);
	crypto_finalize_skcipher_request(engine, req, rc);
	local_bh_enable();
	return rc;
}

static struct skcipher_engine_alg ctr_paes_alg = {
	.base = {
		.base.cra_name = "ctr(paes)",
		.base.cra_driver_name = "ctr-paes-s390",
		.base.cra_priority = 402,	/* ecb-paes-s390 + 1 */
		.base.cra_blocksize = 1,
		.base.cra_ctxsize = sizeof(struct s390_paes_ctx),
		.base.cra_module = THIS_MODULE,
		.base.cra_list = LIST_HEAD_INIT(ctr_paes_alg.base.base.cra_list),
		.init = ctr_paes_init,
		.exit = ctr_paes_exit,
		.min_keysize = PAES_MIN_KEYSIZE,
		.max_keysize = PAES_MAX_KEYSIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = ctr_paes_setkey,
		.encrypt = ctr_paes_crypt,
		.decrypt = ctr_paes_crypt,
		.chunksize = AES_BLOCK_SIZE,
	},
	.op = {
		.do_one_request = ctr_paes_do_one_request,
	},
};

/*
 * PAES XTS implementation
 */

struct xts_full_km_param {
	u8 key[64];
	u8 tweak[16];
	u8 nap[16];
	u8 wkvp[32];
} __packed;

struct xts_km_param {
	u8 key[PAES_256_PROTKEY_SIZE];
	u8 init[16];
} __packed;

struct xts_pcc_param {
	u8 key[PAES_256_PROTKEY_SIZE];
	u8 tweak[16];
	u8 block[16];
	u8 bit[16];
	u8 xts[16];
} __packed;

struct s390_pxts_req_ctx {
	unsigned long modifier;
	struct skcipher_walk walk;
	bool param_init_done;
	union {
		struct xts_full_km_param full_km_param;
		struct xts_km_param km_param;
	} param;
};

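/*
 * Note on the parameter blocks above, as used by the code below:
 * xts_full_km_param is the layout for the full AES-XTS protected key
 * types (key value, tweak, "nap" alpha power and wrapping key
 * verification pattern), while xts_km_param is the classic 2-key
 * layout whose init field receives the initial tweak value computed
 * beforehand via the PCC instruction with an xts_pcc_param block.
 */
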
static int xts_paes_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int in_keylen)
{
	struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
	u8 ckey[2 * AES_MAX_KEY_SIZE];
	unsigned int ckey_len;
	long fc;
	int rc;

	if ((in_keylen == 32 || in_keylen == 64) &&
	    xts_verify_key(tfm, in_key, in_keylen))
		return -EINVAL;

	/* set raw key into context */
	rc = pxts_ctx_setkey(ctx, in_key, in_keylen);
	if (rc)
		goto out;

	/* convert raw key(s) into protected key(s) */
	rc = pxts_convert_key(ctx);
	if (rc)
		goto out;

	/*
	 * xts_verify_key verifies the key length is not odd and makes
	 * sure that the two keys are not the same. This can be done
	 * on the two protected keys as well - but not for full xts keys.
	 */
	if (ctx->pk[0].type == PKEY_KEYTYPE_AES_128 ||
	    ctx->pk[0].type == PKEY_KEYTYPE_AES_256) {
		ckey_len = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ?
			AES_KEYSIZE_128 : AES_KEYSIZE_256;
		memcpy(ckey, ctx->pk[0].protkey, ckey_len);
		memcpy(ckey + ckey_len, ctx->pk[1].protkey, ckey_len);
		rc = xts_verify_key(tfm, ckey, 2 * ckey_len);
		memzero_explicit(ckey, sizeof(ckey));
		if (rc)
			goto out;
	}

	/* Pick the correct function code based on the protected key type */
	switch (ctx->pk[0].type) {
	case PKEY_KEYTYPE_AES_128:
		fc = CPACF_KM_PXTS_128;
		break;
	case PKEY_KEYTYPE_AES_256:
		fc = CPACF_KM_PXTS_256;
		break;
	case PKEY_KEYTYPE_AES_XTS_128:
		fc = CPACF_KM_PXTS_128_FULL;
		break;
	case PKEY_KEYTYPE_AES_XTS_256:
		fc = CPACF_KM_PXTS_256_FULL;
		break;
	default:
		fc = 0;
		break;
	}
	ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;

	rc = fc ? 0 : -EINVAL;

out:
	pr_debug("rc=%d\n", rc);
	return rc;
}

static int xts_paes_do_crypt_fullkey(struct s390_pxts_ctx *ctx,
				     struct s390_pxts_req_ctx *req_ctx,
				     bool maysleep)
{
	struct xts_full_km_param *param = &req_ctx->param.full_km_param;
	struct skcipher_walk *walk = &req_ctx->walk;
	unsigned int keylen, offset, nbytes, n, k;
	int rc = 0;

	/*
	 * The calling function xts_paes_do_crypt() ensures the
	 * protected key state is always PK_STATE_VALID when this
	 * function is invoked.
	 */

	/*
	 * For the XTS-128 full key type only 32 bytes of the 64-byte key
	 * field are used; the protected key value is placed right-aligned
	 * (offset 32) so that param->key + offset always points to a
	 * contiguous block of key, tweak, nap and wkvp as handed to KM.
	 */
	keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_XTS_128) ? 32 : 64;
	offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_XTS_128) ? 32 : 0;

	if (!req_ctx->param_init_done) {
		memset(param, 0, sizeof(*param));
		spin_lock_bh(&ctx->pk_lock);
		memcpy(param->key + offset, ctx->pk[0].protkey, keylen);
		memcpy(param->wkvp, ctx->pk[0].protkey + keylen, sizeof(param->wkvp));
		spin_unlock_bh(&ctx->pk_lock);
		memcpy(param->tweak, walk->iv, sizeof(param->tweak));
		param->nap[0] = 0x01;	/* initial alpha power (1, little-endian) */
		req_ctx->param_init_done = true;
	}

	/*
	 * Note that in case of partial processing or failure the walk
	 * is NOT unmapped here. So a follow up task may reuse the walk
	 * or in case of unrecoverable failure needs to unmap it.
	 */
	while ((nbytes = walk->nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		k = cpacf_km(ctx->fc | req_ctx->modifier, param->key + offset,
			     walk->dst.virt.addr, walk->src.virt.addr, n);
		if (k)
			rc = skcipher_walk_done(walk, nbytes - k);
		if (k < n) {
			if (!maysleep) {
				rc = -EKEYEXPIRED;
				goto out;
			}
			rc = pxts_convert_key(ctx);
			if (rc)
				goto out;
			spin_lock_bh(&ctx->pk_lock);
			memcpy(param->key + offset, ctx->pk[0].protkey, keylen);
			memcpy(param->wkvp, ctx->pk[0].protkey + keylen, sizeof(param->wkvp));
			spin_unlock_bh(&ctx->pk_lock);
		}
	}

out:
	pr_debug("rc=%d\n", rc);
	return rc;
}

static inline int __xts_2keys_prep_param(struct s390_pxts_ctx *ctx,
					 struct xts_km_param *param,
					 struct skcipher_walk *walk,
					 unsigned int keylen,
					 unsigned int offset, bool maysleep)
{
	struct xts_pcc_param pcc_param;
	unsigned long cc = 1;
	int rc = 0;

	while (cc) {
		memset(&pcc_param, 0, sizeof(pcc_param));
		memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
		spin_lock_bh(&ctx->pk_lock);
		memcpy(pcc_param.key + offset, ctx->pk[1].protkey, keylen);
		memcpy(param->key + offset, ctx->pk[0].protkey, keylen);
		spin_unlock_bh(&ctx->pk_lock);
		cc = cpacf_pcc(ctx->fc, pcc_param.key + offset);
		if (cc) {
			if (!maysleep) {
				rc = -EKEYEXPIRED;
				break;
			}
			rc = pxts_convert_key(ctx);
			if (rc)
				break;
			continue;
		}
		memcpy(param->init, pcc_param.xts, 16);
	}

	memzero_explicit(pcc_param.key, sizeof(pcc_param.key));
	return rc;
}

static int xts_paes_do_crypt_2keys(struct s390_pxts_ctx *ctx,
				   struct s390_pxts_req_ctx *req_ctx,
				   bool maysleep)
{
	struct xts_km_param *param = &req_ctx->param.km_param;
	struct skcipher_walk *walk = &req_ctx->walk;
	unsigned int keylen, offset, nbytes, n, k;
	int rc = 0;

	/*
	 * The calling function xts_paes_do_crypt() ensures the
	 * protected key state is always PK_STATE_VALID when this
	 * function is invoked.
	 */

	keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 48 : 64;
	offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 16 : 0;

	if (!req_ctx->param_init_done) {
		rc = __xts_2keys_prep_param(ctx, param, walk,
					    keylen, offset, maysleep);
		if (rc)
			goto out;
		req_ctx->param_init_done = true;
	}

	/*
	 * Note that in case of partial processing or failure the walk
	 * is NOT unmapped here. So a follow up task may reuse the walk
	 * or in case of unrecoverable failure needs to unmap it.
	 */
	while ((nbytes = walk->nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		k = cpacf_km(ctx->fc | req_ctx->modifier, param->key + offset,
			     walk->dst.virt.addr, walk->src.virt.addr, n);
		if (k)
			rc = skcipher_walk_done(walk, nbytes - k);
		if (k < n) {
			if (!maysleep) {
				rc = -EKEYEXPIRED;
				goto out;
			}
			rc = pxts_convert_key(ctx);
			if (rc)
				goto out;
			spin_lock_bh(&ctx->pk_lock);
			memcpy(param->key + offset, ctx->pk[0].protkey, keylen);
			spin_unlock_bh(&ctx->pk_lock);
		}
	}

out:
	pr_debug("rc=%d\n", rc);
	return rc;
}

static int xts_paes_do_crypt(struct s390_pxts_ctx *ctx,
			     struct s390_pxts_req_ctx *req_ctx,
			     bool maysleep)
{
	int pk_state, rc = 0;

	/* fetch and check protected key state */
	spin_lock_bh(&ctx->pk_lock);
	pk_state = ctx->pk_state;
	switch (pk_state) {
	case PK_STATE_NO_KEY:
		rc = -ENOKEY;
		break;
	case PK_STATE_CONVERT_IN_PROGRESS:
		rc = -EKEYEXPIRED;
		break;
	case PK_STATE_VALID:
		break;
	default:
		rc = pk_state < 0 ? pk_state : -EIO;
		break;
	}
	spin_unlock_bh(&ctx->pk_lock);
	if (rc)
		goto out;

	/* Call the 'real' crypt function based on the xts prot key type. */
	switch (ctx->fc) {
	case CPACF_KM_PXTS_128:
	case CPACF_KM_PXTS_256:
		rc = xts_paes_do_crypt_2keys(ctx, req_ctx, maysleep);
		break;
	case CPACF_KM_PXTS_128_FULL:
	case CPACF_KM_PXTS_256_FULL:
		rc = xts_paes_do_crypt_fullkey(ctx, req_ctx, maysleep);
		break;
	default:
		rc = -EINVAL;
	}

out:
	pr_debug("rc=%d\n", rc);
	return rc;
}

static inline int xts_paes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct s390_pxts_req_ctx *req_ctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk *walk = &req_ctx->walk;
	int rc;

	/*
	 * Attempt synchronous encryption first. If it fails, schedule the request
	 * asynchronously via the crypto engine. To preserve execution order,
	 * once a request is queued to the engine, further requests using the same
	 * tfm will also be routed through the engine.
	 */

	rc = skcipher_walk_virt(walk, req, false);
	if (rc)
		goto out;

	req_ctx->modifier = modifier;
	req_ctx->param_init_done = false;

	/* Try synchronous operation if no active engine usage */
	if (!atomic_read(&ctx->via_engine_ctr)) {
		rc = xts_paes_do_crypt(ctx, req_ctx, false);
		if (rc == 0)
			goto out;
	}

	/*
	 * If the sync operation failed or the key expired or there are already
	 * requests enqueued via the engine, fall back to async. Mark the tfm as
	 * using the engine to serialize requests.
	 */
	if (rc == 0 || rc == -EKEYEXPIRED) {
		atomic_inc(&ctx->via_engine_ctr);
		rc = crypto_transfer_skcipher_request_to_engine(paes_crypto_engine, req);
		if (rc != -EINPROGRESS)
			atomic_dec(&ctx->via_engine_ctr);
	}

	if (rc != -EINPROGRESS)
		skcipher_walk_done(walk, rc);

out:
	if (rc != -EINPROGRESS)
		memzero_explicit(&req_ctx->param, sizeof(req_ctx->param));
	pr_debug("rc=%d\n", rc);
	return rc;
}

static int xts_paes_encrypt(struct skcipher_request *req)
{
	return xts_paes_crypt(req, 0);
}

static int xts_paes_decrypt(struct skcipher_request *req)
{
	return xts_paes_crypt(req, CPACF_DECRYPT);
}

static int xts_paes_init(struct crypto_skcipher *tfm)
{
	struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);

	memset(ctx, 0, sizeof(*ctx));
	spin_lock_init(&ctx->pk_lock);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct s390_pxts_req_ctx));

	return 0;
}

static void xts_paes_exit(struct crypto_skcipher *tfm)
{
	struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);

	memzero_explicit(ctx, sizeof(*ctx));
}

static int xts_paes_do_one_request(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *req = skcipher_request_cast(areq);
	struct s390_pxts_req_ctx *req_ctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk *walk = &req_ctx->walk;
	int rc;

	/* walk has already been prepared */

	rc = xts_paes_do_crypt(ctx, req_ctx, true);
	if (rc == -EKEYEXPIRED) {
		/*
		 * The protected key expired, a conversion is in progress.
		 * Trigger a re-schedule of this request by returning
		 * -ENOSPC ("hardware queue is full") to the crypto engine.
		 * To avoid immediate re-invocation of this callback,
		 * tell the scheduler to voluntarily give up the CPU here.
		 */
		cond_resched();
		pr_debug("rescheduling request\n");
		return -ENOSPC;
	} else if (rc) {
		skcipher_walk_done(walk, rc);
	}

	memzero_explicit(&req_ctx->param, sizeof(req_ctx->param));
	pr_debug("request complete with rc=%d\n", rc);
	local_bh_disable();
	atomic_dec(&ctx->via_engine_ctr);
	crypto_finalize_skcipher_request(engine, req, rc);
	local_bh_enable();
	return rc;
}

static struct skcipher_engine_alg xts_paes_alg = {
	.base = {
		.base.cra_name = "xts(paes)",
		.base.cra_driver_name = "xts-paes-s390",
		.base.cra_priority = 402,	/* ecb-paes-s390 + 1 */
		.base.cra_blocksize = AES_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct s390_pxts_ctx),
		.base.cra_module = THIS_MODULE,
		.base.cra_list = LIST_HEAD_INIT(xts_paes_alg.base.base.cra_list),
		.init = xts_paes_init,
		.exit = xts_paes_exit,
		.min_keysize = 2 * PAES_MIN_KEYSIZE,
		.max_keysize = 2 * PAES_MAX_KEYSIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = xts_paes_setkey,
		.encrypt = xts_paes_encrypt,
		.decrypt = xts_paes_decrypt,
	},
	.op = {
		.do_one_request = xts_paes_do_one_request,
	},
};

/*
 * alg register, unregister, module init, exit
 */

static struct miscdevice paes_dev = {
	.name	= "paes",
	.minor	= MISC_DYNAMIC_MINOR,
};

static inline void __crypto_unregister_skcipher(struct skcipher_engine_alg *alg)
{
	if (!list_empty(&alg->base.base.cra_list))
		crypto_engine_unregister_skcipher(alg);
}

static void paes_s390_fini(void)
{
	if (paes_crypto_engine) {
		crypto_engine_stop(paes_crypto_engine);
		crypto_engine_exit(paes_crypto_engine);
	}
	__crypto_unregister_skcipher(&ctr_paes_alg);
	__crypto_unregister_skcipher(&xts_paes_alg);
	__crypto_unregister_skcipher(&cbc_paes_alg);
	__crypto_unregister_skcipher(&ecb_paes_alg);
	if (ctrblk)
		free_page((unsigned long)ctrblk);
	misc_deregister(&paes_dev);
}

static int __init paes_s390_init(void)
{
	int rc;

	/* register a simple paes pseudo misc device */
	rc = misc_register(&paes_dev);
	if (rc)
		return rc;

	/* with this pseudo device alloc and start a crypto engine */
	paes_crypto_engine =
		crypto_engine_alloc_init_and_set(paes_dev.this_device,
						 true, false, MAX_QLEN);
	if (!paes_crypto_engine) {
		rc = -ENOMEM;
		goto out_err;
	}
	rc = crypto_engine_start(paes_crypto_engine);
	if (rc) {
		crypto_engine_exit(paes_crypto_engine);
		paes_crypto_engine = NULL;
		goto out_err;
	}

	/* Query available functions for KM, KMC and KMCTR */
	cpacf_query(CPACF_KM, &km_functions);
	cpacf_query(CPACF_KMC, &kmc_functions);
	cpacf_query(CPACF_KMCTR, &kmctr_functions);

	if (cpacf_test_func(&km_functions, CPACF_KM_PAES_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_PAES_192) ||
	    cpacf_test_func(&km_functions, CPACF_KM_PAES_256)) {
		rc = crypto_engine_register_skcipher(&ecb_paes_alg);
		if (rc)
			goto out_err;
		pr_debug("%s registered\n", ecb_paes_alg.base.base.cra_driver_name);
	}

	if (cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_128) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_192) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_256)) {
		rc = crypto_engine_register_skcipher(&cbc_paes_alg);
		if (rc)
			goto out_err;
		pr_debug("%s registered\n", cbc_paes_alg.base.base.cra_driver_name);
	}

	if (cpacf_test_func(&km_functions, CPACF_KM_PXTS_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_PXTS_256)) {
		rc = crypto_engine_register_skcipher(&xts_paes_alg);
		if (rc)
			goto out_err;
		pr_debug("%s registered\n", xts_paes_alg.base.base.cra_driver_name);
	}

	if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_128) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_192) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_256)) {
		ctrblk = (u8 *)__get_free_page(GFP_KERNEL);
		if (!ctrblk) {
			rc = -ENOMEM;
			goto out_err;
		}
		rc = crypto_engine_register_skcipher(&ctr_paes_alg);
		if (rc)
			goto out_err;
		pr_debug("%s registered\n", ctr_paes_alg.base.base.cra_driver_name);
	}

	return 0;

out_err:
	paes_s390_fini();
	return rc;
}

module_init(paes_s390_init);
module_exit(paes_s390_fini);

MODULE_ALIAS_CRYPTO("ecb(paes)");
MODULE_ALIAS_CRYPTO("cbc(paes)");
MODULE_ALIAS_CRYPTO("ctr(paes)");
MODULE_ALIAS_CRYPTO("xts(paes)");

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm with protected keys");
MODULE_LICENSE("GPL");