// SPDX-License-Identifier: GPL-2.0+
/*
 * Cryptographic API.
 *
 * s390 implementation of the AES Cipher Algorithm.
 *
 * s390 Version:
 *   Copyright IBM Corp. 2005, 2017
 *   Author(s): Jan Glauber (jang@de.ibm.com)
 *		Sebastian Siewior <sebastian@breakpoint.cc> SW-Fallback
 *		Patrick Steuer <patrick.steuer@de.ibm.com>
 *		Harald Freudenberger <freude@de.ibm.com>
 *
 * Derived from "crypto/aes_generic.c"
 */

#define KMSG_COMPONENT "aes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/ghash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fips.h>
#include <linux/string.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>

static u8 *ctrblk;
static DEFINE_MUTEX(ctrblk_lock);

static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
		    kma_functions;

struct s390_aes_ctx {
	u8 key[AES_MAX_KEY_SIZE];
	int key_len;
	unsigned long fc;
	union {
		struct crypto_skcipher *skcipher;
		struct crypto_cipher *cip;
	} fallback;
};

struct s390_xts_ctx {
	union {
		u8 keys[64];
		struct {
			u8 key[32];
			u8 pcc_key[32];
		};
	};
	int key_len;
	unsigned long fc;
	struct crypto_skcipher *fallback;
};

struct gcm_sg_walk {
	struct scatter_walk walk;
	unsigned int walk_bytes;
	unsigned int walk_bytes_remain;
	u8 buf[AES_BLOCK_SIZE];
	unsigned int buf_bytes;
	u8 *ptr;
	unsigned int nbytes;
};

static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
			       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
					       CRYPTO_TFM_REQ_MASK);

	return crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_cip(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

static void crypto_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
}

static void crypto_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc | CPACF_DECRYPT,
		 &sctx->key, out, in, AES_BLOCK_SIZE);
}

static int fallback_init_cip(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.cip = crypto_alloc_cipher(name, 0,
						 CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(sctx->fallback.cip)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.cip);
	}

	return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(sctx->fallback.cip);
	sctx->fallback.cip = NULL;
}

static struct crypto_alg aes_alg = {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-s390",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct s390_aes_ctx),
	.cra_module		= THIS_MODULE,
	.cra_init		= fallback_init_cip,
	.cra_exit		= fallback_exit_cip,
	.cra_u			= {
		.cipher = {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= crypto_aes_encrypt,
			.cia_decrypt		= crypto_aes_decrypt,
		}
	}
};

static int setkey_fallback_skcipher(struct crypto_skcipher *tfm, const u8 *key,
				    unsigned int len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_clear_flags(sctx->fallback.skcipher,
				    CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(sctx->fallback.skcipher,
				  crypto_skcipher_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(sctx->fallback.skcipher, key, len);
}

static int fallback_skcipher_crypt(struct s390_aes_ctx *sctx,
				   struct skcipher_request *req,
				   unsigned long modifier)
{
	struct skcipher_request *subreq = skcipher_request_ctx(req);

	*subreq = *req;
	skcipher_request_set_tfm(subreq, sctx->fallback.skcipher);
	return (modifier & CPACF_DECRYPT) ?
		crypto_skcipher_decrypt(subreq) :
		crypto_skcipher_encrypt(subreq);
}

static int ecb_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

static int ecb_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes, n;
	int ret;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, modifier);

	ret = skcipher_walk_virt(&walk, req, false);
	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(sctx->fc | modifier, sctx->key,
			 walk.dst.virt.addr, walk.src.virt.addr, n);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	return ret;
}

static int ecb_aes_encrypt(struct skcipher_request *req)
{
	return ecb_aes_crypt(req, 0);
}

static int ecb_aes_decrypt(struct skcipher_request *req)
{
	return ecb_aes_crypt(req, CPACF_DECRYPT);
}

static int fallback_init_skcipher(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	sctx->fallback.skcipher = crypto_alloc_skcipher(name, 0,
				CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);

	if (IS_ERR(sctx->fallback.skcipher)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.skcipher);
	}

	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
				    crypto_skcipher_reqsize(sctx->fallback.skcipher));
	return 0;
}

static void fallback_exit_skcipher(struct crypto_skcipher *tfm)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(sctx->fallback.skcipher);
}

static struct skcipher_alg ecb_aes_alg = {
	.base.cra_name		= "ecb(aes)",
	.base.cra_driver_name	= "ecb-aes-s390",
	.base.cra_priority	= 401,	/* combo: aes + ecb + 1 */
	.base.cra_flags		= CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct s390_aes_ctx),
	.base.cra_module	= THIS_MODULE,
	.init			= fallback_init_skcipher,
	.exit			= fallback_exit_skcipher,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= ecb_aes_set_key,
	.encrypt		= ecb_aes_encrypt,
	.decrypt		= ecb_aes_decrypt,
};

static int cbc_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMC_AES_128 :
	     (key_len == 24) ? CPACF_KMC_AES_192 :
	     (key_len == 32) ? CPACF_KMC_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

static int cbc_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes, n;
	int ret;
	struct {
		u8 iv[AES_BLOCK_SIZE];
		u8 key[AES_MAX_KEY_SIZE];
	} param;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, modifier);

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;
	memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
	memcpy(param.key, sctx->key, sctx->key_len);
	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_kmc(sctx->fc | modifier, &param,
			  walk.dst.virt.addr, walk.src.virt.addr, n);
		memcpy(walk.iv, param.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	memzero_explicit(&param, sizeof(param));
	return ret;
}

static int cbc_aes_encrypt(struct skcipher_request *req)
{
	return cbc_aes_crypt(req, 0);
}

static int cbc_aes_decrypt(struct skcipher_request *req)
{
	return cbc_aes_crypt(req, CPACF_DECRYPT);
}

static struct skcipher_alg cbc_aes_alg = {
	.base.cra_name		= "cbc(aes)",
	.base.cra_driver_name	= "cbc-aes-s390",
	.base.cra_priority	= 402,	/* ecb-aes-s390 + 1 */
	.base.cra_flags		= CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct s390_aes_ctx),
	.base.cra_module	= THIS_MODULE,
	.init			= fallback_init_skcipher,
	.exit			= fallback_exit_skcipher,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= cbc_aes_set_key,
	.encrypt		= cbc_aes_encrypt,
	.decrypt		= cbc_aes_decrypt,
};

static int xts_fallback_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       unsigned int len)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(xts_ctx->fallback,
				  crypto_skcipher_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(xts_ctx->fallback, key, len);
}

static int xts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;
	int err;

	err = xts_fallback_setkey(tfm, in_key, key_len);
	if (err)
		return err;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 32) ? CPACF_KM_XTS_128 :
	     (key_len == 64) ? CPACF_KM_XTS_256 : 0;

	/* Check if the function code is available */
	xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!xts_ctx->fc)
		return 0;

	/* Split the XTS key into the two subkeys */
	key_len = key_len / 2;
	xts_ctx->key_len = key_len;
	memcpy(xts_ctx->key, in_key, key_len);
	memcpy(xts_ctx->pcc_key, in_key + key_len, key_len);
	return 0;
}

static int xts_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int offset, nbytes, n;
	int ret;
	struct {
		u8 key[32];
		u8 tweak[16];
		u8 block[16];
		u8 bit[16];
		u8 xts[16];
	} pcc_param;
	struct {
		u8 key[32];
		u8 init[16];
	} xts_param;

	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	if (unlikely(!xts_ctx->fc || (req->cryptlen % AES_BLOCK_SIZE) != 0)) {
		struct skcipher_request *subreq = skcipher_request_ctx(req);

		*subreq = *req;
		skcipher_request_set_tfm(subreq, xts_ctx->fallback);
		return (modifier & CPACF_DECRYPT) ?
			crypto_skcipher_decrypt(subreq) :
			crypto_skcipher_encrypt(subreq);
	}

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;
	offset = xts_ctx->key_len & 0x10;
	memset(pcc_param.block, 0, sizeof(pcc_param.block));
	memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
	memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
	memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak));
	memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
	cpacf_pcc(xts_ctx->fc, pcc_param.key + offset);

	memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
	memcpy(xts_param.init, pcc_param.xts, 16);

	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(xts_ctx->fc | modifier, xts_param.key + offset,
			 walk.dst.virt.addr, walk.src.virt.addr, n);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	memzero_explicit(&pcc_param, sizeof(pcc_param));
	memzero_explicit(&xts_param, sizeof(xts_param));
	return ret;
}

static int xts_aes_encrypt(struct skcipher_request *req)
{
	return xts_aes_crypt(req, 0);
}

static int xts_aes_decrypt(struct skcipher_request *req)
{
	return xts_aes_crypt(req, CPACF_DECRYPT);
}

static int xts_fallback_init(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	xts_ctx->fallback = crypto_alloc_skcipher(name, 0,
				CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);

	if (IS_ERR(xts_ctx->fallback)) {
		pr_err("Allocating XTS fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(xts_ctx->fallback);
	}
	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
				    crypto_skcipher_reqsize(xts_ctx->fallback));
	return 0;
}

static void xts_fallback_exit(struct crypto_skcipher *tfm)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(xts_ctx->fallback);
}

static struct skcipher_alg xts_aes_alg = {
	.base.cra_name		= "xts(aes)",
	.base.cra_driver_name	= "xts-aes-s390",
	.base.cra_priority	= 402,	/* ecb-aes-s390 + 1 */
	.base.cra_flags		= CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct s390_xts_ctx),
	.base.cra_module	= THIS_MODULE,
	.init			= xts_fallback_init,
	.exit			= xts_fallback_exit,
	.min_keysize		= 2 * AES_MIN_KEY_SIZE,
	.max_keysize		= 2 * AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= xts_aes_set_key,
	.encrypt		= xts_aes_encrypt,
	.decrypt		= xts_aes_decrypt,
};

static int fullxts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			       unsigned int key_len)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;
	int err;

	err = xts_fallback_setkey(tfm, in_key, key_len);
	if (err)
		return err;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 32) ? CPACF_KM_XTS_128_FULL :
	     (key_len == 64) ? CPACF_KM_XTS_256_FULL : 0;

	/* Check if the function code is available */
	xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!xts_ctx->fc)
		return 0;

	/* Store double-key */
	memcpy(xts_ctx->keys, in_key, key_len);
	xts_ctx->key_len = key_len;
	return 0;
}

static int fullxts_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
	unsigned int offset, nbytes, n;
	struct skcipher_walk walk;
	int ret;
	struct {
		__u8 key[64];
		__u8 tweak[16];
		__u8 nap[16];
	} fxts_param = {
		.nap = {0},
	};

	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	if (unlikely(!xts_ctx->fc || (req->cryptlen % AES_BLOCK_SIZE) != 0)) {
		struct skcipher_request *subreq = skcipher_request_ctx(req);

		*subreq = *req;
		skcipher_request_set_tfm(subreq, xts_ctx->fallback);
		return (modifier & CPACF_DECRYPT) ?
			crypto_skcipher_decrypt(subreq) :
			crypto_skcipher_encrypt(subreq);
	}

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;

	offset = xts_ctx->key_len & 0x20;
	memcpy(fxts_param.key + offset, xts_ctx->keys, xts_ctx->key_len);
	memcpy(fxts_param.tweak, req->iv, AES_BLOCK_SIZE);
	fxts_param.nap[0] = 0x01; /* initial alpha power (1, little-endian) */

	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(xts_ctx->fc | modifier, fxts_param.key + offset,
			 walk.dst.virt.addr, walk.src.virt.addr, n);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	memzero_explicit(&fxts_param, sizeof(fxts_param));
	return ret;
}

static int fullxts_aes_encrypt(struct skcipher_request *req)
{
	return fullxts_aes_crypt(req, 0);
}

static int fullxts_aes_decrypt(struct skcipher_request *req)
{
	return fullxts_aes_crypt(req, CPACF_DECRYPT);
}

static struct skcipher_alg fullxts_aes_alg = {
	.base.cra_name		= "xts(aes)",
	.base.cra_driver_name	= "full-xts-aes-s390",
	.base.cra_priority	= 403,	/* aes-xts-s390 + 1 */
	.base.cra_flags		= CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct s390_xts_ctx),
	.base.cra_module	= THIS_MODULE,
	.init			= xts_fallback_init,
	.exit			= xts_fallback_exit,
	.min_keysize		= 2 * AES_MIN_KEY_SIZE,
	.max_keysize		= 2 * AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= fullxts_aes_set_key,
	.encrypt		= fullxts_aes_encrypt,
	.decrypt		= fullxts_aes_decrypt,
};

static int ctr_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMCTR_AES_128 :
	     (key_len == 24) ? CPACF_KMCTR_AES_192 :
	     (key_len == 32) ? CPACF_KMCTR_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
	unsigned int i, n;

	/* only use complete blocks, max. PAGE_SIZE */
	memcpy(ctrptr, iv, AES_BLOCK_SIZE);
	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
	for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
		memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
		crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
		ctrptr += AES_BLOCK_SIZE;
	}
	return n;
}

static int ctr_aes_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	u8 buf[AES_BLOCK_SIZE], *ctrptr;
	struct skcipher_walk walk;
	unsigned int n, nbytes;
	int ret, locked;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, 0);

	locked = mutex_trylock(&ctrblk_lock);

	ret = skcipher_walk_virt(&walk, req, false);
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		n = AES_BLOCK_SIZE;

		if (nbytes >= 2*AES_BLOCK_SIZE && locked)
			n = __ctrblk_init(ctrblk, walk.iv, nbytes);
		ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv;
		cpacf_kmctr(sctx->fc, sctx->key, walk.dst.virt.addr,
			    walk.src.virt.addr, n, ctrptr);
		if (ctrptr == ctrblk)
			memcpy(walk.iv, ctrptr + n - AES_BLOCK_SIZE,
			       AES_BLOCK_SIZE);
		crypto_inc(walk.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	if (locked)
		mutex_unlock(&ctrblk_lock);
	/*
	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
	 */
	if (nbytes) {
		memset(buf, 0, AES_BLOCK_SIZE);
		memcpy(buf, walk.src.virt.addr, nbytes);
		cpacf_kmctr(sctx->fc, sctx->key, buf, buf,
			    AES_BLOCK_SIZE, walk.iv);
		memcpy(walk.dst.virt.addr, buf, nbytes);
		crypto_inc(walk.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, 0);
	}

	return ret;
}

static struct skcipher_alg ctr_aes_alg = {
	.base.cra_name		= "ctr(aes)",
	.base.cra_driver_name	= "ctr-aes-s390",
	.base.cra_priority	= 402,	/* ecb-aes-s390 + 1 */
	.base.cra_flags		= CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct s390_aes_ctx),
	.base.cra_module	= THIS_MODULE,
	.init			= fallback_init_skcipher,
	.exit			= fallback_exit_skcipher,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= ctr_aes_set_key,
	.encrypt		= ctr_aes_crypt,
	.decrypt		= ctr_aes_crypt,
	.chunksize		= AES_BLOCK_SIZE,
};

static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->fc = CPACF_KMA_GCM_AES_128;
		break;
	case AES_KEYSIZE_192:
		ctx->fc = CPACF_KMA_GCM_AES_192;
		break;
	case AES_KEYSIZE_256:
		ctx->fc = CPACF_KMA_GCM_AES_256;
		break;
	default:
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->key_len = keylen;
	return 0;
}

static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 8:
	case 12:
	case 13:
	case 14:
	case 15:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void gcm_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
			   unsigned int len)
{
	memset(gw, 0, sizeof(*gw));
	gw->walk_bytes_remain = len;
	scatterwalk_start(&gw->walk, sg);
}

static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw)
{
	if (gw->walk_bytes_remain == 0)
		return 0;
	gw->walk_bytes = scatterwalk_next(&gw->walk, gw->walk_bytes_remain);
	return gw->walk_bytes;
}

static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
					     unsigned int nbytes, bool out)
{
	gw->walk_bytes_remain -= nbytes;
	if (out)
		scatterwalk_done_dst(&gw->walk, nbytes);
	else
		scatterwalk_done_src(&gw->walk, nbytes);
}

static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
	int n;

	if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
		gw->ptr = gw->buf;
		gw->nbytes = gw->buf_bytes;
		goto out;
	}

	if (gw->walk_bytes_remain == 0) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!_gcm_sg_clamp_and_map(gw)) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
		gw->ptr = gw->walk.addr;
		gw->nbytes = gw->walk_bytes;
		goto out;
	}
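
	/*
	 * Not enough bytes in the current scatterlist segment: gather data
	 * into the local buffer until at least minbytesneeded bytes are
	 * available or the input is exhausted.
	 */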
	while (1) {
		n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
		memcpy(gw->buf + gw->buf_bytes, gw->walk.addr, n);
		gw->buf_bytes += n;
		_gcm_sg_unmap_and_advance(gw, n, false);
		if (gw->buf_bytes >= minbytesneeded) {
			gw->ptr = gw->buf;
			gw->nbytes = gw->buf_bytes;
			goto out;
		}
		if (!_gcm_sg_clamp_and_map(gw)) {
			gw->ptr = NULL;
			gw->nbytes = 0;
			goto out;
		}
	}

out:
	return gw->nbytes;
}

static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
	if (gw->walk_bytes_remain == 0) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!_gcm_sg_clamp_and_map(gw)) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (gw->walk_bytes >= minbytesneeded) {
		gw->ptr = gw->walk.addr;
		gw->nbytes = gw->walk_bytes;
		goto out;
	}

	scatterwalk_unmap(&gw->walk);

	gw->ptr = gw->buf;
	gw->nbytes = sizeof(gw->buf);

out:
	return gw->nbytes;
}

static int gcm_in_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
	if (gw->ptr == NULL)
		return 0;

	if (gw->ptr == gw->buf) {
		int n = gw->buf_bytes - bytesdone;
		if (n > 0) {
			memmove(gw->buf, gw->buf + bytesdone, n);
			gw->buf_bytes = n;
		} else
			gw->buf_bytes = 0;
	} else
		_gcm_sg_unmap_and_advance(gw, bytesdone, false);

	return bytesdone;
}

static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
	int i, n;

	if (gw->ptr == NULL)
		return 0;

	if (gw->ptr == gw->buf) {
		for (i = 0; i < bytesdone; i += n) {
			if (!_gcm_sg_clamp_and_map(gw))
				return i;
			n = min(gw->walk_bytes, bytesdone - i);
			memcpy(gw->walk.addr, gw->buf + i, n);
			_gcm_sg_unmap_and_advance(gw, n, true);
		}
	} else
		_gcm_sg_unmap_and_advance(gw, bytesdone, true);

	return bytesdone;
}

static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int ivsize = crypto_aead_ivsize(tfm);
	unsigned int taglen = crypto_aead_authsize(tfm);
	unsigned int aadlen = req->assoclen;
	unsigned int pclen = req->cryptlen;
	int ret = 0;

	unsigned int n, len, in_bytes, out_bytes,
		     min_bytes, bytes, aad_bytes, pc_bytes;
	struct gcm_sg_walk gw_in, gw_out;
	u8 tag[GHASH_DIGEST_SIZE];

	struct {
		u32 _[3];			/* reserved */
		u32 cv;				/* Counter Value */
		u8 t[GHASH_DIGEST_SIZE];	/* Tag */
		u8 h[AES_BLOCK_SIZE];		/* Hash-subkey */
		u64 taadl;			/* Total AAD Length */
		u64 tpcl;			/* Total Plain-/Cipher-text Length */
		u8 j0[GHASH_BLOCK_SIZE];	/* initial counter value */
		u8 k[AES_MAX_KEY_SIZE];		/* Key */
	} param;

	/*
	 * encrypt
	 *   req->src: aad||plaintext
	 *   req->dst: aad||ciphertext||tag
	 * decrypt
	 *   req->src: aad||ciphertext||tag
	 *   req->dst: aad||plaintext, return 0 or -EBADMSG
	 * aad, plaintext and ciphertext may be empty.
	 */
	if (flags & CPACF_DECRYPT)
		pclen -= taglen;
	len = aadlen + pclen;

	memset(&param, 0, sizeof(param));
	param.cv = 1;
	param.taadl = aadlen * 8;
	param.tpcl = pclen * 8;
	memcpy(param.j0, req->iv, ivsize);
	*(u32 *)(param.j0 + ivsize) = 1;
	memcpy(param.k, ctx->key, ctx->key_len);

	gcm_walk_start(&gw_in, req->src, len);
	gcm_walk_start(&gw_out, req->dst, len);

	do {
		min_bytes = min_t(unsigned int,
				  aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
		in_bytes = gcm_in_walk_go(&gw_in, min_bytes);
		out_bytes = gcm_out_walk_go(&gw_out, min_bytes);
		bytes = min(in_bytes, out_bytes);

		if (aadlen + pclen <= bytes) {
			aad_bytes = aadlen;
			pc_bytes = pclen;
			flags |= CPACF_KMA_LAAD | CPACF_KMA_LPC;
		} else {
			if (aadlen <= bytes) {
				aad_bytes = aadlen;
				pc_bytes = (bytes - aadlen) &
					   ~(AES_BLOCK_SIZE - 1);
				flags |= CPACF_KMA_LAAD;
			} else {
				aad_bytes = bytes & ~(AES_BLOCK_SIZE - 1);
				pc_bytes = 0;
			}
		}

		if (aad_bytes > 0)
			memcpy(gw_out.ptr, gw_in.ptr, aad_bytes);

		cpacf_kma(ctx->fc | flags, &param,
			  gw_out.ptr + aad_bytes,
			  gw_in.ptr + aad_bytes, pc_bytes,
			  gw_in.ptr, aad_bytes);

		n = aad_bytes + pc_bytes;
		if (gcm_in_walk_done(&gw_in, n) != n)
			return -ENOMEM;
		if (gcm_out_walk_done(&gw_out, n) != n)
			return -ENOMEM;
		aadlen -= aad_bytes;
		pclen -= pc_bytes;
	} while (aadlen + pclen > 0);

	if (flags & CPACF_DECRYPT) {
		scatterwalk_map_and_copy(tag, req->src, len, taglen, 0);
		if (crypto_memneq(tag, param.t, taglen))
			ret = -EBADMSG;
	} else
		scatterwalk_map_and_copy(param.t, req->dst, len, taglen, 1);

	memzero_explicit(&param, sizeof(param));
	return ret;
}

static int gcm_aes_encrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_ENCRYPT);
}

static int gcm_aes_decrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_DECRYPT);
}

static struct aead_alg gcm_aes_aead = {
	.setkey			= gcm_aes_setkey,
	.setauthsize		= gcm_aes_setauthsize,
	.encrypt		= gcm_aes_encrypt,
	.decrypt		= gcm_aes_decrypt,

	.ivsize			= GHASH_BLOCK_SIZE - sizeof(u32),
	.maxauthsize		= GHASH_DIGEST_SIZE,
	.chunksize		= AES_BLOCK_SIZE,

	.base			= {
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct s390_aes_ctx),
		.cra_priority		= 900,
		.cra_name		= "gcm(aes)",
		.cra_driver_name	= "gcm-aes-s390",
		.cra_module		= THIS_MODULE,
	},
};

static struct crypto_alg *aes_s390_alg;
static struct skcipher_alg *aes_s390_skcipher_algs[5];
static int aes_s390_skciphers_num;
static struct aead_alg *aes_s390_aead_alg;

static int aes_s390_register_skcipher(struct skcipher_alg *alg)
{
	int ret;

	ret = crypto_register_skcipher(alg);
	if (!ret)
		aes_s390_skcipher_algs[aes_s390_skciphers_num++] = alg;
	return ret;
}

static void aes_s390_fini(void)
{
	if (aes_s390_alg)
		crypto_unregister_alg(aes_s390_alg);
	while (aes_s390_skciphers_num--)
		crypto_unregister_skcipher(aes_s390_skcipher_algs[aes_s390_skciphers_num]);
	if (ctrblk)
		free_page((unsigned long) ctrblk);

	if (aes_s390_aead_alg)
		crypto_unregister_aead(aes_s390_aead_alg);
}

static int __init aes_s390_init(void)
{
	int ret;

	/* Query available functions for KM, KMC, KMCTR and KMA */
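	/*
	 * The query results decide which algorithms are registered below;
	 * a function code missing for a particular key length is handled
	 * at runtime via the allocated fallback tfm.
	 */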
	cpacf_query(CPACF_KM, &km_functions);
	cpacf_query(CPACF_KMC, &kmc_functions);
	cpacf_query(CPACF_KMCTR, &kmctr_functions);
	cpacf_query(CPACF_KMA, &kma_functions);

	if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
		ret = crypto_register_alg(&aes_alg);
		if (ret)
			goto out_err;
		aes_s390_alg = &aes_alg;
		ret = aes_s390_register_skcipher(&ecb_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
		ret = aes_s390_register_skcipher(&cbc_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128_FULL) ||
	    cpacf_test_func(&km_functions, CPACF_KM_XTS_256_FULL)) {
		ret = aes_s390_register_skcipher(&fullxts_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
		ret = aes_s390_register_skcipher(&xts_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_128) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_192) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_256)) {
		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
		if (!ctrblk) {
			ret = -ENOMEM;
			goto out_err;
		}
		ret = aes_s390_register_skcipher(&ctr_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_128) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_192) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_256)) {
		ret = crypto_register_aead(&gcm_aes_aead);
		if (ret)
			goto out_err;
		aes_s390_aead_alg = &gcm_aes_aead;
	}

	return 0;
out_err:
	aes_s390_fini();
	return ret;
}

module_cpu_feature_match(S390_CPU_FEATURE_MSA, aes_s390_init);
module_exit(aes_s390_fini);

MODULE_ALIAS_CRYPTO("aes-all");

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS("CRYPTO_INTERNAL");