// SPDX-License-Identifier: GPL-2.0+
/*
 * Cryptographic API.
 *
 * s390 implementation of the AES Cipher Algorithm.
 *
 * s390 Version:
 *   Copyright IBM Corp. 2005, 2017
 *   Author(s): Jan Glauber (jang@de.ibm.com)
 *		Sebastian Siewior <sebastian@breakpoint.cc> SW-Fallback
 *		Patrick Steuer <patrick.steuer@de.ibm.com>
 *		Harald Freudenberger <freude@de.ibm.com>
 *
 * Derived from "crypto/aes_generic.c"
 */

#define pr_fmt(fmt)	"aes_s390: " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/ghash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fips.h>
#include <linux/string.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>

static u8 *ctrblk;
static DEFINE_MUTEX(ctrblk_lock);

static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
		    kma_functions;

struct s390_aes_ctx {
	u8 key[AES_MAX_KEY_SIZE];
	int key_len;
	unsigned long fc;
	union {
		struct crypto_skcipher *skcipher;
		struct crypto_cipher *cip;
	} fallback;
};

struct s390_xts_ctx {
	union {
		u8 keys[64];
		struct {
			u8 key[32];
			u8 pcc_key[32];
		};
	};
	int key_len;
	unsigned long fc;
	struct crypto_skcipher *fallback;
};

struct gcm_sg_walk {
	struct scatter_walk walk;
	unsigned int walk_bytes;
	unsigned int walk_bytes_remain;
	u8 buf[AES_BLOCK_SIZE];
	unsigned int buf_bytes;
	u8 *ptr;
	unsigned int nbytes;
};

static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
			       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
					       CRYPTO_TFM_REQ_MASK);

	return crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_cip(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}
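
/*
 * Editor's note (descriptive comment, not from the original source): all
 * setkey functions in this file follow the same pattern as aes_set_key()
 * above. The key length is mapped to a CPACF function code, the code is
 * checked against the function mask queried at module init, and the
 * result is remembered in the context. If the machine does not provide
 * the function, fc stays 0 and requests are handled by the software
 * fallback tfm allocated in the init callback.
 */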
static void crypto_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
}

static void crypto_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc | CPACF_DECRYPT,
		 &sctx->key, out, in, AES_BLOCK_SIZE);
}

static int fallback_init_cip(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.cip = crypto_alloc_cipher(name, 0,
						 CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(sctx->fallback.cip)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.cip);
	}

	return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(sctx->fallback.cip);
	sctx->fallback.cip = NULL;
}

static struct crypto_alg aes_alg = {
	.cra_name = "aes",
	.cra_driver_name = "aes-s390",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER |
		     CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct s390_aes_ctx),
	.cra_module = THIS_MODULE,
	.cra_init = fallback_init_cip,
	.cra_exit = fallback_exit_cip,
	.cra_u = {
		.cipher = {
			.cia_min_keysize = AES_MIN_KEY_SIZE,
			.cia_max_keysize = AES_MAX_KEY_SIZE,
			.cia_setkey = aes_set_key,
			.cia_encrypt = crypto_aes_encrypt,
			.cia_decrypt = crypto_aes_decrypt,
		}
	}
};

static int setkey_fallback_skcipher(struct crypto_skcipher *tfm, const u8 *key,
				    unsigned int len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_clear_flags(sctx->fallback.skcipher,
				    CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(sctx->fallback.skcipher,
				  crypto_skcipher_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(sctx->fallback.skcipher, key, len);
}

static int fallback_skcipher_crypt(struct s390_aes_ctx *sctx,
				   struct skcipher_request *req,
				   unsigned long modifier)
{
	struct skcipher_request *subreq = skcipher_request_ctx(req);

	*subreq = *req;
	skcipher_request_set_tfm(subreq, sctx->fallback.skcipher);
	return (modifier & CPACF_DECRYPT) ?
		crypto_skcipher_decrypt(subreq) :
		crypto_skcipher_encrypt(subreq);
}

static int ecb_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}
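
/*
 * Editor's note (descriptive comment, not from the original source): when
 * no CPACF function code was selected at setkey time, the skcipher modes
 * below forward the request to the software fallback through the
 * sub-request embedded in the request context; the extra space for that
 * sub-request is reserved in fallback_init_skcipher().
 */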
static int ecb_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes, n;
	int ret;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, modifier);

	ret = skcipher_walk_virt(&walk, req, false);
	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(sctx->fc | modifier, sctx->key,
			 walk.dst.virt.addr, walk.src.virt.addr, n);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	return ret;
}

static int ecb_aes_encrypt(struct skcipher_request *req)
{
	return ecb_aes_crypt(req, 0);
}

static int ecb_aes_decrypt(struct skcipher_request *req)
{
	return ecb_aes_crypt(req, CPACF_DECRYPT);
}

static int fallback_init_skcipher(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	sctx->fallback.skcipher = crypto_alloc_skcipher(name, 0,
				CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);

	if (IS_ERR(sctx->fallback.skcipher)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.skcipher);
	}

	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
				    crypto_skcipher_reqsize(sctx->fallback.skcipher));
	return 0;
}

static void fallback_exit_skcipher(struct crypto_skcipher *tfm)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(sctx->fallback.skcipher);
}

static struct skcipher_alg ecb_aes_alg = {
	.base.cra_name = "ecb(aes)",
	.base.cra_driver_name = "ecb-aes-s390",
	.base.cra_priority = 401,	/* combo: aes + ecb + 1 */
	.base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct s390_aes_ctx),
	.base.cra_module = THIS_MODULE,
	.init = fallback_init_skcipher,
	.exit = fallback_exit_skcipher,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.setkey = ecb_aes_set_key,
	.encrypt = ecb_aes_encrypt,
	.decrypt = ecb_aes_decrypt,
};

static int cbc_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMC_AES_128 :
	     (key_len == 24) ? CPACF_KMC_AES_192 :
	     (key_len == 32) ? CPACF_KMC_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}
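
/*
 * Editor's note (descriptive comment, not from the original source): the
 * parameter block built in cbc_aes_crypt() consists of the chaining value
 * (IV) followed by the key. The KMC operation updates the chaining value
 * in the parameter block, so it is copied back to walk.iv after each
 * chunk to keep the skcipher walk state consistent.
 */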
static int cbc_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes, n;
	int ret;
	struct {
		u8 iv[AES_BLOCK_SIZE];
		u8 key[AES_MAX_KEY_SIZE];
	} param;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, modifier);

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;
	memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
	memcpy(param.key, sctx->key, sctx->key_len);
	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_kmc(sctx->fc | modifier, &param,
			  walk.dst.virt.addr, walk.src.virt.addr, n);
		memcpy(walk.iv, param.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	memzero_explicit(&param, sizeof(param));
	return ret;
}

static int cbc_aes_encrypt(struct skcipher_request *req)
{
	return cbc_aes_crypt(req, 0);
}

static int cbc_aes_decrypt(struct skcipher_request *req)
{
	return cbc_aes_crypt(req, CPACF_DECRYPT);
}

static struct skcipher_alg cbc_aes_alg = {
	.base.cra_name = "cbc(aes)",
	.base.cra_driver_name = "cbc-aes-s390",
	.base.cra_priority = 402,	/* ecb-aes-s390 + 1 */
	.base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct s390_aes_ctx),
	.base.cra_module = THIS_MODULE,
	.init = fallback_init_skcipher,
	.exit = fallback_exit_skcipher,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
	.setkey = cbc_aes_set_key,
	.encrypt = cbc_aes_encrypt,
	.decrypt = cbc_aes_decrypt,
};

static int xts_fallback_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       unsigned int len)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(xts_ctx->fallback,
				  crypto_skcipher_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(xts_ctx->fallback, key, len);
}

static int xts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;
	int err;

	err = xts_fallback_setkey(tfm, in_key, key_len);
	if (err)
		return err;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 32) ? CPACF_KM_XTS_128 :
	     (key_len == 64) ? CPACF_KM_XTS_256 : 0;

	/* Check if the function code is available */
	xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!xts_ctx->fc)
		return 0;

	/* Split the XTS key into the two subkeys */
	key_len = key_len / 2;
	xts_ctx->key_len = key_len;
	memcpy(xts_ctx->key, in_key, key_len);
	memcpy(xts_ctx->pcc_key, in_key + key_len, key_len);
	return 0;
}
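
/*
 * Editor's note (descriptive comment, not from the original source): the
 * fallback tfm is always keyed in xts_aes_set_key() because requests
 * whose length is not a multiple of AES_BLOCK_SIZE (ciphertext stealing)
 * are forwarded to it in xts_aes_crypt(). The offset = key_len & 0x10
 * trick below places 16-byte subkeys in the upper half of the 32-byte key
 * fields and passes the PCC/KM parameter blocks at that offset, which
 * matches the shorter parameter-block layout of the 128-bit XTS function
 * codes.
 */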
static int xts_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int offset, nbytes, n;
	int ret;
	struct {
		u8 key[32];
		u8 tweak[16];
		u8 block[16];
		u8 bit[16];
		u8 xts[16];
	} pcc_param;
	struct {
		u8 key[32];
		u8 init[16];
	} xts_param;

	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	if (unlikely(!xts_ctx->fc || (req->cryptlen % AES_BLOCK_SIZE) != 0)) {
		struct skcipher_request *subreq = skcipher_request_ctx(req);

		*subreq = *req;
		skcipher_request_set_tfm(subreq, xts_ctx->fallback);
		return (modifier & CPACF_DECRYPT) ?
			crypto_skcipher_decrypt(subreq) :
			crypto_skcipher_encrypt(subreq);
	}

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;
	offset = xts_ctx->key_len & 0x10;
	memset(pcc_param.block, 0, sizeof(pcc_param.block));
	memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
	memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
	memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak));
	memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
	cpacf_pcc(xts_ctx->fc, pcc_param.key + offset);

	memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
	memcpy(xts_param.init, pcc_param.xts, 16);

	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(xts_ctx->fc | modifier, xts_param.key + offset,
			 walk.dst.virt.addr, walk.src.virt.addr, n);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	memzero_explicit(&pcc_param, sizeof(pcc_param));
	memzero_explicit(&xts_param, sizeof(xts_param));
	return ret;
}

static int xts_aes_encrypt(struct skcipher_request *req)
{
	return xts_aes_crypt(req, 0);
}

static int xts_aes_decrypt(struct skcipher_request *req)
{
	return xts_aes_crypt(req, CPACF_DECRYPT);
}

static int xts_fallback_init(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	xts_ctx->fallback = crypto_alloc_skcipher(name, 0,
				CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);

	if (IS_ERR(xts_ctx->fallback)) {
		pr_err("Allocating XTS fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(xts_ctx->fallback);
	}
	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
				    crypto_skcipher_reqsize(xts_ctx->fallback));
	return 0;
}

static void xts_fallback_exit(struct crypto_skcipher *tfm)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(xts_ctx->fallback);
}

static struct skcipher_alg xts_aes_alg = {
	.base.cra_name = "xts(aes)",
	.base.cra_driver_name = "xts-aes-s390",
	.base.cra_priority = 402,	/* ecb-aes-s390 + 1 */
	.base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct s390_xts_ctx),
	.base.cra_module = THIS_MODULE,
	.init = xts_fallback_init,
	.exit = xts_fallback_exit,
	.min_keysize = 2 * AES_MIN_KEY_SIZE,
	.max_keysize = 2 * AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
	.setkey = xts_aes_set_key,
	.encrypt = xts_aes_encrypt,
	.decrypt = xts_aes_decrypt,
};
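
/*
 * Editor's note (descriptive comment, not from the original source):
 * "full" XTS uses the CPACF_KM_XTS_{128,256}_FULL function codes, which
 * take the complete double key together with the tweak and a block
 * sequence ("nap") value, so no separate PCC pre-computation of the
 * initial tweak is needed here. The algorithm is registered under the
 * same "xts(aes)" name but with a higher priority (403 vs. 402), so it is
 * preferred where the facility is available.
 */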
static int fullxts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			       unsigned int key_len)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;
	int err;

	err = xts_fallback_setkey(tfm, in_key, key_len);
	if (err)
		return err;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 32) ? CPACF_KM_XTS_128_FULL :
	     (key_len == 64) ? CPACF_KM_XTS_256_FULL : 0;

	/* Check if the function code is available */
	xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!xts_ctx->fc)
		return 0;

	/* Store double-key */
	memcpy(xts_ctx->keys, in_key, key_len);
	xts_ctx->key_len = key_len;
	return 0;
}

static int fullxts_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
	unsigned int offset, nbytes, n;
	struct skcipher_walk walk;
	int ret;
	struct {
		__u8 key[64];
		__u8 tweak[16];
		__u8 nap[16];
	} fxts_param = {
		.nap = {0},
	};

	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	if (unlikely(!xts_ctx->fc || (req->cryptlen % AES_BLOCK_SIZE) != 0)) {
		struct skcipher_request *subreq = skcipher_request_ctx(req);

		*subreq = *req;
		skcipher_request_set_tfm(subreq, xts_ctx->fallback);
		return (modifier & CPACF_DECRYPT) ?
			crypto_skcipher_decrypt(subreq) :
			crypto_skcipher_encrypt(subreq);
	}

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;

	offset = xts_ctx->key_len & 0x20;
	memcpy(fxts_param.key + offset, xts_ctx->keys, xts_ctx->key_len);
	memcpy(fxts_param.tweak, req->iv, AES_BLOCK_SIZE);
	fxts_param.nap[0] = 0x01; /* initial alpha power (1, little-endian) */

	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(xts_ctx->fc | modifier, fxts_param.key + offset,
			 walk.dst.virt.addr, walk.src.virt.addr, n);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	memzero_explicit(&fxts_param, sizeof(fxts_param));
	return ret;
}

static int fullxts_aes_encrypt(struct skcipher_request *req)
{
	return fullxts_aes_crypt(req, 0);
}

static int fullxts_aes_decrypt(struct skcipher_request *req)
{
	return fullxts_aes_crypt(req, CPACF_DECRYPT);
}

static struct skcipher_alg fullxts_aes_alg = {
	.base.cra_name = "xts(aes)",
	.base.cra_driver_name = "full-xts-aes-s390",
	.base.cra_priority = 403,	/* aes-xts-s390 + 1 */
	.base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct s390_xts_ctx),
	.base.cra_module = THIS_MODULE,
	.init = xts_fallback_init,
	.exit = xts_fallback_exit,
	.min_keysize = 2 * AES_MIN_KEY_SIZE,
	.max_keysize = 2 * AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
	.setkey = fullxts_aes_set_key,
	.encrypt = fullxts_aes_encrypt,
	.decrypt = fullxts_aes_decrypt,
};

static int ctr_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMCTR_AES_128 :
	     (key_len == 24) ? CPACF_KMCTR_AES_192 :
	     (key_len == 32) ? CPACF_KMCTR_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}
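
/*
 * Editor's note (descriptive comment, not from the original source): CTR
 * mode uses the single preallocated page (ctrblk), shared by all tfms, to
 * expand the counter into consecutive counter blocks so that one KMCTR
 * invocation can process up to PAGE_SIZE bytes. The page is protected by
 * ctrblk_lock; if the trylock in ctr_aes_crypt() fails, processing simply
 * continues one block at a time using the IV from the walk.
 */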
static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
	unsigned int i, n;

	/* only use complete blocks, max. PAGE_SIZE */
	memcpy(ctrptr, iv, AES_BLOCK_SIZE);
	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
	for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
		memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
		crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
		ctrptr += AES_BLOCK_SIZE;
	}
	return n;
}

static int ctr_aes_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	u8 buf[AES_BLOCK_SIZE], *ctrptr;
	struct skcipher_walk walk;
	unsigned int n, nbytes;
	int ret, locked;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, 0);

	locked = mutex_trylock(&ctrblk_lock);

	ret = skcipher_walk_virt(&walk, req, false);
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		n = AES_BLOCK_SIZE;

		if (nbytes >= 2*AES_BLOCK_SIZE && locked)
			n = __ctrblk_init(ctrblk, walk.iv, nbytes);
		ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv;
		cpacf_kmctr(sctx->fc, sctx->key, walk.dst.virt.addr,
			    walk.src.virt.addr, n, ctrptr);
		if (ctrptr == ctrblk)
			memcpy(walk.iv, ctrptr + n - AES_BLOCK_SIZE,
			       AES_BLOCK_SIZE);
		crypto_inc(walk.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	if (locked)
		mutex_unlock(&ctrblk_lock);
	/*
	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
	 */
	if (nbytes) {
		memset(buf, 0, AES_BLOCK_SIZE);
		memcpy(buf, walk.src.virt.addr, nbytes);
		cpacf_kmctr(sctx->fc, sctx->key, buf, buf,
			    AES_BLOCK_SIZE, walk.iv);
		memcpy(walk.dst.virt.addr, buf, nbytes);
		crypto_inc(walk.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, 0);
	}

	return ret;
}
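
/*
 * Editor's note (descriptive comment, not from the original source): CTR
 * behaves as a stream cipher from the API's point of view, hence
 * cra_blocksize = 1 and chunksize = AES_BLOCK_SIZE below.
 *
 * Illustrative only (not part of this driver): kernel users reach this
 * implementation through the generic crypto API by algorithm name, e.g.
 *
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("ctr(aes)", 0, 0);
 *
 * and the s390 variant is preferred over software implementations because
 * of its higher cra_priority.
 */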
static struct skcipher_alg ctr_aes_alg = {
	.base.cra_name = "ctr(aes)",
	.base.cra_driver_name = "ctr-aes-s390",
	.base.cra_priority = 402,	/* ecb-aes-s390 + 1 */
	.base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize = 1,
	.base.cra_ctxsize = sizeof(struct s390_aes_ctx),
	.base.cra_module = THIS_MODULE,
	.init = fallback_init_skcipher,
	.exit = fallback_exit_skcipher,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
	.setkey = ctr_aes_set_key,
	.encrypt = ctr_aes_crypt,
	.decrypt = ctr_aes_crypt,
	.chunksize = AES_BLOCK_SIZE,
};

static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->fc = CPACF_KMA_GCM_AES_128;
		break;
	case AES_KEYSIZE_192:
		ctx->fc = CPACF_KMA_GCM_AES_192;
		break;
	case AES_KEYSIZE_256:
		ctx->fc = CPACF_KMA_GCM_AES_256;
		break;
	default:
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->key_len = keylen;
	return 0;
}

static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 8:
	case 12:
	case 13:
	case 14:
	case 15:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void gcm_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
			   unsigned int len)
{
	memset(gw, 0, sizeof(*gw));
	gw->walk_bytes_remain = len;
	scatterwalk_start(&gw->walk, sg);
}

static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw)
{
	if (gw->walk_bytes_remain == 0)
		return 0;
	gw->walk_bytes = scatterwalk_next(&gw->walk, gw->walk_bytes_remain);
	return gw->walk_bytes;
}

static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
					     unsigned int nbytes, bool out)
{
	gw->walk_bytes_remain -= nbytes;
	if (out)
		scatterwalk_done_dst(&gw->walk, nbytes);
	else
		scatterwalk_done_src(&gw->walk, nbytes);
}

static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
	int n;

	if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
		gw->ptr = gw->buf;
		gw->nbytes = gw->buf_bytes;
		goto out;
	}

	if (gw->walk_bytes_remain == 0) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!_gcm_sg_clamp_and_map(gw)) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
		gw->ptr = gw->walk.addr;
		gw->nbytes = gw->walk_bytes;
		goto out;
	}

	while (1) {
		n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
		memcpy(gw->buf + gw->buf_bytes, gw->walk.addr, n);
		gw->buf_bytes += n;
		_gcm_sg_unmap_and_advance(gw, n, false);
		if (gw->buf_bytes >= minbytesneeded) {
			gw->ptr = gw->buf;
			gw->nbytes = gw->buf_bytes;
			goto out;
		}
		if (!_gcm_sg_clamp_and_map(gw)) {
			gw->ptr = NULL;
			gw->nbytes = 0;
			goto out;
		}
	}

out:
	return gw->nbytes;
}
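
/*
 * Editor's note (descriptive comment, not from the original source): the
 * output walk mirrors the input walk above. Whenever at least
 * minbytesneeded contiguous bytes can be mapped from the scatterlist they
 * are used in place; otherwise the small per-walk bounce buffer (one AES
 * block) is handed out and its contents are copied back to the
 * scatterlist in gcm_out_walk_done().
 */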
static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
	if (gw->walk_bytes_remain == 0) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!_gcm_sg_clamp_and_map(gw)) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (gw->walk_bytes >= minbytesneeded) {
		gw->ptr = gw->walk.addr;
		gw->nbytes = gw->walk_bytes;
		goto out;
	}

	scatterwalk_unmap(&gw->walk);

	gw->ptr = gw->buf;
	gw->nbytes = sizeof(gw->buf);

out:
	return gw->nbytes;
}

static int gcm_in_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
	if (gw->ptr == NULL)
		return 0;

	if (gw->ptr == gw->buf) {
		int n = gw->buf_bytes - bytesdone;

		if (n > 0) {
			memmove(gw->buf, gw->buf + bytesdone, n);
			gw->buf_bytes = n;
		} else
			gw->buf_bytes = 0;
	} else
		_gcm_sg_unmap_and_advance(gw, bytesdone, false);

	return bytesdone;
}

static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
	int i, n;

	if (gw->ptr == NULL)
		return 0;

	if (gw->ptr == gw->buf) {
		for (i = 0; i < bytesdone; i += n) {
			if (!_gcm_sg_clamp_and_map(gw))
				return i;
			n = min(gw->walk_bytes, bytesdone - i);
			memcpy(gw->walk.addr, gw->buf + i, n);
			_gcm_sg_unmap_and_advance(gw, n, true);
		}
	} else
		_gcm_sg_unmap_and_advance(gw, bytesdone, true);

	return bytesdone;
}
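
/*
 * Editor's note (descriptive comment, not from the original source):
 * gcm_aes_crypt() feeds AAD and plain-/ciphertext to the KMA operation in
 * chunks dictated by the two scatterlist walks. The CPACF_KMA_LAAD and
 * CPACF_KMA_LPC flags are set for the chunks that contain the final AAD
 * and the final plain-/ciphertext bytes respectively, so the operation
 * can finalize the GHASH and tag computation.
 */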
static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int ivsize = crypto_aead_ivsize(tfm);
	unsigned int taglen = crypto_aead_authsize(tfm);
	unsigned int aadlen = req->assoclen;
	unsigned int pclen = req->cryptlen;
	int ret = 0;

	unsigned int n, len, in_bytes, out_bytes,
		     min_bytes, bytes, aad_bytes, pc_bytes;
	struct gcm_sg_walk gw_in, gw_out;
	u8 tag[GHASH_DIGEST_SIZE];

	struct {
		u32 _[3];		/* reserved */
		u32 cv;			/* Counter Value */
		u8 t[GHASH_DIGEST_SIZE];/* Tag */
		u8 h[AES_BLOCK_SIZE];	/* Hash-subkey */
		u64 taadl;		/* Total AAD Length */
		u64 tpcl;		/* Total Plain-/Cipher-text Length */
		u8 j0[GHASH_BLOCK_SIZE];/* initial counter value */
		u8 k[AES_MAX_KEY_SIZE];	/* Key */
	} param;

	/*
	 * encrypt
	 *   req->src: aad||plaintext
	 *   req->dst: aad||ciphertext||tag
	 * decrypt
	 *   req->src: aad||ciphertext||tag
	 *   req->dst: aad||plaintext, return 0 or -EBADMSG
	 * aad, plaintext and ciphertext may be empty.
	 */
	if (flags & CPACF_DECRYPT)
		pclen -= taglen;
	len = aadlen + pclen;

	memset(&param, 0, sizeof(param));
	param.cv = 1;
	param.taadl = aadlen * 8;
	param.tpcl = pclen * 8;
	memcpy(param.j0, req->iv, ivsize);
	*(u32 *)(param.j0 + ivsize) = 1;
	memcpy(param.k, ctx->key, ctx->key_len);

	gcm_walk_start(&gw_in, req->src, len);
	gcm_walk_start(&gw_out, req->dst, len);

	do {
		min_bytes = min_t(unsigned int,
				  aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
		in_bytes = gcm_in_walk_go(&gw_in, min_bytes);
		out_bytes = gcm_out_walk_go(&gw_out, min_bytes);
		bytes = min(in_bytes, out_bytes);

		if (aadlen + pclen <= bytes) {
			aad_bytes = aadlen;
			pc_bytes = pclen;
			flags |= CPACF_KMA_LAAD | CPACF_KMA_LPC;
		} else {
			if (aadlen <= bytes) {
				aad_bytes = aadlen;
				pc_bytes = (bytes - aadlen) &
					   ~(AES_BLOCK_SIZE - 1);
				flags |= CPACF_KMA_LAAD;
			} else {
				aad_bytes = bytes & ~(AES_BLOCK_SIZE - 1);
				pc_bytes = 0;
			}
		}

		if (aad_bytes > 0)
			memcpy(gw_out.ptr, gw_in.ptr, aad_bytes);

		cpacf_kma(ctx->fc | flags, &param,
			  gw_out.ptr + aad_bytes,
			  gw_in.ptr + aad_bytes, pc_bytes,
			  gw_in.ptr, aad_bytes);

		n = aad_bytes + pc_bytes;
		if (gcm_in_walk_done(&gw_in, n) != n)
			return -ENOMEM;
		if (gcm_out_walk_done(&gw_out, n) != n)
			return -ENOMEM;
		aadlen -= aad_bytes;
		pclen -= pc_bytes;
	} while (aadlen + pclen > 0);

	if (flags & CPACF_DECRYPT) {
		scatterwalk_map_and_copy(tag, req->src, len, taglen, 0);
		if (crypto_memneq(tag, param.t, taglen))
			ret = -EBADMSG;
	} else
		scatterwalk_map_and_copy(param.t, req->dst, len, taglen, 1);

	memzero_explicit(&param, sizeof(param));
	return ret;
}

static int gcm_aes_encrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_ENCRYPT);
}

static int gcm_aes_decrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_DECRYPT);
}

static struct aead_alg gcm_aes_aead = {
	.setkey = gcm_aes_setkey,
	.setauthsize = gcm_aes_setauthsize,
	.encrypt = gcm_aes_encrypt,
	.decrypt = gcm_aes_decrypt,

	.ivsize = GHASH_BLOCK_SIZE - sizeof(u32),
	.maxauthsize = GHASH_DIGEST_SIZE,
	.chunksize = AES_BLOCK_SIZE,

	.base = {
		.cra_blocksize = 1,
		.cra_ctxsize = sizeof(struct s390_aes_ctx),
		.cra_priority = 900,
		.cra_name = "gcm(aes)",
		.cra_driver_name = "gcm-aes-s390",
		.cra_module = THIS_MODULE,
	},
};

static struct crypto_alg *aes_s390_alg;
static struct skcipher_alg *aes_s390_skcipher_algs[5];
static int aes_s390_skciphers_num;
static struct aead_alg *aes_s390_aead_alg;

static int aes_s390_register_skcipher(struct skcipher_alg *alg)
{
	int ret;

	ret = crypto_register_skcipher(alg);
	if (!ret)
		aes_s390_skcipher_algs[aes_s390_skciphers_num++] = alg;
	return ret;
}

static void aes_s390_fini(void)
{
	if (aes_s390_alg)
		crypto_unregister_alg(aes_s390_alg);
	while (aes_s390_skciphers_num--)
		crypto_unregister_skcipher(aes_s390_skcipher_algs[aes_s390_skciphers_num]);
	if (ctrblk)
		free_page((unsigned long) ctrblk);

	if (aes_s390_aead_alg)
		crypto_unregister_aead(aes_s390_aead_alg);
}
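
/*
 * Editor's note (descriptive comment, not from the original source):
 * module init registers each mode only if the CPACF query reports at
 * least one matching function code for it; otherwise the generic software
 * implementations remain the only providers for that algorithm name. Any
 * registration failure unwinds everything via aes_s390_fini().
 */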
static int __init aes_s390_init(void)
{
	int ret;

	/* Query available functions for KM, KMC, KMCTR and KMA */
	cpacf_query(CPACF_KM, &km_functions);
	cpacf_query(CPACF_KMC, &kmc_functions);
	cpacf_query(CPACF_KMCTR, &kmctr_functions);
	cpacf_query(CPACF_KMA, &kma_functions);

	if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
		ret = crypto_register_alg(&aes_alg);
		if (ret)
			goto out_err;
		aes_s390_alg = &aes_alg;
		ret = aes_s390_register_skcipher(&ecb_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
		ret = aes_s390_register_skcipher(&cbc_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128_FULL) ||
	    cpacf_test_func(&km_functions, CPACF_KM_XTS_256_FULL)) {
		ret = aes_s390_register_skcipher(&fullxts_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
		ret = aes_s390_register_skcipher(&xts_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_128) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_192) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_256)) {
		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
		if (!ctrblk) {
			ret = -ENOMEM;
			goto out_err;
		}
		ret = aes_s390_register_skcipher(&ctr_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_128) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_192) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_256)) {
		ret = crypto_register_aead(&gcm_aes_aead);
		if (ret)
			goto out_err;
		aes_s390_aead_alg = &gcm_aes_aead;
	}

	return 0;
out_err:
	aes_s390_fini();
	return ret;
}

module_cpu_feature_match(S390_CPU_FEATURE_MSA, aes_s390_init);
module_exit(aes_s390_fini);

MODULE_ALIAS_CRYPTO("aes-all");

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS("CRYPTO_INTERNAL");