// SPDX-License-Identifier: GPL-2.0+
/*
 * Cryptographic API.
 *
 * s390 implementation of the AES Cipher Algorithm.
 *
 * s390 Version:
 *   Copyright IBM Corp. 2005, 2017
 *   Author(s): Jan Glauber (jang@de.ibm.com)
 *		Sebastian Siewior <sebastian@breakpoint.cc> SW-Fallback
 *		Patrick Steuer <patrick.steuer@de.ibm.com>
 *		Harald Freudenberger <freude@de.ibm.com>
 *
 * Derived from "crypto/aes_generic.c"
 */

#define KMSG_COMPONENT "aes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/ghash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fips.h>
#include <linux/string.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>

static u8 *ctrblk;
static DEFINE_MUTEX(ctrblk_lock);

static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
		    kma_functions;

struct s390_aes_ctx {
	u8 key[AES_MAX_KEY_SIZE];
	int key_len;
	unsigned long fc;
	union {
		struct crypto_skcipher *skcipher;
		struct crypto_cipher *cip;
	} fallback;
};

struct s390_xts_ctx {
	u8 key[32];
	u8 pcc_key[32];
	int key_len;
	unsigned long fc;
	struct crypto_skcipher *fallback;
};

struct gcm_sg_walk {
	struct scatter_walk walk;
	unsigned int walk_bytes;
	u8 *walk_ptr;
	unsigned int walk_bytes_remain;
	u8 buf[AES_BLOCK_SIZE];
	unsigned int buf_bytes;
	u8 *ptr;
	unsigned int nbytes;
};

static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
			       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
					       CRYPTO_TFM_REQ_MASK);

	return crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ?
		   fc : 0;
	if (!sctx->fc)
		return setkey_fallback_cip(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

static void crypto_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
}

static void crypto_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc | CPACF_DECRYPT,
		 &sctx->key, out, in, AES_BLOCK_SIZE);
}

static int fallback_init_cip(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.cip = crypto_alloc_cipher(name, 0,
						 CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(sctx->fallback.cip)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.cip);
	}

	return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(sctx->fallback.cip);
	sctx->fallback.cip = NULL;
}

static struct crypto_alg aes_alg = {
	.cra_name = "aes",
	.cra_driver_name = "aes-s390",
	.cra_priority = 300,
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER |
		     CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct s390_aes_ctx),
	.cra_module = THIS_MODULE,
	.cra_init = fallback_init_cip,
	.cra_exit = fallback_exit_cip,
	.cra_u = {
		.cipher = {
			.cia_min_keysize = AES_MIN_KEY_SIZE,
			.cia_max_keysize = AES_MAX_KEY_SIZE,
			.cia_setkey = aes_set_key,
			.cia_encrypt = crypto_aes_encrypt,
			.cia_decrypt = crypto_aes_decrypt,
		}
	}
};

static int setkey_fallback_skcipher(struct crypto_skcipher *tfm, const u8 *key,
				    unsigned int len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_clear_flags(sctx->fallback.skcipher,
				    CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(sctx->fallback.skcipher,
				  crypto_skcipher_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(sctx->fallback.skcipher, key, len);
}

static int fallback_skcipher_crypt(struct s390_aes_ctx *sctx,
				   struct skcipher_request *req,
				   unsigned long modifier)
{
	struct skcipher_request *subreq = skcipher_request_ctx(req);

	*subreq = *req;
	skcipher_request_set_tfm(subreq, sctx->fallback.skcipher);
	return (modifier & CPACF_DECRYPT) ?
		crypto_skcipher_decrypt(subreq) :
		crypto_skcipher_encrypt(subreq);
}
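/*
 * Descriptive note: fallback_skcipher_crypt() places the fallback
 * subrequest in the request context reserved via
 * crypto_skcipher_set_reqsize() in fallback_init_skcipher(), so the
 * software fallback transform operates on the same src/dst scatterlists
 * and IV as the original request.
 */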
static int ecb_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ?
		   fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

static int ecb_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes, n;
	int ret;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, modifier);

	ret = skcipher_walk_virt(&walk, req, false);
	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(sctx->fc | modifier, sctx->key,
			 walk.dst.virt.addr, walk.src.virt.addr, n);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	return ret;
}

static int ecb_aes_encrypt(struct skcipher_request *req)
{
	return ecb_aes_crypt(req, 0);
}

static int ecb_aes_decrypt(struct skcipher_request *req)
{
	return ecb_aes_crypt(req, CPACF_DECRYPT);
}

static int fallback_init_skcipher(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	sctx->fallback.skcipher = crypto_alloc_skcipher(name, 0,
				CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);

	if (IS_ERR(sctx->fallback.skcipher)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.skcipher);
	}

	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
				    crypto_skcipher_reqsize(sctx->fallback.skcipher));
	return 0;
}

static void fallback_exit_skcipher(struct crypto_skcipher *tfm)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(sctx->fallback.skcipher);
}

static struct skcipher_alg ecb_aes_alg = {
	.base.cra_name = "ecb(aes)",
	.base.cra_driver_name = "ecb-aes-s390",
	.base.cra_priority = 401,	/* combo: aes + ecb + 1 */
	.base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct s390_aes_ctx),
	.base.cra_module = THIS_MODULE,
	.init = fallback_init_skcipher,
	.exit = fallback_exit_skcipher,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.setkey = ecb_aes_set_key,
	.encrypt = ecb_aes_encrypt,
	.decrypt = ecb_aes_decrypt,
};

static int cbc_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMC_AES_128 :
	     (key_len == 24) ? CPACF_KMC_AES_192 :
	     (key_len == 32) ? CPACF_KMC_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ?
		   fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

static int cbc_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes, n;
	int ret;
	struct {
		u8 iv[AES_BLOCK_SIZE];
		u8 key[AES_MAX_KEY_SIZE];
	} param;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, modifier);

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;
	memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
	memcpy(param.key, sctx->key, sctx->key_len);
	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_kmc(sctx->fc | modifier, &param,
			  walk.dst.virt.addr, walk.src.virt.addr, n);
		memcpy(walk.iv, param.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	memzero_explicit(&param, sizeof(param));
	return ret;
}

static int cbc_aes_encrypt(struct skcipher_request *req)
{
	return cbc_aes_crypt(req, 0);
}

static int cbc_aes_decrypt(struct skcipher_request *req)
{
	return cbc_aes_crypt(req, CPACF_DECRYPT);
}

static struct skcipher_alg cbc_aes_alg = {
	.base.cra_name = "cbc(aes)",
	.base.cra_driver_name = "cbc-aes-s390",
	.base.cra_priority = 402,	/* ecb-aes-s390 + 1 */
	.base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct s390_aes_ctx),
	.base.cra_module = THIS_MODULE,
	.init = fallback_init_skcipher,
	.exit = fallback_exit_skcipher,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
	.setkey = cbc_aes_set_key,
	.encrypt = cbc_aes_encrypt,
	.decrypt = cbc_aes_decrypt,
};

static int xts_fallback_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       unsigned int len)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(xts_ctx->fallback,
				  crypto_skcipher_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(xts_ctx->fallback, key, len);
}

static int xts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;
	int err;

	err = xts_fallback_setkey(tfm, in_key, key_len);
	if (err)
		return err;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 32) ? CPACF_KM_XTS_128 :
	     (key_len == 64) ? CPACF_KM_XTS_256 : 0;

	/* Check if the function code is available */
	xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ?
		      fc : 0;
	if (!xts_ctx->fc)
		return 0;

	/* Split the XTS key into the two subkeys */
	key_len = key_len / 2;
	xts_ctx->key_len = key_len;
	memcpy(xts_ctx->key, in_key, key_len);
	memcpy(xts_ctx->pcc_key, in_key + key_len, key_len);
	return 0;
}

static int xts_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int offset, nbytes, n;
	int ret;
	struct {
		u8 key[32];
		u8 tweak[16];
		u8 block[16];
		u8 bit[16];
		u8 xts[16];
	} pcc_param;
	struct {
		u8 key[32];
		u8 init[16];
	} xts_param;

	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	if (unlikely(!xts_ctx->fc || (req->cryptlen % AES_BLOCK_SIZE) != 0)) {
		struct skcipher_request *subreq = skcipher_request_ctx(req);

		*subreq = *req;
		skcipher_request_set_tfm(subreq, xts_ctx->fallback);
		return (modifier & CPACF_DECRYPT) ?
			crypto_skcipher_decrypt(subreq) :
			crypto_skcipher_encrypt(subreq);
	}

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;
	offset = xts_ctx->key_len & 0x10;
	memset(pcc_param.block, 0, sizeof(pcc_param.block));
	memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
	memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
	memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak));
	memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
	cpacf_pcc(xts_ctx->fc, pcc_param.key + offset);

	memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
	memcpy(xts_param.init, pcc_param.xts, 16);

	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(xts_ctx->fc | modifier, xts_param.key + offset,
			 walk.dst.virt.addr, walk.src.virt.addr, n);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	memzero_explicit(&pcc_param, sizeof(pcc_param));
	memzero_explicit(&xts_param, sizeof(xts_param));
	return ret;
}
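/*
 * Note on the offset arithmetic in xts_aes_crypt(): for XTS-AES-128 each
 * subkey is 16 bytes, so offset = key_len & 0x10 evaluates to 16 and the
 * subkey lands in the upper half of the 32-byte key field; for XTS-AES-256
 * the offset is 0. Passing "key + offset" to cpacf_pcc() and cpacf_km()
 * presumably lines the start of the parameter block up with the key size
 * expected by the selected CPACF function code.
 */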
static int xts_aes_encrypt(struct skcipher_request *req)
{
	return xts_aes_crypt(req, 0);
}

static int xts_aes_decrypt(struct skcipher_request *req)
{
	return xts_aes_crypt(req, CPACF_DECRYPT);
}

static int xts_fallback_init(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	xts_ctx->fallback = crypto_alloc_skcipher(name, 0,
				CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);

	if (IS_ERR(xts_ctx->fallback)) {
		pr_err("Allocating XTS fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(xts_ctx->fallback);
	}
	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
				    crypto_skcipher_reqsize(xts_ctx->fallback));
	return 0;
}

static void xts_fallback_exit(struct crypto_skcipher *tfm)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(xts_ctx->fallback);
}

static struct skcipher_alg xts_aes_alg = {
	.base.cra_name = "xts(aes)",
	.base.cra_driver_name = "xts-aes-s390",
	.base.cra_priority = 402,	/* ecb-aes-s390 + 1 */
	.base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct s390_xts_ctx),
	.base.cra_module = THIS_MODULE,
	.init = xts_fallback_init,
	.exit = xts_fallback_exit,
	.min_keysize = 2 * AES_MIN_KEY_SIZE,
	.max_keysize = 2 * AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
	.setkey = xts_aes_set_key,
	.encrypt = xts_aes_encrypt,
	.decrypt = xts_aes_decrypt,
};

static int ctr_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMCTR_AES_128 :
	     (key_len == 24) ? CPACF_KMCTR_AES_192 :
	     (key_len == 32) ? CPACF_KMCTR_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
	unsigned int i, n;

	/* only use complete blocks, max. PAGE_SIZE */
	memcpy(ctrptr, iv, AES_BLOCK_SIZE);
	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
	for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
		memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
		crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
		ctrptr += AES_BLOCK_SIZE;
	}
	return n;
}
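/*
 * Worked example for __ctrblk_init() (illustrative figures, assuming
 * AES_BLOCK_SIZE == 16 and PAGE_SIZE == 4096): for a 100-byte request
 * n = 96, so the iv plus five incremented copies fill six consecutive
 * counter blocks; for requests larger than a page, n is capped at
 * PAGE_SIZE (256 blocks) and ctr_aes_crypt() loops over the remainder.
 */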
static int ctr_aes_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	u8 buf[AES_BLOCK_SIZE], *ctrptr;
	struct skcipher_walk walk;
	unsigned int n, nbytes;
	int ret, locked;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, 0);

	locked = mutex_trylock(&ctrblk_lock);

	ret = skcipher_walk_virt(&walk, req, false);
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		n = AES_BLOCK_SIZE;

		if (nbytes >= 2*AES_BLOCK_SIZE && locked)
			n = __ctrblk_init(ctrblk, walk.iv, nbytes);
		ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv;
		cpacf_kmctr(sctx->fc, sctx->key, walk.dst.virt.addr,
			    walk.src.virt.addr, n, ctrptr);
		if (ctrptr == ctrblk)
			memcpy(walk.iv, ctrptr + n - AES_BLOCK_SIZE,
			       AES_BLOCK_SIZE);
		crypto_inc(walk.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	if (locked)
		mutex_unlock(&ctrblk_lock);
	/*
	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
	 */
	if (nbytes) {
		cpacf_kmctr(sctx->fc, sctx->key, buf, walk.src.virt.addr,
			    AES_BLOCK_SIZE, walk.iv);
		memcpy(walk.dst.virt.addr, buf, nbytes);
		crypto_inc(walk.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, 0);
	}

	return ret;
}

static struct skcipher_alg ctr_aes_alg = {
	.base.cra_name = "ctr(aes)",
	.base.cra_driver_name = "ctr-aes-s390",
	.base.cra_priority = 402,	/* ecb-aes-s390 + 1 */
	.base.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize = 1,
	.base.cra_ctxsize = sizeof(struct s390_aes_ctx),
	.base.cra_module = THIS_MODULE,
	.init = fallback_init_skcipher,
	.exit = fallback_exit_skcipher,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
	.setkey = ctr_aes_set_key,
	.encrypt = ctr_aes_crypt,
	.decrypt = ctr_aes_crypt,
	.chunksize = AES_BLOCK_SIZE,
};

static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->fc = CPACF_KMA_GCM_AES_128;
		break;
	case AES_KEYSIZE_192:
		ctx->fc = CPACF_KMA_GCM_AES_192;
		break;
	case AES_KEYSIZE_256:
		ctx->fc = CPACF_KMA_GCM_AES_256;
		break;
	default:
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->key_len = keylen;
	return 0;
}

static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 8:
	case 12:
	case 13:
	case 14:
	case 15:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void gcm_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
			   unsigned int len)
{
	memset(gw, 0, sizeof(*gw));
	gw->walk_bytes_remain = len;
	scatterwalk_start(&gw->walk, sg);
}

static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw)
{
	struct scatterlist *nextsg;

	gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
	while (!gw->walk_bytes) {
		nextsg = sg_next(gw->walk.sg);
		if (!nextsg)
			return 0;
		scatterwalk_start(&gw->walk, nextsg);
		gw->walk_bytes = scatterwalk_clamp(&gw->walk,
						   gw->walk_bytes_remain);
	}
	gw->walk_ptr = scatterwalk_map(&gw->walk);
	return gw->walk_bytes;
}

static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
					     unsigned int nbytes)
{
	gw->walk_bytes_remain -= nbytes;
	scatterwalk_unmap(gw->walk_ptr);
	scatterwalk_advance(&gw->walk, nbytes);
	scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
	gw->walk_ptr = NULL;
}
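/*
 * Descriptive note: gcm_in_walk_go() and gcm_out_walk_go() below hand out
 * either a pointer directly into the mapped scatterlist segment (when it
 * already holds at least minbytesneeded contiguous bytes) or the small
 * gw->buf bounce buffer, into which input data is accumulated and from
 * which output data is copied back by gcm_out_walk_done().
 */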
static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
	int n;

	if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
		gw->ptr = gw->buf;
		gw->nbytes = gw->buf_bytes;
		goto out;
	}

	if (gw->walk_bytes_remain == 0) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!_gcm_sg_clamp_and_map(gw)) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
		gw->ptr = gw->walk_ptr;
		gw->nbytes = gw->walk_bytes;
		goto out;
	}

	while (1) {
		n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
		memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
		gw->buf_bytes += n;
		_gcm_sg_unmap_and_advance(gw, n);
		if (gw->buf_bytes >= minbytesneeded) {
			gw->ptr = gw->buf;
			gw->nbytes = gw->buf_bytes;
			goto out;
		}
		if (!_gcm_sg_clamp_and_map(gw)) {
			gw->ptr = NULL;
			gw->nbytes = 0;
			goto out;
		}
	}

out:
	return gw->nbytes;
}

static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
	if (gw->walk_bytes_remain == 0) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!_gcm_sg_clamp_and_map(gw)) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (gw->walk_bytes >= minbytesneeded) {
		gw->ptr = gw->walk_ptr;
		gw->nbytes = gw->walk_bytes;
		goto out;
	}

	scatterwalk_unmap(gw->walk_ptr);
	gw->walk_ptr = NULL;

	gw->ptr = gw->buf;
	gw->nbytes = sizeof(gw->buf);

out:
	return gw->nbytes;
}

static int gcm_in_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
	if (gw->ptr == NULL)
		return 0;

	if (gw->ptr == gw->buf) {
		int n = gw->buf_bytes - bytesdone;
		if (n > 0) {
			memmove(gw->buf, gw->buf + bytesdone, n);
			gw->buf_bytes = n;
		} else
			gw->buf_bytes = 0;
	} else
		_gcm_sg_unmap_and_advance(gw, bytesdone);

	return bytesdone;
}

static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
	int i, n;

	if (gw->ptr == NULL)
		return 0;

	if (gw->ptr == gw->buf) {
		for (i = 0; i < bytesdone; i += n) {
			if (!_gcm_sg_clamp_and_map(gw))
				return i;
			n = min(gw->walk_bytes, bytesdone - i);
			memcpy(gw->walk_ptr, gw->buf + i, n);
			_gcm_sg_unmap_and_advance(gw, n);
		}
	} else
		_gcm_sg_unmap_and_advance(gw, bytesdone);

	return bytesdone;
}

static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int ivsize = crypto_aead_ivsize(tfm);
	unsigned int taglen = crypto_aead_authsize(tfm);
	unsigned int aadlen = req->assoclen;
	unsigned int pclen = req->cryptlen;
	int ret = 0;

	unsigned int n, len, in_bytes, out_bytes,
		     min_bytes, bytes, aad_bytes, pc_bytes;
	struct gcm_sg_walk gw_in, gw_out;
	u8 tag[GHASH_DIGEST_SIZE];

	struct {
		u32 _[3];		/* reserved */
		u32 cv;			/* Counter Value */
		u8 t[GHASH_DIGEST_SIZE];/* Tag */
		u8 h[AES_BLOCK_SIZE];	/* Hash-subkey */
		u64 taadl;		/* Total AAD Length */
		u64 tpcl;		/* Total Plain-/Cipher-text Length */
		u8 j0[GHASH_BLOCK_SIZE];/* initial counter value */
		u8 k[AES_MAX_KEY_SIZE];	/* Key */
	} param;

	/*
	 * encrypt
	 *   req->src: aad||plaintext
	 *   req->dst: aad||ciphertext||tag
	 * decrypt
	 *   req->src: aad||ciphertext||tag
	 *   req->dst: aad||plaintext, return 0 or -EBADMSG
	 * aad, plaintext and ciphertext may be empty.
	 */
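	/*
	 * Illustrative example of the layout above (figures chosen here,
	 * not taken from the original source): with assoclen = 20 and a
	 * 16-byte tag, an encrypt request with cryptlen = 48 uses
	 * src = aad[20] || plaintext[48] and
	 * dst = aad[20] || ciphertext[48] || tag[16]; the matching decrypt
	 * request has cryptlen = 64, and pclen below becomes 64 - 16 = 48.
	 */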
	if (flags & CPACF_DECRYPT)
		pclen -= taglen;
	len = aadlen + pclen;

	memset(&param, 0, sizeof(param));
	param.cv = 1;
	param.taadl = aadlen * 8;
	param.tpcl = pclen * 8;
	memcpy(param.j0, req->iv, ivsize);
	*(u32 *)(param.j0 + ivsize) = 1;
	memcpy(param.k, ctx->key, ctx->key_len);

	gcm_walk_start(&gw_in, req->src, len);
	gcm_walk_start(&gw_out, req->dst, len);

	do {
		min_bytes = min_t(unsigned int,
				  aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
		in_bytes = gcm_in_walk_go(&gw_in, min_bytes);
		out_bytes = gcm_out_walk_go(&gw_out, min_bytes);
		bytes = min(in_bytes, out_bytes);

		if (aadlen + pclen <= bytes) {
			aad_bytes = aadlen;
			pc_bytes = pclen;
			flags |= CPACF_KMA_LAAD | CPACF_KMA_LPC;
		} else {
			if (aadlen <= bytes) {
				aad_bytes = aadlen;
				pc_bytes = (bytes - aadlen) &
					   ~(AES_BLOCK_SIZE - 1);
				flags |= CPACF_KMA_LAAD;
			} else {
				aad_bytes = bytes & ~(AES_BLOCK_SIZE - 1);
				pc_bytes = 0;
			}
		}

		if (aad_bytes > 0)
			memcpy(gw_out.ptr, gw_in.ptr, aad_bytes);

		cpacf_kma(ctx->fc | flags, &param,
			  gw_out.ptr + aad_bytes,
			  gw_in.ptr + aad_bytes, pc_bytes,
			  gw_in.ptr, aad_bytes);

		n = aad_bytes + pc_bytes;
		if (gcm_in_walk_done(&gw_in, n) != n)
			return -ENOMEM;
		if (gcm_out_walk_done(&gw_out, n) != n)
			return -ENOMEM;
		aadlen -= aad_bytes;
		pclen -= pc_bytes;
	} while (aadlen + pclen > 0);

	if (flags & CPACF_DECRYPT) {
		scatterwalk_map_and_copy(tag, req->src, len, taglen, 0);
		if (crypto_memneq(tag, param.t, taglen))
			ret = -EBADMSG;
	} else
		scatterwalk_map_and_copy(param.t, req->dst, len, taglen, 1);

	memzero_explicit(&param, sizeof(param));
	return ret;
}

static int gcm_aes_encrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_ENCRYPT);
}

static int gcm_aes_decrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_DECRYPT);
}

static struct aead_alg gcm_aes_aead = {
	.setkey = gcm_aes_setkey,
	.setauthsize = gcm_aes_setauthsize,
	.encrypt = gcm_aes_encrypt,
	.decrypt = gcm_aes_decrypt,

	.ivsize = GHASH_BLOCK_SIZE - sizeof(u32),
	.maxauthsize = GHASH_DIGEST_SIZE,
	.chunksize = AES_BLOCK_SIZE,

	.base = {
		.cra_blocksize = 1,
		.cra_ctxsize = sizeof(struct s390_aes_ctx),
		.cra_priority = 900,
		.cra_name = "gcm(aes)",
		.cra_driver_name = "gcm-aes-s390",
		.cra_module = THIS_MODULE,
	},
};

static struct crypto_alg *aes_s390_alg;
static struct skcipher_alg *aes_s390_skcipher_algs[4];
static int aes_s390_skciphers_num;
static struct aead_alg *aes_s390_aead_alg;

static int aes_s390_register_skcipher(struct skcipher_alg *alg)
{
	int ret;

	ret = crypto_register_skcipher(alg);
	if (!ret)
		aes_s390_skcipher_algs[aes_s390_skciphers_num++] = alg;
	return ret;
}

static void aes_s390_fini(void)
{
	if (aes_s390_alg)
		crypto_unregister_alg(aes_s390_alg);
	while (aes_s390_skciphers_num--)
		crypto_unregister_skcipher(aes_s390_skcipher_algs[aes_s390_skciphers_num]);
	if (ctrblk)
		free_page((unsigned long) ctrblk);

	if (aes_s390_aead_alg)
		crypto_unregister_aead(aes_s390_aead_alg);
}

static int __init aes_s390_init(void)
{
	int ret;

	/* Query available functions for KM, KMC, KMCTR and KMA */
	cpacf_query(CPACF_KM, &km_functions);
	cpacf_query(CPACF_KMC, &kmc_functions);
	cpacf_query(CPACF_KMCTR, &kmctr_functions);
	cpacf_query(CPACF_KMA, &kma_functions);

	if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
		ret = crypto_register_alg(&aes_alg);
		if (ret)
			goto out_err;
		aes_s390_alg = &aes_alg;
		ret = aes_s390_register_skcipher(&ecb_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
		ret = aes_s390_register_skcipher(&cbc_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
		ret = aes_s390_register_skcipher(&xts_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_128) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_192) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_256)) {
		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
		if (!ctrblk) {
			ret = -ENOMEM;
			goto out_err;
		}
		ret = aes_s390_register_skcipher(&ctr_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_128) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_192) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_256)) {
		ret = crypto_register_aead(&gcm_aes_aead);
		if (ret)
			goto out_err;
		aes_s390_aead_alg = &gcm_aes_aead;
	}

	return 0;
out_err:
	aes_s390_fini();
	return ret;
}

module_cpu_feature_match(S390_CPU_FEATURE_MSA, aes_s390_init);
module_exit(aes_s390_fini);

MODULE_ALIAS_CRYPTO("aes-all");

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);