// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Shared crypto simd helpers
 *
 * Copyright (c) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 * Copyright (c) 2016 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2019 Google LLC
 *
 * Based on aesni-intel_glue.c by:
 *  Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 */

/*
 * Shared crypto SIMD helpers.  These functions dynamically create and register
 * an skcipher or AEAD algorithm that wraps another, internal algorithm.  The
 * wrapper ensures that the internal algorithm is only executed in a context
 * where SIMD instructions are usable, i.e. where may_use_simd() returns true.
 * If SIMD is already usable, the wrapper directly calls the internal
 * algorithm.  Otherwise it defers execution to a workqueue via cryptd.
 *
 * This is an alternative to the internal algorithm implementing a fallback for
 * the !may_use_simd() case itself.
 *
 * Note that the wrapper algorithm is asynchronous, i.e. it has the
 * CRYPTO_ALG_ASYNC flag set.  Therefore it won't be found by users who
 * explicitly allocate a synchronous algorithm.
 */
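
/*
 * Typical usage, as a minimal hypothetical sketch (the "myarch" names and
 * my_* identifiers below are invented for illustration): a driver registers
 * its internal algorithm with CRYPTO_ALG_INTERNAL and a "__"-prefixed name,
 * then creates the SIMD wrapper via the helpers below:
 *
 *	static struct skcipher_alg my_internal_alg = {
 *		.base.cra_name		= "__cbc(aes)",
 *		.base.cra_driver_name	= "__cbc-aes-myarch",
 *		.base.cra_flags		= CRYPTO_ALG_INTERNAL,
 *		... (keysizes, ivsize, setkey/encrypt/decrypt, etc.)
 *	};
 *	static struct simd_skcipher_alg *my_simd_alg;
 *
 *	static int __init my_module_init(void)
 *	{
 *		return simd_register_skciphers_compat(&my_internal_alg, 1,
 *						      &my_simd_alg);
 *	}
 *
 *	static void __exit my_module_exit(void)
 *	{
 *		simd_unregister_skciphers(&my_internal_alg, 1, &my_simd_alg);
 *	}
 */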

#include <crypto/cryptd.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <asm/simd.h>

/* skcipher support */

struct simd_skcipher_alg {
	const char *ialg_name;
	struct skcipher_alg alg;
};

struct simd_skcipher_ctx {
	struct cryptd_skcipher *cryptd_tfm;
};

static int simd_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
				unsigned int key_len)
{
	struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *child = &ctx->cryptd_tfm->base;
	int err;

	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(tfm) &
					 CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(child, key, key_len);
	crypto_skcipher_set_flags(tfm, crypto_skcipher_get_flags(child) &
				       CRYPTO_TFM_RES_MASK);
	return err;
}
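
/*
 * Request dispatch, as implemented by the encrypt/decrypt handlers below: a
 * copy of the request is placed in the request context and redirected either
 * to the internal algorithm (when SIMD is usable) or to the cryptd wrapper.
 * The cryptd_skcipher_queued() check appears to be there so that, in atomic
 * context, a request is not processed directly while earlier requests are
 * still queued in cryptd, which could reorder them; that rationale is an
 * interpretation of the code, not something it states itself.
 */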

static int simd_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_request *subreq;
	struct crypto_skcipher *child;

	subreq = skcipher_request_ctx(req);
	*subreq = *req;

	if (!crypto_simd_usable() ||
	    (in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm)))
		child = &ctx->cryptd_tfm->base;
	else
		child = cryptd_skcipher_child(ctx->cryptd_tfm);

	skcipher_request_set_tfm(subreq, child);

	return crypto_skcipher_encrypt(subreq);
}

static int simd_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_request *subreq;
	struct crypto_skcipher *child;

	subreq = skcipher_request_ctx(req);
	*subreq = *req;

	if (!crypto_simd_usable() ||
	    (in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm)))
		child = &ctx->cryptd_tfm->base;
	else
		child = cryptd_skcipher_child(ctx->cryptd_tfm);

	skcipher_request_set_tfm(subreq, child);

	return crypto_skcipher_decrypt(subreq);
}

static void simd_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	cryptd_free_skcipher(ctx->cryptd_tfm);
}

static int simd_skcipher_init(struct crypto_skcipher *tfm)
{
	struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct cryptd_skcipher *cryptd_tfm;
	struct simd_skcipher_alg *salg;
	struct skcipher_alg *alg;
	unsigned reqsize;

	alg = crypto_skcipher_alg(tfm);
	salg = container_of(alg, struct simd_skcipher_alg, alg);

	cryptd_tfm = cryptd_alloc_skcipher(salg->ialg_name,
					   CRYPTO_ALG_INTERNAL,
					   CRYPTO_ALG_INTERNAL);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	ctx->cryptd_tfm = cryptd_tfm;

	reqsize = crypto_skcipher_reqsize(cryptd_skcipher_child(cryptd_tfm));
	reqsize = max(reqsize, crypto_skcipher_reqsize(&cryptd_tfm->base));
	reqsize += sizeof(struct skcipher_request);

	crypto_skcipher_set_reqsize(tfm, reqsize);

	return 0;
}

struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname,
						      const char *drvname,
						      const char *basename)
{
	struct simd_skcipher_alg *salg;
	struct crypto_skcipher *tfm;
	struct skcipher_alg *ialg;
	struct skcipher_alg *alg;
	int err;

	tfm = crypto_alloc_skcipher(basename, CRYPTO_ALG_INTERNAL,
				    CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);

	ialg = crypto_skcipher_alg(tfm);

	salg = kzalloc(sizeof(*salg), GFP_KERNEL);
	if (!salg) {
		salg = ERR_PTR(-ENOMEM);
		goto out_put_tfm;
	}

	salg->ialg_name = basename;
	alg = &salg->alg;

	err = -ENAMETOOLONG;
	if (snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", algname) >=
	    CRYPTO_MAX_ALG_NAME)
		goto out_free_salg;

	if (snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		     drvname) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_salg;

	alg->base.cra_flags = CRYPTO_ALG_ASYNC;
	alg->base.cra_priority = ialg->base.cra_priority;
	alg->base.cra_blocksize = ialg->base.cra_blocksize;
	alg->base.cra_alignmask = ialg->base.cra_alignmask;
	alg->base.cra_module = ialg->base.cra_module;
	alg->base.cra_ctxsize = sizeof(struct simd_skcipher_ctx);

	alg->ivsize = ialg->ivsize;
	alg->chunksize = ialg->chunksize;
	alg->min_keysize = ialg->min_keysize;
	alg->max_keysize = ialg->max_keysize;

	alg->init = simd_skcipher_init;
	alg->exit = simd_skcipher_exit;

	alg->setkey = simd_skcipher_setkey;
	alg->encrypt = simd_skcipher_encrypt;
	alg->decrypt = simd_skcipher_decrypt;

	err = crypto_register_skcipher(alg);
	if (err)
		goto out_free_salg;

out_put_tfm:
	crypto_free_skcipher(tfm);
	return salg;

out_free_salg:
	kfree(salg);
	salg = ERR_PTR(err);
	goto out_put_tfm;
}
EXPORT_SYMBOL_GPL(simd_skcipher_create_compat);

struct simd_skcipher_alg *simd_skcipher_create(const char *algname,
					       const char *basename)
{
	char drvname[CRYPTO_MAX_ALG_NAME];

	if (snprintf(drvname, CRYPTO_MAX_ALG_NAME, "simd-%s", basename) >=
	    CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-ENAMETOOLONG);

	return simd_skcipher_create_compat(algname, drvname, basename);
}
EXPORT_SYMBOL_GPL(simd_skcipher_create);

void simd_skcipher_free(struct simd_skcipher_alg *salg)
{
	crypto_unregister_skcipher(&salg->alg);
	kfree(salg);
}
EXPORT_SYMBOL_GPL(simd_skcipher_free);

int simd_register_skciphers_compat(struct skcipher_alg *algs, int count,
				   struct simd_skcipher_alg **simd_algs)
{
	int err;
	int i;
	const char *algname;
	const char *drvname;
	const char *basename;
	struct simd_skcipher_alg *simd;

	err = crypto_register_skciphers(algs, count);
	if (err)
		return err;

	for (i = 0; i < count; i++) {
		WARN_ON(strncmp(algs[i].base.cra_name, "__", 2));
		WARN_ON(strncmp(algs[i].base.cra_driver_name, "__", 2));
		algname = algs[i].base.cra_name + 2;
		drvname = algs[i].base.cra_driver_name + 2;
		basename = algs[i].base.cra_driver_name;
		simd = simd_skcipher_create_compat(algname, drvname, basename);
		err = PTR_ERR(simd);
		if (IS_ERR(simd))
			goto err_unregister;
		simd_algs[i] = simd;
	}
	return 0;

err_unregister:
	simd_unregister_skciphers(algs, count, simd_algs);
	return err;
}
EXPORT_SYMBOL_GPL(simd_register_skciphers_compat);
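
/*
 * NULL entries in simd_algs[] are skipped, which lets this function double as
 * the error-path cleanup of simd_register_skciphers_compat() above, where the
 * array may be only partially populated (callers presumably pass it
 * zero-initialized).
 */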

void simd_unregister_skciphers(struct skcipher_alg *algs, int count,
			       struct simd_skcipher_alg **simd_algs)
{
	int i;

	crypto_unregister_skciphers(algs, count);

	for (i = 0; i < count; i++) {
		if (simd_algs[i]) {
			simd_skcipher_free(simd_algs[i]);
			simd_algs[i] = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(simd_unregister_skciphers);

/* AEAD support */

struct simd_aead_alg {
	const char *ialg_name;
	struct aead_alg alg;
};

struct simd_aead_ctx {
	struct cryptd_aead *cryptd_tfm;
};

static int simd_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			    unsigned int key_len)
{
	struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *child = &ctx->cryptd_tfm->base;
	int err;

	crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(child, crypto_aead_get_flags(tfm) &
				     CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(child, key, key_len);
	crypto_aead_set_flags(tfm, crypto_aead_get_flags(child) &
				   CRYPTO_TFM_RES_MASK);
	return err;
}

static int simd_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *child = &ctx->cryptd_tfm->base;

	return crypto_aead_setauthsize(child, authsize);
}

static int simd_aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_request *subreq;
	struct crypto_aead *child;

	subreq = aead_request_ctx(req);
	*subreq = *req;

	if (!crypto_simd_usable() ||
	    (in_atomic() && cryptd_aead_queued(ctx->cryptd_tfm)))
		child = &ctx->cryptd_tfm->base;
	else
		child = cryptd_aead_child(ctx->cryptd_tfm);

	aead_request_set_tfm(subreq, child);

	return crypto_aead_encrypt(subreq);
}

static int simd_aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_request *subreq;
	struct crypto_aead *child;

	subreq = aead_request_ctx(req);
	*subreq = *req;

	if (!crypto_simd_usable() ||
	    (in_atomic() && cryptd_aead_queued(ctx->cryptd_tfm)))
		child = &ctx->cryptd_tfm->base;
	else
		child = cryptd_aead_child(ctx->cryptd_tfm);

	aead_request_set_tfm(subreq, child);

	return crypto_aead_decrypt(subreq);
}

static void simd_aead_exit(struct crypto_aead *tfm)
{
	struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);

	cryptd_free_aead(ctx->cryptd_tfm);
}

static int simd_aead_init(struct crypto_aead *tfm)
{
	struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct cryptd_aead *cryptd_tfm;
	struct simd_aead_alg *salg;
	struct aead_alg *alg;
	unsigned reqsize;

	alg = crypto_aead_alg(tfm);
	salg = container_of(alg, struct simd_aead_alg, alg);

	cryptd_tfm = cryptd_alloc_aead(salg->ialg_name, CRYPTO_ALG_INTERNAL,
				       CRYPTO_ALG_INTERNAL);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	ctx->cryptd_tfm = cryptd_tfm;

	reqsize = crypto_aead_reqsize(cryptd_aead_child(cryptd_tfm));
	reqsize = max(reqsize, crypto_aead_reqsize(&cryptd_tfm->base));
	reqsize += sizeof(struct aead_request);

	crypto_aead_set_reqsize(tfm, reqsize);

	return 0;
}
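
/*
 * As with the skcipher variant above: algname is the cra_name exposed to
 * users, drvname the wrapper's driver name, and basename the driver name of
 * the internal algorithm being wrapped.  A hypothetical example (the "myarch"
 * name is invented): algname "gcm(aes)", drvname "gcm-aes-myarch", basename
 * "__gcm-aes-myarch".
 */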

struct simd_aead_alg *simd_aead_create_compat(const char *algname,
					      const char *drvname,
					      const char *basename)
{
	struct simd_aead_alg *salg;
	struct crypto_aead *tfm;
	struct aead_alg *ialg;
	struct aead_alg *alg;
	int err;

	tfm = crypto_alloc_aead(basename, CRYPTO_ALG_INTERNAL,
				CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);

	ialg = crypto_aead_alg(tfm);

	salg = kzalloc(sizeof(*salg), GFP_KERNEL);
	if (!salg) {
		salg = ERR_PTR(-ENOMEM);
		goto out_put_tfm;
	}

	salg->ialg_name = basename;
	alg = &salg->alg;

	err = -ENAMETOOLONG;
	if (snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", algname) >=
	    CRYPTO_MAX_ALG_NAME)
		goto out_free_salg;

	if (snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		     drvname) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_salg;

	alg->base.cra_flags = CRYPTO_ALG_ASYNC;
	alg->base.cra_priority = ialg->base.cra_priority;
	alg->base.cra_blocksize = ialg->base.cra_blocksize;
	alg->base.cra_alignmask = ialg->base.cra_alignmask;
	alg->base.cra_module = ialg->base.cra_module;
	alg->base.cra_ctxsize = sizeof(struct simd_aead_ctx);

	alg->ivsize = ialg->ivsize;
	alg->maxauthsize = ialg->maxauthsize;
	alg->chunksize = ialg->chunksize;

	alg->init = simd_aead_init;
	alg->exit = simd_aead_exit;

	alg->setkey = simd_aead_setkey;
	alg->setauthsize = simd_aead_setauthsize;
	alg->encrypt = simd_aead_encrypt;
	alg->decrypt = simd_aead_decrypt;

	err = crypto_register_aead(alg);
	if (err)
		goto out_free_salg;

out_put_tfm:
	crypto_free_aead(tfm);
	return salg;

out_free_salg:
	kfree(salg);
	salg = ERR_PTR(err);
	goto out_put_tfm;
}
EXPORT_SYMBOL_GPL(simd_aead_create_compat);

struct simd_aead_alg *simd_aead_create(const char *algname,
				       const char *basename)
{
	char drvname[CRYPTO_MAX_ALG_NAME];

	if (snprintf(drvname, CRYPTO_MAX_ALG_NAME, "simd-%s", basename) >=
	    CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-ENAMETOOLONG);

	return simd_aead_create_compat(algname, drvname, basename);
}
EXPORT_SYMBOL_GPL(simd_aead_create);

void simd_aead_free(struct simd_aead_alg *salg)
{
	crypto_unregister_aead(&salg->alg);
	kfree(salg);
}
EXPORT_SYMBOL_GPL(simd_aead_free);

int simd_register_aeads_compat(struct aead_alg *algs, int count,
			       struct simd_aead_alg **simd_algs)
{
	int err;
	int i;
	const char *algname;
	const char *drvname;
	const char *basename;
	struct simd_aead_alg *simd;

	err = crypto_register_aeads(algs, count);
	if (err)
		return err;

	for (i = 0; i < count; i++) {
		WARN_ON(strncmp(algs[i].base.cra_name, "__", 2));
		WARN_ON(strncmp(algs[i].base.cra_driver_name, "__", 2));
		algname = algs[i].base.cra_name + 2;
		drvname = algs[i].base.cra_driver_name + 2;
		basename = algs[i].base.cra_driver_name;
		simd = simd_aead_create_compat(algname, drvname, basename);
		err = PTR_ERR(simd);
		if (IS_ERR(simd))
			goto err_unregister;
		simd_algs[i] = simd;
	}
	return 0;

err_unregister:
	simd_unregister_aeads(algs, count, simd_algs);
	return err;
}
EXPORT_SYMBOL_GPL(simd_register_aeads_compat);

void simd_unregister_aeads(struct aead_alg *algs, int count,
			   struct simd_aead_alg **simd_algs)
{
	int i;

	crypto_unregister_aeads(algs, count);

	for (i = 0; i < count; i++) {
		if (simd_algs[i]) {
			simd_aead_free(simd_algs[i]);
			simd_algs[i] = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(simd_unregister_aeads);

MODULE_LICENSE("GPL");