// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Shared crypto simd helpers
 *
 * Copyright (c) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 * Copyright (c) 2016 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2019 Google LLC
 *
 * Based on aesni-intel_glue.c by:
 *  Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 */

/*
 * Shared crypto SIMD helpers.  These functions dynamically create and register
 * an skcipher or AEAD algorithm that wraps another, internal algorithm.  The
 * wrapper ensures that the internal algorithm is only executed in a context
 * where SIMD instructions are usable, i.e. where may_use_simd() returns true.
 * If SIMD is already usable, the wrapper directly calls the internal algorithm.
 * Otherwise it defers execution to a workqueue via cryptd.
 *
 * This is an alternative to the internal algorithm implementing a fallback for
 * the !may_use_simd() case itself.
 *
 * Note that the wrapper algorithm is asynchronous, i.e. it has the
 * CRYPTO_ALG_ASYNC flag set.  Therefore it won't be found by users who
 * explicitly allocate a synchronous algorithm.
 */

#include <crypto/cryptd.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <asm/simd.h>

/* skcipher support */

struct simd_skcipher_alg {
	const char *ialg_name;
	struct skcipher_alg alg;
};

struct simd_skcipher_ctx {
	struct cryptd_skcipher *cryptd_tfm;
};

static int simd_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
				unsigned int key_len)
{
	struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *child = &ctx->cryptd_tfm->base;

	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(tfm) &
					 CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(child, key, key_len);
}

static int simd_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_request *subreq;
	struct crypto_skcipher *child;

	subreq = skcipher_request_ctx(req);
	*subreq = *req;

	/*
	 * Call the internal algorithm directly when SIMD is usable.  If SIMD
	 * is unusable, or we are in atomic context while requests are already
	 * queued to cryptd (completing directly would overtake them), defer
	 * this request to cryptd as well.
	 */
	if (!crypto_simd_usable() ||
	    (in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm)))
		child = &ctx->cryptd_tfm->base;
	else
		child = cryptd_skcipher_child(ctx->cryptd_tfm);

	skcipher_request_set_tfm(subreq, child);

	return crypto_skcipher_encrypt(subreq);
}

static int simd_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_request *subreq;
	struct crypto_skcipher *child;

	subreq = skcipher_request_ctx(req);
	*subreq = *req;

	/* Same dispatch rule as simd_skcipher_encrypt() above. */
	if (!crypto_simd_usable() ||
	    (in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm)))
		child = &ctx->cryptd_tfm->base;
	else
		child = cryptd_skcipher_child(ctx->cryptd_tfm);

	skcipher_request_set_tfm(subreq, child);

	return crypto_skcipher_decrypt(subreq);
}

static void simd_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	cryptd_free_skcipher(ctx->cryptd_tfm);
}
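/*
 * Instantiating the wrapper allocates a cryptd handle for the internal
 * ("__"-prefixed) algorithm and sizes the wrapper's request context so that
 * a subrequest for either path - the cryptd queue or the internal algorithm
 * called directly - fits inside it.
 */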
static int simd_skcipher_init(struct crypto_skcipher *tfm)
{
	struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct cryptd_skcipher *cryptd_tfm;
	struct simd_skcipher_alg *salg;
	struct skcipher_alg *alg;
	unsigned int reqsize;

	alg = crypto_skcipher_alg(tfm);
	salg = container_of(alg, struct simd_skcipher_alg, alg);

	cryptd_tfm = cryptd_alloc_skcipher(salg->ialg_name,
					   CRYPTO_ALG_INTERNAL,
					   CRYPTO_ALG_INTERNAL);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	ctx->cryptd_tfm = cryptd_tfm;

	reqsize = crypto_skcipher_reqsize(cryptd_skcipher_child(cryptd_tfm));
	reqsize = max(reqsize, crypto_skcipher_reqsize(&cryptd_tfm->base));
	reqsize += sizeof(struct skcipher_request);

	crypto_skcipher_set_reqsize(tfm, reqsize);

	return 0;
}

struct simd_skcipher_alg *simd_skcipher_create_compat(struct skcipher_alg *ialg,
						      const char *algname,
						      const char *drvname,
						      const char *basename)
{
	struct simd_skcipher_alg *salg;
	struct skcipher_alg *alg;
	int err;

	salg = kzalloc(sizeof(*salg), GFP_KERNEL);
	if (!salg) {
		salg = ERR_PTR(-ENOMEM);
		goto out;
	}

	salg->ialg_name = basename;
	alg = &salg->alg;

	err = -ENAMETOOLONG;
	if (snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", algname) >=
	    CRYPTO_MAX_ALG_NAME)
		goto out_free_salg;

	if (snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		     drvname) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_salg;

	alg->base.cra_flags = CRYPTO_ALG_ASYNC |
		(ialg->base.cra_flags & CRYPTO_ALG_INHERITED_FLAGS);
	alg->base.cra_priority = ialg->base.cra_priority;
	alg->base.cra_blocksize = ialg->base.cra_blocksize;
	alg->base.cra_alignmask = ialg->base.cra_alignmask;
	alg->base.cra_module = ialg->base.cra_module;
	alg->base.cra_ctxsize = sizeof(struct simd_skcipher_ctx);

	alg->ivsize = ialg->ivsize;
	alg->chunksize = ialg->chunksize;
	alg->min_keysize = ialg->min_keysize;
	alg->max_keysize = ialg->max_keysize;

	alg->init = simd_skcipher_init;
	alg->exit = simd_skcipher_exit;

	alg->setkey = simd_skcipher_setkey;
	alg->encrypt = simd_skcipher_encrypt;
	alg->decrypt = simd_skcipher_decrypt;

	err = crypto_register_skcipher(alg);
	if (err)
		goto out_free_salg;

out:
	return salg;

out_free_salg:
	kfree(salg);
	salg = ERR_PTR(err);
	goto out;
}
EXPORT_SYMBOL_GPL(simd_skcipher_create_compat);

void simd_skcipher_free(struct simd_skcipher_alg *salg)
{
	crypto_unregister_skcipher(&salg->alg);
	kfree(salg);
}
EXPORT_SYMBOL_GPL(simd_skcipher_free);
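/*
 * Register an array of internal algorithms along with a SIMD wrapper for each.
 * The internal algorithms must have "__"-prefixed cra_name/cra_driver_name and
 * are expected to carry CRYPTO_ALG_INTERNAL, so that only the wrappers reach
 * them.  Each wrapper is registered under the internal algorithm's names with
 * the "__" prefix stripped; e.g. an internal "__cbc-aes-mydrv" (a hypothetical
 * driver name) yields a "cbc-aes-mydrv" wrapper.
 */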
int simd_register_skciphers_compat(struct skcipher_alg *algs, int count,
				   struct simd_skcipher_alg **simd_algs)
{
	int err;
	int i;
	const char *algname;
	const char *drvname;
	const char *basename;
	struct simd_skcipher_alg *simd;

	/* All internal algorithm names must be "__"-prefixed. */
	for (i = 0; i < count; i++) {
		if (WARN_ON(strncmp(algs[i].base.cra_name, "__", 2) ||
			    strncmp(algs[i].base.cra_driver_name, "__", 2)))
			return -EINVAL;
	}

	err = crypto_register_skciphers(algs, count);
	if (err)
		return err;

	for (i = 0; i < count; i++) {
		algname = algs[i].base.cra_name + 2;
		drvname = algs[i].base.cra_driver_name + 2;
		basename = algs[i].base.cra_driver_name;
		simd = simd_skcipher_create_compat(algs + i, algname, drvname,
						   basename);
		err = PTR_ERR(simd);
		if (IS_ERR(simd))
			goto err_unregister;
		simd_algs[i] = simd;
	}
	return 0;

err_unregister:
	simd_unregister_skciphers(algs, count, simd_algs);
	return err;
}
EXPORT_SYMBOL_GPL(simd_register_skciphers_compat);

void simd_unregister_skciphers(struct skcipher_alg *algs, int count,
			       struct simd_skcipher_alg **simd_algs)
{
	int i;

	crypto_unregister_skciphers(algs, count);

	for (i = 0; i < count; i++) {
		if (simd_algs[i]) {
			simd_skcipher_free(simd_algs[i]);
			simd_algs[i] = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(simd_unregister_skciphers);

/* AEAD support */

struct simd_aead_alg {
	const char *ialg_name;
	struct aead_alg alg;
};

struct simd_aead_ctx {
	struct cryptd_aead *cryptd_tfm;
};

static int simd_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			    unsigned int key_len)
{
	struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *child = &ctx->cryptd_tfm->base;

	crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(child, crypto_aead_get_flags(tfm) &
				     CRYPTO_TFM_REQ_MASK);
	return crypto_aead_setkey(child, key, key_len);
}

static int simd_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *child = &ctx->cryptd_tfm->base;

	return crypto_aead_setauthsize(child, authsize);
}

static int simd_aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_request *subreq;
	struct crypto_aead *child;

	subreq = aead_request_ctx(req);
	*subreq = *req;

	/* Same dispatch rule as simd_skcipher_encrypt() above. */
	if (!crypto_simd_usable() ||
	    (in_atomic() && cryptd_aead_queued(ctx->cryptd_tfm)))
		child = &ctx->cryptd_tfm->base;
	else
		child = cryptd_aead_child(ctx->cryptd_tfm);

	aead_request_set_tfm(subreq, child);

	return crypto_aead_encrypt(subreq);
}

static int simd_aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_request *subreq;
	struct crypto_aead *child;

	subreq = aead_request_ctx(req);
	*subreq = *req;

	/* Same dispatch rule as simd_skcipher_encrypt() above. */
	if (!crypto_simd_usable() ||
	    (in_atomic() && cryptd_aead_queued(ctx->cryptd_tfm)))
		child = &ctx->cryptd_tfm->base;
	else
		child = cryptd_aead_child(ctx->cryptd_tfm);

	aead_request_set_tfm(subreq, child);

	return crypto_aead_decrypt(subreq);
}

static void simd_aead_exit(struct crypto_aead *tfm)
{
	struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);

	cryptd_free_aead(ctx->cryptd_tfm);
}

static int simd_aead_init(struct crypto_aead *tfm)
{
	struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct cryptd_aead *cryptd_tfm;
	struct simd_aead_alg *salg;
	struct aead_alg *alg;
	unsigned int reqsize;

	alg = crypto_aead_alg(tfm);
	salg = container_of(alg, struct simd_aead_alg, alg);

	cryptd_tfm = cryptd_alloc_aead(salg->ialg_name, CRYPTO_ALG_INTERNAL,
				       CRYPTO_ALG_INTERNAL);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	ctx->cryptd_tfm = cryptd_tfm;

	reqsize = max(crypto_aead_reqsize(cryptd_aead_child(cryptd_tfm)),
		      crypto_aead_reqsize(&cryptd_tfm->base));
	reqsize += sizeof(struct aead_request);

	crypto_aead_set_reqsize(tfm, reqsize);

	return 0;
}
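/*
 * AEAD wrapper creation mirrors simd_skcipher_create_compat() above: the
 * wrapper's aead_alg is populated from the internal algorithm's parameters
 * and registered under the prefix-stripped names.
 */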
static struct simd_aead_alg *simd_aead_create_compat(struct aead_alg *ialg,
						     const char *algname,
						     const char *drvname,
						     const char *basename)
{
	struct simd_aead_alg *salg;
	struct aead_alg *alg;
	int err;

	salg = kzalloc(sizeof(*salg), GFP_KERNEL);
	if (!salg) {
		salg = ERR_PTR(-ENOMEM);
		goto out;
	}

	salg->ialg_name = basename;
	alg = &salg->alg;

	err = -ENAMETOOLONG;
	if (snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", algname) >=
	    CRYPTO_MAX_ALG_NAME)
		goto out_free_salg;

	if (snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		     drvname) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_salg;

	alg->base.cra_flags = CRYPTO_ALG_ASYNC |
		(ialg->base.cra_flags & CRYPTO_ALG_INHERITED_FLAGS);
	alg->base.cra_priority = ialg->base.cra_priority;
	alg->base.cra_blocksize = ialg->base.cra_blocksize;
	alg->base.cra_alignmask = ialg->base.cra_alignmask;
	alg->base.cra_module = ialg->base.cra_module;
	alg->base.cra_ctxsize = sizeof(struct simd_aead_ctx);

	alg->ivsize = ialg->ivsize;
	alg->maxauthsize = ialg->maxauthsize;
	alg->chunksize = ialg->chunksize;

	alg->init = simd_aead_init;
	alg->exit = simd_aead_exit;

	alg->setkey = simd_aead_setkey;
	alg->setauthsize = simd_aead_setauthsize;
	alg->encrypt = simd_aead_encrypt;
	alg->decrypt = simd_aead_decrypt;

	err = crypto_register_aead(alg);
	if (err)
		goto out_free_salg;

out:
	return salg;

out_free_salg:
	kfree(salg);
	salg = ERR_PTR(err);
	goto out;
}

static void simd_aead_free(struct simd_aead_alg *salg)
{
	crypto_unregister_aead(&salg->alg);
	kfree(salg);
}

int simd_register_aeads_compat(struct aead_alg *algs, int count,
			       struct simd_aead_alg **simd_algs)
{
	int err;
	int i;
	const char *algname;
	const char *drvname;
	const char *basename;
	struct simd_aead_alg *simd;

	/* All internal algorithm names must be "__"-prefixed. */
	for (i = 0; i < count; i++) {
		if (WARN_ON(strncmp(algs[i].base.cra_name, "__", 2) ||
			    strncmp(algs[i].base.cra_driver_name, "__", 2)))
			return -EINVAL;
	}

	err = crypto_register_aeads(algs, count);
	if (err)
		return err;

	for (i = 0; i < count; i++) {
		algname = algs[i].base.cra_name + 2;
		drvname = algs[i].base.cra_driver_name + 2;
		basename = algs[i].base.cra_driver_name;
		simd = simd_aead_create_compat(algs + i, algname, drvname,
					       basename);
		err = PTR_ERR(simd);
		if (IS_ERR(simd))
			goto err_unregister;
		simd_algs[i] = simd;
	}
	return 0;

err_unregister:
	simd_unregister_aeads(algs, count, simd_algs);
	return err;
}
EXPORT_SYMBOL_GPL(simd_register_aeads_compat);

void simd_unregister_aeads(struct aead_alg *algs, int count,
			   struct simd_aead_alg **simd_algs)
{
	int i;

	crypto_unregister_aeads(algs, count);

	for (i = 0; i < count; i++) {
		if (simd_algs[i]) {
			simd_aead_free(simd_algs[i]);
			simd_algs[i] = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(simd_unregister_aeads);

MODULE_DESCRIPTION("Shared crypto SIMD helpers");
MODULE_LICENSE("GPL");
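/*
 * Example usage (an illustrative sketch, not part of this file; the driver,
 * algorithm array, and all names below are hypothetical):
 *
 *	static struct skcipher_alg mydrv_algs[] = { {
 *		.base.cra_name		= "__cbc(aes)",
 *		.base.cra_driver_name	= "__cbc-aes-mydrv",
 *		.base.cra_flags		= CRYPTO_ALG_INTERNAL,
 *		...
 *	} };
 *	static struct simd_skcipher_alg *mydrv_simd_algs[ARRAY_SIZE(mydrv_algs)];
 *
 *	static int __init mydrv_init(void)
 *	{
 *		return simd_register_skciphers_compat(mydrv_algs,
 *						      ARRAY_SIZE(mydrv_algs),
 *						      mydrv_simd_algs);
 *	}
 *
 *	static void __exit mydrv_exit(void)
 *	{
 *		simd_unregister_skciphers(mydrv_algs, ARRAY_SIZE(mydrv_algs),
 *					  mydrv_simd_algs);
 *	}
 */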