/*-
 * Copyright (c) 2005-2008 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * Copyright (c) 2010 Konstantin Belousov <kib@FreeBSD.org>
 * Copyright (c) 2014 The FreeBSD Foundation
 * Copyright (c) 2017 Conrad Meyer <cem@FreeBSD.org>
 * All rights reserved.
 *
 * Portions of this software were developed by John-Mark Gurney
 * under sponsorship of the FreeBSD Foundation and
 * Rubicon Communications, LLC (Netgate).
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/libkern.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/systm.h>
#include <sys/uio.h>

#include <crypto/aesni/aesni.h>
#include <crypto/aesni/sha_sse.h>
#include <crypto/sha1.h>
#include <crypto/sha2/sha224.h>
#include <crypto/sha2/sha256.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/gmac.h>
#include <cryptodev_if.h>

#include <machine/md_var.h>
#include <machine/specialreg.h>
#if defined(__i386__)
#include <machine/npx.h>
#elif defined(__amd64__)
#include <machine/fpu.h>
#endif

static struct mtx_padalign *ctx_mtx;
static struct fpu_kern_ctx **ctx_fpu;

struct aesni_softc {
        int32_t cid;
        bool has_aes;
        bool has_sha;
};

#define ACQUIRE_CTX(i, ctx)                                     \
        do {                                                    \
                (i) = PCPU_GET(cpuid);                          \
                mtx_lock(&ctx_mtx[(i)]);                        \
                (ctx) = ctx_fpu[(i)];                           \
        } while (0)
#define RELEASE_CTX(i, ctx)                                     \
        do {                                                    \
                mtx_unlock(&ctx_mtx[(i)]);                      \
                (i) = -1;                                       \
                (ctx) = NULL;                                   \
        } while (0)

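/*
 * Requests borrow a preallocated per-CPU FPU save area: ACQUIRE_CTX() picks
 * the current CPU's context and takes that CPU's padded mutex, so only one
 * thread at a time uses the fpu_kern_ctx allocated for it in aesni_attach();
 * RELEASE_CTX() drops the mutex again after fpu_kern_leave() has been called.
 */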

static int aesni_newsession(device_t, crypto_session_t cses,
    struct cryptoini *cri);
static int aesni_cipher_setup(struct aesni_session *ses,
    struct cryptoini *encini, struct cryptoini *authini);
static int aesni_cipher_process(struct aesni_session *ses,
    struct cryptodesc *enccrd, struct cryptodesc *authcrd, struct cryptop *crp);
static int aesni_cipher_crypt(struct aesni_session *ses,
    struct cryptodesc *enccrd, struct cryptodesc *authcrd, struct cryptop *crp);
static int aesni_cipher_mac(struct aesni_session *ses, struct cryptodesc *crd,
    struct cryptop *crp);

MALLOC_DEFINE(M_AESNI, "aesni_data", "AESNI Data");

static void
aesni_identify(driver_t *drv, device_t parent)
{

        /* NB: order 10 is so we get attached after h/w devices */
        if (device_find_child(parent, "aesni", -1) == NULL &&
            BUS_ADD_CHILD(parent, 10, "aesni", -1) == 0)
                panic("aesni: could not attach");
}

static void
detect_cpu_features(bool *has_aes, bool *has_sha)
{

        *has_aes = ((cpu_feature2 & CPUID2_AESNI) != 0 &&
            (cpu_feature2 & CPUID2_SSE41) != 0);
        *has_sha = ((cpu_stdext_feature & CPUID_STDEXT_SHA) != 0 &&
            (cpu_feature2 & CPUID2_SSSE3) != 0);
}

static int
aesni_probe(device_t dev)
{
        bool has_aes, has_sha;

        detect_cpu_features(&has_aes, &has_sha);
        if (!has_aes && !has_sha) {
                device_printf(dev, "No AES or SHA support.\n");
                return (EINVAL);
        } else if (has_aes && has_sha)
                device_set_desc(dev,
                    "AES-CBC,AES-XTS,AES-GCM,AES-ICM,SHA1,SHA256");
        else if (has_aes)
                device_set_desc(dev, "AES-CBC,AES-XTS,AES-GCM,AES-ICM");
        else
                device_set_desc(dev, "SHA1,SHA256");

        return (0);
}

static void
aesni_cleanctx(void)
{
        int i;

        /* XXX - no way to return driverid */
        CPU_FOREACH(i) {
                if (ctx_fpu[i] != NULL) {
                        mtx_destroy(&ctx_mtx[i]);
                        fpu_kern_free_ctx(ctx_fpu[i]);
                }
                ctx_fpu[i] = NULL;
        }
        free(ctx_mtx, M_AESNI);
        ctx_mtx = NULL;
        free(ctx_fpu, M_AESNI);
        ctx_fpu = NULL;
}

static int
aesni_attach(device_t dev)
{
        struct aesni_softc *sc;
        int i;

        sc = device_get_softc(dev);

        sc->cid = crypto_get_driverid(dev, sizeof(struct aesni_session),
            CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SYNC);
        if (sc->cid < 0) {
                device_printf(dev, "Could not get crypto driver id.\n");
                return (ENOMEM);
        }

        ctx_mtx = malloc(sizeof *ctx_mtx * (mp_maxid + 1), M_AESNI,
            M_WAITOK|M_ZERO);
        ctx_fpu = malloc(sizeof *ctx_fpu * (mp_maxid + 1), M_AESNI,
            M_WAITOK|M_ZERO);

        CPU_FOREACH(i) {
                ctx_fpu[i] = fpu_kern_alloc_ctx(0);
                mtx_init(&ctx_mtx[i], "anifpumtx", NULL, MTX_DEF|MTX_NEW);
        }

        detect_cpu_features(&sc->has_aes, &sc->has_sha);
        if (sc->has_aes) {
                crypto_register(sc->cid, CRYPTO_AES_CBC, 0, 0);
                crypto_register(sc->cid, CRYPTO_AES_ICM, 0, 0);
                crypto_register(sc->cid, CRYPTO_AES_NIST_GCM_16, 0, 0);
                crypto_register(sc->cid, CRYPTO_AES_128_NIST_GMAC, 0, 0);
                crypto_register(sc->cid, CRYPTO_AES_192_NIST_GMAC, 0, 0);
                crypto_register(sc->cid, CRYPTO_AES_256_NIST_GMAC, 0, 0);
                crypto_register(sc->cid, CRYPTO_AES_XTS, 0, 0);
        }
        if (sc->has_sha) {
                crypto_register(sc->cid, CRYPTO_SHA1, 0, 0);
                crypto_register(sc->cid, CRYPTO_SHA1_HMAC, 0, 0);
                crypto_register(sc->cid, CRYPTO_SHA2_224, 0, 0);
                crypto_register(sc->cid, CRYPTO_SHA2_224_HMAC, 0, 0);
                crypto_register(sc->cid, CRYPTO_SHA2_256, 0, 0);
                crypto_register(sc->cid, CRYPTO_SHA2_256_HMAC, 0, 0);
        }
        return (0);
}

static int
aesni_detach(device_t dev)
{
        struct aesni_softc *sc;

        sc = device_get_softc(dev);

        crypto_unregister_all(sc->cid);

        aesni_cleanctx();

        return (0);
}

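/*
 * A session binds at most one cipher transform (AES-CBC/ICM/XTS/GCM) and at
 * most one auth transform (SHA1/SHA2-224/SHA2-256, plain or HMAC).  GCM and
 * the AES-*-GMAC hashes are only accepted as a pair.
 */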

static int
aesni_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri)
{
        struct aesni_softc *sc;
        struct aesni_session *ses;
        struct cryptoini *encini, *authini;
        bool gcm_hash, gcm;
        int error;

        KASSERT(cses != NULL, ("EDOOFUS"));
        if (cri == NULL) {
                CRYPTDEB("no cri");
                return (EINVAL);
        }

        sc = device_get_softc(dev);

        ses = crypto_get_driver_session(cses);

        authini = NULL;
        encini = NULL;
        gcm = false;
        gcm_hash = false;
        for (; cri != NULL; cri = cri->cri_next) {
                switch (cri->cri_alg) {
                case CRYPTO_AES_NIST_GCM_16:
                        gcm = true;
                        /* FALLTHROUGH */
                case CRYPTO_AES_CBC:
                case CRYPTO_AES_ICM:
                case CRYPTO_AES_XTS:
                        if (!sc->has_aes)
                                goto unhandled;
                        if (encini != NULL) {
                                CRYPTDEB("encini already set");
                                return (EINVAL);
                        }
                        encini = cri;
                        break;
                case CRYPTO_AES_128_NIST_GMAC:
                case CRYPTO_AES_192_NIST_GMAC:
                case CRYPTO_AES_256_NIST_GMAC:
                        /*
                         * nothing to do here, maybe in the future cache some
                         * values for GHASH
                         */
                        gcm_hash = true;
                        break;
                case CRYPTO_SHA1:
                case CRYPTO_SHA1_HMAC:
                case CRYPTO_SHA2_224:
                case CRYPTO_SHA2_224_HMAC:
                case CRYPTO_SHA2_256:
                case CRYPTO_SHA2_256_HMAC:
                        if (!sc->has_sha)
                                goto unhandled;
                        if (authini != NULL) {
                                CRYPTDEB("authini already set");
                                return (EINVAL);
                        }
                        authini = cri;
                        break;
                default:
unhandled:
                        CRYPTDEB("unhandled algorithm");
                        return (EINVAL);
                }
        }
        if (encini == NULL && authini == NULL) {
                CRYPTDEB("no cipher");
                return (EINVAL);
        }
        /*
         * GMAC algorithms are only supported with simultaneous GCM.  Likewise
         * GCM is not supported without GMAC.
         */
        if (gcm_hash != gcm)
                return (EINVAL);

        if (encini != NULL)
                ses->algo = encini->cri_alg;
        if (authini != NULL)
                ses->auth_algo = authini->cri_alg;

        error = aesni_cipher_setup(ses, encini, authini);
        if (error != 0) {
                CRYPTDEB("setup failed");
                return (error);
        }

        return (0);
}

static int
aesni_process(device_t dev, struct cryptop *crp, int hint __unused)
{
        struct aesni_session *ses;
        struct cryptodesc *crd, *enccrd, *authcrd;
        int error, needauth;

        ses = NULL;
        error = 0;
        enccrd = NULL;
        authcrd = NULL;
        needauth = 0;

        /* Sanity check. */
        if (crp == NULL)
                return (EINVAL);

        if (crp->crp_callback == NULL || crp->crp_desc == NULL ||
            crp->crp_session == NULL) {
                error = EINVAL;
                goto out;
        }

        for (crd = crp->crp_desc; crd != NULL; crd = crd->crd_next) {
                switch (crd->crd_alg) {
                case CRYPTO_AES_NIST_GCM_16:
                        needauth = 1;
                        /* FALLTHROUGH */
                case CRYPTO_AES_CBC:
                case CRYPTO_AES_ICM:
                case CRYPTO_AES_XTS:
                        if (enccrd != NULL) {
                                error = EINVAL;
                                goto out;
                        }
                        enccrd = crd;
                        break;

                case CRYPTO_AES_128_NIST_GMAC:
                case CRYPTO_AES_192_NIST_GMAC:
                case CRYPTO_AES_256_NIST_GMAC:
                case CRYPTO_SHA1:
                case CRYPTO_SHA1_HMAC:
                case CRYPTO_SHA2_224:
                case CRYPTO_SHA2_224_HMAC:
                case CRYPTO_SHA2_256:
                case CRYPTO_SHA2_256_HMAC:
                        if (authcrd != NULL) {
                                error = EINVAL;
                                goto out;
                        }
                        authcrd = crd;
                        break;

                default:
                        error = EINVAL;
                        goto out;
                }
        }

        if ((enccrd == NULL && authcrd == NULL) ||
            (needauth && authcrd == NULL)) {
                error = EINVAL;
                goto out;
        }

        /* CBC & XTS can only handle full blocks for now */
        if (enccrd != NULL && (enccrd->crd_alg == CRYPTO_AES_CBC ||
            enccrd->crd_alg == CRYPTO_AES_XTS) &&
            (enccrd->crd_len % AES_BLOCK_LEN) != 0) {
                error = EINVAL;
                goto out;
        }

        ses = crypto_get_driver_session(crp->crp_session);
        KASSERT(ses != NULL, ("EDOOFUS"));

        error = aesni_cipher_process(ses, enccrd, authcrd, crp);
        if (error != 0)
                goto out;

out:
        crp->crp_etype = error;
        crypto_done(crp);
        return (error);
}

static uint8_t *
aesni_cipher_alloc(struct cryptodesc *enccrd, struct cryptop *crp,
    bool *allocated)
{
        uint8_t *addr;

        addr = crypto_contiguous_subsegment(crp->crp_flags,
            crp->crp_buf, enccrd->crd_skip, enccrd->crd_len);
        if (addr != NULL) {
                *allocated = false;
                return (addr);
        }
        addr = malloc(enccrd->crd_len, M_AESNI, M_NOWAIT);
        if (addr != NULL) {
                *allocated = true;
                crypto_copydata(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
                    enccrd->crd_len, addr);
        } else
                *allocated = false;
        return (addr);
}

static device_method_t aesni_methods[] = {
        DEVMETHOD(device_identify, aesni_identify),
        DEVMETHOD(device_probe, aesni_probe),
        DEVMETHOD(device_attach, aesni_attach),
        DEVMETHOD(device_detach, aesni_detach),

        DEVMETHOD(cryptodev_newsession, aesni_newsession),
        DEVMETHOD(cryptodev_process, aesni_process),

        DEVMETHOD_END
};

static driver_t aesni_driver = {
        "aesni",
        aesni_methods,
        sizeof(struct aesni_softc),
};
static devclass_t aesni_devclass;

DRIVER_MODULE(aesni, nexus, aesni_driver, aesni_devclass, 0, 0);
MODULE_VERSION(aesni, 1);
MODULE_DEPEND(aesni, crypto, 1, 1, 1);

static int
aesni_authprepare(struct aesni_session *ses, int klen, const void *cri_key)
{
        int keylen;

        if (klen % 8 != 0)
                return (EINVAL);
        keylen = klen / 8;
        if (keylen > sizeof(ses->hmac_key))
                return (EINVAL);
        if (ses->auth_algo == CRYPTO_SHA1 && keylen > 0)
                return (EINVAL);
        memcpy(ses->hmac_key, cri_key, keylen);
        return (0);
}

static int
aesni_cipher_setup(struct aesni_session *ses, struct cryptoini *encini,
    struct cryptoini *authini)
{
        struct fpu_kern_ctx *ctx;
        int kt, ctxidx, error;

        switch (ses->auth_algo) {
        case CRYPTO_SHA1:
        case CRYPTO_SHA1_HMAC:
        case CRYPTO_SHA2_224:
        case CRYPTO_SHA2_224_HMAC:
        case CRYPTO_SHA2_256:
        case CRYPTO_SHA2_256_HMAC:
                error = aesni_authprepare(ses, authini->cri_klen,
                    authini->cri_key);
                if (error != 0)
                        return (error);
                ses->mlen = authini->cri_mlen;
        }

        kt = is_fpu_kern_thread(0) || (encini == NULL);
        if (!kt) {
                ACQUIRE_CTX(ctxidx, ctx);
                fpu_kern_enter(curthread, ctx,
                    FPU_KERN_NORMAL | FPU_KERN_KTHR);
        }

        error = 0;
        if (encini != NULL)
                error = aesni_cipher_setup_common(ses, encini->cri_key,
                    encini->cri_klen);

        if (!kt) {
                fpu_kern_leave(curthread, ctx);
                RELEASE_CTX(ctxidx, ctx);
        }
        return (error);
}

static int
intel_sha1_update(void *vctx, const void *vdata, u_int datalen)
{
        struct sha1_ctxt *ctx = vctx;
        const char *data = vdata;
        size_t gaplen;
        size_t gapstart;
        size_t off;
        size_t copysiz;
        u_int blocks;

        off = 0;
        /* Do any aligned blocks without redundant copying. */
        if (datalen >= 64 && ctx->count % 64 == 0) {
                blocks = datalen / 64;
                ctx->c.b64[0] += blocks * 64 * 8;
                intel_sha1_step(ctx->h.b32, data + off, blocks);
                off += blocks * 64;
        }

        while (off < datalen) {
                gapstart = ctx->count % 64;
                gaplen = 64 - gapstart;

                copysiz = (gaplen < datalen - off) ? gaplen : datalen - off;
                bcopy(&data[off], &ctx->m.b8[gapstart], copysiz);
                ctx->count += copysiz;
                ctx->count %= 64;
                ctx->c.b64[0] += copysiz * 8;
                if (ctx->count % 64 == 0)
                        intel_sha1_step(ctx->h.b32, (void *)ctx->m.b8, 1);
                off += copysiz;
        }
        return (0);
}

static void
SHA1_Init_fn(void *ctx)
{
        sha1_init(ctx);
}

static void
SHA1_Finalize_fn(void *digest, void *ctx)
{
        sha1_result(ctx, digest);
}

static int
intel_sha256_update(void *vctx, const void *vdata, u_int len)
{
        SHA256_CTX *ctx = vctx;
        uint64_t bitlen;
        uint32_t r;
        u_int blocks;
        const unsigned char *src = vdata;

        /* Number of bytes left in the buffer from previous updates */
        r = (ctx->count >> 3) & 0x3f;

        /* Convert the length into a number of bits */
        bitlen = len << 3;

        /* Update number of bits */
        ctx->count += bitlen;

        /* Handle the case where we don't need to perform any transforms */
        if (len < 64 - r) {
                memcpy(&ctx->buf[r], src, len);
                return (0);
        }

        /* Finish the current block */
        memcpy(&ctx->buf[r], src, 64 - r);
        intel_sha256_step(ctx->state, ctx->buf, 1);
        src += 64 - r;
        len -= 64 - r;

        /* Perform complete blocks */
        if (len >= 64) {
                blocks = len / 64;
                intel_sha256_step(ctx->state, src, blocks);
                src += blocks * 64;
                len -= blocks * 64;
        }

        /* Copy left over data into buffer */
        memcpy(ctx->buf, src, len);
        return (0);
}

static void
SHA224_Init_fn(void *ctx)
{
        SHA224_Init(ctx);
}

static void
SHA224_Finalize_fn(void *digest, void *ctx)
{
        SHA224_Final(digest, ctx);
}

static void
SHA256_Init_fn(void *ctx)
{
        SHA256_Init(ctx);
}

static void
SHA256_Finalize_fn(void *digest, void *ctx)
{
        SHA256_Final(digest, ctx);
}

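/*
 * hmac_internal() below implements one half of the usual HMAC construction:
 * aesni_cipher_mac() calls it twice, once with xorbyte 0x36 (ipad) over the
 * request data and once with xorbyte 0x5C (opad) over the inner digest, so
 * the result is HASH((K ^ opad) || HASH((K ^ ipad) || data)).
 */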

/*
 * Compute the HASH( (key ^ xorbyte) || buf )
 */
static void
hmac_internal(void *ctx, uint32_t *res,
    int (*update)(void *, const void *, u_int),
    void (*finalize)(void *, void *), uint8_t *key, uint8_t xorbyte,
    const void *buf, size_t off, size_t buflen, int crpflags)
{
        size_t i;

        for (i = 0; i < 64; i++)
                key[i] ^= xorbyte;
        update(ctx, key, 64);
        for (i = 0; i < 64; i++)
                key[i] ^= xorbyte;

        crypto_apply(crpflags, __DECONST(void *, buf), off, buflen,
            __DECONST(int (*)(void *, void *, u_int), update), ctx);
        finalize(res, ctx);
}

static int
aesni_cipher_process(struct aesni_session *ses, struct cryptodesc *enccrd,
    struct cryptodesc *authcrd, struct cryptop *crp)
{
        struct fpu_kern_ctx *ctx;
        int error, ctxidx;
        bool kt;

        if (enccrd != NULL) {
                if ((enccrd->crd_alg == CRYPTO_AES_ICM ||
                    enccrd->crd_alg == CRYPTO_AES_NIST_GCM_16) &&
                    (enccrd->crd_flags & CRD_F_IV_EXPLICIT) == 0)
                        return (EINVAL);
        }

        ctx = NULL;
        ctxidx = 0;
        error = 0;
        kt = is_fpu_kern_thread(0);
        if (!kt) {
                ACQUIRE_CTX(ctxidx, ctx);
                fpu_kern_enter(curthread, ctx,
                    FPU_KERN_NORMAL | FPU_KERN_KTHR);
        }

        /* Do work */
        if (enccrd != NULL && authcrd != NULL) {
                /* Perform the first operation */
                if (crp->crp_desc == enccrd)
                        error = aesni_cipher_crypt(ses, enccrd, authcrd, crp);
                else
                        error = aesni_cipher_mac(ses, authcrd, crp);
                if (error != 0)
                        goto out;
                /* Perform the second operation */
                if (crp->crp_desc == enccrd)
                        error = aesni_cipher_mac(ses, authcrd, crp);
                else
                        error = aesni_cipher_crypt(ses, enccrd, authcrd, crp);
        } else if (enccrd != NULL)
                error = aesni_cipher_crypt(ses, enccrd, authcrd, crp);
        else
                error = aesni_cipher_mac(ses, authcrd, crp);

        if (error != 0)
                goto out;

out:
        if (!kt) {
                fpu_kern_leave(curthread, ctx);
                RELEASE_CTX(ctxidx, ctx);
        }
        return (error);
}

static int
aesni_cipher_crypt(struct aesni_session *ses, struct cryptodesc *enccrd,
    struct cryptodesc *authcrd, struct cryptop *crp)
{
        uint8_t iv[AES_BLOCK_LEN], tag[GMAC_DIGEST_LEN], *buf, *authbuf;
        int error, ivlen;
        bool encflag, allocated, authallocated;

        KASSERT(ses->algo != CRYPTO_AES_NIST_GCM_16 || authcrd != NULL,
            ("AES_NIST_GCM_16 must include MAC descriptor"));

        ivlen = 0;
        authbuf = NULL;

        buf = aesni_cipher_alloc(enccrd, crp, &allocated);
        if (buf == NULL)
                return (ENOMEM);

        authallocated = false;
        if (ses->algo == CRYPTO_AES_NIST_GCM_16) {
                authbuf = aesni_cipher_alloc(authcrd, crp, &authallocated);
                if (authbuf == NULL) {
                        error = ENOMEM;
                        goto out;
                }
        }

        error = 0;
        encflag = (enccrd->crd_flags & CRD_F_ENCRYPT) == CRD_F_ENCRYPT;
        if ((enccrd->crd_flags & CRD_F_KEY_EXPLICIT) != 0) {
                error = aesni_cipher_setup_common(ses, enccrd->crd_key,
                    enccrd->crd_klen);
                if (error != 0)
                        goto out;
        }

        switch (enccrd->crd_alg) {
        case CRYPTO_AES_CBC:
        case CRYPTO_AES_ICM:
                ivlen = AES_BLOCK_LEN;
                break;
        case CRYPTO_AES_XTS:
                ivlen = 8;
                break;
        case CRYPTO_AES_NIST_GCM_16:
                ivlen = 12;     /* should support arbitrarily larger */
                break;
        }

        /* Setup iv */
        if (encflag) {
                if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
                        bcopy(enccrd->crd_iv, iv, ivlen);
                else
                        arc4rand(iv, ivlen, 0);

                if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
                        crypto_copyback(crp->crp_flags, crp->crp_buf,
                            enccrd->crd_inject, ivlen, iv);
        } else {
                if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
                        bcopy(enccrd->crd_iv, iv, ivlen);
                else
                        crypto_copydata(crp->crp_flags, crp->crp_buf,
                            enccrd->crd_inject, ivlen, iv);
        }

        switch (ses->algo) {
        case CRYPTO_AES_CBC:
                if (encflag)
                        aesni_encrypt_cbc(ses->rounds, ses->enc_schedule,
                            enccrd->crd_len, buf, buf, iv);
                else
                        aesni_decrypt_cbc(ses->rounds, ses->dec_schedule,
                            enccrd->crd_len, buf, iv);
                break;
        case CRYPTO_AES_ICM:
                /* encryption & decryption are the same */
                aesni_encrypt_icm(ses->rounds, ses->enc_schedule,
                    enccrd->crd_len, buf, buf, iv);
                break;
        case CRYPTO_AES_XTS:
                if (encflag)
                        aesni_encrypt_xts(ses->rounds, ses->enc_schedule,
                            ses->xts_schedule, enccrd->crd_len, buf, buf,
                            iv);
                else
                        aesni_decrypt_xts(ses->rounds, ses->dec_schedule,
                            ses->xts_schedule, enccrd->crd_len, buf, buf,
                            iv);
                break;
        case CRYPTO_AES_NIST_GCM_16:
                if (!encflag)
                        crypto_copydata(crp->crp_flags, crp->crp_buf,
                            authcrd->crd_inject, GMAC_DIGEST_LEN, tag);
                else
                        bzero(tag, sizeof tag);

                if (encflag) {
                        AES_GCM_encrypt(buf, buf, authbuf, iv, tag,
                            enccrd->crd_len, authcrd->crd_len, ivlen,
                            ses->enc_schedule, ses->rounds);

                        if (authcrd != NULL)
                                crypto_copyback(crp->crp_flags, crp->crp_buf,
                                    authcrd->crd_inject, GMAC_DIGEST_LEN, tag);
                } else {
                        if (!AES_GCM_decrypt(buf, buf, authbuf, iv, tag,
                            enccrd->crd_len, authcrd->crd_len, ivlen,
                            ses->enc_schedule, ses->rounds))
                                error = EBADMSG;
                }
                break;
        }

        if (allocated)
                crypto_copyback(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
                    enccrd->crd_len, buf);

out:
        if (allocated) {
                explicit_bzero(buf, enccrd->crd_len);
                free(buf, M_AESNI);
        }
        if (authallocated) {
                explicit_bzero(authbuf, authcrd->crd_len);
                free(authbuf, M_AESNI);
        }
        return (error);
}

static int
aesni_cipher_mac(struct aesni_session *ses, struct cryptodesc *crd,
    struct cryptop *crp)
{
        union {
                struct SHA256Context sha2 __aligned(16);
                struct sha1_ctxt sha1 __aligned(16);
        } sctx;
        uint32_t res[SHA2_256_HASH_LEN / sizeof(uint32_t)];
        int hashlen, error;
        void *ctx;
        void (*InitFn)(void *);
        int (*UpdateFn)(void *, const void *, unsigned);
        void (*FinalizeFn)(void *, void *);

        bool hmac;

        if ((crd->crd_flags & ~CRD_F_KEY_EXPLICIT) != 0) {
                CRYPTDEB("%s: Unsupported MAC flags: 0x%x", __func__,
                    (crd->crd_flags & ~CRD_F_KEY_EXPLICIT));
                return (EINVAL);
        }
        if ((crd->crd_flags & CRD_F_KEY_EXPLICIT) != 0) {
                error = aesni_authprepare(ses, crd->crd_klen, crd->crd_key);
                if (error != 0)
                        return (error);
        }

        hmac = false;
        switch (ses->auth_algo) {
        case CRYPTO_SHA1_HMAC:
                hmac = true;
                /* FALLTHROUGH */
        case CRYPTO_SHA1:
                hashlen = SHA1_HASH_LEN;
                InitFn = SHA1_Init_fn;
                UpdateFn = intel_sha1_update;
                FinalizeFn = SHA1_Finalize_fn;
                ctx = &sctx.sha1;
                break;

        case CRYPTO_SHA2_256_HMAC:
                hmac = true;
                /* FALLTHROUGH */
        case CRYPTO_SHA2_256:
                hashlen = SHA2_256_HASH_LEN;
                InitFn = SHA256_Init_fn;
                UpdateFn = intel_sha256_update;
                FinalizeFn = SHA256_Finalize_fn;
                ctx = &sctx.sha2;
                break;

        case CRYPTO_SHA2_224_HMAC:
                hmac = true;
                /* FALLTHROUGH */
        case CRYPTO_SHA2_224:
                hashlen = SHA2_224_HASH_LEN;
                InitFn = SHA224_Init_fn;
                UpdateFn = intel_sha256_update;
                FinalizeFn = SHA224_Finalize_fn;
                ctx = &sctx.sha2;
                break;
        default:
                /*
                 * AES-GMAC authentication is verified while processing the
                 * enccrd
                 */
                return (0);
        }

        if (hmac) {
                /* Inner hash: (K ^ IPAD) || data */
                InitFn(ctx);
                hmac_internal(ctx, res, UpdateFn, FinalizeFn, ses->hmac_key,
                    0x36, crp->crp_buf, crd->crd_skip, crd->crd_len,
                    crp->crp_flags);
                /* Outer hash: (K ^ OPAD) || inner hash */
                InitFn(ctx);
                hmac_internal(ctx, res, UpdateFn, FinalizeFn, ses->hmac_key,
                    0x5C, res, 0, hashlen, 0);
        } else {
                InitFn(ctx);
                crypto_apply(crp->crp_flags, crp->crp_buf, crd->crd_skip,
                    crd->crd_len, __DECONST(int (*)(void *, void *, u_int),
                    UpdateFn), ctx);
                FinalizeFn(res, ctx);
        }

        if (ses->mlen != 0 && ses->mlen < hashlen)
                hashlen = ses->mlen;

        crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject, hashlen,
            (void *)res);
        return (0);
}