/*-
 * Copyright (c) 2005-2008 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * Copyright (c) 2010 Konstantin Belousov <kib@FreeBSD.org>
 * Copyright (c) 2014 The FreeBSD Foundation
 * Copyright (c) 2017 Conrad Meyer <cem@FreeBSD.org>
 * All rights reserved.
 *
 * Portions of this software were developed by John-Mark Gurney
 * under sponsorship of the FreeBSD Foundation and
 * Rubicon Communications, LLC (Netgate).
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/libkern.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/systm.h>
#include <sys/uio.h>

#include <crypto/aesni/aesni.h>
#include <crypto/aesni/sha_sse.h>
#include <crypto/sha1.h>
#include <crypto/sha2/sha224.h>
#include <crypto/sha2/sha256.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/gmac.h>
#include <cryptodev_if.h>

#include <machine/md_var.h>
#include <machine/specialreg.h>
#if defined(__i386__)
#include <machine/npx.h>
#elif defined(__amd64__)
#include <machine/fpu.h>
#endif

static struct mtx_padalign *ctx_mtx;
static struct fpu_kern_ctx **ctx_fpu;

struct aesni_softc {
	int32_t cid;
	bool	has_aes;
	bool	has_sha;
};

#define ACQUIRE_CTX(i, ctx)					\
	do {							\
		(i) = PCPU_GET(cpuid);				\
		mtx_lock(&ctx_mtx[(i)]);			\
		(ctx) = ctx_fpu[(i)];				\
	} while (0)
#define RELEASE_CTX(i, ctx)					\
	do {							\
		mtx_unlock(&ctx_mtx[(i)]);			\
		(i) = -1;					\
		(ctx) = NULL;					\
	} while (0)

static int aesni_cipher_setup(struct aesni_session *ses,
    const struct crypto_session_params *csp);
static int aesni_cipher_process(struct aesni_session *ses, struct cryptop *crp);
static int aesni_cipher_crypt(struct aesni_session *ses, struct cryptop *crp,
    const struct crypto_session_params *csp);
static int aesni_cipher_mac(struct aesni_session *ses, struct cryptop *crp,
    const struct crypto_session_params *csp);

MALLOC_DEFINE(M_AESNI, "aesni_data", "AESNI Data");
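/*
 * A sketch of the per-CPU FPU context pattern defined above, as it is
 * used verbatim in aesni_cipher_setup() and aesni_cipher_process()
 * below.  Callers that are not already FPU kernel threads bracket any
 * use of SSE/AES-NI instructions like so:
 *
 *	ACQUIRE_CTX(i, ctx);
 *	fpu_kern_enter(curthread, ctx, FPU_KERN_NORMAL | FPU_KERN_KTHR);
 *	... SSE/AES-NI work ...
 *	fpu_kern_leave(curthread, ctx);
 *	RELEASE_CTX(i, ctx);
 *
 * The per-CPU mutex serializes use of each CPU's saved FPU state.
 */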
static void
aesni_identify(driver_t *drv, device_t parent)
{

	/* NB: order 10 is so we get attached after h/w devices */
	if (device_find_child(parent, "aesni", -1) == NULL &&
	    BUS_ADD_CHILD(parent, 10, "aesni", -1) == 0)
		panic("aesni: could not attach");
}

static void
detect_cpu_features(bool *has_aes, bool *has_sha)
{

	*has_aes = ((cpu_feature2 & CPUID2_AESNI) != 0 &&
	    (cpu_feature2 & CPUID2_SSE41) != 0);
	*has_sha = ((cpu_stdext_feature & CPUID_STDEXT_SHA) != 0 &&
	    (cpu_feature2 & CPUID2_SSSE3) != 0);
}

static int
aesni_probe(device_t dev)
{
	bool has_aes, has_sha;

	detect_cpu_features(&has_aes, &has_sha);
	if (!has_aes && !has_sha) {
		device_printf(dev, "No AES or SHA support.\n");
		return (EINVAL);
	} else if (has_aes && has_sha)
		device_set_desc(dev,
		    "AES-CBC,AES-CCM,AES-GCM,AES-ICM,AES-XTS,SHA1,SHA256");
	else if (has_aes)
		device_set_desc(dev,
		    "AES-CBC,AES-CCM,AES-GCM,AES-ICM,AES-XTS");
	else
		device_set_desc(dev, "SHA1,SHA256");

	return (0);
}

static void
aesni_cleanctx(void)
{
	int i;

	/* XXX - no way to return driverid */
	CPU_FOREACH(i) {
		if (ctx_fpu[i] != NULL) {
			mtx_destroy(&ctx_mtx[i]);
			fpu_kern_free_ctx(ctx_fpu[i]);
		}
		ctx_fpu[i] = NULL;
	}
	free(ctx_mtx, M_AESNI);
	ctx_mtx = NULL;
	free(ctx_fpu, M_AESNI);
	ctx_fpu = NULL;
}

static int
aesni_attach(device_t dev)
{
	struct aesni_softc *sc;
	int i;

	sc = device_get_softc(dev);

	sc->cid = crypto_get_driverid(dev, sizeof(struct aesni_session),
	    CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
	if (sc->cid < 0) {
		device_printf(dev, "Could not get crypto driver id.\n");
		return (ENOMEM);
	}

	ctx_mtx = malloc(sizeof *ctx_mtx * (mp_maxid + 1), M_AESNI,
	    M_WAITOK|M_ZERO);
	ctx_fpu = malloc(sizeof *ctx_fpu * (mp_maxid + 1), M_AESNI,
	    M_WAITOK|M_ZERO);

	CPU_FOREACH(i) {
		ctx_fpu[i] = fpu_kern_alloc_ctx(0);
		mtx_init(&ctx_mtx[i], "anifpumtx", NULL, MTX_DEF|MTX_NEW);
	}

	detect_cpu_features(&sc->has_aes, &sc->has_sha);
	return (0);
}

static int
aesni_detach(device_t dev)
{
	struct aesni_softc *sc;

	sc = device_get_softc(dev);

	crypto_unregister_all(sc->cid);

	aesni_cleanctx();

	return (0);
}

static bool
aesni_auth_supported(struct aesni_softc *sc,
    const struct crypto_session_params *csp)
{

	if (!sc->has_sha)
		return (false);

	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
		break;
	default:
		return (false);
	}

	return (true);
}

static bool
aesni_cipher_supported(struct aesni_softc *sc,
    const struct crypto_session_params *csp)
{

	if (!sc->has_aes)
		return (false);

	switch (csp->csp_cipher_alg) {
	case CRYPTO_AES_CBC:
	case CRYPTO_AES_ICM:
		if (csp->csp_ivlen != AES_BLOCK_LEN)
			return (false);
		return (sc->has_aes);
	case CRYPTO_AES_XTS:
		if (csp->csp_ivlen != AES_XTS_IV_LEN)
			return (false);
		return (sc->has_aes);
	default:
		return (false);
	}
}
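/*
 * Session parameter validation.  opencrypto calls probesession for each
 * registered driver; returning CRYPTODEV_PROBE_ACCEL_SOFTWARE ranks this
 * driver above the baseline software implementation but below dedicated
 * hardware crypto engines.
 */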
static int
aesni_probesession(device_t dev, const struct crypto_session_params *csp)
{
	struct aesni_softc *sc;

	sc = device_get_softc(dev);
	if (csp->csp_flags != 0)
		return (EINVAL);
	switch (csp->csp_mode) {
	case CSP_MODE_DIGEST:
		if (!aesni_auth_supported(sc, csp))
			return (EINVAL);
		break;
	case CSP_MODE_CIPHER:
		if (!aesni_cipher_supported(sc, csp))
			return (EINVAL);
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
			if (csp->csp_auth_mlen != 0 &&
			    csp->csp_auth_mlen != GMAC_DIGEST_LEN)
				return (EINVAL);
			if (csp->csp_ivlen != AES_GCM_IV_LEN ||
			    !sc->has_aes)
				return (EINVAL);
			break;
		case CRYPTO_AES_CCM_16:
			if (csp->csp_auth_mlen != 0 &&
			    csp->csp_auth_mlen != AES_CBC_MAC_HASH_LEN)
				return (EINVAL);
			if (csp->csp_ivlen != AES_CCM_IV_LEN ||
			    !sc->has_aes)
				return (EINVAL);
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_ETA:
		if (!aesni_auth_supported(sc, csp) ||
		    !aesni_cipher_supported(sc, csp))
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	return (CRYPTODEV_PROBE_ACCEL_SOFTWARE);
}

static int
aesni_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
	struct aesni_softc *sc;
	struct aesni_session *ses;
	int error;

	sc = device_get_softc(dev);

	ses = crypto_get_driver_session(cses);

	switch (csp->csp_mode) {
	case CSP_MODE_DIGEST:
	case CSP_MODE_CIPHER:
	case CSP_MODE_AEAD:
	case CSP_MODE_ETA:
		break;
	default:
		return (EINVAL);
	}
	error = aesni_cipher_setup(ses, csp);
	if (error != 0) {
		CRYPTDEB("setup failed");
		return (error);
	}

	return (0);
}

static int
aesni_process(device_t dev, struct cryptop *crp, int hint __unused)
{
	struct aesni_session *ses;
	int error;

	ses = crypto_get_driver_session(crp->crp_session);

	error = aesni_cipher_process(ses, crp);

	crp->crp_etype = error;
	crypto_done(crp);
	return (0);
}

static uint8_t *
aesni_cipher_alloc(struct cryptop *crp, int start, int length, bool *allocated)
{
	uint8_t *addr;

	addr = crypto_contiguous_subsegment(crp, start, length);
	if (addr != NULL) {
		*allocated = false;
		return (addr);
	}
	addr = malloc(length, M_AESNI, M_NOWAIT);
	if (addr != NULL) {
		*allocated = true;
		crypto_copydata(crp, start, length, addr);
	} else
		*allocated = false;
	return (addr);
}

static device_method_t aesni_methods[] = {
	DEVMETHOD(device_identify, aesni_identify),
	DEVMETHOD(device_probe, aesni_probe),
	DEVMETHOD(device_attach, aesni_attach),
	DEVMETHOD(device_detach, aesni_detach),

	DEVMETHOD(cryptodev_probesession, aesni_probesession),
	DEVMETHOD(cryptodev_newsession, aesni_newsession),
	DEVMETHOD(cryptodev_process, aesni_process),

	DEVMETHOD_END
};

static driver_t aesni_driver = {
	"aesni",
	aesni_methods,
	sizeof(struct aesni_softc),
};
static devclass_t aesni_devclass;

DRIVER_MODULE(aesni, nexus, aesni_driver, aesni_devclass, 0, 0);
MODULE_VERSION(aesni, 1);
MODULE_DEPEND(aesni, crypto, 1, 1, 1);
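/*
 * SHA glue.  The intel_sha*_update() routines hand whole 64-byte blocks
 * directly to the SHA SSE step functions and buffer any unaligned
 * remainder in the context.  They operate on the same context layouts
 * as the generic C implementations, so the stock init and finalize
 * routines can be reused unchanged.
 */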
static void
intel_sha1_update(void *vctx, const void *vdata, u_int datalen)
{
	struct sha1_ctxt *ctx = vctx;
	const char *data = vdata;
	size_t gaplen;
	size_t gapstart;
	size_t off;
	size_t copysiz;
	u_int blocks;

	off = 0;
	/* Do any aligned blocks without redundant copying. */
	if (datalen >= 64 && ctx->count % 64 == 0) {
		blocks = datalen / 64;
		ctx->c.b64[0] += blocks * 64 * 8;
		intel_sha1_step(ctx->h.b32, data + off, blocks);
		off += blocks * 64;
	}

	while (off < datalen) {
		gapstart = ctx->count % 64;
		gaplen = 64 - gapstart;

		copysiz = (gaplen < datalen - off) ? gaplen : datalen - off;
		bcopy(&data[off], &ctx->m.b8[gapstart], copysiz);
		ctx->count += copysiz;
		ctx->count %= 64;
		ctx->c.b64[0] += copysiz * 8;
		if (ctx->count % 64 == 0)
			intel_sha1_step(ctx->h.b32, (void *)ctx->m.b8, 1);
		off += copysiz;
	}
}

static void
SHA1_Init_fn(void *ctx)
{
	sha1_init(ctx);
}

static void
SHA1_Finalize_fn(void *digest, void *ctx)
{
	sha1_result(ctx, digest);
}

static void
intel_sha256_update(void *vctx, const void *vdata, u_int len)
{
	SHA256_CTX *ctx = vctx;
	uint64_t bitlen;
	uint32_t r;
	u_int blocks;
	const unsigned char *src = vdata;

	/* Number of bytes left in the buffer from previous updates */
	r = (ctx->count >> 3) & 0x3f;

	/* Convert the length into a number of bits */
	bitlen = len << 3;

	/* Update number of bits */
	ctx->count += bitlen;

	/* Handle the case where we don't need to perform any transforms */
	if (len < 64 - r) {
		memcpy(&ctx->buf[r], src, len);
		return;
	}

	/* Finish the current block */
	memcpy(&ctx->buf[r], src, 64 - r);
	intel_sha256_step(ctx->state, ctx->buf, 1);
	src += 64 - r;
	len -= 64 - r;

	/* Perform complete blocks */
	if (len >= 64) {
		blocks = len / 64;
		intel_sha256_step(ctx->state, src, blocks);
		src += blocks * 64;
		len -= blocks * 64;
	}

	/* Copy left over data into buffer */
	memcpy(ctx->buf, src, len);
}

static void
SHA224_Init_fn(void *ctx)
{
	SHA224_Init(ctx);
}

static void
SHA224_Finalize_fn(void *digest, void *ctx)
{
	SHA224_Final(digest, ctx);
}

static void
SHA256_Init_fn(void *ctx)
{
	SHA256_Init(ctx);
}

static void
SHA256_Finalize_fn(void *digest, void *ctx)
{
	SHA256_Final(digest, ctx);
}

static int
aesni_authprepare(struct aesni_session *ses, int klen)
{

	if (klen > SHA1_BLOCK_LEN)
		return (EINVAL);
	if ((ses->hmac && klen == 0) || (!ses->hmac && klen != 0))
		return (EINVAL);
	return (0);
}

static int
aesni_cipherprepare(const struct crypto_session_params *csp)
{

	switch (csp->csp_cipher_alg) {
	case CRYPTO_AES_ICM:
	case CRYPTO_AES_NIST_GCM_16:
	case CRYPTO_AES_CCM_16:
	case CRYPTO_AES_CBC:
		switch (csp->csp_cipher_klen * 8) {
		case 128:
		case 192:
		case 256:
			break;
		default:
			CRYPTDEB("invalid CBC/ICM/GCM key length");
			return (EINVAL);
		}
		break;
	case CRYPTO_AES_XTS:
		switch (csp->csp_cipher_klen * 8) {
		case 256:
		case 512:
			break;
		default:
			CRYPTDEB("invalid XTS key length");
			return (EINVAL);
		}
		break;
	default:
		return (EINVAL);
	}
	return (0);
}
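/*
 * Session setup: select the hash entry points, validate key lengths via
 * the prepare helpers above, and expand the AES key schedules.  Key
 * expansion (aesni_cipher_setup_common()) executes AES-NI instructions
 * itself, so it too must run between fpu_kern_enter() and
 * fpu_kern_leave() unless the caller is already an FPU kernel thread.
 */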
static int
aesni_cipher_setup(struct aesni_session *ses,
    const struct crypto_session_params *csp)
{
	struct fpu_kern_ctx *ctx;
	int kt, ctxidx, error;

	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
		ses->hmac = true;
		/* FALLTHROUGH */
	case CRYPTO_SHA1:
		ses->hash_len = SHA1_HASH_LEN;
		ses->hash_init = SHA1_Init_fn;
		ses->hash_update = intel_sha1_update;
		ses->hash_finalize = SHA1_Finalize_fn;
		break;
	case CRYPTO_SHA2_224_HMAC:
		ses->hmac = true;
		/* FALLTHROUGH */
	case CRYPTO_SHA2_224:
		ses->hash_len = SHA2_224_HASH_LEN;
		ses->hash_init = SHA224_Init_fn;
		ses->hash_update = intel_sha256_update;
		ses->hash_finalize = SHA224_Finalize_fn;
		break;
	case CRYPTO_SHA2_256_HMAC:
		ses->hmac = true;
		/* FALLTHROUGH */
	case CRYPTO_SHA2_256:
		ses->hash_len = SHA2_256_HASH_LEN;
		ses->hash_init = SHA256_Init_fn;
		ses->hash_update = intel_sha256_update;
		ses->hash_finalize = SHA256_Finalize_fn;
		break;
	}

	if (ses->hash_len != 0) {
		if (csp->csp_auth_mlen == 0)
			ses->mlen = ses->hash_len;
		else
			ses->mlen = csp->csp_auth_mlen;

		error = aesni_authprepare(ses, csp->csp_auth_klen);
		if (error != 0)
			return (error);
	}

	error = aesni_cipherprepare(csp);
	if (error != 0)
		return (error);

	kt = is_fpu_kern_thread(0) || (csp->csp_cipher_alg == 0);
	if (!kt) {
		ACQUIRE_CTX(ctxidx, ctx);
		fpu_kern_enter(curthread, ctx,
		    FPU_KERN_NORMAL | FPU_KERN_KTHR);
	}

	error = 0;
	if (csp->csp_cipher_key != NULL)
		aesni_cipher_setup_common(ses, csp, csp->csp_cipher_key,
		    csp->csp_cipher_klen);

	if (!kt) {
		fpu_kern_leave(curthread, ctx);
		RELEASE_CTX(ctxidx, ctx);
	}
	return (error);
}

static int
aesni_cipher_process(struct aesni_session *ses, struct cryptop *crp)
{
	const struct crypto_session_params *csp;
	struct fpu_kern_ctx *ctx;
	int error, ctxidx;
	bool kt;

	csp = crypto_get_params(crp->crp_session);
	switch (csp->csp_cipher_alg) {
	case CRYPTO_AES_ICM:
	case CRYPTO_AES_NIST_GCM_16:
	case CRYPTO_AES_CCM_16:
		if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
			return (EINVAL);
		break;
	case CRYPTO_AES_CBC:
	case CRYPTO_AES_XTS:
		/* CBC & XTS can only handle full blocks for now */
		if ((crp->crp_payload_length % AES_BLOCK_LEN) != 0)
			return (EINVAL);
		break;
	}

	ctx = NULL;
	ctxidx = 0;
	error = 0;
	kt = is_fpu_kern_thread(0);
	if (!kt) {
		ACQUIRE_CTX(ctxidx, ctx);
		fpu_kern_enter(curthread, ctx,
		    FPU_KERN_NORMAL | FPU_KERN_KTHR);
	}

	/* Do work */
	if (csp->csp_mode == CSP_MODE_ETA) {
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			error = aesni_cipher_crypt(ses, crp, csp);
			if (error == 0)
				error = aesni_cipher_mac(ses, crp, csp);
		} else {
			error = aesni_cipher_mac(ses, crp, csp);
			if (error == 0)
				error = aesni_cipher_crypt(ses, crp, csp);
		}
	} else if (csp->csp_mode == CSP_MODE_DIGEST)
		error = aesni_cipher_mac(ses, crp, csp);
	else
		error = aesni_cipher_crypt(ses, crp, csp);

	if (!kt) {
		fpu_kern_leave(curthread, ctx);
		RELEASE_CTX(ctxidx, ctx);
	}
	return (error);
}
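/*
 * Encrypt/decrypt path.  The assembly routines require contiguous
 * buffers, so aesni_cipher_alloc() is used for the payload (and the AAD
 * for GCM/CCM): it returns a pointer into the request when the region
 * is already contiguous, and otherwise copies into a temporary buffer
 * that is zeroed and freed at "out".
 */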
static int
aesni_cipher_crypt(struct aesni_session *ses, struct cryptop *crp,
    const struct crypto_session_params *csp)
{
	uint8_t iv[AES_BLOCK_LEN], tag[GMAC_DIGEST_LEN], *buf, *authbuf;
	int error;
	bool encflag, allocated, authallocated;

	buf = aesni_cipher_alloc(crp, crp->crp_payload_start,
	    crp->crp_payload_length, &allocated);
	if (buf == NULL)
		return (ENOMEM);

	authallocated = false;
	authbuf = NULL;
	if (csp->csp_cipher_alg == CRYPTO_AES_NIST_GCM_16 ||
	    csp->csp_cipher_alg == CRYPTO_AES_CCM_16) {
		authbuf = aesni_cipher_alloc(crp, crp->crp_aad_start,
		    crp->crp_aad_length, &authallocated);
		if (authbuf == NULL) {
			error = ENOMEM;
			goto out;
		}
	}

	error = 0;
	encflag = CRYPTO_OP_IS_ENCRYPT(crp->crp_op);
	if (crp->crp_cipher_key != NULL)
		aesni_cipher_setup_common(ses, csp, crp->crp_cipher_key,
		    csp->csp_cipher_klen);

	crypto_read_iv(crp, iv);

	switch (csp->csp_cipher_alg) {
	case CRYPTO_AES_CBC:
		if (encflag)
			aesni_encrypt_cbc(ses->rounds, ses->enc_schedule,
			    crp->crp_payload_length, buf, buf, iv);
		else
			aesni_decrypt_cbc(ses->rounds, ses->dec_schedule,
			    crp->crp_payload_length, buf, iv);
		break;
	case CRYPTO_AES_ICM:
		/* encryption & decryption are the same */
		aesni_encrypt_icm(ses->rounds, ses->enc_schedule,
		    crp->crp_payload_length, buf, buf, iv);
		break;
	case CRYPTO_AES_XTS:
		if (encflag)
			aesni_encrypt_xts(ses->rounds, ses->enc_schedule,
			    ses->xts_schedule, crp->crp_payload_length, buf,
			    buf, iv);
		else
			aesni_decrypt_xts(ses->rounds, ses->dec_schedule,
			    ses->xts_schedule, crp->crp_payload_length, buf,
			    buf, iv);
		break;
	case CRYPTO_AES_NIST_GCM_16:
		if (encflag) {
			memset(tag, 0, sizeof(tag));
			AES_GCM_encrypt(buf, buf, authbuf, iv, tag,
			    crp->crp_payload_length, crp->crp_aad_length,
			    csp->csp_ivlen, ses->enc_schedule, ses->rounds);
			crypto_copyback(crp, crp->crp_digest_start, sizeof(tag),
			    tag);
		} else {
			crypto_copydata(crp, crp->crp_digest_start, sizeof(tag),
			    tag);
			if (!AES_GCM_decrypt(buf, buf, authbuf, iv, tag,
			    crp->crp_payload_length, crp->crp_aad_length,
			    csp->csp_ivlen, ses->enc_schedule, ses->rounds))
				error = EBADMSG;
		}
		break;
	case CRYPTO_AES_CCM_16:
		if (encflag) {
			memset(tag, 0, sizeof(tag));
			AES_CCM_encrypt(buf, buf, authbuf, iv, tag,
			    crp->crp_payload_length, crp->crp_aad_length,
			    csp->csp_ivlen, ses->enc_schedule, ses->rounds);
			crypto_copyback(crp, crp->crp_digest_start, sizeof(tag),
			    tag);
		} else {
			crypto_copydata(crp, crp->crp_digest_start, sizeof(tag),
			    tag);
			if (!AES_CCM_decrypt(buf, buf, authbuf, iv, tag,
			    crp->crp_payload_length, crp->crp_aad_length,
			    csp->csp_ivlen, ses->enc_schedule, ses->rounds))
				error = EBADMSG;
		}
		break;
	}
	if (allocated && error == 0)
		crypto_copyback(crp, crp->crp_payload_start,
		    crp->crp_payload_length, buf);

out:
	if (allocated) {
		explicit_bzero(buf, crp->crp_payload_length);
		free(buf, M_AESNI);
	}
	if (authallocated) {
		explicit_bzero(authbuf, crp->crp_aad_length);
		free(authbuf, M_AESNI);
	}
	return (error);
}
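/*
 * Digest path.  For HMAC this is the standard two-pass construction of
 * RFC 2104, H((K ^ opad) || H((K ^ ipad) || message)), with the key
 * zero-padded to the 64-byte block size shared by SHA-1 and SHA-256.
 * Digest verification uses timingsafe_bcmp() so that the comparison
 * does not leak how many leading tag bytes matched.
 */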
static int
aesni_cipher_mac(struct aesni_session *ses, struct cryptop *crp,
    const struct crypto_session_params *csp)
{
	union {
		struct SHA256Context sha2 __aligned(16);
		struct sha1_ctxt sha1 __aligned(16);
	} sctx;
	uint8_t hmac_key[SHA1_BLOCK_LEN] __aligned(16);
	uint32_t res[SHA2_256_HASH_LEN / sizeof(uint32_t)];
	uint32_t res2[SHA2_256_HASH_LEN / sizeof(uint32_t)];
	const uint8_t *key;
	int i, keylen;

	if (crp->crp_auth_key != NULL)
		key = crp->crp_auth_key;
	else
		key = csp->csp_auth_key;
	keylen = csp->csp_auth_klen;

	if (ses->hmac) {
		/* Inner hash: (K ^ IPAD) || data */
		ses->hash_init(&sctx);
		for (i = 0; i < keylen; i++)
			hmac_key[i] = key[i] ^ HMAC_IPAD_VAL;
		for (i = keylen; i < sizeof(hmac_key); i++)
			hmac_key[i] = 0 ^ HMAC_IPAD_VAL;
		ses->hash_update(&sctx, hmac_key, sizeof(hmac_key));

		crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length,
		    __DECONST(int (*)(void *, void *, u_int), ses->hash_update),
		    &sctx);
		crypto_apply(crp, crp->crp_payload_start,
		    crp->crp_payload_length,
		    __DECONST(int (*)(void *, void *, u_int), ses->hash_update),
		    &sctx);
		ses->hash_finalize(res, &sctx);

		/* Outer hash: (K ^ OPAD) || inner hash */
		ses->hash_init(&sctx);
		for (i = 0; i < keylen; i++)
			hmac_key[i] = key[i] ^ HMAC_OPAD_VAL;
		for (i = keylen; i < sizeof(hmac_key); i++)
			hmac_key[i] = 0 ^ HMAC_OPAD_VAL;
		ses->hash_update(&sctx, hmac_key, sizeof(hmac_key));
		ses->hash_update(&sctx, res, ses->hash_len);
		ses->hash_finalize(res, &sctx);
	} else {
		ses->hash_init(&sctx);

		crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length,
		    __DECONST(int (*)(void *, void *, u_int), ses->hash_update),
		    &sctx);
		crypto_apply(crp, crp->crp_payload_start,
		    crp->crp_payload_length,
		    __DECONST(int (*)(void *, void *, u_int), ses->hash_update),
		    &sctx);

		ses->hash_finalize(res, &sctx);
	}

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		crypto_copydata(crp, crp->crp_digest_start, ses->mlen, res2);
		if (timingsafe_bcmp(res, res2, ses->mlen) != 0)
			return (EBADMSG);
	} else
		crypto_copyback(crp, crp->crp_digest_start, ses->mlen, res);
	return (0);
}