/*-
 * Copyright (c) 2005-2008 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * Copyright (c) 2010 Konstantin Belousov <kib@FreeBSD.org>
 * Copyright (c) 2014 The FreeBSD Foundation
 * Copyright (c) 2017 Conrad Meyer <cem@FreeBSD.org>
 * All rights reserved.
 *
 * Portions of this software were developed by John-Mark Gurney
 * under sponsorship of the FreeBSD Foundation and
 * Rubicon Communications, LLC (Netgate).
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/libkern.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/systm.h>
#include <sys/uio.h>

#include <crypto/aesni/aesni.h>
#include <crypto/aesni/sha_sse.h>
#include <crypto/sha1.h>
#include <crypto/sha2/sha224.h>
#include <crypto/sha2/sha256.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/gmac.h>
#include <cryptodev_if.h>

#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <machine/fpu.h>

static struct mtx_padalign *ctx_mtx;
static struct fpu_kern_ctx **ctx_fpu;

struct aesni_softc {
	int32_t	cid;
	bool	has_aes;
	bool	has_sha;
};
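
/*
 * The SSE/AES-NI code may only run while the kernel owns the FPU.  One
 * fpu_kern_ctx is preallocated for every CPU and guarded by a per-CPU
 * mutex: ACQUIRE_CTX() hands out the context of the CPU the thread
 * happens to be running on, and holding that mutex keeps the context
 * single-user even if the thread migrates before RELEASE_CTX().
 */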
#define ACQUIRE_CTX(i, ctx)					\
	do {							\
		(i) = PCPU_GET(cpuid);				\
		mtx_lock(&ctx_mtx[(i)]);			\
		(ctx) = ctx_fpu[(i)];				\
	} while (0)
#define RELEASE_CTX(i, ctx)					\
	do {							\
		mtx_unlock(&ctx_mtx[(i)]);			\
		(i) = -1;					\
		(ctx) = NULL;					\
	} while (0)

static int aesni_cipher_setup(struct aesni_session *ses,
    const struct crypto_session_params *csp);
static int aesni_cipher_process(struct aesni_session *ses, struct cryptop *crp);
static int aesni_cipher_crypt(struct aesni_session *ses, struct cryptop *crp,
    const struct crypto_session_params *csp);
static int aesni_cipher_mac(struct aesni_session *ses, struct cryptop *crp,
    const struct crypto_session_params *csp);

MALLOC_DEFINE(M_AESNI, "aesni_data", "AESNI Data");

static void
aesni_identify(driver_t *drv, device_t parent)
{

	/* NB: order 10 is so we get attached after h/w devices */
	if (device_find_child(parent, "aesni", -1) == NULL &&
	    BUS_ADD_CHILD(parent, 10, "aesni", -1) == 0)
		panic("aesni: could not attach");
}

static void
detect_cpu_features(bool *has_aes, bool *has_sha)
{

	*has_aes = ((cpu_feature2 & CPUID2_AESNI) != 0 &&
	    (cpu_feature2 & CPUID2_SSE41) != 0);
	*has_sha = ((cpu_stdext_feature & CPUID_STDEXT_SHA) != 0 &&
	    (cpu_feature2 & CPUID2_SSSE3) != 0);
}

static int
aesni_probe(device_t dev)
{
	bool has_aes, has_sha;

	detect_cpu_features(&has_aes, &has_sha);
	if (!has_aes && !has_sha) {
		device_printf(dev, "No AES or SHA support.\n");
		return (EINVAL);
	} else if (has_aes && has_sha)
		device_set_desc(dev,
		    "AES-CBC,AES-CCM,AES-GCM,AES-ICM,AES-XTS,SHA1,SHA256");
	else if (has_aes)
		device_set_desc(dev,
		    "AES-CBC,AES-CCM,AES-GCM,AES-ICM,AES-XTS");
	else
		device_set_desc(dev, "SHA1,SHA256");

	return (0);
}

static void
aesni_cleanctx(void)
{
	int i;

	/* XXX - no way to return driverid */
	CPU_FOREACH(i) {
		if (ctx_fpu[i] != NULL) {
			mtx_destroy(&ctx_mtx[i]);
			fpu_kern_free_ctx(ctx_fpu[i]);
		}
		ctx_fpu[i] = NULL;
	}
	free(ctx_mtx, M_AESNI);
	ctx_mtx = NULL;
	free(ctx_fpu, M_AESNI);
	ctx_fpu = NULL;
}

static int
aesni_attach(device_t dev)
{
	struct aesni_softc *sc;
	int i;

	sc = device_get_softc(dev);

	sc->cid = crypto_get_driverid(dev, sizeof(struct aesni_session),
	    CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC |
	    CRYPTOCAP_F_ACCEL_SOFTWARE);
	if (sc->cid < 0) {
		device_printf(dev, "Could not get crypto driver id.\n");
		return (ENOMEM);
	}

	ctx_mtx = malloc(sizeof *ctx_mtx * (mp_maxid + 1), M_AESNI,
	    M_WAITOK|M_ZERO);
	ctx_fpu = malloc(sizeof *ctx_fpu * (mp_maxid + 1), M_AESNI,
	    M_WAITOK|M_ZERO);

	CPU_FOREACH(i) {
#ifdef __amd64__
		ctx_fpu[i] = fpu_kern_alloc_ctx_domain(
		    pcpu_find(i)->pc_domain, FPU_KERN_NORMAL);
#else
		ctx_fpu[i] = fpu_kern_alloc_ctx(FPU_KERN_NORMAL);
#endif
		mtx_init(&ctx_mtx[i], "anifpumtx", NULL, MTX_DEF|MTX_NEW);
	}

	detect_cpu_features(&sc->has_aes, &sc->has_sha);
	return (0);
}

static int
aesni_detach(device_t dev)
{
	struct aesni_softc *sc;

	sc = device_get_softc(dev);

	crypto_unregister_all(sc->cid);

	aesni_cleanctx();

	return (0);
}

static bool
aesni_auth_supported(struct aesni_softc *sc,
    const struct crypto_session_params *csp)
{

	if (!sc->has_sha)
		return (false);

	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
		break;
	default:
		return (false);
	}

	return (true);
}

static bool
aesni_cipher_supported(struct aesni_softc *sc,
    const struct crypto_session_params *csp)
{

	if (!sc->has_aes)
		return (false);

	switch (csp->csp_cipher_alg) {
	case CRYPTO_AES_CBC:
	case CRYPTO_AES_ICM:
		switch (csp->csp_cipher_klen * 8) {
		case 128:
		case 192:
		case 256:
			break;
		default:
			CRYPTDEB("invalid CBC/ICM key length");
			return (false);
		}
		if (csp->csp_ivlen != AES_BLOCK_LEN)
			return (false);
		break;
	case CRYPTO_AES_XTS:
		switch (csp->csp_cipher_klen * 8) {
		case 256:
		case 512:
			break;
		default:
			CRYPTDEB("invalid XTS key length");
			return (false);
		}
		if (csp->csp_ivlen != AES_XTS_IV_LEN)
			return (false);
		break;
	default:
		return (false);
	}

	return (true);
}
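
/*
 * Session flags this driver knows how to honor; aesni_probesession()
 * declines anything else.
 *
 * A sketch of how a consumer might describe an AES-128-CBC session that
 * can land here (hypothetical key buffer "key"; fields per
 * crypto_session_params):
 *
 *	struct crypto_session_params csp = {
 *		.csp_mode = CSP_MODE_CIPHER,
 *		.csp_cipher_alg = CRYPTO_AES_CBC,
 *		.csp_cipher_klen = 16,
 *		.csp_cipher_key = key,
 *		.csp_ivlen = AES_BLOCK_LEN,
 *	};
 *	error = crypto_newsession(&cses, &csp, CRYPTOCAP_F_SOFTWARE);
 */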
#define SUPPORTED_SES (CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD | CSP_F_ESN)

static int
aesni_probesession(device_t dev, const struct crypto_session_params *csp)
{
	struct aesni_softc *sc;

	sc = device_get_softc(dev);
	if ((csp->csp_flags & ~(SUPPORTED_SES)) != 0)
		return (EINVAL);
	switch (csp->csp_mode) {
	case CSP_MODE_DIGEST:
		if (!aesni_auth_supported(sc, csp))
			return (EINVAL);
		break;
	case CSP_MODE_CIPHER:
		if (!aesni_cipher_supported(sc, csp))
			return (EINVAL);
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
			switch (csp->csp_cipher_klen * 8) {
			case 128:
			case 192:
			case 256:
				break;
			default:
				CRYPTDEB("invalid GCM key length");
				return (EINVAL);
			}
			if (csp->csp_auth_mlen != 0 &&
			    csp->csp_auth_mlen != GMAC_DIGEST_LEN)
				return (EINVAL);
			if (csp->csp_ivlen != AES_GCM_IV_LEN ||
			    !sc->has_aes)
				return (EINVAL);
			break;
		case CRYPTO_AES_CCM_16:
			switch (csp->csp_cipher_klen * 8) {
			case 128:
			case 192:
			case 256:
				break;
			default:
				CRYPTDEB("invalid CCM key length");
				return (EINVAL);
			}
			if (csp->csp_auth_mlen != 0 &&
			    csp->csp_auth_mlen != AES_CBC_MAC_HASH_LEN)
				return (EINVAL);
			if (csp->csp_ivlen != AES_CCM_IV_LEN ||
			    !sc->has_aes)
				return (EINVAL);
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_ETA:
		if (!aesni_auth_supported(sc, csp) ||
		    !aesni_cipher_supported(sc, csp))
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	return (CRYPTODEV_PROBE_ACCEL_SOFTWARE);
}

static int
aesni_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
	struct aesni_softc *sc;
	struct aesni_session *ses;
	int error;

	sc = device_get_softc(dev);

	ses = crypto_get_driver_session(cses);

	switch (csp->csp_mode) {
	case CSP_MODE_DIGEST:
	case CSP_MODE_CIPHER:
	case CSP_MODE_AEAD:
	case CSP_MODE_ETA:
		break;
	default:
		return (EINVAL);
	}
	error = aesni_cipher_setup(ses, csp);
	if (error != 0) {
		CRYPTDEB("setup failed");
		return (error);
	}

	return (0);
}
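
/*
 * Completion is synchronous: any error is reported through crp_etype
 * before crypto_done(), and the method itself returns 0 to indicate
 * that the request was consumed.
 */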
static int
aesni_process(device_t dev, struct cryptop *crp, int hint __unused)
{
	struct aesni_session *ses;
	int error;

	ses = crypto_get_driver_session(crp->crp_session);

	error = aesni_cipher_process(ses, crp);

	crp->crp_etype = error;
	crypto_done(crp);
	return (0);
}

static uint8_t *
aesni_cipher_alloc(struct cryptop *crp, int start, int length, bool *allocated)
{
	uint8_t *addr;

	addr = crypto_contiguous_subsegment(crp, start, length);
	if (addr != NULL) {
		*allocated = false;
		return (addr);
	}
	addr = malloc(length, M_AESNI, M_NOWAIT);
	if (addr != NULL) {
		*allocated = true;
		crypto_copydata(crp, start, length, addr);
	} else
		*allocated = false;
	return (addr);
}

static device_method_t aesni_methods[] = {
	DEVMETHOD(device_identify, aesni_identify),
	DEVMETHOD(device_probe, aesni_probe),
	DEVMETHOD(device_attach, aesni_attach),
	DEVMETHOD(device_detach, aesni_detach),

	DEVMETHOD(cryptodev_probesession, aesni_probesession),
	DEVMETHOD(cryptodev_newsession, aesni_newsession),
	DEVMETHOD(cryptodev_process, aesni_process),

	DEVMETHOD_END
};

static driver_t aesni_driver = {
	"aesni",
	aesni_methods,
	sizeof(struct aesni_softc),
};
static devclass_t aesni_devclass;

DRIVER_MODULE(aesni, nexus, aesni_driver, aesni_devclass, 0, 0);
MODULE_VERSION(aesni, 1);
MODULE_DEPEND(aesni, crypto, 1, 1, 1);
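
/*
 * Incremental SHA-1 on top of the SSE block function.  In sha1_ctxt,
 * "count" holds the number of bytes buffered in m.b8 (kept below 64),
 * while c.b64[0] accumulates the total message length in bits, which
 * the finalization step folds into the padding block.
 */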
static int
intel_sha1_update(void *vctx, const void *vdata, u_int datalen)
{
	struct sha1_ctxt *ctx = vctx;
	const char *data = vdata;
	size_t gaplen;
	size_t gapstart;
	size_t off;
	size_t copysiz;
	u_int blocks;

	off = 0;
	/* Do any aligned blocks without redundant copying. */
	if (datalen >= 64 && ctx->count % 64 == 0) {
		blocks = datalen / 64;
		ctx->c.b64[0] += blocks * 64 * 8;
		intel_sha1_step(ctx->h.b32, data + off, blocks);
		off += blocks * 64;
	}

	while (off < datalen) {
		gapstart = ctx->count % 64;
		gaplen = 64 - gapstart;

		copysiz = (gaplen < datalen - off) ? gaplen : datalen - off;
		bcopy(&data[off], &ctx->m.b8[gapstart], copysiz);
		ctx->count += copysiz;
		ctx->count %= 64;
		ctx->c.b64[0] += copysiz * 8;
		if (ctx->count % 64 == 0)
			intel_sha1_step(ctx->h.b32, (void *)ctx->m.b8, 1);
		off += copysiz;
	}

	return (0);
}

static void
SHA1_Init_fn(void *ctx)
{
	sha1_init(ctx);
}

static void
SHA1_Finalize_fn(void *digest, void *ctx)
{
	sha1_result(ctx, digest);
}

static int
intel_sha256_update(void *vctx, const void *vdata, u_int len)
{
	SHA256_CTX *ctx = vctx;
	uint64_t bitlen;
	uint32_t r;
	u_int blocks;
	const unsigned char *src = vdata;

	/* Number of bytes left in the buffer from previous updates */
	r = (ctx->count >> 3) & 0x3f;

	/* Convert the length into a number of bits; widen before shifting. */
	bitlen = (uint64_t)len << 3;

	/* Update number of bits */
	ctx->count += bitlen;

	/* Handle the case where we don't need to perform any transforms */
	if (len < 64 - r) {
		memcpy(&ctx->buf[r], src, len);
		return (0);
	}

	/* Finish the current block */
	memcpy(&ctx->buf[r], src, 64 - r);
	intel_sha256_step(ctx->state, ctx->buf, 1);
	src += 64 - r;
	len -= 64 - r;

	/* Perform complete blocks */
	if (len >= 64) {
		blocks = len / 64;
		intel_sha256_step(ctx->state, src, blocks);
		src += blocks * 64;
		len -= blocks * 64;
	}

	/* Copy left over data into buffer */
	memcpy(ctx->buf, src, len);

	return (0);
}

static void
SHA224_Init_fn(void *ctx)
{
	SHA224_Init(ctx);
}

static void
SHA224_Finalize_fn(void *digest, void *ctx)
{
	SHA224_Final(digest, ctx);
}

static void
SHA256_Init_fn(void *ctx)
{
	SHA256_Init(ctx);
}

static void
SHA256_Finalize_fn(void *digest, void *ctx)
{
	SHA256_Final(digest, ctx);
}

static int
aesni_authprepare(struct aesni_session *ses, int klen)
{

	if (klen > SHA1_BLOCK_LEN)
		return (EINVAL);
	if ((ses->hmac && klen == 0) || (!ses->hmac && klen != 0))
		return (EINVAL);
	return (0);
}
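
/*
 * Expanding the AES key schedules executes AES-NI/SSE instructions, so
 * unless the caller is already a designated FPU kernel thread (or the
 * session carries no cipher at all), a per-CPU FPU context has to be
 * entered around aesni_cipher_setup_common().
 */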
static int
aesni_cipher_setup(struct aesni_session *ses,
    const struct crypto_session_params *csp)
{
	struct fpu_kern_ctx *ctx;
	uint8_t *schedbase;
	int kt, ctxidx, error;

	schedbase = (uint8_t *)roundup2((uintptr_t)ses->schedules,
	    AES_SCHED_ALIGN);
	ses->enc_schedule = schedbase;
	ses->dec_schedule = schedbase + AES_SCHED_LEN;
	ses->xts_schedule = schedbase + AES_SCHED_LEN * 2;

	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
		ses->hmac = true;
		/* FALLTHROUGH */
	case CRYPTO_SHA1:
		ses->hash_len = SHA1_HASH_LEN;
		ses->hash_init = SHA1_Init_fn;
		ses->hash_update = intel_sha1_update;
		ses->hash_finalize = SHA1_Finalize_fn;
		break;
	case CRYPTO_SHA2_224_HMAC:
		ses->hmac = true;
		/* FALLTHROUGH */
	case CRYPTO_SHA2_224:
		ses->hash_len = SHA2_224_HASH_LEN;
		ses->hash_init = SHA224_Init_fn;
		ses->hash_update = intel_sha256_update;
		ses->hash_finalize = SHA224_Finalize_fn;
		break;
	case CRYPTO_SHA2_256_HMAC:
		ses->hmac = true;
		/* FALLTHROUGH */
	case CRYPTO_SHA2_256:
		ses->hash_len = SHA2_256_HASH_LEN;
		ses->hash_init = SHA256_Init_fn;
		ses->hash_update = intel_sha256_update;
		ses->hash_finalize = SHA256_Finalize_fn;
		break;
	}

	if (ses->hash_len != 0) {
		if (csp->csp_auth_mlen == 0)
			ses->mlen = ses->hash_len;
		else
			ses->mlen = csp->csp_auth_mlen;

		error = aesni_authprepare(ses, csp->csp_auth_klen);
		if (error != 0)
			return (error);
	}

	kt = is_fpu_kern_thread(0) || (csp->csp_cipher_alg == 0);
	if (!kt) {
		ACQUIRE_CTX(ctxidx, ctx);
		fpu_kern_enter(curthread, ctx,
		    FPU_KERN_NORMAL | FPU_KERN_KTHR);
	}

	error = 0;
	if (csp->csp_cipher_key != NULL)
		aesni_cipher_setup_common(ses, csp, csp->csp_cipher_key,
		    csp->csp_cipher_klen);

	if (!kt) {
		fpu_kern_leave(curthread, ctx);
		RELEASE_CTX(ctxidx, ctx);
	}
	return (error);
}

static int
aesni_cipher_process(struct aesni_session *ses, struct cryptop *crp)
{
	const struct crypto_session_params *csp;
	struct fpu_kern_ctx *ctx;
	int error, ctxidx;
	bool kt;

	csp = crypto_get_params(crp->crp_session);
	switch (csp->csp_cipher_alg) {
	case CRYPTO_AES_ICM:
	case CRYPTO_AES_NIST_GCM_16:
	case CRYPTO_AES_CCM_16:
		if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
			return (EINVAL);
		break;
	case CRYPTO_AES_CBC:
	case CRYPTO_AES_XTS:
		/* CBC & XTS can only handle full blocks for now */
		if ((crp->crp_payload_length % AES_BLOCK_LEN) != 0)
			return (EINVAL);
		break;
	}

	ctx = NULL;
	ctxidx = 0;
	error = 0;
	kt = is_fpu_kern_thread(0);
	if (!kt) {
		ACQUIRE_CTX(ctxidx, ctx);
		fpu_kern_enter(curthread, ctx,
		    FPU_KERN_NORMAL | FPU_KERN_KTHR);
	}

	/* Do work */
	if (csp->csp_mode == CSP_MODE_ETA) {
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			error = aesni_cipher_crypt(ses, crp, csp);
			if (error == 0)
				error = aesni_cipher_mac(ses, crp, csp);
		} else {
			error = aesni_cipher_mac(ses, crp, csp);
			if (error == 0)
				error = aesni_cipher_crypt(ses, crp, csp);
		}
	} else if (csp->csp_mode == CSP_MODE_DIGEST)
		error = aesni_cipher_mac(ses, crp, csp);
	else
		error = aesni_cipher_crypt(ses, crp, csp);

	if (!kt) {
		fpu_kern_leave(curthread, ctx);
		RELEASE_CTX(ctxidx, ctx);
	}
	return (error);
}
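
/*
 * The assembly primitives want virtually contiguous buffers.  Payload
 * and AAD are used in place when the request already supplies a
 * contiguous segment and bounced through temporary M_AESNI buffers
 * otherwise; for separate-output requests with no contiguous output
 * segment, the result is copied back with crypto_copyback().
 */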
static int
aesni_cipher_crypt(struct aesni_session *ses, struct cryptop *crp,
    const struct crypto_session_params *csp)
{
	uint8_t iv[AES_BLOCK_LEN], tag[GMAC_DIGEST_LEN];
	uint8_t *authbuf, *buf, *outbuf;
	int error;
	bool encflag, allocated, authallocated, outallocated, outcopy;

	buf = aesni_cipher_alloc(crp, crp->crp_payload_start,
	    crp->crp_payload_length, &allocated);
	if (buf == NULL)
		return (ENOMEM);

	outallocated = false;
	authallocated = false;
	authbuf = NULL;
	if (csp->csp_cipher_alg == CRYPTO_AES_NIST_GCM_16 ||
	    csp->csp_cipher_alg == CRYPTO_AES_CCM_16) {
		if (crp->crp_aad != NULL)
			authbuf = crp->crp_aad;
		else
			authbuf = aesni_cipher_alloc(crp, crp->crp_aad_start,
			    crp->crp_aad_length, &authallocated);
		if (authbuf == NULL) {
			error = ENOMEM;
			goto out;
		}
	}

	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		outbuf = crypto_buffer_contiguous_subsegment(&crp->crp_obuf,
		    crp->crp_payload_output_start, crp->crp_payload_length);
		if (outbuf == NULL) {
			outcopy = true;
			if (allocated)
				outbuf = buf;
			else {
				outbuf = malloc(crp->crp_payload_length,
				    M_AESNI, M_NOWAIT);
				if (outbuf == NULL) {
					error = ENOMEM;
					goto out;
				}
				outallocated = true;
			}
		} else
			outcopy = false;
	} else {
		outbuf = buf;
		outcopy = allocated;
	}

	error = 0;
	encflag = CRYPTO_OP_IS_ENCRYPT(crp->crp_op);
	if (crp->crp_cipher_key != NULL)
		aesni_cipher_setup_common(ses, csp, crp->crp_cipher_key,
		    csp->csp_cipher_klen);

	crypto_read_iv(crp, iv);

	switch (csp->csp_cipher_alg) {
	case CRYPTO_AES_CBC:
		if (encflag)
			aesni_encrypt_cbc(ses->rounds, ses->enc_schedule,
			    crp->crp_payload_length, buf, outbuf, iv);
		else {
			if (buf != outbuf)
				memcpy(outbuf, buf, crp->crp_payload_length);
			aesni_decrypt_cbc(ses->rounds, ses->dec_schedule,
			    crp->crp_payload_length, outbuf, iv);
		}
		break;
	case CRYPTO_AES_ICM:
		/* encryption & decryption are the same */
		aesni_encrypt_icm(ses->rounds, ses->enc_schedule,
		    crp->crp_payload_length, buf, outbuf, iv);
		break;
	case CRYPTO_AES_XTS:
		if (encflag)
			aesni_encrypt_xts(ses->rounds, ses->enc_schedule,
			    ses->xts_schedule, crp->crp_payload_length, buf,
			    outbuf, iv);
		else
			aesni_decrypt_xts(ses->rounds, ses->dec_schedule,
			    ses->xts_schedule, crp->crp_payload_length, buf,
			    outbuf, iv);
		break;
	case CRYPTO_AES_NIST_GCM_16:
		if (encflag) {
			memset(tag, 0, sizeof(tag));
			AES_GCM_encrypt(buf, outbuf, authbuf, iv, tag,
			    crp->crp_payload_length, crp->crp_aad_length,
			    csp->csp_ivlen, ses->enc_schedule, ses->rounds);
			crypto_copyback(crp, crp->crp_digest_start,
			    sizeof(tag), tag);
		} else {
			crypto_copydata(crp, crp->crp_digest_start,
			    sizeof(tag), tag);
			if (!AES_GCM_decrypt(buf, outbuf, authbuf, iv, tag,
			    crp->crp_payload_length, crp->crp_aad_length,
			    csp->csp_ivlen, ses->enc_schedule, ses->rounds))
				error = EBADMSG;
		}
		break;
	case CRYPTO_AES_CCM_16:
		if (encflag) {
			memset(tag, 0, sizeof(tag));
			AES_CCM_encrypt(buf, outbuf, authbuf, iv, tag,
			    crp->crp_payload_length, crp->crp_aad_length,
			    csp->csp_ivlen, ses->enc_schedule, ses->rounds);
			crypto_copyback(crp, crp->crp_digest_start,
			    sizeof(tag), tag);
		} else {
			crypto_copydata(crp, crp->crp_digest_start,
			    sizeof(tag), tag);
			if (!AES_CCM_decrypt(buf, outbuf, authbuf, iv, tag,
			    crp->crp_payload_length, crp->crp_aad_length,
			    csp->csp_ivlen, ses->enc_schedule, ses->rounds))
				error = EBADMSG;
		}
		break;
	}
	if (outcopy && error == 0)
		crypto_copyback(crp, CRYPTO_HAS_OUTPUT_BUFFER(crp) ?
		    crp->crp_payload_output_start : crp->crp_payload_start,
		    crp->crp_payload_length, outbuf);

out:
	if (allocated)
		zfree(buf, M_AESNI);
	if (authallocated)
		zfree(authbuf, M_AESNI);
	if (outallocated)
		zfree(outbuf, M_AESNI);
	explicit_bzero(iv, sizeof(iv));
	explicit_bzero(tag, sizeof(tag));
	return (error);
}

static int
aesni_cipher_mac(struct aesni_session *ses, struct cryptop *crp,
    const struct crypto_session_params *csp)
{
	union {
		struct SHA256Context sha2 __aligned(16);
		struct sha1_ctxt sha1 __aligned(16);
	} sctx;
	uint32_t res[SHA2_256_HASH_LEN / sizeof(uint32_t)];
	const uint8_t *key;
	int i, keylen;

	if (crp->crp_auth_key != NULL)
		key = crp->crp_auth_key;
	else
		key = csp->csp_auth_key;
	keylen = csp->csp_auth_klen;

	if (ses->hmac) {
		uint8_t hmac_key[SHA1_BLOCK_LEN] __aligned(16);

		/* Inner hash: (K ^ IPAD) || data */
		ses->hash_init(&sctx);
		for (i = 0; i < keylen; i++)
			hmac_key[i] = key[i] ^ HMAC_IPAD_VAL;
		for (i = keylen; i < sizeof(hmac_key); i++)
			hmac_key[i] = 0 ^ HMAC_IPAD_VAL;
		ses->hash_update(&sctx, hmac_key, sizeof(hmac_key));

		if (crp->crp_aad != NULL)
			ses->hash_update(&sctx, crp->crp_aad,
			    crp->crp_aad_length);
		else
			crypto_apply(crp, crp->crp_aad_start,
			    crp->crp_aad_length, ses->hash_update, &sctx);
		if (CRYPTO_HAS_OUTPUT_BUFFER(crp) &&
		    CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
			crypto_apply_buf(&crp->crp_obuf,
			    crp->crp_payload_output_start,
			    crp->crp_payload_length,
			    ses->hash_update, &sctx);
		else
			crypto_apply(crp, crp->crp_payload_start,
			    crp->crp_payload_length, ses->hash_update, &sctx);

		if (csp->csp_flags & CSP_F_ESN)
			ses->hash_update(&sctx, crp->crp_esn, 4);

		ses->hash_finalize(res, &sctx);

		/* Outer hash: (K ^ OPAD) || inner hash */
		ses->hash_init(&sctx);
		for (i = 0; i < keylen; i++)
			hmac_key[i] = key[i] ^ HMAC_OPAD_VAL;
		for (i = keylen; i < sizeof(hmac_key); i++)
			hmac_key[i] = 0 ^ HMAC_OPAD_VAL;
		ses->hash_update(&sctx, hmac_key, sizeof(hmac_key));
		ses->hash_update(&sctx, res, ses->hash_len);
		ses->hash_finalize(res, &sctx);
		explicit_bzero(hmac_key, sizeof(hmac_key));
	} else {
		ses->hash_init(&sctx);

		if (crp->crp_aad != NULL)
			ses->hash_update(&sctx, crp->crp_aad,
			    crp->crp_aad_length);
		else
			crypto_apply(crp, crp->crp_aad_start,
			    crp->crp_aad_length, ses->hash_update, &sctx);
		if (CRYPTO_HAS_OUTPUT_BUFFER(crp) &&
		    CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
			crypto_apply_buf(&crp->crp_obuf,
			    crp->crp_payload_output_start,
			    crp->crp_payload_length,
			    ses->hash_update, &sctx);
		else
			crypto_apply(crp, crp->crp_payload_start,
			    crp->crp_payload_length,
			    ses->hash_update, &sctx);

		ses->hash_finalize(res, &sctx);
	}

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		uint32_t res2[SHA2_256_HASH_LEN / sizeof(uint32_t)];

		crypto_copydata(crp, crp->crp_digest_start, ses->mlen, res2);
		if (timingsafe_bcmp(res, res2, ses->mlen) != 0)
			return (EBADMSG);
		explicit_bzero(res2, sizeof(res2));
	} else
		crypto_copyback(crp, crp->crp_digest_start, ses->mlen, res);
	explicit_bzero(res, sizeof(res));
	return (0);
}