/*-
 * Copyright (c) 2005-2008 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * Copyright (c) 2010 Konstantin Belousov <kib@FreeBSD.org>
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by John-Mark Gurney
 * under sponsorship of the FreeBSD Foundation and
 * Rubicon Communications, LLC (Netgate).
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/libkern.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>
#include <sys/bus.h>
#include <sys/uio.h>
#include <sys/mbuf.h>
#include <sys/smp.h>
#include <crypto/aesni/aesni.h>
#include <cryptodev_if.h>
#include <opencrypto/gmac.h>

static struct mtx_padalign *ctx_mtx;
static struct fpu_kern_ctx **ctx_fpu;

struct aesni_softc {
	int	dieing;
	int32_t	cid;
	uint32_t sid;
	TAILQ_HEAD(aesni_sessions_head, aesni_session) sessions;
	struct rwlock lock;
};

#define AQUIRE_CTX(i, ctx)				\
	do {						\
		(i) = PCPU_GET(cpuid);			\
		mtx_lock(&ctx_mtx[(i)]);		\
		(ctx) = ctx_fpu[(i)];			\
	} while (0)
#define RELEASE_CTX(i, ctx)				\
	do {						\
		mtx_unlock(&ctx_mtx[(i)]);		\
		(i) = -1;				\
		(ctx) = NULL;				\
	} while (0)

static int aesni_newsession(device_t, uint32_t *sidp, struct cryptoini *cri);
static int aesni_freesession(device_t, uint64_t tid);
static void aesni_freesession_locked(struct aesni_softc *sc,
    struct aesni_session *ses);
static int aesni_cipher_setup(struct aesni_session *ses,
    struct cryptoini *encini);
static int aesni_cipher_process(struct aesni_session *ses,
    struct cryptodesc *enccrd, struct cryptodesc *authcrd,
    struct cryptop *crp);

MALLOC_DEFINE(M_AESNI, "aesni_data", "AESNI Data");

static void
aesni_identify(driver_t *drv, device_t parent)
{

	/* NB: order 10 is so we get attached after h/w devices */
	if (device_find_child(parent, "aesni", -1) == NULL &&
	    BUS_ADD_CHILD(parent, 10, "aesni", -1) == 0)
		panic("aesni: could not attach");
}

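/*
 * Probe succeeds only when the CPU advertises both the AES-NI and the
 * SSE4.1 instruction set extensions.
 */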
static int
aesni_probe(device_t dev)
{

	if ((cpu_feature2 & CPUID2_AESNI) == 0) {
		device_printf(dev, "No AESNI support.\n");
		return (EINVAL);
	}

	if ((cpu_feature2 & CPUID2_SSE41) == 0) {
		device_printf(dev, "No SSE4.1 support.\n");
		return (EINVAL);
	}

	device_set_desc_copy(dev, "AES-CBC,AES-XTS,AES-GCM,AES-ICM");
	return (0);
}

static void
aesni_cleanctx(void)
{
	int i;

	/* XXX - no way to return driverid */
	CPU_FOREACH(i) {
		if (ctx_fpu[i] != NULL) {
			mtx_destroy(&ctx_mtx[i]);
			fpu_kern_free_ctx(ctx_fpu[i]);
		}
		ctx_fpu[i] = NULL;
	}
	free(ctx_mtx, M_AESNI);
	ctx_mtx = NULL;
	free(ctx_fpu, M_AESNI);
	ctx_fpu = NULL;
}

static int
aesni_attach(device_t dev)
{
	struct aesni_softc *sc;
	int i;

	sc = device_get_softc(dev);
	sc->dieing = 0;
	TAILQ_INIT(&sc->sessions);
	sc->sid = 1;

	sc->cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE |
	    CRYPTOCAP_F_SYNC);
	if (sc->cid < 0) {
		device_printf(dev, "Could not get crypto driver id.\n");
		return (ENOMEM);
	}

	ctx_mtx = malloc(sizeof *ctx_mtx * (mp_maxid + 1), M_AESNI,
	    M_WAITOK|M_ZERO);
	ctx_fpu = malloc(sizeof *ctx_fpu * (mp_maxid + 1), M_AESNI,
	    M_WAITOK|M_ZERO);

	CPU_FOREACH(i) {
		ctx_fpu[i] = fpu_kern_alloc_ctx(0);
		mtx_init(&ctx_mtx[i], "anifpumtx", NULL, MTX_DEF|MTX_NEW);
	}

	rw_init(&sc->lock, "aesni_lock");
	crypto_register(sc->cid, CRYPTO_AES_CBC, 0, 0);
	crypto_register(sc->cid, CRYPTO_AES_ICM, 0, 0);
	crypto_register(sc->cid, CRYPTO_AES_NIST_GCM_16, 0, 0);
	crypto_register(sc->cid, CRYPTO_AES_128_NIST_GMAC, 0, 0);
	crypto_register(sc->cid, CRYPTO_AES_192_NIST_GMAC, 0, 0);
	crypto_register(sc->cid, CRYPTO_AES_256_NIST_GMAC, 0, 0);
	crypto_register(sc->cid, CRYPTO_AES_XTS, 0, 0);
	return (0);
}

static int
aesni_detach(device_t dev)
{
	struct aesni_softc *sc;
	struct aesni_session *ses;

	sc = device_get_softc(dev);

	rw_wlock(&sc->lock);
	TAILQ_FOREACH(ses, &sc->sessions, next) {
		if (ses->used) {
			rw_wunlock(&sc->lock);
			device_printf(dev,
			    "Cannot detach, sessions still active.\n");
			return (EBUSY);
		}
	}
	sc->dieing = 1;
	while ((ses = TAILQ_FIRST(&sc->sessions)) != NULL) {
		TAILQ_REMOVE(&sc->sessions, ses, next);
		free(ses, M_AESNI);
	}
	rw_wunlock(&sc->lock);
	crypto_unregister_all(sc->cid);

	rw_destroy(&sc->lock);

	aesni_cleanctx();

	return (0);
}

static int
aesni_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
{
	struct aesni_softc *sc;
	struct aesni_session *ses;
	struct cryptoini *encini;
	int error;

	if (sidp == NULL || cri == NULL) {
		CRYPTDEB("no sidp or cri");
		return (EINVAL);
	}

	sc = device_get_softc(dev);
	if (sc->dieing)
		return (EINVAL);

	ses = NULL;
	encini = NULL;
	for (; cri != NULL; cri = cri->cri_next) {
		switch (cri->cri_alg) {
		case CRYPTO_AES_CBC:
		case CRYPTO_AES_ICM:
		case CRYPTO_AES_XTS:
		case CRYPTO_AES_NIST_GCM_16:
			if (encini != NULL) {
				CRYPTDEB("encini already set");
				return (EINVAL);
			}
			encini = cri;
			break;
		case CRYPTO_AES_128_NIST_GMAC:
		case CRYPTO_AES_192_NIST_GMAC:
		case CRYPTO_AES_256_NIST_GMAC:
			/*
			 * Nothing to do here; maybe in the future cache
			 * some values for GHASH.
			 */
			break;
		default:
			CRYPTDEB("unhandled algorithm");
			return (EINVAL);
		}
	}
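	/*
	 * A cipher transform is mandatory; the GMAC entries carry no
	 * session state of their own.
	 */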
	if (encini == NULL) {
		CRYPTDEB("no cipher");
		return (EINVAL);
	}

	rw_wlock(&sc->lock);
	if (sc->dieing) {
		rw_wunlock(&sc->lock);
		return (EINVAL);
	}
	/*
	 * Free sessions go first, so if the first session is in use, we
	 * need to allocate a new one.
	 */
	ses = TAILQ_FIRST(&sc->sessions);
	if (ses == NULL || ses->used) {
		ses = malloc(sizeof(*ses), M_AESNI, M_NOWAIT | M_ZERO);
		if (ses == NULL) {
			rw_wunlock(&sc->lock);
			return (ENOMEM);
		}
		ses->id = sc->sid++;
	} else {
		TAILQ_REMOVE(&sc->sessions, ses, next);
	}
	ses->used = 1;
	TAILQ_INSERT_TAIL(&sc->sessions, ses, next);
	rw_wunlock(&sc->lock);
	ses->algo = encini->cri_alg;

	error = aesni_cipher_setup(ses, encini);
	if (error != 0) {
		CRYPTDEB("setup failed");
		rw_wlock(&sc->lock);
		aesni_freesession_locked(sc, ses);
		rw_wunlock(&sc->lock);
		return (error);
	}

	*sidp = ses->id;
	return (0);
}

static void
aesni_freesession_locked(struct aesni_softc *sc, struct aesni_session *ses)
{
	uint32_t sid;

	rw_assert(&sc->lock, RA_WLOCKED);

	sid = ses->id;
	TAILQ_REMOVE(&sc->sessions, ses, next);
	*ses = (struct aesni_session){};
	ses->id = sid;
	TAILQ_INSERT_HEAD(&sc->sessions, ses, next);
}

static int
aesni_freesession(device_t dev, uint64_t tid)
{
	struct aesni_softc *sc;
	struct aesni_session *ses;
	uint32_t sid;

	sc = device_get_softc(dev);
	sid = ((uint32_t)tid) & 0xffffffff;
	rw_wlock(&sc->lock);
	TAILQ_FOREACH_REVERSE(ses, &sc->sessions, aesni_sessions_head, next) {
		if (ses->id == sid)
			break;
	}
	if (ses == NULL) {
		rw_wunlock(&sc->lock);
		return (EINVAL);
	}
	aesni_freesession_locked(sc, ses);
	rw_wunlock(&sc->lock);
	return (0);
}

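/*
 * Dispatch one crypto request: pick out the cipher descriptor and, for
 * GCM, the matching GMAC descriptor, look the session up by id and hand
 * everything to aesni_cipher_process().
 */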
static int
aesni_process(device_t dev, struct cryptop *crp, int hint __unused)
{
	struct aesni_softc *sc = device_get_softc(dev);
	struct aesni_session *ses = NULL;
	struct cryptodesc *crd, *enccrd, *authcrd;
	int error, needauth;

	error = 0;
	enccrd = NULL;
	authcrd = NULL;
	needauth = 0;

	/* Sanity check. */
	if (crp == NULL)
		return (EINVAL);

	if (crp->crp_callback == NULL || crp->crp_desc == NULL) {
		error = EINVAL;
		goto out;
	}

	for (crd = crp->crp_desc; crd != NULL; crd = crd->crd_next) {
		switch (crd->crd_alg) {
		case CRYPTO_AES_CBC:
		case CRYPTO_AES_ICM:
		case CRYPTO_AES_XTS:
			if (enccrd != NULL) {
				error = EINVAL;
				goto out;
			}
			enccrd = crd;
			break;

		case CRYPTO_AES_NIST_GCM_16:
			if (enccrd != NULL) {
				error = EINVAL;
				goto out;
			}
			enccrd = crd;
			needauth = 1;
			break;

		case CRYPTO_AES_128_NIST_GMAC:
		case CRYPTO_AES_192_NIST_GMAC:
		case CRYPTO_AES_256_NIST_GMAC:
			if (authcrd != NULL) {
				error = EINVAL;
				goto out;
			}
			authcrd = crd;
			needauth = 1;
			break;

		default:
			error = EINVAL;
			goto out;
		}
	}

	if (enccrd == NULL || (needauth && authcrd == NULL)) {
		error = EINVAL;
		goto out;
	}

	/* CBC & XTS can only handle full blocks for now */
	if ((enccrd->crd_alg == CRYPTO_AES_CBC || enccrd->crd_alg ==
	    CRYPTO_AES_XTS) && (enccrd->crd_len % AES_BLOCK_LEN) != 0) {
		error = EINVAL;
		goto out;
	}

	rw_rlock(&sc->lock);
	TAILQ_FOREACH_REVERSE(ses, &sc->sessions, aesni_sessions_head, next) {
		if (ses->id == (crp->crp_sid & 0xffffffff))
			break;
	}
	rw_runlock(&sc->lock);
	if (ses == NULL) {
		error = EINVAL;
		goto out;
	}

	error = aesni_cipher_process(ses, enccrd, authcrd, crp);
	if (error != 0)
		goto out;

out:
	crp->crp_etype = error;
	crypto_done(crp);
	return (error);
}

uint8_t *
aesni_cipher_alloc(struct cryptodesc *enccrd, struct cryptop *crp,
    int *allocated)
{
	struct mbuf *m;
	struct uio *uio;
	struct iovec *iov;
	uint8_t *addr;

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		m = (struct mbuf *)crp->crp_buf;
		if (m->m_next != NULL)
			goto alloc;
		addr = mtod(m, uint8_t *);
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		uio = (struct uio *)crp->crp_buf;
		if (uio->uio_iovcnt != 1)
			goto alloc;
		iov = uio->uio_iov;
		addr = (uint8_t *)iov->iov_base;
	} else
		addr = (uint8_t *)crp->crp_buf;
	*allocated = 0;
	addr += enccrd->crd_skip;
	return (addr);

alloc:
	addr = malloc(enccrd->crd_len, M_AESNI, M_NOWAIT);
	if (addr != NULL) {
		*allocated = 1;
		crypto_copydata(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
		    enccrd->crd_len, addr);
	} else
		*allocated = 0;
	return (addr);
}

static device_method_t aesni_methods[] = {
	DEVMETHOD(device_identify, aesni_identify),
	DEVMETHOD(device_probe, aesni_probe),
	DEVMETHOD(device_attach, aesni_attach),
	DEVMETHOD(device_detach, aesni_detach),

	DEVMETHOD(cryptodev_newsession, aesni_newsession),
	DEVMETHOD(cryptodev_freesession, aesni_freesession),
	DEVMETHOD(cryptodev_process, aesni_process),

	{0, 0},
};

static driver_t aesni_driver = {
	"aesni",
	aesni_methods,
	sizeof(struct aesni_softc),
};
static devclass_t aesni_devclass;

DRIVER_MODULE(aesni, nexus, aesni_driver, aesni_devclass, 0, 0);
MODULE_VERSION(aesni, 1);
MODULE_DEPEND(aesni, crypto, 1, 1, 1);

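/*
 * aesni_cipher_setup_common() expands the key schedule with AES-NI
 * instructions, so enter an FPU kernel context around it unless the
 * caller is already an FPU-enabled kernel thread.
 */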
static int
aesni_cipher_setup(struct aesni_session *ses, struct cryptoini *encini)
{
	struct fpu_kern_ctx *ctx;
	int error;
	int kt, ctxidx;

	kt = is_fpu_kern_thread(0);
	if (!kt) {
		AQUIRE_CTX(ctxidx, ctx);
		error = fpu_kern_enter(curthread, ctx,
		    FPU_KERN_NORMAL | FPU_KERN_KTHR);
		if (error != 0)
			goto out;
	}

	error = aesni_cipher_setup_common(ses, encini->cri_key,
	    encini->cri_klen);

	if (!kt) {
		fpu_kern_leave(curthread, ctx);
out:
		RELEASE_CTX(ctxidx, ctx);
	}
	return (error);
}

/*
 * authcrd contains the associated data.
 */
static int
aesni_cipher_process(struct aesni_session *ses, struct cryptodesc *enccrd,
    struct cryptodesc *authcrd, struct cryptop *crp)
{
	struct fpu_kern_ctx *ctx;
	uint8_t iv[AES_BLOCK_LEN];
	uint8_t tag[GMAC_DIGEST_LEN];
	uint8_t *buf, *authbuf;
	int error, allocated, authallocated;
	int ivlen, encflag;
	int kt, ctxidx;

	encflag = (enccrd->crd_flags & CRD_F_ENCRYPT) == CRD_F_ENCRYPT;

	if ((enccrd->crd_alg == CRYPTO_AES_ICM ||
	    enccrd->crd_alg == CRYPTO_AES_NIST_GCM_16) &&
	    (enccrd->crd_flags & CRD_F_IV_EXPLICIT) == 0)
		return (EINVAL);

	buf = aesni_cipher_alloc(enccrd, crp, &allocated);
	if (buf == NULL)
		return (ENOMEM);

	authbuf = NULL;
	authallocated = 0;
	if (authcrd != NULL) {
		authbuf = aesni_cipher_alloc(authcrd, crp, &authallocated);
		if (authbuf == NULL) {
			error = ENOMEM;
			goto out1;
		}
	}

	kt = is_fpu_kern_thread(0);
	if (!kt) {
		AQUIRE_CTX(ctxidx, ctx);
		error = fpu_kern_enter(curthread, ctx,
		    FPU_KERN_NORMAL|FPU_KERN_KTHR);
		if (error != 0)
			goto out2;
	}

	if ((enccrd->crd_flags & CRD_F_KEY_EXPLICIT) != 0) {
		error = aesni_cipher_setup_common(ses, enccrd->crd_key,
		    enccrd->crd_klen);
		if (error != 0)
			goto out;
	}

	/* XXX - validate that enccrd and authcrd have/use same key? */
	switch (enccrd->crd_alg) {
	case CRYPTO_AES_CBC:
	case CRYPTO_AES_ICM:
		ivlen = AES_BLOCK_LEN;
		break;
	case CRYPTO_AES_XTS:
		ivlen = 8;
		break;
	case CRYPTO_AES_NIST_GCM_16:
		ivlen = 12;	/* should support arbitrarily large IVs */
		break;
	}

	/* Setup iv */
	if (encflag) {
		if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
			bcopy(enccrd->crd_iv, iv, ivlen);
		else
			arc4rand(iv, ivlen, 0);

		if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    enccrd->crd_inject, ivlen, iv);
	} else {
		if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
			bcopy(enccrd->crd_iv, iv, ivlen);
		else
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    enccrd->crd_inject, ivlen, iv);
	}

	if (authcrd != NULL && !encflag)
		crypto_copydata(crp->crp_flags, crp->crp_buf,
		    authcrd->crd_inject, GMAC_DIGEST_LEN, tag);
	else
		bzero(tag, sizeof tag);

	/* Do work */
	switch (ses->algo) {
	case CRYPTO_AES_CBC:
		if (encflag)
			aesni_encrypt_cbc(ses->rounds, ses->enc_schedule,
			    enccrd->crd_len, buf, buf, iv);
		else
			aesni_decrypt_cbc(ses->rounds, ses->dec_schedule,
			    enccrd->crd_len, buf, iv);
		break;
	case CRYPTO_AES_ICM:
		/* encryption & decryption are the same */
		aesni_encrypt_icm(ses->rounds, ses->enc_schedule,
		    enccrd->crd_len, buf, buf, iv);
		break;
	case CRYPTO_AES_XTS:
		if (encflag)
			aesni_encrypt_xts(ses->rounds, ses->enc_schedule,
			    ses->xts_schedule, enccrd->crd_len, buf, buf,
			    iv);
		else
			aesni_decrypt_xts(ses->rounds, ses->dec_schedule,
			    ses->xts_schedule, enccrd->crd_len, buf, buf,
			    iv);
		break;
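	/*
	 * GCM produces the tag as part of the encryption pass; on decrypt
	 * the recomputed tag is compared against the one read from the
	 * request above, and a mismatch is reported as EBADMSG.
	 */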
	case CRYPTO_AES_NIST_GCM_16:
		if (encflag)
			AES_GCM_encrypt(buf, buf, authbuf, iv, tag,
			    enccrd->crd_len, authcrd->crd_len, ivlen,
			    ses->enc_schedule, ses->rounds);
		else {
			if (!AES_GCM_decrypt(buf, buf, authbuf, iv, tag,
			    enccrd->crd_len, authcrd->crd_len, ivlen,
			    ses->enc_schedule, ses->rounds))
				error = EBADMSG;
		}
		break;
	}

	if (allocated)
		crypto_copyback(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
		    enccrd->crd_len, buf);

	if (!error && authcrd != NULL) {
		crypto_copyback(crp->crp_flags, crp->crp_buf,
		    authcrd->crd_inject, GMAC_DIGEST_LEN, tag);
	}

out:
	if (!kt) {
		fpu_kern_leave(curthread, ctx);
out2:
		RELEASE_CTX(ctxidx, ctx);
	}

out1:
	if (allocated) {
		bzero(buf, enccrd->crd_len);
		free(buf, M_AESNI);
	}
	if (authallocated)
		free(authbuf, M_AESNI);
	return (error);
}