1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (C) 2008-2009 Semihalf, Piotr Ziecik 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN 19 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 20 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 21 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 22 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 23 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 24 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 25 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 */ 27 28 /* 29 * Freescale integrated Security Engine (SEC) driver. Currently SEC 2.0 and 30 * 3.0 are supported. 
31 */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 #include <sys/param.h> 37 #include <sys/systm.h> 38 #include <sys/bus.h> 39 #include <sys/endian.h> 40 #include <sys/kernel.h> 41 #include <sys/lock.h> 42 #include <sys/malloc.h> 43 #include <sys/mbuf.h> 44 #include <sys/module.h> 45 #include <sys/mutex.h> 46 #include <sys/random.h> 47 #include <sys/rman.h> 48 49 #include <machine/_inttypes.h> 50 #include <machine/bus.h> 51 #include <machine/resource.h> 52 53 #include <opencrypto/cryptodev.h> 54 #include <opencrypto/xform_auth.h> 55 #include "cryptodev_if.h" 56 57 #include <dev/ofw/ofw_bus_subr.h> 58 #include <dev/sec/sec.h> 59 60 static int sec_probe(device_t dev); 61 static int sec_attach(device_t dev); 62 static int sec_detach(device_t dev); 63 static int sec_suspend(device_t dev); 64 static int sec_resume(device_t dev); 65 static int sec_shutdown(device_t dev); 66 static void sec_primary_intr(void *arg); 67 static void sec_secondary_intr(void *arg); 68 static int sec_setup_intr(struct sec_softc *sc, struct resource **ires, 69 void **ihand, int *irid, driver_intr_t handler, const char *iname); 70 static void sec_release_intr(struct sec_softc *sc, struct resource *ires, 71 void *ihand, int irid, const char *iname); 72 static int sec_controller_reset(struct sec_softc *sc); 73 static int sec_channel_reset(struct sec_softc *sc, int channel, int full); 74 static int sec_init(struct sec_softc *sc); 75 static int sec_alloc_dma_mem(struct sec_softc *sc, 76 struct sec_dma_mem *dma_mem, bus_size_t size); 77 static int sec_desc_map_dma(struct sec_softc *sc, 78 struct sec_dma_mem *dma_mem, struct cryptop *crp, bus_size_t size, 79 struct sec_desc_map_info *sdmi); 80 static void sec_free_dma_mem(struct sec_dma_mem *dma_mem); 81 static void sec_enqueue(struct sec_softc *sc); 82 static int sec_enqueue_desc(struct sec_softc *sc, struct sec_desc *desc, 83 int channel); 84 static int sec_eu_channel(struct sec_softc *sc, int eu); 85 static int 
sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc, 86 u_int n, struct cryptop *crp, bus_size_t doffset, bus_size_t dsize); 87 static int sec_make_pointer_direct(struct sec_softc *sc, 88 struct sec_desc *desc, u_int n, bus_addr_t data, bus_size_t dsize); 89 static int sec_probesession(device_t dev, 90 const struct crypto_session_params *csp); 91 static int sec_newsession(device_t dev, crypto_session_t cses, 92 const struct crypto_session_params *csp); 93 static int sec_process(device_t dev, struct cryptop *crp, int hint); 94 static int sec_build_common_ns_desc(struct sec_softc *sc, 95 struct sec_desc *desc, const struct crypto_session_params *csp, 96 struct cryptop *crp); 97 static int sec_build_common_s_desc(struct sec_softc *sc, 98 struct sec_desc *desc, const struct crypto_session_params *csp, 99 struct cryptop *crp); 100 101 static struct sec_desc *sec_find_desc(struct sec_softc *sc, bus_addr_t paddr); 102 103 /* AESU */ 104 static bool sec_aesu_newsession(const struct crypto_session_params *csp); 105 static int sec_aesu_make_desc(struct sec_softc *sc, 106 const struct crypto_session_params *csp, struct sec_desc *desc, 107 struct cryptop *crp); 108 109 /* MDEU */ 110 static bool sec_mdeu_can_handle(u_int alg); 111 static int sec_mdeu_config(const struct crypto_session_params *csp, 112 u_int *eu, u_int *mode, u_int *hashlen); 113 static bool sec_mdeu_newsession(const struct crypto_session_params *csp); 114 static int sec_mdeu_make_desc(struct sec_softc *sc, 115 const struct crypto_session_params *csp, struct sec_desc *desc, 116 struct cryptop *crp); 117 118 static device_method_t sec_methods[] = { 119 /* Device interface */ 120 DEVMETHOD(device_probe, sec_probe), 121 DEVMETHOD(device_attach, sec_attach), 122 DEVMETHOD(device_detach, sec_detach), 123 124 DEVMETHOD(device_suspend, sec_suspend), 125 DEVMETHOD(device_resume, sec_resume), 126 DEVMETHOD(device_shutdown, sec_shutdown), 127 128 /* Crypto methods */ 129 DEVMETHOD(cryptodev_probesession, 
    sec_probesession),
	DEVMETHOD(cryptodev_newsession,	sec_newsession),
	DEVMETHOD(cryptodev_process,	sec_process),

	DEVMETHOD_END
};

static driver_t sec_driver = {
	"sec",
	sec_methods,
	sizeof(struct sec_softc),
};

static devclass_t sec_devclass;
DRIVER_MODULE(sec, simplebus, sec_driver, sec_devclass, 0, 0);
MODULE_DEPEND(sec, crypto, 1, 1, 1);

/*
 * Table of supported execution units.  sec_newsession() walks this table
 * and picks the first EU whose sem_newsession callback accepts the
 * session parameters.  Terminated by a { NULL, NULL } sentinel.
 */
static struct sec_eu_methods sec_eus[] = {
	{
		sec_aesu_newsession,
		sec_aesu_make_desc,
	},
	{
		sec_mdeu_newsession,
		sec_mdeu_make_desc,
	},
	{ NULL, NULL }
};

/* Sync a driver-owned DMA region; no-op if the region was never allocated. */
static inline void
sec_sync_dma_mem(struct sec_dma_mem *dma_mem, bus_dmasync_op_t op)
{

	/* Sync only if dma memory is valid */
	if (dma_mem->dma_vaddr != NULL)
		bus_dmamap_sync(dma_mem->dma_tag, dma_mem->dma_map, op);
}

/* Return the virtual address backing pointer slot n of a descriptor. */
static inline void *
sec_get_pointer_data(struct sec_desc *desc, u_int n)
{

	return (desc->sd_ptr_dmem[n].dma_vaddr);
}

/*
 * Newbus probe: match the "fsl,sec2.0" OFW node, temporarily map the
 * register window to read the SEC_ID register, and set the device
 * description and sc_version (2 or 3) accordingly.  The register
 * resource is released again; sec_attach() re-allocates it.
 */
static int
sec_probe(device_t dev)
{
	struct sec_softc *sc;
	uint64_t id;

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_is_compatible(dev, "fsl,sec2.0"))
		return (ENXIO);

	sc = device_get_softc(dev);

	sc->sc_rrid = 0;
	sc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rrid,
	    RF_ACTIVE);

	if (sc->sc_rres == NULL)
		return (ENXIO);

	sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres);
	sc->sc_bas.bst = rman_get_bustag(sc->sc_rres);

	id = SEC_READ(sc, SEC_ID);

	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres);

	switch (id) {
	case SEC_20_ID:
		device_set_desc(dev, "Freescale Security Engine 2.0");
		sc->sc_version = 2;
		break;
	case SEC_30_ID:
		device_set_desc(dev, "Freescale Security Engine 3.0");
		sc->sc_version = 3;
		break;
	case SEC_31_ID:
		device_set_desc(dev, "Freescale Security Engine 3.1");
		/* SEC 3.1 is driven the same way as 3.0 */
		sc->sc_version = 3;
		break;
	default:
		device_printf(dev, "unknown SEC ID 0x%016"PRIx64"!\n", id);
		return (ENXIO);
	}

	return (0);
}

/*
 * Newbus attach: register with OCF, map registers, hook up the primary
 * (and on SEC 3.x the secondary) interrupt, allocate the descriptor and
 * link-table DMA rings, initialize counters/masks, and reset the engine.
 * Error paths unwind in reverse order through the fail* labels.
 */
static int
sec_attach(device_t dev)
{
	struct sec_softc *sc;
	struct sec_hw_lt *lt;
	int error = 0;
	int i;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;
	sc->sc_blocked = 0;
	sc->sc_shutdown = 0;

	sc->sc_cid = crypto_get_driverid(dev, sizeof(struct sec_session),
	    CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0) {
		device_printf(dev, "could not get crypto driver ID!\n");
		return (ENXIO);
	}

	/* Init locks */
	mtx_init(&sc->sc_controller_lock, device_get_nameunit(dev),
	    "SEC Controller lock", MTX_DEF);
	mtx_init(&sc->sc_descriptors_lock, device_get_nameunit(dev),
	    "SEC Descriptors lock", MTX_DEF);

	/* Allocate I/O memory for SEC registers */
	sc->sc_rrid = 0;
	sc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rrid,
	    RF_ACTIVE);

	if (sc->sc_rres == NULL) {
		device_printf(dev, "could not allocate I/O memory!\n");
		goto fail1;
	}

	sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres);
	sc->sc_bas.bst = rman_get_bustag(sc->sc_rres);

	/* Setup interrupts */
	sc->sc_pri_irid = 0;
	error = sec_setup_intr(sc, &sc->sc_pri_ires, &sc->sc_pri_ihand,
	    &sc->sc_pri_irid, sec_primary_intr, "primary");

	if (error)
		goto fail2;

	if (sc->sc_version == 3) {
		sc->sc_sec_irid = 1;
		error = sec_setup_intr(sc, &sc->sc_sec_ires, &sc->sc_sec_ihand,
		    &sc->sc_sec_irid, sec_secondary_intr, "secondary");

		if (error)
			goto fail3;
	}

	/* Alloc DMA memory for descriptors and link tables */
	error = sec_alloc_dma_mem(sc, &(sc->sc_desc_dmem),
	    SEC_DESCRIPTORS * sizeof(struct sec_hw_desc));

	if (error)
		goto fail4;

	error = sec_alloc_dma_mem(sc, &(sc->sc_lt_dmem),
	    (SEC_LT_ENTRIES + 1) * sizeof(struct sec_hw_lt));

	if
	    (error)
		goto fail5;

	/* Fill in descriptors and link tables */
	for (i = 0; i < SEC_DESCRIPTORS; i++) {
		sc->sc_desc[i].sd_desc =
		    (struct sec_hw_desc*)(sc->sc_desc_dmem.dma_vaddr) + i;
		sc->sc_desc[i].sd_desc_paddr = sc->sc_desc_dmem.dma_paddr +
		    (i * sizeof(struct sec_hw_desc));
	}

	for (i = 0; i < SEC_LT_ENTRIES + 1; i++) {
		sc->sc_lt[i].sl_lt =
		    (struct sec_hw_lt*)(sc->sc_lt_dmem.dma_vaddr) + i;
		sc->sc_lt[i].sl_lt_paddr = sc->sc_lt_dmem.dma_paddr +
		    (i * sizeof(struct sec_hw_lt));
	}

	/* Last entry in link table is used to create a circle */
	lt = sc->sc_lt[SEC_LT_ENTRIES].sl_lt;
	lt->shl_length = 0;
	lt->shl_r = 0;
	lt->shl_n = 1;
	lt->shl_ptr = sc->sc_lt[0].sl_lt_paddr;

	/* Init descriptor and link table queues pointers */
	SEC_CNT_INIT(sc, sc_free_desc_get_cnt, SEC_DESCRIPTORS);
	SEC_CNT_INIT(sc, sc_free_desc_put_cnt, SEC_DESCRIPTORS);
	SEC_CNT_INIT(sc, sc_ready_desc_get_cnt, SEC_DESCRIPTORS);
	SEC_CNT_INIT(sc, sc_ready_desc_put_cnt, SEC_DESCRIPTORS);
	SEC_CNT_INIT(sc, sc_queued_desc_get_cnt, SEC_DESCRIPTORS);
	SEC_CNT_INIT(sc, sc_queued_desc_put_cnt, SEC_DESCRIPTORS);
	SEC_CNT_INIT(sc, sc_lt_alloc_cnt, SEC_LT_ENTRIES);
	SEC_CNT_INIT(sc, sc_lt_free_cnt, SEC_LT_ENTRIES);

	/* Create masks for fast checks */
	sc->sc_int_error_mask = 0;
	for (i = 0; i < SEC_CHANNELS; i++)
		sc->sc_int_error_mask |= (~0ULL & SEC_INT_CH_ERR(i));

	/*
	 * A channel is considered idle when all of the fifo-level and
	 * state-machine fields of its CSR are zero; the field layout
	 * differs between SEC 2.x and 3.x.
	 */
	switch (sc->sc_version) {
	case 2:
		sc->sc_channel_idle_mask =
		    (SEC_CHAN_CSR2_FFLVL_M << SEC_CHAN_CSR2_FFLVL_S) |
		    (SEC_CHAN_CSR2_MSTATE_M << SEC_CHAN_CSR2_MSTATE_S) |
		    (SEC_CHAN_CSR2_PSTATE_M << SEC_CHAN_CSR2_PSTATE_S) |
		    (SEC_CHAN_CSR2_GSTATE_M << SEC_CHAN_CSR2_GSTATE_S);
		break;
	case 3:
		sc->sc_channel_idle_mask =
		    (SEC_CHAN_CSR3_FFLVL_M << SEC_CHAN_CSR3_FFLVL_S) |
		    (SEC_CHAN_CSR3_MSTATE_M << SEC_CHAN_CSR3_MSTATE_S) |
		    (SEC_CHAN_CSR3_PSTATE_M << SEC_CHAN_CSR3_PSTATE_S) |
		    (SEC_CHAN_CSR3_GSTATE_M << SEC_CHAN_CSR3_GSTATE_S);
		break;
	}

	/* Init hardware */
	error = sec_init(sc);

	if (error)
		goto fail6;

	return (0);

	/*
	 * NOTE(review): these error paths do not call
	 * crypto_unregister_all(sc->sc_cid) even though a crypto driver ID
	 * was obtained above — confirm whether OCF requires unregistering
	 * on attach failure.
	 */
fail6:
	sec_free_dma_mem(&(sc->sc_lt_dmem));
fail5:
	sec_free_dma_mem(&(sc->sc_desc_dmem));
fail4:
	/* Safe even if no secondary IRQ was set up: ires is NULL then. */
	sec_release_intr(sc, sc->sc_sec_ires, sc->sc_sec_ihand,
	    sc->sc_sec_irid, "secondary");
fail3:
	sec_release_intr(sc, sc->sc_pri_ires, sc->sc_pri_ihand,
	    sc->sc_pri_irid, "primary");
fail2:
	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres);
fail1:
	mtx_destroy(&sc->sc_controller_lock);
	mtx_destroy(&sc->sc_descriptors_lock);

	return (ENXIO);
}

/*
 * Newbus detach: mark the driver as shutting down, drain (or forcibly
 * reset) the channels, disable interrupts, unregister from OCF and
 * release all DMA memory, interrupts and the register window.
 */
static int
sec_detach(device_t dev)
{
	struct sec_softc *sc = device_get_softc(dev);
	int i, error, timeout = SEC_TIMEOUT;

	/* Prepare driver to shutdown */
	SEC_LOCK(sc, descriptors);
	sc->sc_shutdown = 1;
	SEC_UNLOCK(sc, descriptors);

	/* Wait until all queued processing finishes */
	while (1) {
		SEC_LOCK(sc, descriptors);
		i = SEC_READY_DESC_CNT(sc) + SEC_QUEUED_DESC_CNT(sc);
		SEC_UNLOCK(sc, descriptors);

		if (i == 0)
			break;

		if (timeout < 0) {
			device_printf(dev, "queue flush timeout!\n");

			/* DMA can be still active - stop it */
			for (i = 0; i < SEC_CHANNELS; i++)
				sec_channel_reset(sc, i, 1);

			break;
		}

		timeout -= 1000;
		DELAY(1000);
	}

	/* Disable interrupts */
	SEC_WRITE(sc, SEC_IER, 0);

	/* Unregister from OCF */
	crypto_unregister_all(sc->sc_cid);

	/* Free DMA memory */
	for (i = 0; i < SEC_DESCRIPTORS; i++)
		SEC_DESC_FREE_POINTERS(&(sc->sc_desc[i]));

	sec_free_dma_mem(&(sc->sc_lt_dmem));
	sec_free_dma_mem(&(sc->sc_desc_dmem));

	/* Release interrupts */
	sec_release_intr(sc, sc->sc_pri_ires, sc->sc_pri_ihand,
	    sc->sc_pri_irid, "primary");
	sec_release_intr(sc, sc->sc_sec_ires,
	    sc->sc_sec_ihand,
	    sc->sc_sec_irid, "secondary");

	/* Release memory */
	if (sc->sc_rres) {
		error = bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid,
		    sc->sc_rres);
		if (error)
			device_printf(dev, "bus_release_resource() failed for"
			    " I/O memory, error %d\n", error);

		sc->sc_rres = NULL;
	}

	mtx_destroy(&sc->sc_controller_lock);
	mtx_destroy(&sc->sc_descriptors_lock);

	return (0);
}

/* Suspend is a no-op for this device. */
static int
sec_suspend(device_t dev)
{

	return (0);
}

/* Resume is a no-op for this device. */
static int
sec_resume(device_t dev)
{

	return (0);
}

/* Shutdown is a no-op for this device. */
static int
sec_shutdown(device_t dev)
{

	return (0);
}

/*
 * Allocate an IRQ resource and install a filter-less MPSAFE handler.
 * On success *ires/*ihand are filled in; on failure *ires is reset to
 * NULL so sec_release_intr() can be called unconditionally later.
 */
static int
sec_setup_intr(struct sec_softc *sc, struct resource **ires, void **ihand,
    int *irid, driver_intr_t handler, const char *iname)
{
	int error;

	(*ires) = bus_alloc_resource_any(sc->sc_dev, SYS_RES_IRQ, irid,
	    RF_ACTIVE);

	if ((*ires) == NULL) {
		device_printf(sc->sc_dev, "could not allocate %s IRQ\n", iname);
		return (ENXIO);
	}

	error = bus_setup_intr(sc->sc_dev, *ires, INTR_MPSAFE | INTR_TYPE_NET,
	    NULL, handler, sc, ihand);

	if (error) {
		device_printf(sc->sc_dev, "failed to set up %s IRQ\n", iname);
		if (bus_release_resource(sc->sc_dev, SYS_RES_IRQ, *irid, *ires))
			device_printf(sc->sc_dev, "could not release %s IRQ\n",
			    iname);

		(*ires) = NULL;
		return (error);
	}

	return (0);
}

/*
 * Tear down and release an IRQ previously set up with sec_setup_intr().
 * A NULL ires (never allocated) is silently ignored.
 */
static void
sec_release_intr(struct sec_softc *sc, struct resource *ires, void *ihand,
    int irid, const char *iname)
{
	int error;

	if (ires == NULL)
		return;

	error = bus_teardown_intr(sc->sc_dev, ires, ihand);
	if (error)
		device_printf(sc->sc_dev, "bus_teardown_intr() failed for %s"
		    " IRQ, error %d\n", iname, error);

	error = bus_release_resource(sc->sc_dev, SYS_RES_IRQ, irid, ires);
	if (error)
		device_printf(sc->sc_dev,
		    "bus_release_resource() failed for %s"
		    " IRQ, error %d\n", iname, error);
}

/*
 * Primary interrupt handler: mark descriptors hit by channel I/O errors,
 * ACK the interrupt, then complete all finished descriptors — verifying
 * or copying back the digest for auth sessions — hand them back to OCF
 * via crypto_done(), refill the hardware queues, and unblock OCF if the
 * driver had previously returned ERESTART.
 */
static void
sec_primary_intr(void *arg)
{
	struct sec_session *ses;
	struct sec_softc *sc = arg;
	struct sec_desc *desc;
	struct cryptop *crp;
	uint64_t isr;
	uint8_t hash[HASH_MAX_LEN];
	int i, wakeup = 0;

	SEC_LOCK(sc, controller);

	/* Check for errors */
	isr = SEC_READ(sc, SEC_ISR);
	if (isr & sc->sc_int_error_mask) {
		/* Check each channel for error */
		for (i = 0; i < SEC_CHANNELS; i++) {
			if ((isr & SEC_INT_CH_ERR(i)) == 0)
				continue;

			device_printf(sc->sc_dev,
			    "I/O error on channel %i!\n", i);

			/* Find and mark problematic descriptor */
			desc = sec_find_desc(sc, SEC_READ(sc,
			    SEC_CHAN_CDPR(i)));

			if (desc != NULL)
				desc->sd_error = EIO;

			/* Do partial channel reset */
			sec_channel_reset(sc, i, 0);
		}
	}

	/* ACK interrupt */
	SEC_WRITE(sc, SEC_ICR, 0xFFFFFFFFFFFFFFFFULL);

	SEC_UNLOCK(sc, controller);
	SEC_LOCK(sc, descriptors);

	/* Handle processed descriptors */
	SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	while (SEC_QUEUED_DESC_CNT(sc) > 0) {
		desc = SEC_GET_QUEUED_DESC(sc);

		/* 0xFF in shd_done marks a descriptor the engine completed */
		if (desc->sd_desc->shd_done != 0xFF && desc->sd_error == 0) {
			SEC_PUT_BACK_QUEUED_DESC(sc);
			break;
		}

		SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_PREREAD |
		    BUS_DMASYNC_PREWRITE);

		crp = desc->sd_crp;
		crp->crp_etype = desc->sd_error;
		if (crp->crp_etype == 0) {
			ses = crypto_get_driver_session(crp->crp_session);
			if (ses->ss_mlen != 0) {
				if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
					crypto_copydata(crp,
					    crp->crp_digest_start,
					    ses->ss_mlen, hash);
					if (timingsafe_bcmp(
					    desc->sd_desc->shd_digest,
					    hash, ses->ss_mlen) != 0)
						crp->crp_etype = EBADMSG;
				} else
					crypto_copyback(crp,
					    crp->crp_digest_start,
					    ses->ss_mlen,
				    desc->sd_desc->shd_digest);
			}
		}
		crypto_done(desc->sd_crp);

		SEC_DESC_FREE_POINTERS(desc);
		SEC_DESC_FREE_LT(sc, desc);
		SEC_DESC_QUEUED2FREE(sc);
	}

	SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (!sc->sc_shutdown) {
		wakeup = sc->sc_blocked;
		sc->sc_blocked = 0;
	}

	SEC_UNLOCK(sc, descriptors);

	/* Enqueue ready descriptors in hardware */
	sec_enqueue(sc);

	if (wakeup)
		crypto_unblock(sc->sc_cid, wakeup);
}

/*
 * Secondary (SEC 3.x) interrupt handler: not expected to fire; log it
 * and fall through to the primary handler to make forward progress.
 */
static void
sec_secondary_intr(void *arg)
{
	struct sec_softc *sc = arg;

	device_printf(sc->sc_dev, "spurious secondary interrupt!\n");
	sec_primary_intr(arg);
}

/*
 * Software-reset the whole controller and busy-wait (up to SEC_TIMEOUT
 * microseconds) for the reset bit to self-clear.
 */
static int
sec_controller_reset(struct sec_softc *sc)
{
	int timeout = SEC_TIMEOUT;

	/* Reset Controller */
	SEC_WRITE(sc, SEC_MCR, SEC_MCR_SWR);

	while (SEC_READ(sc, SEC_MCR) & SEC_MCR_SWR) {
		DELAY(1000);
		timeout -= 1000;

		if (timeout < 0) {
			device_printf(sc->sc_dev, "timeout while waiting for "
			    "device reset!\n");
			return (ETIMEDOUT);
		}
	}

	return (0);
}

/*
 * Reset a single channel.  full != 0 performs a complete reset (R bit)
 * and then re-programs the channel configuration; full == 0 performs
 * only a continue/partial reset (CON bit), used after I/O errors.
 */
static int
sec_channel_reset(struct sec_softc *sc, int channel, int full)
{
	int timeout = SEC_TIMEOUT;
	uint64_t bit = (full) ? SEC_CHAN_CCR_R : SEC_CHAN_CCR_CON;
	uint64_t reg;

	/* Reset Channel */
	reg = SEC_READ(sc, SEC_CHAN_CCR(channel));
	SEC_WRITE(sc, SEC_CHAN_CCR(channel), reg | bit);

	while (SEC_READ(sc, SEC_CHAN_CCR(channel)) & bit) {
		DELAY(1000);
		timeout -= 1000;

		if (timeout < 0) {
			device_printf(sc->sc_dev, "timeout while waiting for "
			    "channel reset!\n");
			return (ETIMEDOUT);
		}
	}

	if (full) {
		reg = SEC_CHAN_CCR_CDIE | SEC_CHAN_CCR_NT | SEC_CHAN_CCR_BS;

		switch(sc->sc_version) {
		case 2:
			reg |= SEC_CHAN_CCR_CDWE;
			break;
		case 3:
			reg |= SEC_CHAN_CCR_AWSE | SEC_CHAN_CCR_WGN;
			break;
		}

		SEC_WRITE(sc, SEC_CHAN_CCR(channel), reg);
	}

	return (0);
}

/*
 * Bring the engine to a known state: double controller reset (clears
 * pending interrupts), full reset of every channel, then enable the
 * time-out and per-channel done/error interrupts.
 */
static int
sec_init(struct sec_softc *sc)
{
	uint64_t reg;
	int error, i;

	/* Reset controller twice to clear all pending interrupts */
	error = sec_controller_reset(sc);
	if (error)
		return (error);

	error = sec_controller_reset(sc);
	if (error)
		return (error);

	/* Reset channels */
	for (i = 0; i < SEC_CHANNELS; i++) {
		error = sec_channel_reset(sc, i, 1);
		if (error)
			return (error);
	}

	/* Enable Interrupts */
	reg = SEC_INT_ITO;
	for (i = 0; i < SEC_CHANNELS; i++)
		reg |= SEC_INT_CH_DN(i) | SEC_INT_CH_ERR(i);

	SEC_WRITE(sc, SEC_IER, reg);

	return (error);
}

/*
 * busdma callback for sec_alloc_dma_mem(): record the (single) segment's
 * physical address in the sec_dma_mem descriptor.
 */
static void
sec_alloc_dma_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct sec_dma_mem *dma_mem = arg;

	if (error)
		return;

	KASSERT(nseg == 1, ("Wrong number of segments, should be 1"));
	dma_mem->dma_paddr = segs->ds_addr;
}

/*
 * busdma callback for sec_desc_map_dma(): translate the DMA segments of
 * a crypto request buffer into hardware link-table entries, honoring the
 * requested offset and size recorded in the sec_desc_map_info.
 */
static void
sec_dma_map_desc_cb(void *arg, bus_dma_segment_t *segs, int nseg,
    int error)
{
	struct sec_desc_map_info *sdmi = arg;
	struct sec_softc *sc = sdmi->sdmi_sc;
	struct sec_lt *lt = NULL;
	bus_addr_t addr;
	bus_size_t size;
	int i;

	SEC_LOCK_ASSERT(sc, descriptors);

	if (error)
		return;

	for (i = 0; i < nseg; i++) {
		addr = segs[i].ds_addr;
		size = segs[i].ds_len;

		/* Skip requested offset */
		if (sdmi->sdmi_offset >= size) {
			sdmi->sdmi_offset -= size;
			continue;
		}

		addr += sdmi->sdmi_offset;
		size -= sdmi->sdmi_offset;
		sdmi->sdmi_offset = 0;

		/* Do not link more than requested */
		if (sdmi->sdmi_size < size)
			size = sdmi->sdmi_size;

		lt = SEC_ALLOC_LT_ENTRY(sc);
		lt->sl_lt->shl_length = size;
		lt->sl_lt->shl_r = 0;
		lt->sl_lt->shl_n = 0;
		lt->sl_lt->shl_ptr = addr;

		if (sdmi->sdmi_lt_first == NULL)
			sdmi->sdmi_lt_first = lt;

		sdmi->sdmi_lt_used += 1;

		if ((sdmi->sdmi_size -= size) == 0)
			break;
	}

	/*
	 * Remember the last entry used; the caller sets its return bit.
	 * NOTE(review): if no segment reached the requested offset this
	 * stays NULL — callers appear to rely on the mapping always
	 * producing at least one entry.
	 */
	sdmi->sdmi_lt_last = lt;
}

/*
 * Allocate and map a contiguous, 32-bit-addressable DMA region of the
 * given size for driver-owned structures (descriptor ring, link tables).
 * Returns EBUSY if the sec_dma_mem is already in use.
 */
static int
sec_alloc_dma_mem(struct sec_softc *sc, struct sec_dma_mem *dma_mem,
    bus_size_t size)
{
	int error;

	if (dma_mem->dma_vaddr != NULL)
		return (EBUSY);

	error = bus_dma_tag_create(NULL,	/* parent */
	    SEC_DMA_ALIGNMENT, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    size, 1,				/* maxsize, nsegments */
	    size, 0,				/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &(dma_mem->dma_tag));		/* dmat */

	if (error) {
		device_printf(sc->sc_dev, "failed to allocate busdma tag, error"
		    " %i!\n", error);
		goto err1;
	}

	error = bus_dmamem_alloc(dma_mem->dma_tag, &(dma_mem->dma_vaddr),
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &(dma_mem->dma_map));

	if (error) {
		device_printf(sc->sc_dev, "failed to allocate DMA safe"
		    " memory, error %i!\n", error);
		goto err2;
	}

	error = bus_dmamap_load(dma_mem->dma_tag, dma_mem->dma_map,
	    dma_mem->dma_vaddr, size, sec_alloc_dma_mem_cb, dma_mem,
	    BUS_DMA_NOWAIT);

	if (error) {
		device_printf(sc->sc_dev, "cannot get address of the DMA"
		    " memory, error %i\n", error);
		goto err3;
	}

	/* Allocated memory, not an externally supplied buffer mapping */
	dma_mem->dma_is_map = 0;
	return (0);

err3:
	bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr, dma_mem->dma_map);
err2:
	bus_dma_tag_destroy(dma_mem->dma_tag);
err1:
	dma_mem->dma_vaddr = NULL;
	return(error);
}

/*
 * Map a crypto request buffer for DMA and build link-table entries for
 * it via sec_dma_map_desc_cb().  The maximum mapping size is derived
 * from the buffer type; "size" as passed is used for contiguous buffers.
 */
static int
sec_desc_map_dma(struct sec_softc *sc, struct sec_dma_mem *dma_mem,
    struct cryptop *crp, bus_size_t size, struct sec_desc_map_info *sdmi)
{
	int error;

	if (dma_mem->dma_vaddr != NULL)
		return (EBUSY);

	switch (crp->crp_buf.cb_type) {
	case CRYPTO_BUF_CONTIG:
		break;
	case CRYPTO_BUF_UIO:
		size = SEC_FREE_LT_CNT(sc) * SEC_MAX_DMA_BLOCK_SIZE;
		break;
	case CRYPTO_BUF_MBUF:
		size = m_length(crp->crp_buf.cb_mbuf, NULL);
		break;
	case CRYPTO_BUF_VMPAGE:
		size = PAGE_SIZE - crp->crp_buf.cb_vm_page_offset;
		break;
	default:
		return (EINVAL);
	}

	error = bus_dma_tag_create(NULL,	/* parent */
	    SEC_DMA_ALIGNMENT, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    size,				/* maxsize */
	    SEC_FREE_LT_CNT(sc),		/* nsegments */
	    SEC_MAX_DMA_BLOCK_SIZE, 0,		/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &(dma_mem->dma_tag));		/* dmat */

	if (error) {
		device_printf(sc->sc_dev, "failed to allocate busdma tag, error"
		    " %i!\n", error);
		dma_mem->dma_vaddr = NULL;
		return (error);
	}

	error = bus_dmamap_create(dma_mem->dma_tag, 0, &(dma_mem->dma_map));

	if (error) {
		device_printf(sc->sc_dev, "failed to create DMA map, error %i!"
882 "\n", error); 883 bus_dma_tag_destroy(dma_mem->dma_tag); 884 return (error); 885 } 886 887 error = bus_dmamap_load_crp(dma_mem->dma_tag, dma_mem->dma_map, crp, 888 sec_dma_map_desc_cb, sdmi, BUS_DMA_NOWAIT); 889 890 if (error) { 891 device_printf(sc->sc_dev, "cannot get address of the DMA" 892 " memory, error %i!\n", error); 893 bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map); 894 bus_dma_tag_destroy(dma_mem->dma_tag); 895 return (error); 896 } 897 898 dma_mem->dma_is_map = 1; 899 dma_mem->dma_vaddr = crp; 900 901 return (0); 902 } 903 904 static void 905 sec_free_dma_mem(struct sec_dma_mem *dma_mem) 906 { 907 908 /* Check for double free */ 909 if (dma_mem->dma_vaddr == NULL) 910 return; 911 912 bus_dmamap_unload(dma_mem->dma_tag, dma_mem->dma_map); 913 914 if (dma_mem->dma_is_map) 915 bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map); 916 else 917 bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr, 918 dma_mem->dma_map); 919 920 bus_dma_tag_destroy(dma_mem->dma_tag); 921 dma_mem->dma_vaddr = NULL; 922 } 923 924 static int 925 sec_eu_channel(struct sec_softc *sc, int eu) 926 { 927 uint64_t reg; 928 int channel = 0; 929 930 SEC_LOCK_ASSERT(sc, controller); 931 932 reg = SEC_READ(sc, SEC_EUASR); 933 934 switch (eu) { 935 case SEC_EU_AFEU: 936 channel = SEC_EUASR_AFEU(reg); 937 break; 938 case SEC_EU_DEU: 939 channel = SEC_EUASR_DEU(reg); 940 break; 941 case SEC_EU_MDEU_A: 942 case SEC_EU_MDEU_B: 943 channel = SEC_EUASR_MDEU(reg); 944 break; 945 case SEC_EU_RNGU: 946 channel = SEC_EUASR_RNGU(reg); 947 break; 948 case SEC_EU_PKEU: 949 channel = SEC_EUASR_PKEU(reg); 950 break; 951 case SEC_EU_AESU: 952 channel = SEC_EUASR_AESU(reg); 953 break; 954 case SEC_EU_KEU: 955 channel = SEC_EUASR_KEU(reg); 956 break; 957 case SEC_EU_CRCU: 958 channel = SEC_EUASR_CRCU(reg); 959 break; 960 } 961 962 return (channel - 1); 963 } 964 965 static int 966 sec_enqueue_desc(struct sec_softc *sc, struct sec_desc *desc, int channel) 967 { 968 u_int fflvl = 
SEC_MAX_FIFO_LEVEL; 969 uint64_t reg; 970 int i; 971 972 SEC_LOCK_ASSERT(sc, controller); 973 974 /* Find free channel if have not got one */ 975 if (channel < 0) { 976 for (i = 0; i < SEC_CHANNELS; i++) { 977 reg = SEC_READ(sc, SEC_CHAN_CSR(channel)); 978 979 if ((reg & sc->sc_channel_idle_mask) == 0) { 980 channel = i; 981 break; 982 } 983 } 984 } 985 986 /* There is no free channel */ 987 if (channel < 0) 988 return (-1); 989 990 /* Check FIFO level on selected channel */ 991 reg = SEC_READ(sc, SEC_CHAN_CSR(channel)); 992 993 switch(sc->sc_version) { 994 case 2: 995 fflvl = (reg >> SEC_CHAN_CSR2_FFLVL_S) & SEC_CHAN_CSR2_FFLVL_M; 996 break; 997 case 3: 998 fflvl = (reg >> SEC_CHAN_CSR3_FFLVL_S) & SEC_CHAN_CSR3_FFLVL_M; 999 break; 1000 } 1001 1002 if (fflvl >= SEC_MAX_FIFO_LEVEL) 1003 return (-1); 1004 1005 /* Enqueue descriptor in channel */ 1006 SEC_WRITE(sc, SEC_CHAN_FF(channel), desc->sd_desc_paddr); 1007 1008 return (channel); 1009 } 1010 1011 static void 1012 sec_enqueue(struct sec_softc *sc) 1013 { 1014 struct sec_desc *desc; 1015 int ch0, ch1; 1016 1017 SEC_LOCK(sc, descriptors); 1018 SEC_LOCK(sc, controller); 1019 1020 while (SEC_READY_DESC_CNT(sc) > 0) { 1021 desc = SEC_GET_READY_DESC(sc); 1022 1023 ch0 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel0); 1024 ch1 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel1); 1025 1026 /* 1027 * Both EU are used by the same channel. 1028 * Enqueue descriptor in channel used by busy EUs. 1029 */ 1030 if (ch0 >= 0 && ch0 == ch1) { 1031 if (sec_enqueue_desc(sc, desc, ch0) >= 0) { 1032 SEC_DESC_READY2QUEUED(sc); 1033 continue; 1034 } 1035 } 1036 1037 /* 1038 * Only one EU is free. 1039 * Enqueue descriptor in channel used by busy EU. 1040 */ 1041 if ((ch0 >= 0 && ch1 < 0) || (ch1 >= 0 && ch0 < 0)) { 1042 if (sec_enqueue_desc(sc, desc, (ch0 >= 0) ? ch0 : ch1) 1043 >= 0) { 1044 SEC_DESC_READY2QUEUED(sc); 1045 continue; 1046 } 1047 } 1048 1049 /* 1050 * Both EU are free. 1051 * Enqueue descriptor in first free channel. 
1052 */ 1053 if (ch0 < 0 && ch1 < 0) { 1054 if (sec_enqueue_desc(sc, desc, -1) >= 0) { 1055 SEC_DESC_READY2QUEUED(sc); 1056 continue; 1057 } 1058 } 1059 1060 /* Current descriptor can not be queued at the moment */ 1061 SEC_PUT_BACK_READY_DESC(sc); 1062 break; 1063 } 1064 1065 SEC_UNLOCK(sc, controller); 1066 SEC_UNLOCK(sc, descriptors); 1067 } 1068 1069 static struct sec_desc * 1070 sec_find_desc(struct sec_softc *sc, bus_addr_t paddr) 1071 { 1072 struct sec_desc *desc = NULL; 1073 int i; 1074 1075 SEC_LOCK_ASSERT(sc, descriptors); 1076 1077 for (i = 0; i < SEC_CHANNELS; i++) { 1078 if (sc->sc_desc[i].sd_desc_paddr == paddr) { 1079 desc = &(sc->sc_desc[i]); 1080 break; 1081 } 1082 } 1083 1084 return (desc); 1085 } 1086 1087 static int 1088 sec_make_pointer_direct(struct sec_softc *sc, struct sec_desc *desc, u_int n, 1089 bus_addr_t data, bus_size_t dsize) 1090 { 1091 struct sec_hw_desc_ptr *ptr; 1092 1093 SEC_LOCK_ASSERT(sc, descriptors); 1094 1095 ptr = &(desc->sd_desc->shd_pointer[n]); 1096 ptr->shdp_length = dsize; 1097 ptr->shdp_extent = 0; 1098 ptr->shdp_j = 0; 1099 ptr->shdp_ptr = data; 1100 1101 return (0); 1102 } 1103 1104 static int 1105 sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc, 1106 u_int n, struct cryptop *crp, bus_size_t doffset, bus_size_t dsize) 1107 { 1108 struct sec_desc_map_info sdmi = { sc, dsize, doffset, NULL, NULL, 0 }; 1109 struct sec_hw_desc_ptr *ptr; 1110 int error; 1111 1112 SEC_LOCK_ASSERT(sc, descriptors); 1113 1114 error = sec_desc_map_dma(sc, &(desc->sd_ptr_dmem[n]), crp, dsize, 1115 &sdmi); 1116 1117 if (error) 1118 return (error); 1119 1120 sdmi.sdmi_lt_last->sl_lt->shl_r = 1; 1121 desc->sd_lt_used += sdmi.sdmi_lt_used; 1122 1123 ptr = &(desc->sd_desc->shd_pointer[n]); 1124 ptr->shdp_length = dsize; 1125 ptr->shdp_extent = 0; 1126 ptr->shdp_j = 1; 1127 ptr->shdp_ptr = sdmi.sdmi_lt_first->sl_lt_paddr; 1128 1129 return (0); 1130 } 1131 1132 static bool 1133 sec_cipher_supported(const struct crypto_session_params 
*csp) 1134 { 1135 1136 switch (csp->csp_cipher_alg) { 1137 case CRYPTO_AES_CBC: 1138 /* AESU */ 1139 if (csp->csp_ivlen != AES_BLOCK_LEN) 1140 return (false); 1141 break; 1142 default: 1143 return (false); 1144 } 1145 1146 if (csp->csp_cipher_klen == 0 || csp->csp_cipher_klen > SEC_MAX_KEY_LEN) 1147 return (false); 1148 1149 return (true); 1150 } 1151 1152 static bool 1153 sec_auth_supported(struct sec_softc *sc, 1154 const struct crypto_session_params *csp) 1155 { 1156 1157 switch (csp->csp_auth_alg) { 1158 case CRYPTO_SHA2_384_HMAC: 1159 case CRYPTO_SHA2_512_HMAC: 1160 if (sc->sc_version < 3) 1161 return (false); 1162 /* FALLTHROUGH */ 1163 case CRYPTO_SHA1_HMAC: 1164 case CRYPTO_SHA2_256_HMAC: 1165 if (csp->csp_auth_klen > SEC_MAX_KEY_LEN) 1166 return (false); 1167 break; 1168 case CRYPTO_SHA1: 1169 break; 1170 default: 1171 return (false); 1172 } 1173 return (true); 1174 } 1175 1176 static int 1177 sec_probesession(device_t dev, const struct crypto_session_params *csp) 1178 { 1179 struct sec_softc *sc = device_get_softc(dev); 1180 1181 if (csp->csp_flags != 0) 1182 return (EINVAL); 1183 switch (csp->csp_mode) { 1184 case CSP_MODE_DIGEST: 1185 if (!sec_auth_supported(sc, csp)) 1186 return (EINVAL); 1187 break; 1188 case CSP_MODE_CIPHER: 1189 if (!sec_cipher_supported(csp)) 1190 return (EINVAL); 1191 break; 1192 case CSP_MODE_ETA: 1193 if (!sec_auth_supported(sc, csp) || !sec_cipher_supported(csp)) 1194 return (EINVAL); 1195 break; 1196 default: 1197 return (EINVAL); 1198 } 1199 return (CRYPTODEV_PROBE_HARDWARE); 1200 } 1201 1202 static int 1203 sec_newsession(device_t dev, crypto_session_t cses, 1204 const struct crypto_session_params *csp) 1205 { 1206 struct sec_eu_methods *eu = sec_eus; 1207 struct sec_session *ses; 1208 1209 ses = crypto_get_driver_session(cses); 1210 1211 /* Find EU for this session */ 1212 while (eu->sem_make_desc != NULL) { 1213 if (eu->sem_newsession(csp)) 1214 break; 1215 eu++; 1216 } 1217 KASSERT(eu->sem_make_desc != NULL, ("failed to 
find eu for session")); 1218 1219 /* Save cipher key */ 1220 if (csp->csp_cipher_key != NULL) 1221 memcpy(ses->ss_key, csp->csp_cipher_key, csp->csp_cipher_klen); 1222 1223 /* Save digest key */ 1224 if (csp->csp_auth_key != NULL) 1225 memcpy(ses->ss_mkey, csp->csp_auth_key, csp->csp_auth_klen); 1226 1227 if (csp->csp_auth_alg != 0) { 1228 if (csp->csp_auth_mlen == 0) 1229 ses->ss_mlen = crypto_auth_hash(csp)->hashsize; 1230 else 1231 ses->ss_mlen = csp->csp_auth_mlen; 1232 } 1233 1234 return (0); 1235 } 1236 1237 static int 1238 sec_process(device_t dev, struct cryptop *crp, int hint) 1239 { 1240 struct sec_softc *sc = device_get_softc(dev); 1241 struct sec_desc *desc = NULL; 1242 const struct crypto_session_params *csp; 1243 struct sec_session *ses; 1244 int error = 0; 1245 1246 ses = crypto_get_driver_session(crp->crp_session); 1247 csp = crypto_get_params(crp->crp_session); 1248 1249 /* Check for input length */ 1250 if (crypto_buffer_len(&crp->crp_buf) > SEC_MAX_DMA_BLOCK_SIZE) { 1251 crp->crp_etype = E2BIG; 1252 crypto_done(crp); 1253 return (0); 1254 } 1255 1256 SEC_LOCK(sc, descriptors); 1257 SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1258 1259 /* Block driver if there is no free descriptors or we are going down */ 1260 if (SEC_FREE_DESC_CNT(sc) == 0 || sc->sc_shutdown) { 1261 sc->sc_blocked |= CRYPTO_SYMQ; 1262 SEC_UNLOCK(sc, descriptors); 1263 return (ERESTART); 1264 } 1265 1266 /* Prepare descriptor */ 1267 desc = SEC_GET_FREE_DESC(sc); 1268 desc->sd_lt_used = 0; 1269 desc->sd_error = 0; 1270 desc->sd_crp = crp; 1271 1272 if (csp->csp_cipher_alg != 0) 1273 crypto_read_iv(crp, desc->sd_desc->shd_iv); 1274 1275 if (crp->crp_cipher_key != NULL) 1276 memcpy(ses->ss_key, crp->crp_cipher_key, csp->csp_cipher_klen); 1277 1278 if (crp->crp_auth_key != NULL) 1279 memcpy(ses->ss_mkey, crp->crp_auth_key, csp->csp_auth_klen); 1280 1281 memcpy(desc->sd_desc->shd_key, ses->ss_key, csp->csp_cipher_klen); 1282 memcpy(desc->sd_desc->shd_mkey, 
ses->ss_mkey, csp->csp_auth_klen); 1283 1284 error = ses->ss_eu->sem_make_desc(sc, csp, desc, crp); 1285 1286 if (error) { 1287 SEC_DESC_FREE_POINTERS(desc); 1288 SEC_DESC_PUT_BACK_LT(sc, desc); 1289 SEC_PUT_BACK_FREE_DESC(sc); 1290 SEC_UNLOCK(sc, descriptors); 1291 crp->crp_etype = error; 1292 crypto_done(crp); 1293 return (0); 1294 } 1295 1296 /* 1297 * Skip DONE interrupt if this is not last request in burst, but only 1298 * if we are running on SEC 3.X. On SEC 2.X we have to enable DONE 1299 * signaling on each descriptor. 1300 */ 1301 if ((hint & CRYPTO_HINT_MORE) && sc->sc_version == 3) 1302 desc->sd_desc->shd_dn = 0; 1303 else 1304 desc->sd_desc->shd_dn = 1; 1305 1306 SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1307 SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_POSTREAD | 1308 BUS_DMASYNC_POSTWRITE); 1309 SEC_DESC_FREE2READY(sc); 1310 SEC_UNLOCK(sc, descriptors); 1311 1312 /* Enqueue ready descriptors in hardware */ 1313 sec_enqueue(sc); 1314 1315 return (0); 1316 } 1317 1318 static int 1319 sec_build_common_ns_desc(struct sec_softc *sc, struct sec_desc *desc, 1320 const struct crypto_session_params *csp, struct cryptop *crp) 1321 { 1322 struct sec_hw_desc *hd = desc->sd_desc; 1323 int error; 1324 1325 hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP; 1326 hd->shd_eu_sel1 = SEC_EU_NONE; 1327 hd->shd_mode1 = 0; 1328 1329 /* Pointer 0: NULL */ 1330 error = sec_make_pointer_direct(sc, desc, 0, 0, 0); 1331 if (error) 1332 return (error); 1333 1334 /* Pointer 1: IV IN */ 1335 error = sec_make_pointer_direct(sc, desc, 1, desc->sd_desc_paddr + 1336 offsetof(struct sec_hw_desc, shd_iv), csp->csp_ivlen); 1337 if (error) 1338 return (error); 1339 1340 /* Pointer 2: Cipher Key */ 1341 error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr + 1342 offsetof(struct sec_hw_desc, shd_key), csp->csp_cipher_klen); 1343 if (error) 1344 return (error); 1345 1346 /* Pointer 3: Data IN */ 1347 error = sec_make_pointer(sc, desc, 3, crp, crp->crp_payload_start, 
1348 crp->crp_payload_length); 1349 if (error) 1350 return (error); 1351 1352 /* Pointer 4: Data OUT */ 1353 error = sec_make_pointer(sc, desc, 4, crp, crp->crp_payload_start, 1354 crp->crp_payload_length); 1355 if (error) 1356 return (error); 1357 1358 /* Pointer 5: IV OUT (Not used: NULL) */ 1359 error = sec_make_pointer_direct(sc, desc, 5, 0, 0); 1360 if (error) 1361 return (error); 1362 1363 /* Pointer 6: NULL */ 1364 error = sec_make_pointer_direct(sc, desc, 6, 0, 0); 1365 1366 return (error); 1367 } 1368 1369 static int 1370 sec_build_common_s_desc(struct sec_softc *sc, struct sec_desc *desc, 1371 const struct crypto_session_params *csp, struct cryptop *crp) 1372 { 1373 struct sec_hw_desc *hd = desc->sd_desc; 1374 u_int eu, mode, hashlen; 1375 int error; 1376 1377 error = sec_mdeu_config(csp, &eu, &mode, &hashlen); 1378 if (error) 1379 return (error); 1380 1381 hd->shd_desc_type = SEC_DT_HMAC_SNOOP; 1382 hd->shd_eu_sel1 = eu; 1383 hd->shd_mode1 = mode; 1384 1385 /* Pointer 0: HMAC Key */ 1386 error = sec_make_pointer_direct(sc, desc, 0, desc->sd_desc_paddr + 1387 offsetof(struct sec_hw_desc, shd_mkey), csp->csp_auth_klen); 1388 if (error) 1389 return (error); 1390 1391 /* Pointer 1: HMAC-Only Data IN */ 1392 error = sec_make_pointer(sc, desc, 1, crp, crp->crp_aad_start, 1393 crp->crp_aad_length); 1394 if (error) 1395 return (error); 1396 1397 /* Pointer 2: Cipher Key */ 1398 error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr + 1399 offsetof(struct sec_hw_desc, shd_key), csp->csp_cipher_klen); 1400 if (error) 1401 return (error); 1402 1403 /* Pointer 3: IV IN */ 1404 error = sec_make_pointer_direct(sc, desc, 3, desc->sd_desc_paddr + 1405 offsetof(struct sec_hw_desc, shd_iv), csp->csp_ivlen); 1406 if (error) 1407 return (error); 1408 1409 /* Pointer 4: Data IN */ 1410 error = sec_make_pointer(sc, desc, 4, crp, crp->crp_payload_start, 1411 crp->crp_payload_length); 1412 if (error) 1413 return (error); 1414 1415 /* Pointer 5: Data OUT */ 1416 error 
= sec_make_pointer(sc, desc, 5, crp, crp->crp_payload_start, 1417 crp->crp_payload_length); 1418 if (error) 1419 return (error); 1420 1421 /* Pointer 6: HMAC OUT */ 1422 error = sec_make_pointer_direct(sc, desc, 6, desc->sd_desc_paddr + 1423 offsetof(struct sec_hw_desc, shd_digest), hashlen); 1424 1425 return (error); 1426 } 1427 1428 /* AESU */ 1429 1430 static bool 1431 sec_aesu_newsession(const struct crypto_session_params *csp) 1432 { 1433 1434 return (csp->csp_cipher_alg == CRYPTO_AES_CBC); 1435 } 1436 1437 static int 1438 sec_aesu_make_desc(struct sec_softc *sc, 1439 const struct crypto_session_params *csp, struct sec_desc *desc, 1440 struct cryptop *crp) 1441 { 1442 struct sec_hw_desc *hd = desc->sd_desc; 1443 int error; 1444 1445 hd->shd_eu_sel0 = SEC_EU_AESU; 1446 hd->shd_mode0 = SEC_AESU_MODE_CBC; 1447 1448 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { 1449 hd->shd_mode0 |= SEC_AESU_MODE_ED; 1450 hd->shd_dir = 0; 1451 } else 1452 hd->shd_dir = 1; 1453 1454 if (csp->csp_mode == CSP_MODE_ETA) 1455 error = sec_build_common_s_desc(sc, desc, csp, crp); 1456 else 1457 error = sec_build_common_ns_desc(sc, desc, csp, crp); 1458 1459 return (error); 1460 } 1461 1462 /* MDEU */ 1463 1464 static bool 1465 sec_mdeu_can_handle(u_int alg) 1466 { 1467 switch (alg) { 1468 case CRYPTO_SHA1: 1469 case CRYPTO_SHA1_HMAC: 1470 case CRYPTO_SHA2_256_HMAC: 1471 case CRYPTO_SHA2_384_HMAC: 1472 case CRYPTO_SHA2_512_HMAC: 1473 return (true); 1474 default: 1475 return (false); 1476 } 1477 } 1478 1479 static int 1480 sec_mdeu_config(const struct crypto_session_params *csp, u_int *eu, u_int *mode, 1481 u_int *hashlen) 1482 { 1483 1484 *mode = SEC_MDEU_MODE_PD | SEC_MDEU_MODE_INIT; 1485 *eu = SEC_EU_NONE; 1486 1487 switch (csp->csp_auth_alg) { 1488 case CRYPTO_SHA1_HMAC: 1489 *mode |= SEC_MDEU_MODE_HMAC; 1490 /* FALLTHROUGH */ 1491 case CRYPTO_SHA1: 1492 *eu = SEC_EU_MDEU_A; 1493 *mode |= SEC_MDEU_MODE_SHA1; 1494 *hashlen = SHA1_HASH_LEN; 1495 break; 1496 case CRYPTO_SHA2_256_HMAC: 1497 
*mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA256; 1498 *eu = SEC_EU_MDEU_A; 1499 break; 1500 case CRYPTO_SHA2_384_HMAC: 1501 *mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA384; 1502 *eu = SEC_EU_MDEU_B; 1503 break; 1504 case CRYPTO_SHA2_512_HMAC: 1505 *mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA512; 1506 *eu = SEC_EU_MDEU_B; 1507 break; 1508 default: 1509 return (EINVAL); 1510 } 1511 1512 if (*mode & SEC_MDEU_MODE_HMAC) 1513 *hashlen = SEC_HMAC_HASH_LEN; 1514 1515 return (0); 1516 } 1517 1518 static bool 1519 sec_mdeu_newsession(const struct crypto_session_params *csp) 1520 { 1521 1522 return (sec_mdeu_can_handle(csp->csp_auth_alg)); 1523 } 1524 1525 static int 1526 sec_mdeu_make_desc(struct sec_softc *sc, 1527 const struct crypto_session_params *csp, 1528 struct sec_desc *desc, struct cryptop *crp) 1529 { 1530 struct sec_hw_desc *hd = desc->sd_desc; 1531 u_int eu, mode, hashlen; 1532 int error; 1533 1534 error = sec_mdeu_config(csp, &eu, &mode, &hashlen); 1535 if (error) 1536 return (error); 1537 1538 hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP; 1539 hd->shd_eu_sel0 = eu; 1540 hd->shd_mode0 = mode; 1541 hd->shd_eu_sel1 = SEC_EU_NONE; 1542 hd->shd_mode1 = 0; 1543 1544 /* Pointer 0: NULL */ 1545 error = sec_make_pointer_direct(sc, desc, 0, 0, 0); 1546 if (error) 1547 return (error); 1548 1549 /* Pointer 1: Context In (Not used: NULL) */ 1550 error = sec_make_pointer_direct(sc, desc, 1, 0, 0); 1551 if (error) 1552 return (error); 1553 1554 /* Pointer 2: HMAC Key (or NULL, depending on digest type) */ 1555 if (hd->shd_mode0 & SEC_MDEU_MODE_HMAC) 1556 error = sec_make_pointer_direct(sc, desc, 2, 1557 desc->sd_desc_paddr + offsetof(struct sec_hw_desc, 1558 shd_mkey), csp->csp_auth_klen); 1559 else 1560 error = sec_make_pointer_direct(sc, desc, 2, 0, 0); 1561 1562 if (error) 1563 return (error); 1564 1565 /* Pointer 3: Input Data */ 1566 error = sec_make_pointer(sc, desc, 3, crp, crp->crp_payload_start, 1567 crp->crp_payload_length); 1568 if (error) 1569 return 
(error); 1570 1571 /* Pointer 4: NULL */ 1572 error = sec_make_pointer_direct(sc, desc, 4, 0, 0); 1573 if (error) 1574 return (error); 1575 1576 /* Pointer 5: Hash out */ 1577 error = sec_make_pointer_direct(sc, desc, 5, desc->sd_desc_paddr + 1578 offsetof(struct sec_hw_desc, shd_digest), hashlen); 1579 if (error) 1580 return (error); 1581 1582 /* Pointer 6: NULL */ 1583 error = sec_make_pointer_direct(sc, desc, 6, 0, 0); 1584 1585 return (0); 1586 } 1587