1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (C) 2008-2009 Semihalf, Piotr Ziecik 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN 19 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 20 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 21 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 22 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 23 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 24 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 25 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 */ 27 28 /* 29 * Freescale integrated Security Engine (SEC) driver. Currently SEC 2.0 and 30 * 3.0 are supported. 
31 */ 32 33 #include <sys/cdefs.h> 34 #include <sys/param.h> 35 #include <sys/systm.h> 36 #include <sys/bus.h> 37 #include <sys/endian.h> 38 #include <sys/kernel.h> 39 #include <sys/lock.h> 40 #include <sys/malloc.h> 41 #include <sys/mbuf.h> 42 #include <sys/module.h> 43 #include <sys/mutex.h> 44 #include <sys/random.h> 45 #include <sys/rman.h> 46 47 #include <machine/_inttypes.h> 48 #include <machine/bus.h> 49 #include <machine/resource.h> 50 51 #include <opencrypto/cryptodev.h> 52 #include <opencrypto/xform_auth.h> 53 #include "cryptodev_if.h" 54 55 #include <dev/ofw/ofw_bus_subr.h> 56 #include <dev/sec/sec.h> 57 58 static int sec_probe(device_t dev); 59 static int sec_attach(device_t dev); 60 static int sec_detach(device_t dev); 61 static int sec_suspend(device_t dev); 62 static int sec_resume(device_t dev); 63 static int sec_shutdown(device_t dev); 64 static void sec_primary_intr(void *arg); 65 static void sec_secondary_intr(void *arg); 66 static int sec_setup_intr(struct sec_softc *sc, struct resource **ires, 67 void **ihand, int *irid, driver_intr_t handler, const char *iname); 68 static void sec_release_intr(struct sec_softc *sc, struct resource *ires, 69 void *ihand, int irid, const char *iname); 70 static int sec_controller_reset(struct sec_softc *sc); 71 static int sec_channel_reset(struct sec_softc *sc, int channel, int full); 72 static int sec_init(struct sec_softc *sc); 73 static int sec_alloc_dma_mem(struct sec_softc *sc, 74 struct sec_dma_mem *dma_mem, bus_size_t size); 75 static int sec_desc_map_dma(struct sec_softc *sc, 76 struct sec_dma_mem *dma_mem, struct cryptop *crp, bus_size_t size, 77 struct sec_desc_map_info *sdmi); 78 static void sec_free_dma_mem(struct sec_dma_mem *dma_mem); 79 static void sec_enqueue(struct sec_softc *sc); 80 static int sec_enqueue_desc(struct sec_softc *sc, struct sec_desc *desc, 81 int channel); 82 static int sec_eu_channel(struct sec_softc *sc, int eu); 83 static int sec_make_pointer(struct sec_softc *sc, struct 
sec_desc *desc, 84 u_int n, struct cryptop *crp, bus_size_t doffset, bus_size_t dsize); 85 static int sec_make_pointer_direct(struct sec_softc *sc, 86 struct sec_desc *desc, u_int n, bus_addr_t data, bus_size_t dsize); 87 static int sec_probesession(device_t dev, 88 const struct crypto_session_params *csp); 89 static int sec_newsession(device_t dev, crypto_session_t cses, 90 const struct crypto_session_params *csp); 91 static int sec_process(device_t dev, struct cryptop *crp, int hint); 92 static int sec_build_common_ns_desc(struct sec_softc *sc, 93 struct sec_desc *desc, const struct crypto_session_params *csp, 94 struct cryptop *crp); 95 static int sec_build_common_s_desc(struct sec_softc *sc, 96 struct sec_desc *desc, const struct crypto_session_params *csp, 97 struct cryptop *crp); 98 99 static struct sec_desc *sec_find_desc(struct sec_softc *sc, bus_addr_t paddr); 100 101 /* AESU */ 102 static bool sec_aesu_newsession(const struct crypto_session_params *csp); 103 static int sec_aesu_make_desc(struct sec_softc *sc, 104 const struct crypto_session_params *csp, struct sec_desc *desc, 105 struct cryptop *crp); 106 107 /* MDEU */ 108 static bool sec_mdeu_can_handle(u_int alg); 109 static int sec_mdeu_config(const struct crypto_session_params *csp, 110 u_int *eu, u_int *mode, u_int *hashlen); 111 static bool sec_mdeu_newsession(const struct crypto_session_params *csp); 112 static int sec_mdeu_make_desc(struct sec_softc *sc, 113 const struct crypto_session_params *csp, struct sec_desc *desc, 114 struct cryptop *crp); 115 116 static device_method_t sec_methods[] = { 117 /* Device interface */ 118 DEVMETHOD(device_probe, sec_probe), 119 DEVMETHOD(device_attach, sec_attach), 120 DEVMETHOD(device_detach, sec_detach), 121 122 DEVMETHOD(device_suspend, sec_suspend), 123 DEVMETHOD(device_resume, sec_resume), 124 DEVMETHOD(device_shutdown, sec_shutdown), 125 126 /* Crypto methods */ 127 DEVMETHOD(cryptodev_probesession, sec_probesession), 128 DEVMETHOD(cryptodev_newsession, 
sec_newsession),
	DEVMETHOD(cryptodev_process,	sec_process),

	DEVMETHOD_END
};

static driver_t sec_driver = {
	"sec",
	sec_methods,
	sizeof(struct sec_softc),
};

DRIVER_MODULE(sec, simplebus, sec_driver, 0, 0);
MODULE_DEPEND(sec, crypto, 1, 1, 1);

/* Per-execution-unit session/descriptor handlers; NULL pair terminates */
static struct sec_eu_methods sec_eus[] = {
	{
		sec_aesu_newsession,
		sec_aesu_make_desc,
	},
	{
		sec_mdeu_newsession,
		sec_mdeu_make_desc,
	},
	{ NULL, NULL }
};

static inline void
sec_sync_dma_mem(struct sec_dma_mem *dma_mem, bus_dmasync_op_t op)
{

	/* Sync only if dma memory is valid */
	if (dma_mem->dma_vaddr != NULL)
		bus_dmamap_sync(dma_mem->dma_tag, dma_mem->dma_map, op);
}

/* Return the KVA backing hardware-descriptor pointer slot n */
static inline void *
sec_get_pointer_data(struct sec_desc *desc, u_int n)
{

	return (desc->sd_ptr_dmem[n].dma_vaddr);
}

/*
 * Probe: match the FDT node, temporarily map the registers to read the
 * SEC_ID register, and record the major version (2 or 3) in the softc.
 */
static int
sec_probe(device_t dev)
{
	struct sec_softc *sc;
	uint64_t id;

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_is_compatible(dev, "fsl,sec2.0"))
		return (ENXIO);

	sc = device_get_softc(dev);

	sc->sc_rrid = 0;
	sc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rrid,
	    RF_ACTIVE);

	if (sc->sc_rres == NULL)
		return (ENXIO);

	sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres);
	sc->sc_bas.bst = rman_get_bustag(sc->sc_rres);

	id = SEC_READ(sc, SEC_ID);

	/* Released here; attach re-allocates the register window */
	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres);

	switch (id) {
	case SEC_20_ID:
		device_set_desc(dev, "Freescale Security Engine 2.0");
		sc->sc_version = 2;
		break;
	case SEC_30_ID:
		device_set_desc(dev, "Freescale Security Engine 3.0");
		sc->sc_version = 3;
		break;
	case SEC_31_ID:
		device_set_desc(dev, "Freescale Security Engine 3.1");
		sc->sc_version = 3;
		break;
	default:
		device_printf(dev, "unknown SEC ID 0x%016"PRIx64"!\n", id);
		return (ENXIO);
	}

	return (0);
}

/*
 * Attach: register with OCF, map registers, wire interrupts (secondary
 * IRQ only on SEC 3.x), allocate descriptor/link-table DMA areas, set up
 * ring bookkeeping and fast-check masks, then init the hardware.  The
 * failN labels unwind in strict reverse order of acquisition.
 */
static int
sec_attach(device_t dev)
{
	struct sec_softc *sc;
	struct sec_hw_lt *lt;
	int error = 0;
	int i;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;
	sc->sc_blocked = 0;
	sc->sc_shutdown = 0;

	sc->sc_cid = crypto_get_driverid(dev, sizeof(struct sec_session),
	    CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0) {
		device_printf(dev, "could not get crypto driver ID!\n");
		return (ENXIO);
	}

	/* Init locks */
	mtx_init(&sc->sc_controller_lock, device_get_nameunit(dev),
	    "SEC Controller lock", MTX_DEF);
	mtx_init(&sc->sc_descriptors_lock, device_get_nameunit(dev),
	    "SEC Descriptors lock", MTX_DEF);

	/* Allocate I/O memory for SEC registers */
	sc->sc_rrid = 0;
	sc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rrid,
	    RF_ACTIVE);

	if (sc->sc_rres == NULL) {
		device_printf(dev, "could not allocate I/O memory!\n");
		goto fail1;
	}

	sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres);
	sc->sc_bas.bst = rman_get_bustag(sc->sc_rres);

	/* Setup interrupts */
	sc->sc_pri_irid = 0;
	error = sec_setup_intr(sc, &sc->sc_pri_ires, &sc->sc_pri_ihand,
	    &sc->sc_pri_irid, sec_primary_intr, "primary");

	if (error)
		goto fail2;

	if (sc->sc_version == 3) {
		sc->sc_sec_irid = 1;
		error = sec_setup_intr(sc, &sc->sc_sec_ires, &sc->sc_sec_ihand,
		    &sc->sc_sec_irid, sec_secondary_intr, "secondary");

		if (error)
			goto fail3;
	}

	/* Alloc DMA memory for descriptors and link tables */
	error = sec_alloc_dma_mem(sc, &(sc->sc_desc_dmem),
	    SEC_DESCRIPTORS * sizeof(struct sec_hw_desc));

	if (error)
		goto fail4;

	/* +1: extra link-table entry closes the circle below */
	error = sec_alloc_dma_mem(sc, &(sc->sc_lt_dmem),
	    (SEC_LT_ENTRIES + 1) * sizeof(struct sec_hw_lt));

	if (error)
		goto fail5;

	/* Fill in descriptors and link tables */
	for (i = 0; i < SEC_DESCRIPTORS; i++) {
		sc->sc_desc[i].sd_desc =
		    (struct sec_hw_desc*)(sc->sc_desc_dmem.dma_vaddr) + i;
		sc->sc_desc[i].sd_desc_paddr = sc->sc_desc_dmem.dma_paddr +
		    (i * sizeof(struct sec_hw_desc));
	}

	for (i = 0; i < SEC_LT_ENTRIES + 1; i++) {
		sc->sc_lt[i].sl_lt =
		    (struct sec_hw_lt*)(sc->sc_lt_dmem.dma_vaddr) + i;
		sc->sc_lt[i].sl_lt_paddr = sc->sc_lt_dmem.dma_paddr +
		    (i * sizeof(struct sec_hw_lt));
	}

	/* Last entry in link table is used to create a circle */
	lt = sc->sc_lt[SEC_LT_ENTRIES].sl_lt;
	lt->shl_length = 0;
	lt->shl_r = 0;
	lt->shl_n = 1;
	lt->shl_ptr = sc->sc_lt[0].sl_lt_paddr;

	/* Init descriptor and link table queues pointers */
	SEC_CNT_INIT(sc, sc_free_desc_get_cnt, SEC_DESCRIPTORS);
	SEC_CNT_INIT(sc, sc_free_desc_put_cnt, SEC_DESCRIPTORS);
	SEC_CNT_INIT(sc, sc_ready_desc_get_cnt, SEC_DESCRIPTORS);
	SEC_CNT_INIT(sc, sc_ready_desc_put_cnt, SEC_DESCRIPTORS);
	SEC_CNT_INIT(sc, sc_queued_desc_get_cnt, SEC_DESCRIPTORS);
	SEC_CNT_INIT(sc, sc_queued_desc_put_cnt, SEC_DESCRIPTORS);
	SEC_CNT_INIT(sc, sc_lt_alloc_cnt, SEC_LT_ENTRIES);
	SEC_CNT_INIT(sc, sc_lt_free_cnt, SEC_LT_ENTRIES);

	/* Create masks for fast checks */
	sc->sc_int_error_mask = 0;
	for (i = 0; i < SEC_CHANNELS; i++)
		sc->sc_int_error_mask |= (~0ULL & SEC_INT_CH_ERR(i));

	switch (sc->sc_version) {
	case 2:
		sc->sc_channel_idle_mask =
		    (SEC_CHAN_CSR2_FFLVL_M << SEC_CHAN_CSR2_FFLVL_S) |
		    (SEC_CHAN_CSR2_MSTATE_M << SEC_CHAN_CSR2_MSTATE_S) |
		    (SEC_CHAN_CSR2_PSTATE_M << SEC_CHAN_CSR2_PSTATE_S) |
		    (SEC_CHAN_CSR2_GSTATE_M << SEC_CHAN_CSR2_GSTATE_S);
		break;
	case 3:
		sc->sc_channel_idle_mask =
		    (SEC_CHAN_CSR3_FFLVL_M << SEC_CHAN_CSR3_FFLVL_S) |
		    (SEC_CHAN_CSR3_MSTATE_M << SEC_CHAN_CSR3_MSTATE_S) |
		    (SEC_CHAN_CSR3_PSTATE_M << SEC_CHAN_CSR3_PSTATE_S) |
		    (SEC_CHAN_CSR3_GSTATE_M << SEC_CHAN_CSR3_GSTATE_S);
		break;
	}

	/* Init hardware */
	error = sec_init(sc);

	if (error)
		goto fail6;

	return (0);

fail6:
	sec_free_dma_mem(&(sc->sc_lt_dmem));
fail5:
	sec_free_dma_mem(&(sc->sc_desc_dmem));
fail4:
	/* sec_release_intr() is a no-op when the IRQ was never set up */
	sec_release_intr(sc, sc->sc_sec_ires, sc->sc_sec_ihand,
	    sc->sc_sec_irid, "secondary");
fail3:
	sec_release_intr(sc, sc->sc_pri_ires, sc->sc_pri_ihand,
	    sc->sc_pri_irid, "primary");
fail2:
	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres);
fail1:
	mtx_destroy(&sc->sc_controller_lock);
	mtx_destroy(&sc->sc_descriptors_lock);

	return (ENXIO);
}

/*
 * Detach: drain queued work (forcing channel resets on timeout), mask
 * interrupts, unregister from OCF, then release DMA memory, IRQs, the
 * register window and the locks.
 */
static int
sec_detach(device_t dev)
{
	struct sec_softc *sc = device_get_softc(dev);
	int i, error, timeout = SEC_TIMEOUT;

	/* Prepare driver to shutdown */
	SEC_LOCK(sc, descriptors);
	sc->sc_shutdown = 1;
	SEC_UNLOCK(sc, descriptors);

	/* Wait until all queued processing finishes */
	while (1) {
		SEC_LOCK(sc, descriptors);
		i = SEC_READY_DESC_CNT(sc) + SEC_QUEUED_DESC_CNT(sc);
		SEC_UNLOCK(sc, descriptors);

		if (i == 0)
			break;

		if (timeout < 0) {
			device_printf(dev, "queue flush timeout!\n");

			/* DMA can be still active - stop it */
			for (i = 0; i < SEC_CHANNELS; i++)
				sec_channel_reset(sc, i, 1);

			break;
		}

		timeout -= 1000;
		DELAY(1000);
	}

	/* Disable interrupts */
	SEC_WRITE(sc, SEC_IER, 0);

	/* Unregister from OCF */
	crypto_unregister_all(sc->sc_cid);

	/* Free DMA memory */
	for (i = 0; i < SEC_DESCRIPTORS; i++)
		SEC_DESC_FREE_POINTERS(&(sc->sc_desc[i]));

	sec_free_dma_mem(&(sc->sc_lt_dmem));
	sec_free_dma_mem(&(sc->sc_desc_dmem));

	/* Release interrupts */
	sec_release_intr(sc, sc->sc_pri_ires, sc->sc_pri_ihand,
	    sc->sc_pri_irid, "primary");
	sec_release_intr(sc, sc->sc_sec_ires, sc->sc_sec_ihand,
	    sc->sc_sec_irid, "secondary");

	/* Release memory */
	if (sc->sc_rres) {
		error = bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid,
		    sc->sc_rres);
		if (error)
			device_printf(dev, "bus_release_resource() failed for"
			    " I/O memory, error %d\n", error);

		sc->sc_rres = NULL;
	}

	mtx_destroy(&sc->sc_controller_lock);
	mtx_destroy(&sc->sc_descriptors_lock);

	return (0);
}

/* Suspend/resume/shutdown: nothing to do for this device */
static int
sec_suspend(device_t dev)
{

	return (0);
}

static int
sec_resume(device_t dev)
{

	return (0);
}

static int
sec_shutdown(device_t dev)
{

	return (0);
}

/*
 * Allocate and wire one IRQ; on bus_setup_intr() failure the resource is
 * released and *ires is cleared so sec_release_intr() stays a no-op.
 */
static int
sec_setup_intr(struct sec_softc *sc, struct resource **ires, void **ihand,
    int *irid, driver_intr_t handler, const char *iname)
{
	int error;

	(*ires) = bus_alloc_resource_any(sc->sc_dev, SYS_RES_IRQ, irid,
	    RF_ACTIVE);

	if ((*ires) == NULL) {
		device_printf(sc->sc_dev, "could not allocate %s IRQ\n", iname);
		return (ENXIO);
	}

	error = bus_setup_intr(sc->sc_dev, *ires, INTR_MPSAFE | INTR_TYPE_NET,
	    NULL, handler, sc, ihand);

	if (error) {
		device_printf(sc->sc_dev, "failed to set up %s IRQ\n", iname);
		if (bus_release_resource(sc->sc_dev, SYS_RES_IRQ, *irid, *ires))
			device_printf(sc->sc_dev, "could not release %s IRQ\n",
			    iname);

		(*ires) = NULL;
		return (error);
	}

	return (0);
}

/* Tear down and release one IRQ; safe to call when ires is NULL */
static void
sec_release_intr(struct sec_softc *sc, struct resource *ires, void *ihand,
    int irid, const char *iname)
{
	int error;

	if (ires == NULL)
		return;

	error = bus_teardown_intr(sc->sc_dev, ires, ihand);
	if (error)
		device_printf(sc->sc_dev, "bus_teardown_intr() failed for %s"
		    " IRQ, error %d\n", iname, error);

	error = bus_release_resource(sc->sc_dev, SYS_RES_IRQ, irid, ires);
	if (error)
		device_printf(sc->sc_dev, "bus_release_resource() failed for %s"
		    " IRQ, error %d\n", iname, error);
}

static void
sec_primary_intr(void *arg)
{
	struct sec_session *ses;
	struct sec_softc *sc = arg;
	struct sec_desc *desc;
	struct cryptop *crp;
	uint64_t isr;
	uint8_t hash[HASH_MAX_LEN];
	int i, wakeup = 0;

	SEC_LOCK(sc, controller);

	/*
	 * Check for errors: ISR carries per-channel error bits; on error,
	 * the descriptor the channel was working on (read from CDPR) is
	 * marked EIO and the channel is partially reset so it can continue.
	 */
	isr = SEC_READ(sc, SEC_ISR);
	if (isr & sc->sc_int_error_mask) {
		/* Check each channel for error */
		for (i = 0; i < SEC_CHANNELS; i++) {
			if ((isr & SEC_INT_CH_ERR(i)) == 0)
				continue;

			device_printf(sc->sc_dev,
			    "I/O error on channel %i!\n", i);

			/* Find and mark problematic descriptor */
			desc = sec_find_desc(sc, SEC_READ(sc,
			    SEC_CHAN_CDPR(i)));

			if (desc != NULL)
				desc->sd_error = EIO;

			/* Do partial channel reset (CON, not full R) */
			sec_channel_reset(sc, i, 0);
		}
	}

	/* ACK interrupt: clear every status bit at once */
	SEC_WRITE(sc, SEC_ICR, 0xFFFFFFFFFFFFFFFFULL);

	SEC_UNLOCK(sc, controller);
	SEC_LOCK(sc, descriptors);

	/* Handle processed descriptors */
	SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	while (SEC_QUEUED_DESC_CNT(sc) > 0) {
		desc = SEC_GET_QUEUED_DESC(sc);

		/*
		 * Descriptors complete in queue order: stop at the first one
		 * the hardware has not finished (shd_done != 0xFF) unless it
		 * was already marked failed above.
		 */
		if (desc->sd_desc->shd_done != 0xFF && desc->sd_error == 0) {
			SEC_PUT_BACK_QUEUED_DESC(sc);
			break;
		}

		SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_PREREAD |
		    BUS_DMASYNC_PREWRITE);

		crp = desc->sd_crp;
		crp->crp_etype = desc->sd_error;
		if (crp->crp_etype == 0) {
			ses = crypto_get_driver_session(crp->crp_session);
			if (ses->ss_mlen != 0) {
				/*
				 * Either verify the computed digest against
				 * the one in the request (constant-time
				 * compare), or copy it out to the caller.
				 */
				if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
					crypto_copydata(crp,
					    crp->crp_digest_start,
					    ses->ss_mlen, hash);
					if (timingsafe_bcmp(
					    desc->sd_desc->shd_digest,
					    hash, ses->ss_mlen) != 0)
						crp->crp_etype = EBADMSG;
				} else
					crypto_copyback(crp,
					    crp->crp_digest_start,
					    ses->ss_mlen,
					    desc->sd_desc->shd_digest);
			}
		}
		crypto_done(desc->sd_crp);

		/* Return DMA mappings, link-table entries and descriptor */
		SEC_DESC_FREE_POINTERS(desc);
		SEC_DESC_FREE_LT(sc, desc);
		SEC_DESC_QUEUED2FREE(sc);
	}

	SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Pick up any queue-full backpressure flag unless shutting down */
	if (!sc->sc_shutdown) {
		wakeup = sc->sc_blocked;
		sc->sc_blocked = 0;
	}

	SEC_UNLOCK(sc, descriptors);

	/* Enqueue ready descriptors in hardware */
	sec_enqueue(sc);

	/* Tell OCF it may resubmit requests that got ERESTART */
	if (wakeup)
		crypto_unblock(sc->sc_cid, wakeup);
}

/*
 * Secondary interrupt handler (SEC 3.x only); not expected to fire, so log
 * it and fall through to the normal handler to make forward progress.
 */
static void
sec_secondary_intr(void *arg)
{
	struct sec_softc *sc = arg;

	device_printf(sc->sc_dev, "spurious secondary interrupt!\n");
	sec_primary_intr(arg);
}

/*
 * Software-reset the whole controller and busy-wait (up to SEC_TIMEOUT us)
 * for the SWR bit to self-clear.
 */
static int
sec_controller_reset(struct sec_softc *sc)
{
	int timeout = SEC_TIMEOUT;

	/* Reset Controller */
	SEC_WRITE(sc, SEC_MCR, SEC_MCR_SWR);

	while (SEC_READ(sc, SEC_MCR) & SEC_MCR_SWR) {
		DELAY(1000);
		timeout -= 1000;

		if (timeout < 0) {
			device_printf(sc->sc_dev, "timeout while waiting for "
			    "device reset!\n");
			return (ETIMEDOUT);
		}
	}

	return (0);
}

/*
 * Reset one channel: full != 0 does a full reset (R bit) and reprograms the
 * channel configuration; full == 0 only issues "continue" (CON) after an
 * error.  Returns 0 or ETIMEDOUT.
 */
static int
sec_channel_reset(struct sec_softc *sc, int channel, int full)
{
	int timeout = SEC_TIMEOUT;
	uint64_t bit = (full) ?
	    SEC_CHAN_CCR_R : SEC_CHAN_CCR_CON;
	uint64_t reg;

	/* Reset Channel: set the reset bit and wait for it to self-clear */
	reg = SEC_READ(sc, SEC_CHAN_CCR(channel));
	SEC_WRITE(sc, SEC_CHAN_CCR(channel), reg | bit);

	while (SEC_READ(sc, SEC_CHAN_CCR(channel)) & bit) {
		DELAY(1000);
		timeout -= 1000;

		if (timeout < 0) {
			device_printf(sc->sc_dev, "timeout while waiting for "
			    "channel reset!\n");
			return (ETIMEDOUT);
		}
	}

	if (full) {
		/* Reprogram channel config lost by the full reset */
		reg = SEC_CHAN_CCR_CDIE | SEC_CHAN_CCR_NT | SEC_CHAN_CCR_BS;

		switch(sc->sc_version) {
		case 2:
			reg |= SEC_CHAN_CCR_CDWE;
			break;
		case 3:
			reg |= SEC_CHAN_CCR_AWSE | SEC_CHAN_CCR_WGN;
			break;
		}

		SEC_WRITE(sc, SEC_CHAN_CCR(channel), reg);
	}

	return (0);
}

/*
 * Bring the hardware to a known state: reset the controller (twice, to
 * clear all pending interrupts), fully reset every channel, then enable
 * done/error interrupts for all channels.
 */
static int
sec_init(struct sec_softc *sc)
{
	uint64_t reg;
	int error, i;

	/* Reset controller twice to clear all pending interrupts */
	error = sec_controller_reset(sc);
	if (error)
		return (error);

	error = sec_controller_reset(sc);
	if (error)
		return (error);

	/* Reset channels */
	for (i = 0; i < SEC_CHANNELS; i++) {
		error = sec_channel_reset(sc, i, 1);
		if (error)
			return (error);
	}

	/* Enable Interrupts */
	reg = SEC_INT_ITO;
	for (i = 0; i < SEC_CHANNELS; i++)
		reg |= SEC_INT_CH_DN(i) | SEC_INT_CH_ERR(i);

	SEC_WRITE(sc, SEC_IER, reg);

	return (error);
}

/*
 * busdma load callback for single-segment static allocations: records the
 * bus address of the (single) segment into the sec_dma_mem.
 */
static void
sec_alloc_dma_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct sec_dma_mem *dma_mem = arg;

	if (error)
		return;

	KASSERT(nseg == 1, ("Wrong number of segments, should be 1"));
	dma_mem->dma_paddr = segs->ds_addr;
}

/*
 * busdma load callback for crypto request buffers: walks the DMA segments,
 * skipping sdmi_offset bytes and linking at most sdmi_size bytes into
 * hardware link-table entries, recording first/last entry and count in
 * the sec_desc_map_info.  Must be called with the descriptors lock held.
 */
static void
sec_dma_map_desc_cb(void *arg, bus_dma_segment_t *segs, int nseg,
    int error)
{
	struct sec_desc_map_info *sdmi = arg;
	struct sec_softc *sc = sdmi->sdmi_sc;
	struct sec_lt *lt = NULL;
	bus_addr_t addr;
	bus_size_t size;
	int i;

	SEC_LOCK_ASSERT(sc, descriptors);

	if (error)
		return;

	for (i = 0; i < nseg; i++) {
		addr = segs[i].ds_addr;
		size = segs[i].ds_len;

		/* Skip requested offset */
		if (sdmi->sdmi_offset >= size) {
			sdmi->sdmi_offset -= size;
			continue;
		}

		addr += sdmi->sdmi_offset;
		size -= sdmi->sdmi_offset;
		sdmi->sdmi_offset = 0;

		/* Do not link more than requested */
		if (sdmi->sdmi_size < size)
			size = sdmi->sdmi_size;

		lt = SEC_ALLOC_LT_ENTRY(sc);
		lt->sl_lt->shl_length = size;
		lt->sl_lt->shl_r = 0;
		lt->sl_lt->shl_n = 0;
		lt->sl_lt->shl_ptr = addr;

		if (sdmi->sdmi_lt_first == NULL)
			sdmi->sdmi_lt_first = lt;

		sdmi->sdmi_lt_used += 1;

		if ((sdmi->sdmi_size -= size) == 0)
			break;
	}

	sdmi->sdmi_lt_last = lt;
}

/*
 * Allocate a static, physically contiguous DMA buffer of the given size
 * (32-bit addressable, zeroed).  Returns 0, EBUSY if dma_mem is already
 * in use, or a busdma error.  On success dma_vaddr/dma_paddr are valid
 * and dma_is_map is 0 (memory owned by us, freed via bus_dmamem_free).
 */
static int
sec_alloc_dma_mem(struct sec_softc *sc, struct sec_dma_mem *dma_mem,
    bus_size_t size)
{
	int error;

	if (dma_mem->dma_vaddr != NULL)
		return (EBUSY);

	error = bus_dma_tag_create(NULL,	/* parent */
	    SEC_DMA_ALIGNMENT, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    size, 1,				/* maxsize, nsegments */
	    size, 0,				/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &(dma_mem->dma_tag));		/* dmat */

	if (error) {
		device_printf(sc->sc_dev, "failed to allocate busdma tag, error"
		    " %i!\n", error);
		goto err1;
	}

	error = bus_dmamem_alloc(dma_mem->dma_tag, &(dma_mem->dma_vaddr),
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &(dma_mem->dma_map));

	if (error) {
		device_printf(sc->sc_dev, "failed to allocate DMA safe"
		    " memory, error %i!\n", error);
		goto err2;
	}

	error = bus_dmamap_load(dma_mem->dma_tag, dma_mem->dma_map,
	    dma_mem->dma_vaddr, size, sec_alloc_dma_mem_cb, dma_mem,
	    BUS_DMA_NOWAIT);

	if (error) {
		device_printf(sc->sc_dev, "cannot get address of the DMA"
		    " memory, error %i\n", error);
		goto err3;
	}

	dma_mem->dma_is_map = 0;
	return (0);

err3:
	bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr, dma_mem->dma_map);
err2:
	bus_dma_tag_destroy(dma_mem->dma_tag);
err1:
	dma_mem->dma_vaddr = NULL;
	return(error);
}

/*
 * Map a crypto request buffer for DMA and build hardware link-table
 * entries for it (via sec_dma_map_desc_cb).  The conservative tag size
 * depends on the buffer type; on success dma_is_map is 1 (map owned by
 * us, torn down via bus_dmamap_destroy) and dma_vaddr stores the crp as
 * an in-use marker.
 */
static int
sec_desc_map_dma(struct sec_softc *sc, struct sec_dma_mem *dma_mem,
    struct cryptop *crp, bus_size_t size, struct sec_desc_map_info *sdmi)
{
	int error;

	if (dma_mem->dma_vaddr != NULL)
		return (EBUSY);

	switch (crp->crp_buf.cb_type) {
	case CRYPTO_BUF_CONTIG:
		break;
	case CRYPTO_BUF_UIO:
		size = SEC_FREE_LT_CNT(sc) * SEC_MAX_DMA_BLOCK_SIZE;
		break;
	case CRYPTO_BUF_MBUF:
		size = m_length(crp->crp_buf.cb_mbuf, NULL);
		break;
	case CRYPTO_BUF_SINGLE_MBUF:
		size = crp->crp_buf.cb_mbuf->m_len;
		break;
	case CRYPTO_BUF_VMPAGE:
		size = PAGE_SIZE - crp->crp_buf.cb_vm_page_offset;
		break;
	default:
		return (EINVAL);
	}

	error = bus_dma_tag_create(NULL,	/* parent */
	    SEC_DMA_ALIGNMENT, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    size,				/* maxsize */
	    SEC_FREE_LT_CNT(sc),		/* nsegments */
	    SEC_MAX_DMA_BLOCK_SIZE, 0,		/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &(dma_mem->dma_tag));		/* dmat */

	if (error) {
		device_printf(sc->sc_dev, "failed to allocate busdma tag, error"
		    " %i!\n", error);
		dma_mem->dma_vaddr = NULL;
		return (error);
	}

	error = bus_dmamap_create(dma_mem->dma_tag, 0, &(dma_mem->dma_map));

	if (error) {
		device_printf(sc->sc_dev, "failed to create DMA map, error %i!"
882 "\n", error); 883 bus_dma_tag_destroy(dma_mem->dma_tag); 884 return (error); 885 } 886 887 error = bus_dmamap_load_crp(dma_mem->dma_tag, dma_mem->dma_map, crp, 888 sec_dma_map_desc_cb, sdmi, BUS_DMA_NOWAIT); 889 890 if (error) { 891 device_printf(sc->sc_dev, "cannot get address of the DMA" 892 " memory, error %i!\n", error); 893 bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map); 894 bus_dma_tag_destroy(dma_mem->dma_tag); 895 return (error); 896 } 897 898 dma_mem->dma_is_map = 1; 899 dma_mem->dma_vaddr = crp; 900 901 return (0); 902 } 903 904 static void 905 sec_free_dma_mem(struct sec_dma_mem *dma_mem) 906 { 907 908 /* Check for double free */ 909 if (dma_mem->dma_vaddr == NULL) 910 return; 911 912 bus_dmamap_unload(dma_mem->dma_tag, dma_mem->dma_map); 913 914 if (dma_mem->dma_is_map) 915 bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map); 916 else 917 bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr, 918 dma_mem->dma_map); 919 920 bus_dma_tag_destroy(dma_mem->dma_tag); 921 dma_mem->dma_vaddr = NULL; 922 } 923 924 static int 925 sec_eu_channel(struct sec_softc *sc, int eu) 926 { 927 uint64_t reg; 928 int channel = 0; 929 930 SEC_LOCK_ASSERT(sc, controller); 931 932 reg = SEC_READ(sc, SEC_EUASR); 933 934 switch (eu) { 935 case SEC_EU_AFEU: 936 channel = SEC_EUASR_AFEU(reg); 937 break; 938 case SEC_EU_DEU: 939 channel = SEC_EUASR_DEU(reg); 940 break; 941 case SEC_EU_MDEU_A: 942 case SEC_EU_MDEU_B: 943 channel = SEC_EUASR_MDEU(reg); 944 break; 945 case SEC_EU_RNGU: 946 channel = SEC_EUASR_RNGU(reg); 947 break; 948 case SEC_EU_PKEU: 949 channel = SEC_EUASR_PKEU(reg); 950 break; 951 case SEC_EU_AESU: 952 channel = SEC_EUASR_AESU(reg); 953 break; 954 case SEC_EU_KEU: 955 channel = SEC_EUASR_KEU(reg); 956 break; 957 case SEC_EU_CRCU: 958 channel = SEC_EUASR_CRCU(reg); 959 break; 960 } 961 962 return (channel - 1); 963 } 964 965 static int 966 sec_enqueue_desc(struct sec_softc *sc, struct sec_desc *desc, int channel) 967 { 968 u_int fflvl = 
SEC_MAX_FIFO_LEVEL; 969 uint64_t reg; 970 int i; 971 972 SEC_LOCK_ASSERT(sc, controller); 973 974 /* Find free channel if have not got one */ 975 if (channel < 0) { 976 for (i = 0; i < SEC_CHANNELS; i++) { 977 reg = SEC_READ(sc, SEC_CHAN_CSR(channel)); 978 979 if ((reg & sc->sc_channel_idle_mask) == 0) { 980 channel = i; 981 break; 982 } 983 } 984 } 985 986 /* There is no free channel */ 987 if (channel < 0) 988 return (-1); 989 990 /* Check FIFO level on selected channel */ 991 reg = SEC_READ(sc, SEC_CHAN_CSR(channel)); 992 993 switch(sc->sc_version) { 994 case 2: 995 fflvl = (reg >> SEC_CHAN_CSR2_FFLVL_S) & SEC_CHAN_CSR2_FFLVL_M; 996 break; 997 case 3: 998 fflvl = (reg >> SEC_CHAN_CSR3_FFLVL_S) & SEC_CHAN_CSR3_FFLVL_M; 999 break; 1000 } 1001 1002 if (fflvl >= SEC_MAX_FIFO_LEVEL) 1003 return (-1); 1004 1005 /* Enqueue descriptor in channel */ 1006 SEC_WRITE(sc, SEC_CHAN_FF(channel), desc->sd_desc_paddr); 1007 1008 return (channel); 1009 } 1010 1011 static void 1012 sec_enqueue(struct sec_softc *sc) 1013 { 1014 struct sec_desc *desc; 1015 int ch0, ch1; 1016 1017 SEC_LOCK(sc, descriptors); 1018 SEC_LOCK(sc, controller); 1019 1020 while (SEC_READY_DESC_CNT(sc) > 0) { 1021 desc = SEC_GET_READY_DESC(sc); 1022 1023 ch0 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel0); 1024 ch1 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel1); 1025 1026 /* 1027 * Both EU are used by the same channel. 1028 * Enqueue descriptor in channel used by busy EUs. 1029 */ 1030 if (ch0 >= 0 && ch0 == ch1) { 1031 if (sec_enqueue_desc(sc, desc, ch0) >= 0) { 1032 SEC_DESC_READY2QUEUED(sc); 1033 continue; 1034 } 1035 } 1036 1037 /* 1038 * Only one EU is free. 1039 * Enqueue descriptor in channel used by busy EU. 1040 */ 1041 if ((ch0 >= 0 && ch1 < 0) || (ch1 >= 0 && ch0 < 0)) { 1042 if (sec_enqueue_desc(sc, desc, (ch0 >= 0) ? ch0 : ch1) 1043 >= 0) { 1044 SEC_DESC_READY2QUEUED(sc); 1045 continue; 1046 } 1047 } 1048 1049 /* 1050 * Both EU are free. 1051 * Enqueue descriptor in first free channel. 
		 */
		if (ch0 < 0 && ch1 < 0) {
			if (sec_enqueue_desc(sc, desc, -1) >= 0) {
				SEC_DESC_READY2QUEUED(sc);
				continue;
			}
		}

		/* Current descriptor can not be queued at the moment */
		SEC_PUT_BACK_READY_DESC(sc);
		break;
	}

	SEC_UNLOCK(sc, controller);
	SEC_UNLOCK(sc, descriptors);
}

/*
 * Look up the driver descriptor whose hardware-descriptor bus address
 * matches paddr (as reported by a channel's CDPR on error), or NULL.
 * Must be called with the descriptors lock held.
 *
 * NOTE(review): the search is bounded by SEC_CHANNELS, not
 * SEC_DESCRIPTORS, so only the first few descriptors can be matched —
 * verify this bound against sec.h; it looks like it should cover the
 * whole sc_desc array.
 */
static struct sec_desc *
sec_find_desc(struct sec_softc *sc, bus_addr_t paddr)
{
	struct sec_desc *desc = NULL;
	int i;

	SEC_LOCK_ASSERT(sc, descriptors);

	for (i = 0; i < SEC_CHANNELS; i++) {
		if (sc->sc_desc[i].sd_desc_paddr == paddr) {
			desc = &(sc->sc_desc[i]);
			break;
		}
	}

	return (desc);
}

/*
 * Fill hardware-descriptor pointer slot n with a direct (non-scatter)
 * bus address and length.  Must be called with the descriptors lock held.
 */
static int
sec_make_pointer_direct(struct sec_softc *sc, struct sec_desc *desc, u_int n,
    bus_addr_t data, bus_size_t dsize)
{
	struct sec_hw_desc_ptr *ptr;

	SEC_LOCK_ASSERT(sc, descriptors);

	ptr = &(desc->sd_desc->shd_pointer[n]);
	ptr->shdp_length = dsize;
	ptr->shdp_extent = 0;
	ptr->shdp_j = 0;
	ptr->shdp_ptr = data;

	return (0);
}

/*
 * Fill hardware-descriptor pointer slot n with a scatter/gather mapping
 * of dsize bytes of the request buffer starting at doffset: maps the crp
 * for DMA, builds a link table, marks its last entry as return (shl_r),
 * and points the slot at the first entry (shdp_j = 1 selects link-table
 * mode).  Must be called with the descriptors lock held.
 */
static int
sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc,
    u_int n, struct cryptop *crp, bus_size_t doffset, bus_size_t dsize)
{
	struct sec_desc_map_info sdmi = { sc, dsize, doffset, NULL, NULL, 0 };
	struct sec_hw_desc_ptr *ptr;
	int error;

	SEC_LOCK_ASSERT(sc, descriptors);

	error = sec_desc_map_dma(sc, &(desc->sd_ptr_dmem[n]), crp, dsize,
	    &sdmi);

	if (error)
		return (error);

	sdmi.sdmi_lt_last->sl_lt->shl_r = 1;
	desc->sd_lt_used += sdmi.sdmi_lt_used;

	ptr = &(desc->sd_desc->shd_pointer[n]);
	ptr->shdp_length = dsize;
	ptr->shdp_extent = 0;
	ptr->shdp_j = 1;
	ptr->shdp_ptr = sdmi.sdmi_lt_first->sl_lt_paddr;

	return (0);
}

/* Check whether the cipher part of a session is supported (AESU only) */
static bool
sec_cipher_supported(const struct crypto_session_params
*csp) 1134 { 1135 1136 switch (csp->csp_cipher_alg) { 1137 case CRYPTO_AES_CBC: 1138 /* AESU */ 1139 if (csp->csp_ivlen != AES_BLOCK_LEN) 1140 return (false); 1141 break; 1142 default: 1143 return (false); 1144 } 1145 1146 if (csp->csp_cipher_klen == 0 || csp->csp_cipher_klen > SEC_MAX_KEY_LEN) 1147 return (false); 1148 1149 return (true); 1150 } 1151 1152 static bool 1153 sec_auth_supported(struct sec_softc *sc, 1154 const struct crypto_session_params *csp) 1155 { 1156 1157 switch (csp->csp_auth_alg) { 1158 case CRYPTO_SHA2_384_HMAC: 1159 case CRYPTO_SHA2_512_HMAC: 1160 if (sc->sc_version < 3) 1161 return (false); 1162 /* FALLTHROUGH */ 1163 case CRYPTO_SHA1_HMAC: 1164 case CRYPTO_SHA2_256_HMAC: 1165 if (csp->csp_auth_klen > SEC_MAX_KEY_LEN) 1166 return (false); 1167 break; 1168 case CRYPTO_SHA1: 1169 break; 1170 default: 1171 return (false); 1172 } 1173 return (true); 1174 } 1175 1176 static int 1177 sec_probesession(device_t dev, const struct crypto_session_params *csp) 1178 { 1179 struct sec_softc *sc = device_get_softc(dev); 1180 1181 if (csp->csp_flags != 0) 1182 return (EINVAL); 1183 switch (csp->csp_mode) { 1184 case CSP_MODE_DIGEST: 1185 if (!sec_auth_supported(sc, csp)) 1186 return (EINVAL); 1187 break; 1188 case CSP_MODE_CIPHER: 1189 if (!sec_cipher_supported(csp)) 1190 return (EINVAL); 1191 break; 1192 case CSP_MODE_ETA: 1193 if (!sec_auth_supported(sc, csp) || !sec_cipher_supported(csp)) 1194 return (EINVAL); 1195 break; 1196 default: 1197 return (EINVAL); 1198 } 1199 return (CRYPTODEV_PROBE_HARDWARE); 1200 } 1201 1202 static int 1203 sec_newsession(device_t dev, crypto_session_t cses, 1204 const struct crypto_session_params *csp) 1205 { 1206 struct sec_eu_methods *eu = sec_eus; 1207 struct sec_session *ses; 1208 1209 ses = crypto_get_driver_session(cses); 1210 1211 /* Find EU for this session */ 1212 while (eu->sem_make_desc != NULL) { 1213 if (eu->sem_newsession(csp)) 1214 break; 1215 eu++; 1216 } 1217 KASSERT(eu->sem_make_desc != NULL, ("failed to 
find eu for session")); 1218 1219 /* Save cipher key */ 1220 if (csp->csp_cipher_key != NULL) 1221 memcpy(ses->ss_key, csp->csp_cipher_key, csp->csp_cipher_klen); 1222 1223 /* Save digest key */ 1224 if (csp->csp_auth_key != NULL) 1225 memcpy(ses->ss_mkey, csp->csp_auth_key, csp->csp_auth_klen); 1226 1227 if (csp->csp_auth_alg != 0) { 1228 if (csp->csp_auth_mlen == 0) 1229 ses->ss_mlen = crypto_auth_hash(csp)->hashsize; 1230 else 1231 ses->ss_mlen = csp->csp_auth_mlen; 1232 } 1233 1234 return (0); 1235 } 1236 1237 static int 1238 sec_process(device_t dev, struct cryptop *crp, int hint) 1239 { 1240 struct sec_softc *sc = device_get_softc(dev); 1241 struct sec_desc *desc = NULL; 1242 const struct crypto_session_params *csp; 1243 struct sec_session *ses; 1244 int error = 0; 1245 1246 ses = crypto_get_driver_session(crp->crp_session); 1247 csp = crypto_get_params(crp->crp_session); 1248 1249 /* Check for input length */ 1250 if (crypto_buffer_len(&crp->crp_buf) > SEC_MAX_DMA_BLOCK_SIZE) { 1251 crp->crp_etype = E2BIG; 1252 crypto_done(crp); 1253 return (0); 1254 } 1255 1256 SEC_LOCK(sc, descriptors); 1257 SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1258 1259 /* Block driver if there is no free descriptors or we are going down */ 1260 if (SEC_FREE_DESC_CNT(sc) == 0 || sc->sc_shutdown) { 1261 sc->sc_blocked |= CRYPTO_SYMQ; 1262 SEC_UNLOCK(sc, descriptors); 1263 return (ERESTART); 1264 } 1265 1266 /* Prepare descriptor */ 1267 desc = SEC_GET_FREE_DESC(sc); 1268 desc->sd_lt_used = 0; 1269 desc->sd_error = 0; 1270 desc->sd_crp = crp; 1271 1272 if (csp->csp_cipher_alg != 0) 1273 crypto_read_iv(crp, desc->sd_desc->shd_iv); 1274 1275 if (crp->crp_cipher_key != NULL) 1276 memcpy(ses->ss_key, crp->crp_cipher_key, csp->csp_cipher_klen); 1277 1278 if (crp->crp_auth_key != NULL) 1279 memcpy(ses->ss_mkey, crp->crp_auth_key, csp->csp_auth_klen); 1280 1281 memcpy(desc->sd_desc->shd_key, ses->ss_key, csp->csp_cipher_klen); 1282 memcpy(desc->sd_desc->shd_mkey, 
ses->ss_mkey, csp->csp_auth_klen); 1283 1284 error = ses->ss_eu->sem_make_desc(sc, csp, desc, crp); 1285 1286 if (error) { 1287 SEC_DESC_FREE_POINTERS(desc); 1288 SEC_DESC_PUT_BACK_LT(sc, desc); 1289 SEC_PUT_BACK_FREE_DESC(sc); 1290 SEC_UNLOCK(sc, descriptors); 1291 crp->crp_etype = error; 1292 crypto_done(crp); 1293 return (0); 1294 } 1295 1296 /* 1297 * Skip DONE interrupt if this is not last request in burst, but only 1298 * if we are running on SEC 3.X. On SEC 2.X we have to enable DONE 1299 * signaling on each descriptor. 1300 */ 1301 if ((hint & CRYPTO_HINT_MORE) && sc->sc_version == 3) 1302 desc->sd_desc->shd_dn = 0; 1303 else 1304 desc->sd_desc->shd_dn = 1; 1305 1306 SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1307 SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_POSTREAD | 1308 BUS_DMASYNC_POSTWRITE); 1309 SEC_DESC_FREE2READY(sc); 1310 SEC_UNLOCK(sc, descriptors); 1311 1312 /* Enqueue ready descriptors in hardware */ 1313 sec_enqueue(sc); 1314 1315 return (0); 1316 } 1317 1318 static int 1319 sec_build_common_ns_desc(struct sec_softc *sc, struct sec_desc *desc, 1320 const struct crypto_session_params *csp, struct cryptop *crp) 1321 { 1322 struct sec_hw_desc *hd = desc->sd_desc; 1323 int error; 1324 1325 hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP; 1326 hd->shd_eu_sel1 = SEC_EU_NONE; 1327 hd->shd_mode1 = 0; 1328 1329 /* Pointer 0: NULL */ 1330 error = sec_make_pointer_direct(sc, desc, 0, 0, 0); 1331 if (error) 1332 return (error); 1333 1334 /* Pointer 1: IV IN */ 1335 error = sec_make_pointer_direct(sc, desc, 1, desc->sd_desc_paddr + 1336 offsetof(struct sec_hw_desc, shd_iv), csp->csp_ivlen); 1337 if (error) 1338 return (error); 1339 1340 /* Pointer 2: Cipher Key */ 1341 error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr + 1342 offsetof(struct sec_hw_desc, shd_key), csp->csp_cipher_klen); 1343 if (error) 1344 return (error); 1345 1346 /* Pointer 3: Data IN */ 1347 error = sec_make_pointer(sc, desc, 3, crp, crp->crp_payload_start, 
1348 crp->crp_payload_length); 1349 if (error) 1350 return (error); 1351 1352 /* Pointer 4: Data OUT */ 1353 error = sec_make_pointer(sc, desc, 4, crp, crp->crp_payload_start, 1354 crp->crp_payload_length); 1355 if (error) 1356 return (error); 1357 1358 /* Pointer 5: IV OUT (Not used: NULL) */ 1359 error = sec_make_pointer_direct(sc, desc, 5, 0, 0); 1360 if (error) 1361 return (error); 1362 1363 /* Pointer 6: NULL */ 1364 error = sec_make_pointer_direct(sc, desc, 6, 0, 0); 1365 1366 return (error); 1367 } 1368 1369 static int 1370 sec_build_common_s_desc(struct sec_softc *sc, struct sec_desc *desc, 1371 const struct crypto_session_params *csp, struct cryptop *crp) 1372 { 1373 struct sec_hw_desc *hd = desc->sd_desc; 1374 u_int eu, mode, hashlen; 1375 int error; 1376 1377 error = sec_mdeu_config(csp, &eu, &mode, &hashlen); 1378 if (error) 1379 return (error); 1380 1381 hd->shd_desc_type = SEC_DT_HMAC_SNOOP; 1382 hd->shd_eu_sel1 = eu; 1383 hd->shd_mode1 = mode; 1384 1385 /* Pointer 0: HMAC Key */ 1386 error = sec_make_pointer_direct(sc, desc, 0, desc->sd_desc_paddr + 1387 offsetof(struct sec_hw_desc, shd_mkey), csp->csp_auth_klen); 1388 if (error) 1389 return (error); 1390 1391 /* Pointer 1: HMAC-Only Data IN */ 1392 error = sec_make_pointer(sc, desc, 1, crp, crp->crp_aad_start, 1393 crp->crp_aad_length); 1394 if (error) 1395 return (error); 1396 1397 /* Pointer 2: Cipher Key */ 1398 error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr + 1399 offsetof(struct sec_hw_desc, shd_key), csp->csp_cipher_klen); 1400 if (error) 1401 return (error); 1402 1403 /* Pointer 3: IV IN */ 1404 error = sec_make_pointer_direct(sc, desc, 3, desc->sd_desc_paddr + 1405 offsetof(struct sec_hw_desc, shd_iv), csp->csp_ivlen); 1406 if (error) 1407 return (error); 1408 1409 /* Pointer 4: Data IN */ 1410 error = sec_make_pointer(sc, desc, 4, crp, crp->crp_payload_start, 1411 crp->crp_payload_length); 1412 if (error) 1413 return (error); 1414 1415 /* Pointer 5: Data OUT */ 1416 error 
= sec_make_pointer(sc, desc, 5, crp, crp->crp_payload_start, 1417 crp->crp_payload_length); 1418 if (error) 1419 return (error); 1420 1421 /* Pointer 6: HMAC OUT */ 1422 error = sec_make_pointer_direct(sc, desc, 6, desc->sd_desc_paddr + 1423 offsetof(struct sec_hw_desc, shd_digest), hashlen); 1424 1425 return (error); 1426 } 1427 1428 /* AESU */ 1429 1430 static bool 1431 sec_aesu_newsession(const struct crypto_session_params *csp) 1432 { 1433 1434 return (csp->csp_cipher_alg == CRYPTO_AES_CBC); 1435 } 1436 1437 static int 1438 sec_aesu_make_desc(struct sec_softc *sc, 1439 const struct crypto_session_params *csp, struct sec_desc *desc, 1440 struct cryptop *crp) 1441 { 1442 struct sec_hw_desc *hd = desc->sd_desc; 1443 int error; 1444 1445 hd->shd_eu_sel0 = SEC_EU_AESU; 1446 hd->shd_mode0 = SEC_AESU_MODE_CBC; 1447 1448 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { 1449 hd->shd_mode0 |= SEC_AESU_MODE_ED; 1450 hd->shd_dir = 0; 1451 } else 1452 hd->shd_dir = 1; 1453 1454 if (csp->csp_mode == CSP_MODE_ETA) 1455 error = sec_build_common_s_desc(sc, desc, csp, crp); 1456 else 1457 error = sec_build_common_ns_desc(sc, desc, csp, crp); 1458 1459 return (error); 1460 } 1461 1462 /* MDEU */ 1463 1464 static bool 1465 sec_mdeu_can_handle(u_int alg) 1466 { 1467 switch (alg) { 1468 case CRYPTO_SHA1: 1469 case CRYPTO_SHA1_HMAC: 1470 case CRYPTO_SHA2_256_HMAC: 1471 case CRYPTO_SHA2_384_HMAC: 1472 case CRYPTO_SHA2_512_HMAC: 1473 return (true); 1474 default: 1475 return (false); 1476 } 1477 } 1478 1479 static int 1480 sec_mdeu_config(const struct crypto_session_params *csp, u_int *eu, u_int *mode, 1481 u_int *hashlen) 1482 { 1483 1484 *mode = SEC_MDEU_MODE_PD | SEC_MDEU_MODE_INIT; 1485 *eu = SEC_EU_NONE; 1486 1487 switch (csp->csp_auth_alg) { 1488 case CRYPTO_SHA1_HMAC: 1489 *mode |= SEC_MDEU_MODE_HMAC; 1490 /* FALLTHROUGH */ 1491 case CRYPTO_SHA1: 1492 *eu = SEC_EU_MDEU_A; 1493 *mode |= SEC_MDEU_MODE_SHA1; 1494 *hashlen = SHA1_HASH_LEN; 1495 break; 1496 case CRYPTO_SHA2_256_HMAC: 1497 
*mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA256; 1498 *eu = SEC_EU_MDEU_A; 1499 break; 1500 case CRYPTO_SHA2_384_HMAC: 1501 *mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA384; 1502 *eu = SEC_EU_MDEU_B; 1503 break; 1504 case CRYPTO_SHA2_512_HMAC: 1505 *mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA512; 1506 *eu = SEC_EU_MDEU_B; 1507 break; 1508 default: 1509 return (EINVAL); 1510 } 1511 1512 if (*mode & SEC_MDEU_MODE_HMAC) 1513 *hashlen = SEC_HMAC_HASH_LEN; 1514 1515 return (0); 1516 } 1517 1518 static bool 1519 sec_mdeu_newsession(const struct crypto_session_params *csp) 1520 { 1521 1522 return (sec_mdeu_can_handle(csp->csp_auth_alg)); 1523 } 1524 1525 static int 1526 sec_mdeu_make_desc(struct sec_softc *sc, 1527 const struct crypto_session_params *csp, 1528 struct sec_desc *desc, struct cryptop *crp) 1529 { 1530 struct sec_hw_desc *hd = desc->sd_desc; 1531 u_int eu, mode, hashlen; 1532 int error; 1533 1534 error = sec_mdeu_config(csp, &eu, &mode, &hashlen); 1535 if (error) 1536 return (error); 1537 1538 hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP; 1539 hd->shd_eu_sel0 = eu; 1540 hd->shd_mode0 = mode; 1541 hd->shd_eu_sel1 = SEC_EU_NONE; 1542 hd->shd_mode1 = 0; 1543 1544 /* Pointer 0: NULL */ 1545 error = sec_make_pointer_direct(sc, desc, 0, 0, 0); 1546 if (error) 1547 return (error); 1548 1549 /* Pointer 1: Context In (Not used: NULL) */ 1550 error = sec_make_pointer_direct(sc, desc, 1, 0, 0); 1551 if (error) 1552 return (error); 1553 1554 /* Pointer 2: HMAC Key (or NULL, depending on digest type) */ 1555 if (hd->shd_mode0 & SEC_MDEU_MODE_HMAC) 1556 error = sec_make_pointer_direct(sc, desc, 2, 1557 desc->sd_desc_paddr + offsetof(struct sec_hw_desc, 1558 shd_mkey), csp->csp_auth_klen); 1559 else 1560 error = sec_make_pointer_direct(sc, desc, 2, 0, 0); 1561 1562 if (error) 1563 return (error); 1564 1565 /* Pointer 3: Input Data */ 1566 error = sec_make_pointer(sc, desc, 3, crp, crp->crp_payload_start, 1567 crp->crp_payload_length); 1568 if (error) 1569 return 
(error); 1570 1571 /* Pointer 4: NULL */ 1572 error = sec_make_pointer_direct(sc, desc, 4, 0, 0); 1573 if (error) 1574 return (error); 1575 1576 /* Pointer 5: Hash out */ 1577 error = sec_make_pointer_direct(sc, desc, 5, desc->sd_desc_paddr + 1578 offsetof(struct sec_hw_desc, shd_digest), hashlen); 1579 if (error) 1580 return (error); 1581 1582 /* Pointer 6: NULL */ 1583 error = sec_make_pointer_direct(sc, desc, 6, 0, 0); 1584 1585 return (0); 1586 } 1587