1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2003 Sam Leffler, Errno Consulting 5 * Copyright (c) 2003 Global Technology Associates, Inc. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 */ 29 30 #include <sys/cdefs.h> 31 __FBSDID("$FreeBSD$"); 32 33 /* 34 * SafeNet SafeXcel-1141 hardware crypto accelerator 35 */ 36 #include "opt_safe.h" 37 38 #include <sys/param.h> 39 #include <sys/systm.h> 40 #include <sys/proc.h> 41 #include <sys/errno.h> 42 #include <sys/malloc.h> 43 #include <sys/kernel.h> 44 #include <sys/mbuf.h> 45 #include <sys/module.h> 46 #include <sys/lock.h> 47 #include <sys/mutex.h> 48 #include <sys/sysctl.h> 49 #include <sys/endian.h> 50 #include <sys/uio.h> 51 52 #include <vm/vm.h> 53 #include <vm/pmap.h> 54 55 #include <machine/bus.h> 56 #include <machine/resource.h> 57 #include <sys/bus.h> 58 #include <sys/rman.h> 59 60 #include <opencrypto/cryptodev.h> 61 #include <opencrypto/xform_auth.h> 62 #include <sys/random.h> 63 #include <sys/kobj.h> 64 65 #include "cryptodev_if.h" 66 67 #include <dev/pci/pcivar.h> 68 #include <dev/pci/pcireg.h> 69 70 #ifdef SAFE_RNDTEST 71 #include <dev/rndtest/rndtest.h> 72 #endif 73 #include <dev/safe/safereg.h> 74 #include <dev/safe/safevar.h> 75 76 #ifndef bswap32 77 #define bswap32 NTOHL 78 #endif 79 80 /* 81 * Prototypes and count for the pci_device structure 82 */ 83 static int safe_probe(device_t); 84 static int safe_attach(device_t); 85 static int safe_detach(device_t); 86 static int safe_suspend(device_t); 87 static int safe_resume(device_t); 88 static int safe_shutdown(device_t); 89 90 static int safe_probesession(device_t, const struct crypto_session_params *); 91 static int safe_newsession(device_t, crypto_session_t, 92 const struct crypto_session_params *); 93 static int safe_process(device_t, struct cryptop *, int); 94 95 static device_method_t safe_methods[] = { 96 /* Device interface */ 97 DEVMETHOD(device_probe, safe_probe), 98 DEVMETHOD(device_attach, safe_attach), 99 DEVMETHOD(device_detach, safe_detach), 100 DEVMETHOD(device_suspend, safe_suspend), 101 DEVMETHOD(device_resume, safe_resume), 102 DEVMETHOD(device_shutdown, safe_shutdown), 103 104 /* crypto device methods */ 105 DEVMETHOD(cryptodev_probesession, safe_probesession), 106 DEVMETHOD(cryptodev_newsession, safe_newsession), 107 DEVMETHOD(cryptodev_process, safe_process), 108 109 DEVMETHOD_END 110 }; 111 static driver_t safe_driver = { 112 "safe", 113 safe_methods, 114 sizeof (struct safe_softc) 115 }; 116 static devclass_t safe_devclass; 117 118 DRIVER_MODULE(safe, pci, safe_driver, safe_devclass, 0, 0); 119 MODULE_DEPEND(safe, crypto, 1, 1, 1); 120 #ifdef SAFE_RNDTEST 121 MODULE_DEPEND(safe, rndtest, 1, 1, 1); 122 #endif 123 124 static void safe_intr(void *); 125 static void safe_callback(struct safe_softc *, struct safe_ringentry *); 126 static void safe_feed(struct safe_softc *, struct safe_ringentry *); 127 static void safe_mcopy(struct mbuf *, struct mbuf *, u_int); 128 #ifndef SAFE_NO_RNG 129 static void safe_rng_init(struct safe_softc *); 130 static void safe_rng(void *); 131 #endif /* SAFE_NO_RNG */ 132 static int safe_dma_malloc(struct safe_softc *, bus_size_t, 133 struct safe_dma_alloc *, int); 134 #define safe_dma_sync(_dma, _flags) \ 135 bus_dmamap_sync((_dma)->dma_tag, (_dma)->dma_map, (_flags)) 136 static void safe_dma_free(struct safe_softc *, struct safe_dma_alloc *); 137 static int safe_dmamap_aligned(const struct safe_operand *); 138 static int safe_dmamap_uniform(const struct safe_operand *); 139 140 static void safe_reset_board(struct safe_softc *); 141 static void safe_init_board(struct safe_softc *); 142 static void safe_init_pciregs(device_t dev); 143 static void safe_cleanchip(struct safe_softc *); 144 static void safe_totalreset(struct safe_softc *); 145 146 static int safe_free_entry(struct safe_softc *, struct safe_ringentry *); 147 148 static SYSCTL_NODE(_hw, OID_AUTO, safe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 149 "SafeNet driver parameters"); 150 151 #ifdef SAFE_DEBUG 152 static void safe_dump_dmastatus(struct safe_softc *, const char *); 153 static void safe_dump_ringstate(struct safe_softc *, const char *); 154 static void safe_dump_intrstate(struct safe_softc *, const char *); 155 static void safe_dump_request(struct safe_softc *, const char *, 156 struct safe_ringentry *); 157 158 static struct safe_softc *safec; /* for use by hw.safe.dump */ 159 160 static int safe_debug = 0; 161 SYSCTL_INT(_hw_safe, OID_AUTO, debug, CTLFLAG_RW, &safe_debug, 162 0, "control debugging msgs"); 163 #define DPRINTF(_x) if (safe_debug) printf _x 164 #else 165 #define DPRINTF(_x) 166 #endif 167 168 #define READ_REG(sc,r) \ 169 bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (r)) 170 171 #define WRITE_REG(sc,reg,val) \ 172 bus_space_write_4((sc)->sc_st, (sc)->sc_sh, reg, val) 173 174 struct safe_stats safestats; 175 SYSCTL_STRUCT(_hw_safe, OID_AUTO, stats, CTLFLAG_RD, &safestats, 176 safe_stats, "driver statistics"); 177 #ifndef SAFE_NO_RNG 178 static int safe_rnginterval = 1; /* poll once a second */ 179 SYSCTL_INT(_hw_safe, OID_AUTO, rnginterval, CTLFLAG_RW, &safe_rnginterval, 180 0, "RNG polling interval (secs)"); 181 static int safe_rngbufsize = 16; /* 64 bytes each poll */ 182 SYSCTL_INT(_hw_safe, OID_AUTO, rngbufsize, CTLFLAG_RW, &safe_rngbufsize, 183 0, "RNG polling buffer size (32-bit words)"); 184 static int safe_rngmaxalarm = 8; /* max alarms before reset */ 185 SYSCTL_INT(_hw_safe, OID_AUTO, rngmaxalarm, CTLFLAG_RW, &safe_rngmaxalarm, 186 0, "RNG max alarms before reset"); 187 #endif /* SAFE_NO_RNG */ 188 189 static int 190 safe_probe(device_t dev) 191 { 192 if (pci_get_vendor(dev) == PCI_VENDOR_SAFENET && 193 pci_get_device(dev) == PCI_PRODUCT_SAFEXCEL) 194 return (BUS_PROBE_DEFAULT); 195 return (ENXIO); 196 } 197 198 static const char* 199 safe_partname(struct safe_softc *sc) 200 { 201 /* XXX sprintf numbers when not decoded */ 202 switch (pci_get_vendor(sc->sc_dev)) { 203 case PCI_VENDOR_SAFENET: 204 switch (pci_get_device(sc->sc_dev)) { 205 case PCI_PRODUCT_SAFEXCEL: return "SafeNet SafeXcel-1141"; 206 } 207 return "SafeNet unknown-part"; 208 } 209 return "Unknown-vendor unknown-part"; 210 } 211 212 #ifndef SAFE_NO_RNG 213 static void 214 default_harvest(struct rndtest_state *rsp, void *buf, u_int count) 215 { 216 /* MarkM: FIX!! Check that this does not swamp the harvester! */ 217 random_harvest_queue(buf, count, RANDOM_PURE_SAFE); 218 } 219 #endif /* SAFE_NO_RNG */ 220 221 static int 222 safe_attach(device_t dev) 223 { 224 struct safe_softc *sc = device_get_softc(dev); 225 u_int32_t raddr; 226 u_int32_t i; 227 int rid; 228 229 bzero(sc, sizeof (*sc)); 230 sc->sc_dev = dev; 231 232 /* XXX handle power management */ 233 234 pci_enable_busmaster(dev); 235 236 /* 237 * Setup memory-mapping of PCI registers. 238 */ 239 rid = BS_BAR; 240 sc->sc_sr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 241 RF_ACTIVE); 242 if (sc->sc_sr == NULL) { 243 device_printf(dev, "cannot map register space\n"); 244 goto bad; 245 } 246 sc->sc_st = rman_get_bustag(sc->sc_sr); 247 sc->sc_sh = rman_get_bushandle(sc->sc_sr); 248 249 /* 250 * Arrange interrupt line. 251 */ 252 rid = 0; 253 sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 254 RF_SHAREABLE|RF_ACTIVE); 255 if (sc->sc_irq == NULL) { 256 device_printf(dev, "could not map interrupt\n"); 257 goto bad1; 258 } 259 /* 260 * NB: Network code assumes we are blocked with splimp() 261 * so make sure the IRQ is mapped appropriately. 262 */ 263 if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE, 264 NULL, safe_intr, sc, &sc->sc_ih)) { 265 device_printf(dev, "could not establish interrupt\n"); 266 goto bad2; 267 } 268 269 sc->sc_cid = crypto_get_driverid(dev, sizeof(struct safe_session), 270 CRYPTOCAP_F_HARDWARE); 271 if (sc->sc_cid < 0) { 272 device_printf(dev, "could not get crypto driver id\n"); 273 goto bad3; 274 } 275 276 sc->sc_chiprev = READ_REG(sc, SAFE_DEVINFO) & 277 (SAFE_DEVINFO_REV_MAJ | SAFE_DEVINFO_REV_MIN); 278 279 /* 280 * Setup DMA descriptor area. 281 */ 282 if (bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 283 1, /* alignment */ 284 SAFE_DMA_BOUNDARY, /* boundary */ 285 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 286 BUS_SPACE_MAXADDR, /* highaddr */ 287 NULL, NULL, /* filter, filterarg */ 288 SAFE_MAX_DMA, /* maxsize */ 289 SAFE_MAX_PART, /* nsegments */ 290 SAFE_MAX_SSIZE, /* maxsegsize */ 291 BUS_DMA_ALLOCNOW, /* flags */ 292 NULL, NULL, /* locking */ 293 &sc->sc_srcdmat)) { 294 device_printf(dev, "cannot allocate DMA tag\n"); 295 goto bad4; 296 } 297 if (bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 298 1, /* alignment */ 299 SAFE_MAX_DSIZE, /* boundary */ 300 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 301 BUS_SPACE_MAXADDR, /* highaddr */ 302 NULL, NULL, /* filter, filterarg */ 303 SAFE_MAX_DMA, /* maxsize */ 304 SAFE_MAX_PART, /* nsegments */ 305 SAFE_MAX_DSIZE, /* maxsegsize */ 306 BUS_DMA_ALLOCNOW, /* flags */ 307 NULL, NULL, /* locking */ 308 &sc->sc_dstdmat)) { 309 device_printf(dev, "cannot allocate DMA tag\n"); 310 goto bad4; 311 } 312 313 /* 314 * Allocate packet engine descriptors. 315 */ 316 if (safe_dma_malloc(sc, 317 SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry), 318 &sc->sc_ringalloc, 0)) { 319 device_printf(dev, "cannot allocate PE descriptor ring\n"); 320 bus_dma_tag_destroy(sc->sc_srcdmat); 321 goto bad4; 322 } 323 /* 324 * Hookup the static portion of all our data structures. 325 */ 326 sc->sc_ring = (struct safe_ringentry *) sc->sc_ringalloc.dma_vaddr; 327 sc->sc_ringtop = sc->sc_ring + SAFE_MAX_NQUEUE; 328 sc->sc_front = sc->sc_ring; 329 sc->sc_back = sc->sc_ring; 330 raddr = sc->sc_ringalloc.dma_paddr; 331 bzero(sc->sc_ring, SAFE_MAX_NQUEUE * sizeof(struct safe_ringentry)); 332 for (i = 0; i < SAFE_MAX_NQUEUE; i++) { 333 struct safe_ringentry *re = &sc->sc_ring[i]; 334 335 re->re_desc.d_sa = raddr + 336 offsetof(struct safe_ringentry, re_sa); 337 re->re_sa.sa_staterec = raddr + 338 offsetof(struct safe_ringentry, re_sastate); 339 340 raddr += sizeof (struct safe_ringentry); 341 } 342 mtx_init(&sc->sc_ringmtx, device_get_nameunit(dev), 343 "packet engine ring", MTX_DEF); 344 345 /* 346 * Allocate scatter and gather particle descriptors. 347 */ 348 if (safe_dma_malloc(sc, SAFE_TOTAL_SPART * sizeof (struct safe_pdesc), 349 &sc->sc_spalloc, 0)) { 350 device_printf(dev, "cannot allocate source particle " 351 "descriptor ring\n"); 352 mtx_destroy(&sc->sc_ringmtx); 353 safe_dma_free(sc, &sc->sc_ringalloc); 354 bus_dma_tag_destroy(sc->sc_srcdmat); 355 goto bad4; 356 } 357 sc->sc_spring = (struct safe_pdesc *) sc->sc_spalloc.dma_vaddr; 358 sc->sc_springtop = sc->sc_spring + SAFE_TOTAL_SPART; 359 sc->sc_spfree = sc->sc_spring; 360 bzero(sc->sc_spring, SAFE_TOTAL_SPART * sizeof(struct safe_pdesc)); 361 362 if (safe_dma_malloc(sc, SAFE_TOTAL_DPART * sizeof (struct safe_pdesc), 363 &sc->sc_dpalloc, 0)) { 364 device_printf(dev, "cannot allocate destination particle " 365 "descriptor ring\n"); 366 mtx_destroy(&sc->sc_ringmtx); 367 safe_dma_free(sc, &sc->sc_spalloc); 368 safe_dma_free(sc, &sc->sc_ringalloc); 369 bus_dma_tag_destroy(sc->sc_dstdmat); 370 goto bad4; 371 } 372 sc->sc_dpring = (struct safe_pdesc *) sc->sc_dpalloc.dma_vaddr; 373 sc->sc_dpringtop = sc->sc_dpring + SAFE_TOTAL_DPART; 374 sc->sc_dpfree = sc->sc_dpring; 375 bzero(sc->sc_dpring, SAFE_TOTAL_DPART * sizeof(struct safe_pdesc)); 376 377 device_printf(sc->sc_dev, "%s", safe_partname(sc)); 378 379 sc->sc_devinfo = READ_REG(sc, SAFE_DEVINFO); 380 if (sc->sc_devinfo & SAFE_DEVINFO_RNG) { 381 sc->sc_flags |= SAFE_FLAGS_RNG; 382 printf(" rng"); 383 } 384 if (sc->sc_devinfo & SAFE_DEVINFO_PKEY) { 385 #if 0 386 printf(" key"); 387 sc->sc_flags |= SAFE_FLAGS_KEY; 388 crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0); 389 crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0); 390 #endif 391 } 392 if (sc->sc_devinfo & SAFE_DEVINFO_DES) { 393 printf(" des/3des"); 394 } 395 if (sc->sc_devinfo & SAFE_DEVINFO_AES) { 396 printf(" aes"); 397 } 398 if (sc->sc_devinfo & SAFE_DEVINFO_MD5) { 399 printf(" md5"); 400 } 401 if (sc->sc_devinfo & SAFE_DEVINFO_SHA1) { 402 printf(" sha1"); 403 } 404 /* XXX other supported algorithms */ 405 printf("\n"); 406 407 safe_reset_board(sc); /* reset h/w */ 408 safe_init_pciregs(dev); /* init pci settings */ 409 safe_init_board(sc); /* init h/w */ 410 411 #ifndef SAFE_NO_RNG 412 if (sc->sc_flags & SAFE_FLAGS_RNG) { 413 #ifdef SAFE_RNDTEST 414 sc->sc_rndtest = rndtest_attach(dev); 415 if (sc->sc_rndtest) 416 sc->sc_harvest = rndtest_harvest; 417 else 418 sc->sc_harvest = default_harvest; 419 #else 420 sc->sc_harvest = default_harvest; 421 #endif 422 safe_rng_init(sc); 423 424 callout_init(&sc->sc_rngto, 1); 425 callout_reset(&sc->sc_rngto, hz*safe_rnginterval, safe_rng, sc); 426 } 427 #endif /* SAFE_NO_RNG */ 428 #ifdef SAFE_DEBUG 429 safec = sc; /* for use by hw.safe.dump */ 430 #endif 431 return (0); 432 bad4: 433 crypto_unregister_all(sc->sc_cid); 434 bad3: 435 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih); 436 bad2: 437 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); 438 bad1: 439 bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr); 440 bad: 441 return (ENXIO); 442 } 443 444 /* 445 * Detach a device that successfully probed. 446 */ 447 static int 448 safe_detach(device_t dev) 449 { 450 struct safe_softc *sc = device_get_softc(dev); 451 452 /* XXX wait/abort active ops */ 453 454 WRITE_REG(sc, SAFE_HI_MASK, 0); /* disable interrupts */ 455 456 callout_stop(&sc->sc_rngto); 457 458 crypto_unregister_all(sc->sc_cid); 459 460 #ifdef SAFE_RNDTEST 461 if (sc->sc_rndtest) 462 rndtest_detach(sc->sc_rndtest); 463 #endif 464 465 safe_cleanchip(sc); 466 safe_dma_free(sc, &sc->sc_dpalloc); 467 safe_dma_free(sc, &sc->sc_spalloc); 468 mtx_destroy(&sc->sc_ringmtx); 469 safe_dma_free(sc, &sc->sc_ringalloc); 470 471 bus_generic_detach(dev); 472 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih); 473 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); 474 475 bus_dma_tag_destroy(sc->sc_srcdmat); 476 bus_dma_tag_destroy(sc->sc_dstdmat); 477 bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr); 478 479 return (0); 480 } 481 482 /* 483 * Stop all chip i/o so that the kernel's probe routines don't 484 * get confused by errant DMAs when rebooting. 485 */ 486 static int 487 safe_shutdown(device_t dev) 488 { 489 #ifdef notyet 490 safe_stop(device_get_softc(dev)); 491 #endif 492 return (0); 493 } 494 495 /* 496 * Device suspend routine. 497 */ 498 static int 499 safe_suspend(device_t dev) 500 { 501 struct safe_softc *sc = device_get_softc(dev); 502 503 #ifdef notyet 504 /* XXX stop the device and save PCI settings */ 505 #endif 506 sc->sc_suspended = 1; 507 508 return (0); 509 } 510 511 static int 512 safe_resume(device_t dev) 513 { 514 struct safe_softc *sc = device_get_softc(dev); 515 516 #ifdef notyet 517 /* XXX retore PCI settings and start the device */ 518 #endif 519 sc->sc_suspended = 0; 520 return (0); 521 } 522 523 /* 524 * SafeXcel Interrupt routine 525 */ 526 static void 527 safe_intr(void *arg) 528 { 529 struct safe_softc *sc = arg; 530 volatile u_int32_t stat; 531 532 stat = READ_REG(sc, SAFE_HM_STAT); 533 if (stat == 0) /* shared irq, not for us */ 534 return; 535 536 WRITE_REG(sc, SAFE_HI_CLR, stat); /* IACK */ 537 538 if ((stat & SAFE_INT_PE_DDONE)) { 539 /* 540 * Descriptor(s) done; scan the ring and 541 * process completed operations. 542 */ 543 mtx_lock(&sc->sc_ringmtx); 544 while (sc->sc_back != sc->sc_front) { 545 struct safe_ringentry *re = sc->sc_back; 546 #ifdef SAFE_DEBUG 547 if (safe_debug) { 548 safe_dump_ringstate(sc, __func__); 549 safe_dump_request(sc, __func__, re); 550 } 551 #endif 552 /* 553 * safe_process marks ring entries that were allocated 554 * but not used with a csr of zero. This insures the 555 * ring front pointer never needs to be set backwards 556 * in the event that an entry is allocated but not used 557 * because of a setup error. 558 */ 559 if (re->re_desc.d_csr != 0) { 560 if (!SAFE_PE_CSR_IS_DONE(re->re_desc.d_csr)) 561 break; 562 if (!SAFE_PE_LEN_IS_DONE(re->re_desc.d_len)) 563 break; 564 sc->sc_nqchip--; 565 safe_callback(sc, re); 566 } 567 if (++(sc->sc_back) == sc->sc_ringtop) 568 sc->sc_back = sc->sc_ring; 569 } 570 mtx_unlock(&sc->sc_ringmtx); 571 } 572 573 /* 574 * Check to see if we got any DMA Error 575 */ 576 if (stat & SAFE_INT_PE_ERROR) { 577 DPRINTF(("dmaerr dmastat %08x\n", 578 READ_REG(sc, SAFE_PE_DMASTAT))); 579 safestats.st_dmaerr++; 580 safe_totalreset(sc); 581 #if 0 582 safe_feed(sc); 583 #endif 584 } 585 586 if (sc->sc_needwakeup) { /* XXX check high watermark */ 587 int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ); 588 DPRINTF(("%s: wakeup crypto %x\n", __func__, 589 sc->sc_needwakeup)); 590 sc->sc_needwakeup &= ~wakeup; 591 crypto_unblock(sc->sc_cid, wakeup); 592 } 593 } 594 595 /* 596 * safe_feed() - post a request to chip 597 */ 598 static void 599 safe_feed(struct safe_softc *sc, struct safe_ringentry *re) 600 { 601 bus_dmamap_sync(sc->sc_srcdmat, re->re_src_map, BUS_DMASYNC_PREWRITE); 602 if (re->re_dst_map != NULL) 603 bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map, 604 BUS_DMASYNC_PREREAD); 605 /* XXX have no smaller granularity */ 606 safe_dma_sync(&sc->sc_ringalloc, 607 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 608 safe_dma_sync(&sc->sc_spalloc, BUS_DMASYNC_PREWRITE); 609 safe_dma_sync(&sc->sc_dpalloc, BUS_DMASYNC_PREWRITE); 610 611 #ifdef SAFE_DEBUG 612 if (safe_debug) { 613 safe_dump_ringstate(sc, __func__); 614 safe_dump_request(sc, __func__, re); 615 } 616 #endif 617 sc->sc_nqchip++; 618 if (sc->sc_nqchip > safestats.st_maxqchip) 619 safestats.st_maxqchip = sc->sc_nqchip; 620 /* poke h/w to check descriptor ring, any value can be written */ 621 WRITE_REG(sc, SAFE_HI_RD_DESCR, 0); 622 } 623 624 #define N(a) (sizeof(a) / sizeof (a[0])) 625 static void 626 safe_setup_enckey(struct safe_session *ses, const void *key) 627 { 628 int i; 629 630 bcopy(key, ses->ses_key, ses->ses_klen); 631 632 /* PE is little-endian, insure proper byte order */ 633 for (i = 0; i < N(ses->ses_key); i++) 634 ses->ses_key[i] = htole32(ses->ses_key[i]); 635 } 636 637 static void 638 safe_setup_mackey(struct safe_session *ses, int algo, const uint8_t *key, 639 int klen) 640 { 641 SHA1_CTX sha1ctx; 642 int i; 643 644 hmac_init_ipad(&auth_hash_hmac_sha1, key, klen, &sha1ctx); 645 bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32)); 646 647 hmac_init_opad(&auth_hash_hmac_sha1, key, klen, &sha1ctx); 648 bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32)); 649 650 explicit_bzero(&sha1ctx, sizeof(sha1ctx)); 651 652 /* PE is little-endian, insure proper byte order */ 653 for (i = 0; i < N(ses->ses_hminner); i++) { 654 ses->ses_hminner[i] = htole32(ses->ses_hminner[i]); 655 ses->ses_hmouter[i] = htole32(ses->ses_hmouter[i]); 656 } 657 } 658 #undef N 659 660 static bool 661 safe_auth_supported(struct safe_softc *sc, 662 const struct crypto_session_params *csp) 663 { 664 665 switch (csp->csp_auth_alg) { 666 case CRYPTO_SHA1_HMAC: 667 if ((sc->sc_devinfo & SAFE_DEVINFO_SHA1) == 0) 668 return (false); 669 break; 670 default: 671 return (false); 672 } 673 return (true); 674 } 675 676 static bool 677 safe_cipher_supported(struct safe_softc *sc, 678 const struct crypto_session_params *csp) 679 { 680 681 switch (csp->csp_cipher_alg) { 682 case CRYPTO_AES_CBC: 683 if ((sc->sc_devinfo & SAFE_DEVINFO_AES) == 0) 684 return (false); 685 if (csp->csp_ivlen != 16) 686 return (false); 687 if (csp->csp_cipher_klen != 16 && 688 csp->csp_cipher_klen != 24 && 689 csp->csp_cipher_klen != 32) 690 return (false); 691 break; 692 } 693 return (true); 694 } 695 696 static int 697 safe_probesession(device_t dev, const struct crypto_session_params *csp) 698 { 699 struct safe_softc *sc = device_get_softc(dev); 700 701 if (csp->csp_flags != 0) 702 return (EINVAL); 703 switch (csp->csp_mode) { 704 case CSP_MODE_DIGEST: 705 if (!safe_auth_supported(sc, csp)) 706 return (EINVAL); 707 break; 708 case CSP_MODE_CIPHER: 709 if (!safe_cipher_supported(sc, csp)) 710 return (EINVAL); 711 break; 712 case CSP_MODE_ETA: 713 if (!safe_auth_supported(sc, csp) || 714 !safe_cipher_supported(sc, csp)) 715 return (EINVAL); 716 break; 717 default: 718 return (EINVAL); 719 } 720 721 return (CRYPTODEV_PROBE_HARDWARE); 722 } 723 724 /* 725 * Allocate a new 'session'. 726 */ 727 static int 728 safe_newsession(device_t dev, crypto_session_t cses, 729 const struct crypto_session_params *csp) 730 { 731 struct safe_session *ses; 732 733 ses = crypto_get_driver_session(cses); 734 if (csp->csp_cipher_alg != 0) { 735 ses->ses_klen = csp->csp_cipher_klen; 736 if (csp->csp_cipher_key != NULL) 737 safe_setup_enckey(ses, csp->csp_cipher_key); 738 } 739 740 if (csp->csp_auth_alg != 0) { 741 ses->ses_mlen = csp->csp_auth_mlen; 742 if (ses->ses_mlen == 0) { 743 ses->ses_mlen = SHA1_HASH_LEN; 744 } 745 746 if (csp->csp_auth_key != NULL) { 747 safe_setup_mackey(ses, csp->csp_auth_alg, 748 csp->csp_auth_key, csp->csp_auth_klen); 749 } 750 } 751 752 return (0); 753 } 754 755 static void 756 safe_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, int error) 757 { 758 struct safe_operand *op = arg; 759 760 DPRINTF(("%s: nsegs %d error %d\n", __func__, 761 nsegs, error)); 762 if (error != 0) 763 return; 764 op->nsegs = nsegs; 765 bcopy(seg, op->segs, nsegs * sizeof (seg[0])); 766 } 767 768 static int 769 safe_process(device_t dev, struct cryptop *crp, int hint) 770 { 771 struct safe_softc *sc = device_get_softc(dev); 772 const struct crypto_session_params *csp; 773 int err = 0, i, nicealign, uniform; 774 int bypass, oplen; 775 int16_t coffset; 776 struct safe_session *ses; 777 struct safe_ringentry *re; 778 struct safe_sarec *sa; 779 struct safe_pdesc *pd; 780 u_int32_t cmd0, cmd1, staterec; 781 782 mtx_lock(&sc->sc_ringmtx); 783 if (sc->sc_front == sc->sc_back && sc->sc_nqchip != 0) { 784 safestats.st_ringfull++; 785 sc->sc_needwakeup |= CRYPTO_SYMQ; 786 mtx_unlock(&sc->sc_ringmtx); 787 return (ERESTART); 788 } 789 re = sc->sc_front; 790 791 staterec = re->re_sa.sa_staterec; /* save */ 792 /* NB: zero everything but the PE descriptor */ 793 bzero(&re->re_sa, sizeof(struct safe_ringentry) - sizeof(re->re_desc)); 794 re->re_sa.sa_staterec = staterec; /* restore */ 795 796 re->re_crp = crp; 797 798 sa = &re->re_sa; 799 ses = crypto_get_driver_session(crp->crp_session); 800 csp = crypto_get_params(crp->crp_session); 801 802 cmd0 = SAFE_SA_CMD0_BASIC; /* basic group operation */ 803 cmd1 = 0; 804 switch (csp->csp_mode) { 805 case CSP_MODE_DIGEST: 806 cmd0 |= SAFE_SA_CMD0_OP_HASH; 807 break; 808 case CSP_MODE_CIPHER: 809 cmd0 |= SAFE_SA_CMD0_OP_CRYPT; 810 break; 811 case CSP_MODE_ETA: 812 cmd0 |= SAFE_SA_CMD0_OP_BOTH; 813 break; 814 } 815 816 if (csp->csp_cipher_alg != 0) { 817 if (crp->crp_cipher_key != NULL) 818 safe_setup_enckey(ses, crp->crp_cipher_key); 819 820 switch (csp->csp_cipher_alg) { 821 case CRYPTO_AES_CBC: 822 cmd0 |= SAFE_SA_CMD0_AES; 823 cmd1 |= SAFE_SA_CMD1_CBC; 824 if (ses->ses_klen * 8 == 128) 825 cmd1 |= SAFE_SA_CMD1_AES128; 826 else if (ses->ses_klen * 8 == 192) 827 cmd1 |= SAFE_SA_CMD1_AES192; 828 else 829 cmd1 |= SAFE_SA_CMD1_AES256; 830 } 831 832 /* 833 * Setup encrypt/decrypt state. When using basic ops 834 * we can't use an inline IV because hash/crypt offset 835 * must be from the end of the IV to the start of the 836 * crypt data and this leaves out the preceding header 837 * from the hash calculation. Instead we place the IV 838 * in the state record and set the hash/crypt offset to 839 * copy both the header+IV. 840 */ 841 crypto_read_iv(crp, re->re_sastate.sa_saved_iv); 842 cmd0 |= SAFE_SA_CMD0_IVLD_STATE; 843 844 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) { 845 cmd0 |= SAFE_SA_CMD0_OUTBOUND; 846 847 /* 848 * XXX: I suspect we don't need this since we 849 * don't save the returned IV. 850 */ 851 cmd0 |= SAFE_SA_CMD0_SAVEIV; 852 } else { 853 cmd0 |= SAFE_SA_CMD0_INBOUND; 854 } 855 /* 856 * For basic encryption use the zero pad algorithm. 857 * This pads results to an 8-byte boundary and 858 * suppresses padding verification for inbound (i.e. 859 * decrypt) operations. 860 * 861 * NB: Not sure if the 8-byte pad boundary is a problem. 862 */ 863 cmd0 |= SAFE_SA_CMD0_PAD_ZERO; 864 865 /* XXX assert key bufs have the same size */ 866 bcopy(ses->ses_key, sa->sa_key, sizeof(sa->sa_key)); 867 } 868 869 if (csp->csp_auth_alg != 0) { 870 if (crp->crp_auth_key != NULL) { 871 safe_setup_mackey(ses, csp->csp_auth_alg, 872 crp->crp_auth_key, csp->csp_auth_klen); 873 } 874 875 switch (csp->csp_auth_alg) { 876 case CRYPTO_SHA1_HMAC: 877 cmd0 |= SAFE_SA_CMD0_SHA1; 878 cmd1 |= SAFE_SA_CMD1_HMAC; /* NB: enable HMAC */ 879 break; 880 } 881 882 /* 883 * Digest data is loaded from the SA and the hash 884 * result is saved to the state block where we 885 * retrieve it for return to the caller. 886 */ 887 /* XXX assert digest bufs have the same size */ 888 bcopy(ses->ses_hminner, sa->sa_indigest, 889 sizeof(sa->sa_indigest)); 890 bcopy(ses->ses_hmouter, sa->sa_outdigest, 891 sizeof(sa->sa_outdigest)); 892 893 cmd0 |= SAFE_SA_CMD0_HSLD_SA | SAFE_SA_CMD0_SAVEHASH; 894 re->re_flags |= SAFE_QFLAGS_COPYOUTICV; 895 } 896 897 if (csp->csp_mode == CSP_MODE_ETA) { 898 /* 899 * The driver only supports ETA requests where there 900 * is no gap between the AAD and payload. 901 */ 902 if (crp->crp_aad_length != 0 && 903 crp->crp_aad_start + crp->crp_aad_length != 904 crp->crp_payload_start) { 905 safestats.st_lenmismatch++; 906 err = EINVAL; 907 goto errout; 908 } 909 if (crp->crp_aad_length != 0) 910 bypass = crp->crp_aad_start; 911 else 912 bypass = crp->crp_payload_start; 913 coffset = crp->crp_aad_length; 914 oplen = crp->crp_payload_start + crp->crp_payload_length; 915 #ifdef SAFE_DEBUG 916 if (safe_debug) { 917 printf("AAD: skip %d, len %d, digest %d\n", 918 crp->crp_aad_start, crp->crp_aad_length, 919 crp->crp_digest_start); 920 printf("payload: skip %d, len %d, IV %d\n", 921 crp->crp_payload_start, crp->crp_payload_length, 922 crp->crp_iv_start); 923 printf("bypass %d coffset %d oplen %d\n", 924 bypass, coffset, oplen); 925 } 926 #endif 927 if (coffset & 3) { /* offset must be 32-bit aligned */ 928 DPRINTF(("%s: coffset %u misaligned\n", 929 __func__, coffset)); 930 safestats.st_coffmisaligned++; 931 err = EINVAL; 932 goto errout; 933 } 934 coffset >>= 2; 935 if (coffset > 255) { /* offset must be <256 dwords */ 936 DPRINTF(("%s: coffset %u too big\n", 937 __func__, coffset)); 938 safestats.st_cofftoobig++; 939 err = EINVAL; 940 goto errout; 941 } 942 /* 943 * Tell the hardware to copy the header to the output. 944 * The header is defined as the data from the end of 945 * the bypass to the start of data to be encrypted. 946 * Typically this is the inline IV. Note that you need 947 * to do this even if src+dst are the same; it appears 948 * that w/o this bit the crypted data is written 949 * immediately after the bypass data. 950 */ 951 cmd1 |= SAFE_SA_CMD1_HDRCOPY; 952 /* 953 * Disable IP header mutable bit handling. This is 954 * needed to get correct HMAC calculations. 955 */ 956 cmd1 |= SAFE_SA_CMD1_MUTABLE; 957 } else { 958 bypass = crp->crp_payload_start; 959 oplen = bypass + crp->crp_payload_length; 960 coffset = 0; 961 } 962 /* XXX verify multiple of 4 when using s/g */ 963 if (bypass > 96) { /* bypass offset must be <= 96 bytes */ 964 DPRINTF(("%s: bypass %u too big\n", __func__, bypass)); 965 safestats.st_bypasstoobig++; 966 err = EINVAL; 967 goto errout; 968 } 969 970 if (bus_dmamap_create(sc->sc_srcdmat, BUS_DMA_NOWAIT, &re->re_src_map)) { 971 safestats.st_nomap++; 972 err = ENOMEM; 973 goto errout; 974 } 975 if (bus_dmamap_load_crp(sc->sc_srcdmat, re->re_src_map, crp, safe_op_cb, 976 &re->re_src, BUS_DMA_NOWAIT) != 0) { 977 bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map); 978 re->re_src_map = NULL; 979 safestats.st_noload++; 980 err = ENOMEM; 981 goto errout; 982 } 983 re->re_src_mapsize = crypto_buffer_len(&crp->crp_buf); 984 nicealign = safe_dmamap_aligned(&re->re_src); 985 uniform = safe_dmamap_uniform(&re->re_src); 986 987 DPRINTF(("src nicealign %u uniform %u nsegs %u\n", 988 nicealign, uniform, re->re_src.nsegs)); 989 if (re->re_src.nsegs > 1) { 990 re->re_desc.d_src = sc->sc_spalloc.dma_paddr + 991 ((caddr_t) sc->sc_spfree - (caddr_t) sc->sc_spring); 992 for (i = 0; i < re->re_src_nsegs; i++) { 993 /* NB: no need to check if there's space */ 994 pd = sc->sc_spfree; 995 if (++(sc->sc_spfree) == sc->sc_springtop) 996 sc->sc_spfree = sc->sc_spring; 997 998 KASSERT((pd->pd_flags&3) == 0 || 999 (pd->pd_flags&3) == SAFE_PD_DONE, 1000 ("bogus source particle descriptor; flags %x", 1001 pd->pd_flags)); 1002 pd->pd_addr = re->re_src_segs[i].ds_addr; 1003 pd->pd_size = re->re_src_segs[i].ds_len; 1004 pd->pd_flags = SAFE_PD_READY; 1005 } 1006 cmd0 |= SAFE_SA_CMD0_IGATHER; 1007 } else { 1008 /* 1009 * No need for gather, reference the operand directly. 1010 */ 1011 re->re_desc.d_src = re->re_src_segs[0].ds_addr; 1012 } 1013 1014 if (csp->csp_mode == CSP_MODE_DIGEST) { 1015 /* 1016 * Hash op; no destination needed. 1017 */ 1018 } else { 1019 if (nicealign && uniform == 1) { 1020 /* 1021 * Source layout is suitable for direct 1022 * sharing of the DMA map and segment list. 1023 */ 1024 re->re_dst = re->re_src; 1025 } else if (nicealign && uniform == 2) { 1026 /* 1027 * The source is properly aligned but requires a 1028 * different particle list to handle DMA of the 1029 * result. Create a new map and do the load to 1030 * create the segment list. The particle 1031 * descriptor setup code below will handle the 1032 * rest. 1033 */ 1034 if (bus_dmamap_create(sc->sc_dstdmat, BUS_DMA_NOWAIT, 1035 &re->re_dst_map)) { 1036 safestats.st_nomap++; 1037 err = ENOMEM; 1038 goto errout; 1039 } 1040 if (bus_dmamap_load_crp(sc->sc_dstdmat, re->re_dst_map, 1041 crp, safe_op_cb, &re->re_dst, BUS_DMA_NOWAIT) != 1042 0) { 1043 bus_dmamap_destroy(sc->sc_dstdmat, 1044 re->re_dst_map); 1045 re->re_dst_map = NULL; 1046 safestats.st_noload++; 1047 err = ENOMEM; 1048 goto errout; 1049 } 1050 } else if (crp->crp_buf.cb_type == CRYPTO_BUF_MBUF) { 1051 int totlen, len; 1052 struct mbuf *m, *top, **mp; 1053 1054 /* 1055 * DMA constraints require that we allocate a 1056 * new mbuf chain for the destination. We 1057 * allocate an entire new set of mbufs of 1058 * optimal/required size and then tell the 1059 * hardware to copy any bits that are not 1060 * created as a byproduct of the operation. 1061 */ 1062 if (!nicealign) 1063 safestats.st_unaligned++; 1064 if (!uniform) 1065 safestats.st_notuniform++; 1066 totlen = re->re_src_mapsize; 1067 if (crp->crp_buf.cb_mbuf->m_flags & M_PKTHDR) { 1068 len = MHLEN; 1069 MGETHDR(m, M_NOWAIT, MT_DATA); 1070 if (m && !m_dup_pkthdr(m, crp->crp_buf.cb_mbuf, 1071 M_NOWAIT)) { 1072 m_free(m); 1073 m = NULL; 1074 } 1075 } else { 1076 len = MLEN; 1077 MGET(m, M_NOWAIT, MT_DATA); 1078 } 1079 if (m == NULL) { 1080 safestats.st_nombuf++; 1081 err = sc->sc_nqchip ? ERESTART : ENOMEM; 1082 goto errout; 1083 } 1084 if (totlen >= MINCLSIZE) { 1085 if (!(MCLGET(m, M_NOWAIT))) { 1086 m_free(m); 1087 safestats.st_nomcl++; 1088 err = sc->sc_nqchip ? 1089 ERESTART : ENOMEM; 1090 goto errout; 1091 } 1092 len = MCLBYTES; 1093 } 1094 m->m_len = len; 1095 top = NULL; 1096 mp = ⊤ 1097 1098 while (totlen > 0) { 1099 if (top) { 1100 MGET(m, M_NOWAIT, MT_DATA); 1101 if (m == NULL) { 1102 m_freem(top); 1103 safestats.st_nombuf++; 1104 err = sc->sc_nqchip ? 1105 ERESTART : ENOMEM; 1106 goto errout; 1107 } 1108 len = MLEN; 1109 } 1110 if (top && totlen >= MINCLSIZE) { 1111 if (!(MCLGET(m, M_NOWAIT))) { 1112 *mp = m; 1113 m_freem(top); 1114 safestats.st_nomcl++; 1115 err = sc->sc_nqchip ? 1116 ERESTART : ENOMEM; 1117 goto errout; 1118 } 1119 len = MCLBYTES; 1120 } 1121 m->m_len = len = min(totlen, len); 1122 totlen -= len; 1123 *mp = m; 1124 mp = &m->m_next; 1125 } 1126 re->re_dst_m = top; 1127 if (bus_dmamap_create(sc->sc_dstdmat, 1128 BUS_DMA_NOWAIT, &re->re_dst_map) != 0) { 1129 safestats.st_nomap++; 1130 err = ENOMEM; 1131 goto errout; 1132 } 1133 if (bus_dmamap_load_mbuf_sg(sc->sc_dstdmat, 1134 re->re_dst_map, top, re->re_dst_segs, 1135 &re->re_dst_nsegs, 0) != 0) { 1136 bus_dmamap_destroy(sc->sc_dstdmat, 1137 re->re_dst_map); 1138 re->re_dst_map = NULL; 1139 safestats.st_noload++; 1140 err = ENOMEM; 1141 goto errout; 1142 } 1143 re->re_dst_mapsize = re->re_src_mapsize; 1144 if (re->re_src.mapsize > oplen) { 1145 /* 1146 * There's data following what the 1147 * hardware will copy for us. If this 1148 * isn't just the ICV (that's going to 1149 * be written on completion), copy it 1150 * to the new mbufs 1151 */ 1152 if (!(csp->csp_mode == CSP_MODE_ETA && 1153 (re->re_src.mapsize-oplen) == ses->ses_mlen && 1154 crp->crp_digest_start == oplen)) 1155 safe_mcopy(crp->crp_buf.cb_mbuf, 1156 re->re_dst_m, oplen); 1157 else 1158 safestats.st_noicvcopy++; 1159 } 1160 } else { 1161 if (!nicealign) { 1162 safestats.st_iovmisaligned++; 1163 err = EINVAL; 1164 goto errout; 1165 } else { 1166 /* 1167 * There's no way to handle the DMA 1168 * requirements with this uio. We 1169 * could create a separate DMA area for 1170 * the result and then copy it back, 1171 * but for now we just bail and return 1172 * an error. Note that uio requests 1173 * > SAFE_MAX_DSIZE are handled because 1174 * the DMA map and segment list for the 1175 * destination wil result in a 1176 * destination particle list that does 1177 * the necessary scatter DMA. 1178 */ 1179 safestats.st_iovnotuniform++; 1180 err = EINVAL; 1181 goto errout; 1182 } 1183 } 1184 1185 if (re->re_dst.nsegs > 1) { 1186 re->re_desc.d_dst = sc->sc_dpalloc.dma_paddr + 1187 ((caddr_t) sc->sc_dpfree - (caddr_t) sc->sc_dpring); 1188 for (i = 0; i < re->re_dst_nsegs; i++) { 1189 pd = sc->sc_dpfree; 1190 KASSERT((pd->pd_flags&3) == 0 || 1191 (pd->pd_flags&3) == SAFE_PD_DONE, 1192 ("bogus dest particle descriptor; flags %x", 1193 pd->pd_flags)); 1194 if (++(sc->sc_dpfree) == sc->sc_dpringtop) 1195 sc->sc_dpfree = sc->sc_dpring; 1196 pd->pd_addr = re->re_dst_segs[i].ds_addr; 1197 pd->pd_flags = SAFE_PD_READY; 1198 } 1199 cmd0 |= SAFE_SA_CMD0_OSCATTER; 1200 } else { 1201 /* 1202 * No need for scatter, reference the operand directly. 1203 */ 1204 re->re_desc.d_dst = re->re_dst_segs[0].ds_addr; 1205 } 1206 } 1207 1208 /* 1209 * All done with setup; fillin the SA command words 1210 * and the packet engine descriptor. The operation 1211 * is now ready for submission to the hardware. 1212 */ 1213 sa->sa_cmd0 = cmd0 | SAFE_SA_CMD0_IPCI | SAFE_SA_CMD0_OPCI; 1214 sa->sa_cmd1 = cmd1 1215 | (coffset << SAFE_SA_CMD1_OFFSET_S) 1216 | SAFE_SA_CMD1_SAREV1 /* Rev 1 SA data structure */ 1217 | SAFE_SA_CMD1_SRPCI 1218 ; 1219 /* 1220 * NB: the order of writes is important here. In case the 1221 * chip is scanning the ring because of an outstanding request 1222 * it might nab this one too. In that case we need to make 1223 * sure the setup is complete before we write the length 1224 * field of the descriptor as it signals the descriptor is 1225 * ready for processing. 1226 */ 1227 re->re_desc.d_csr = SAFE_PE_CSR_READY | SAFE_PE_CSR_SAPCI; 1228 if (csp->csp_auth_alg != 0) 1229 re->re_desc.d_csr |= SAFE_PE_CSR_LOADSA | SAFE_PE_CSR_HASHFINAL; 1230 re->re_desc.d_len = oplen 1231 | SAFE_PE_LEN_READY 1232 | (bypass << SAFE_PE_LEN_BYPASS_S) 1233 ; 1234 1235 safestats.st_ipackets++; 1236 safestats.st_ibytes += oplen; 1237 1238 if (++(sc->sc_front) == sc->sc_ringtop) 1239 sc->sc_front = sc->sc_ring; 1240 1241 /* XXX honor batching */ 1242 safe_feed(sc, re); 1243 mtx_unlock(&sc->sc_ringmtx); 1244 return (0); 1245 1246 errout: 1247 if (re->re_dst_m != NULL) 1248 m_freem(re->re_dst_m); 1249 1250 if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) { 1251 bus_dmamap_unload(sc->sc_dstdmat, re->re_dst_map); 1252 bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map); 1253 } 1254 if (re->re_src_map != NULL) { 1255 bus_dmamap_unload(sc->sc_srcdmat, re->re_src_map); 1256 bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map); 1257 } 1258 mtx_unlock(&sc->sc_ringmtx); 1259 if (err != ERESTART) { 1260 crp->crp_etype = err; 1261 crypto_done(crp); 1262 } else { 1263 sc->sc_needwakeup |= CRYPTO_SYMQ; 1264 } 1265 return (err); 1266 } 1267 1268 static void 1269 safe_callback(struct safe_softc *sc, struct safe_ringentry *re) 1270 { 1271 const struct crypto_session_params *csp; 1272 struct cryptop *crp = (struct cryptop *)re->re_crp; 1273 struct safe_session *ses; 1274 uint8_t hash[HASH_MAX_LEN]; 1275 1276 ses = crypto_get_driver_session(crp->crp_session); 1277 csp = crypto_get_params(crp->crp_session); 1278 1279 safestats.st_opackets++; 1280 safestats.st_obytes += re->re_dst.mapsize; 1281 1282 safe_dma_sync(&sc->sc_ringalloc, 1283 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1284 if (re->re_desc.d_csr & SAFE_PE_CSR_STATUS) { 1285 device_printf(sc->sc_dev, "csr 0x%x cmd0 0x%x cmd1 0x%x\n", 1286 re->re_desc.d_csr, 1287 re->re_sa.sa_cmd0, re->re_sa.sa_cmd1); 1288 safestats.st_peoperr++; 1289 crp->crp_etype = EIO; /* something more meaningful? */ 1290 } 1291 1292 /* 1293 * XXX: Should crp_buf.cb_mbuf be updated to re->re_dst_m if 1294 * it is non-NULL? 1295 */ 1296 1297 if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) { 1298 bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map, 1299 BUS_DMASYNC_POSTREAD); 1300 bus_dmamap_unload(sc->sc_dstdmat, re->re_dst_map); 1301 bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map); 1302 } 1303 bus_dmamap_sync(sc->sc_srcdmat, re->re_src_map, BUS_DMASYNC_POSTWRITE); 1304 bus_dmamap_unload(sc->sc_srcdmat, re->re_src_map); 1305 bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map); 1306 1307 if (re->re_flags & SAFE_QFLAGS_COPYOUTICV) { 1308 if (csp->csp_auth_alg == CRYPTO_SHA1_HMAC) { 1309 /* 1310 * SHA-1 ICV's are byte-swapped; fix 'em up 1311 * before copying them to their destination. 1312 */ 1313 re->re_sastate.sa_saved_indigest[0] = 1314 bswap32(re->re_sastate.sa_saved_indigest[0]); 1315 re->re_sastate.sa_saved_indigest[1] = 1316 bswap32(re->re_sastate.sa_saved_indigest[1]); 1317 re->re_sastate.sa_saved_indigest[2] = 1318 bswap32(re->re_sastate.sa_saved_indigest[2]); 1319 } 1320 1321 if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) { 1322 crypto_copydata(crp, crp->crp_digest_start, 1323 ses->ses_mlen, hash); 1324 if (timingsafe_bcmp(re->re_sastate.sa_saved_indigest, 1325 hash, ses->ses_mlen) != 0) 1326 crp->crp_etype = EBADMSG; 1327 } else 1328 crypto_copyback(crp, crp->crp_digest_start, 1329 ses->ses_mlen, re->re_sastate.sa_saved_indigest); 1330 } 1331 crypto_done(crp); 1332 } 1333 1334 /* 1335 * Copy all data past offset from srcm to dstm. 1336 */ 1337 static void 1338 safe_mcopy(struct mbuf *srcm, struct mbuf *dstm, u_int offset) 1339 { 1340 u_int j, dlen, slen; 1341 caddr_t dptr, sptr; 1342 1343 /* 1344 * Advance src and dst to offset. 1345 */ 1346 j = offset; 1347 while (j >= srcm->m_len) { 1348 j -= srcm->m_len; 1349 srcm = srcm->m_next; 1350 if (srcm == NULL) 1351 return; 1352 } 1353 sptr = mtod(srcm, caddr_t) + j; 1354 slen = srcm->m_len - j; 1355 1356 j = offset; 1357 while (j >= dstm->m_len) { 1358 j -= dstm->m_len; 1359 dstm = dstm->m_next; 1360 if (dstm == NULL) 1361 return; 1362 } 1363 dptr = mtod(dstm, caddr_t) + j; 1364 dlen = dstm->m_len - j; 1365 1366 /* 1367 * Copy everything that remains. 1368 */ 1369 for (;;) { 1370 j = min(slen, dlen); 1371 bcopy(sptr, dptr, j); 1372 if (slen == j) { 1373 srcm = srcm->m_next; 1374 if (srcm == NULL) 1375 return; 1376 sptr = srcm->m_data; 1377 slen = srcm->m_len; 1378 } else 1379 sptr += j, slen -= j; 1380 if (dlen == j) { 1381 dstm = dstm->m_next; 1382 if (dstm == NULL) 1383 return; 1384 dptr = dstm->m_data; 1385 dlen = dstm->m_len; 1386 } else 1387 dptr += j, dlen -= j; 1388 } 1389 } 1390 1391 #ifndef SAFE_NO_RNG 1392 #define SAFE_RNG_MAXWAIT 1000 1393 1394 static void 1395 safe_rng_init(struct safe_softc *sc) 1396 { 1397 u_int32_t w, v; 1398 int i; 1399 1400 WRITE_REG(sc, SAFE_RNG_CTRL, 0); 1401 /* use default value according to the manual */ 1402 WRITE_REG(sc, SAFE_RNG_CNFG, 0x834); /* magic from SafeNet */ 1403 WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0); 1404 1405 /* 1406 * There is a bug in rev 1.0 of the 1140 that when the RNG 1407 * is brought out of reset the ready status flag does not 1408 * work until the RNG has finished its internal initialization. 1409 * 1410 * So in order to determine the device is through its 1411 * initialization we must read the data register, using the 1412 * status reg in the read in case it is initialized. Then read 1413 * the data register until it changes from the first read. 1414 * Once it changes read the data register until it changes 1415 * again. At this time the RNG is considered initialized. 1416 * This could take between 750ms - 1000ms in time. 1417 */ 1418 i = 0; 1419 w = READ_REG(sc, SAFE_RNG_OUT); 1420 do { 1421 v = READ_REG(sc, SAFE_RNG_OUT); 1422 if (v != w) { 1423 w = v; 1424 break; 1425 } 1426 DELAY(10); 1427 } while (++i < SAFE_RNG_MAXWAIT); 1428 1429 /* Wait Until data changes again */ 1430 i = 0; 1431 do { 1432 v = READ_REG(sc, SAFE_RNG_OUT); 1433 if (v != w) 1434 break; 1435 DELAY(10); 1436 } while (++i < SAFE_RNG_MAXWAIT); 1437 } 1438 1439 static __inline void 1440 safe_rng_disable_short_cycle(struct safe_softc *sc) 1441 { 1442 WRITE_REG(sc, SAFE_RNG_CTRL, 1443 READ_REG(sc, SAFE_RNG_CTRL) &~ SAFE_RNG_CTRL_SHORTEN); 1444 } 1445 1446 static __inline void 1447 safe_rng_enable_short_cycle(struct safe_softc *sc) 1448 { 1449 WRITE_REG(sc, SAFE_RNG_CTRL, 1450 READ_REG(sc, SAFE_RNG_CTRL) | SAFE_RNG_CTRL_SHORTEN); 1451 } 1452 1453 static __inline u_int32_t 1454 safe_rng_read(struct safe_softc *sc) 1455 { 1456 int i; 1457 1458 i = 0; 1459 while (READ_REG(sc, SAFE_RNG_STAT) != 0 && ++i < SAFE_RNG_MAXWAIT) 1460 ; 1461 return READ_REG(sc, SAFE_RNG_OUT); 1462 } 1463 1464 static void 1465 safe_rng(void *arg) 1466 { 1467 struct safe_softc *sc = arg; 1468 u_int32_t buf[SAFE_RNG_MAXBUFSIZ]; /* NB: maybe move to softc */ 1469 u_int maxwords; 1470 int i; 1471 1472 safestats.st_rng++; 1473 /* 1474 * Fetch the next block of data. 1475 */ 1476 maxwords = safe_rngbufsize; 1477 if (maxwords > SAFE_RNG_MAXBUFSIZ) 1478 maxwords = SAFE_RNG_MAXBUFSIZ; 1479 retry: 1480 for (i = 0; i < maxwords; i++) 1481 buf[i] = safe_rng_read(sc); 1482 /* 1483 * Check the comparator alarm count and reset the h/w if 1484 * it exceeds our threshold. This guards against the 1485 * hardware oscillators resonating with external signals. 1486 */ 1487 if (READ_REG(sc, SAFE_RNG_ALM_CNT) > safe_rngmaxalarm) { 1488 u_int32_t freq_inc, w; 1489 1490 DPRINTF(("%s: alarm count %u exceeds threshold %u\n", __func__, 1491 READ_REG(sc, SAFE_RNG_ALM_CNT), safe_rngmaxalarm)); 1492 safestats.st_rngalarm++; 1493 safe_rng_enable_short_cycle(sc); 1494 freq_inc = 18; 1495 for (i = 0; i < 64; i++) { 1496 w = READ_REG(sc, SAFE_RNG_CNFG); 1497 freq_inc = ((w + freq_inc) & 0x3fL); 1498 w = ((w & ~0x3fL) | freq_inc); 1499 WRITE_REG(sc, SAFE_RNG_CNFG, w); 1500 1501 WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0); 1502 1503 (void) safe_rng_read(sc); 1504 DELAY(25); 1505 1506 if (READ_REG(sc, SAFE_RNG_ALM_CNT) == 0) { 1507 safe_rng_disable_short_cycle(sc); 1508 goto retry; 1509 } 1510 freq_inc = 1; 1511 } 1512 safe_rng_disable_short_cycle(sc); 1513 } else 1514 WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0); 1515 1516 (*sc->sc_harvest)(sc->sc_rndtest, buf, maxwords*sizeof (u_int32_t)); 1517 callout_reset(&sc->sc_rngto, 1518 hz * (safe_rnginterval ? safe_rnginterval : 1), safe_rng, sc); 1519 } 1520 #endif /* SAFE_NO_RNG */ 1521 1522 static void 1523 safe_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1524 { 1525 bus_addr_t *paddr = (bus_addr_t*) arg; 1526 *paddr = segs->ds_addr; 1527 } 1528 1529 static int 1530 safe_dma_malloc( 1531 struct safe_softc *sc, 1532 bus_size_t size, 1533 struct safe_dma_alloc *dma, 1534 int mapflags 1535 ) 1536 { 1537 int r; 1538 1539 r = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ 1540 sizeof(u_int32_t), 0, /* alignment, bounds */ 1541 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1542 BUS_SPACE_MAXADDR, /* highaddr */ 1543 NULL, NULL, /* filter, filterarg */ 1544 size, /* maxsize */ 1545 1, /* nsegments */ 1546 size, /* maxsegsize */ 1547 BUS_DMA_ALLOCNOW, /* flags */ 1548 NULL, NULL, /* locking */ 1549 &dma->dma_tag); 1550 if (r != 0) { 1551 device_printf(sc->sc_dev, "safe_dma_malloc: " 1552 "bus_dma_tag_create failed; error %u\n", r); 1553 goto fail_0; 1554 } 1555 1556 r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr, 1557 BUS_DMA_NOWAIT, &dma->dma_map); 1558 if (r != 0) { 1559 device_printf(sc->sc_dev, "safe_dma_malloc: " 1560 "bus_dmammem_alloc failed; size %ju, error %u\n", 1561 (uintmax_t)size, r); 1562 goto fail_1; 1563 } 1564 1565 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, 1566 size, 1567 safe_dmamap_cb, 1568 &dma->dma_paddr, 1569 mapflags | BUS_DMA_NOWAIT); 1570 if (r != 0) { 1571 device_printf(sc->sc_dev, "safe_dma_malloc: " 1572 "bus_dmamap_load failed; error %u\n", r); 1573 goto fail_2; 1574 } 1575 1576 dma->dma_size = size; 1577 return (0); 1578 1579 bus_dmamap_unload(dma->dma_tag, dma->dma_map); 1580 fail_2: 1581 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); 1582 fail_1: 1583 bus_dma_tag_destroy(dma->dma_tag); 1584 fail_0: 1585 dma->dma_tag = NULL; 1586 return (r); 1587 } 1588 1589 static void 1590 safe_dma_free(struct safe_softc *sc, struct safe_dma_alloc *dma) 1591 { 1592 bus_dmamap_unload(dma->dma_tag, dma->dma_map); 1593 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); 1594 bus_dma_tag_destroy(dma->dma_tag); 1595 } 1596 1597 /* 1598 * Resets the board. Values in the regesters are left as is 1599 * from the reset (i.e. initial values are assigned elsewhere). 1600 */ 1601 static void 1602 safe_reset_board(struct safe_softc *sc) 1603 { 1604 u_int32_t v; 1605 /* 1606 * Reset the device. The manual says no delay 1607 * is needed between marking and clearing reset. 1608 */ 1609 v = READ_REG(sc, SAFE_PE_DMACFG) &~ 1610 (SAFE_PE_DMACFG_PERESET | SAFE_PE_DMACFG_PDRRESET | 1611 SAFE_PE_DMACFG_SGRESET); 1612 WRITE_REG(sc, SAFE_PE_DMACFG, v 1613 | SAFE_PE_DMACFG_PERESET 1614 | SAFE_PE_DMACFG_PDRRESET 1615 | SAFE_PE_DMACFG_SGRESET); 1616 WRITE_REG(sc, SAFE_PE_DMACFG, v); 1617 } 1618 1619 /* 1620 * Initialize registers we need to touch only once. 1621 */ 1622 static void 1623 safe_init_board(struct safe_softc *sc) 1624 { 1625 u_int32_t v, dwords; 1626 1627 v = READ_REG(sc, SAFE_PE_DMACFG); 1628 v &=~ SAFE_PE_DMACFG_PEMODE; 1629 v |= SAFE_PE_DMACFG_FSENA /* failsafe enable */ 1630 | SAFE_PE_DMACFG_GPRPCI /* gather ring on PCI */ 1631 | SAFE_PE_DMACFG_SPRPCI /* scatter ring on PCI */ 1632 | SAFE_PE_DMACFG_ESDESC /* endian-swap descriptors */ 1633 | SAFE_PE_DMACFG_ESSA /* endian-swap SA's */ 1634 | SAFE_PE_DMACFG_ESPDESC /* endian-swap part. desc's */ 1635 ; 1636 WRITE_REG(sc, SAFE_PE_DMACFG, v); 1637 #if 0 1638 /* XXX select byte swap based on host byte order */ 1639 WRITE_REG(sc, SAFE_ENDIAN, 0x1b); 1640 #endif 1641 if (sc->sc_chiprev == SAFE_REV(1,0)) { 1642 /* 1643 * Avoid large PCI DMA transfers. Rev 1.0 has a bug where 1644 * "target mode transfers" done while the chip is DMA'ing 1645 * >1020 bytes cause the hardware to lockup. To avoid this 1646 * we reduce the max PCI transfer size and use small source 1647 * particle descriptors (<= 256 bytes). 1648 */ 1649 WRITE_REG(sc, SAFE_DMA_CFG, 256); 1650 device_printf(sc->sc_dev, 1651 "Reduce max DMA size to %u words for rev %u.%u WAR\n", 1652 (READ_REG(sc, SAFE_DMA_CFG)>>2) & 0xff, 1653 SAFE_REV_MAJ(sc->sc_chiprev), 1654 SAFE_REV_MIN(sc->sc_chiprev)); 1655 } 1656 1657 /* NB: operands+results are overlaid */ 1658 WRITE_REG(sc, SAFE_PE_PDRBASE, sc->sc_ringalloc.dma_paddr); 1659 WRITE_REG(sc, SAFE_PE_RDRBASE, sc->sc_ringalloc.dma_paddr); 1660 /* 1661 * Configure ring entry size and number of items in the ring. 1662 */ 1663 KASSERT((sizeof(struct safe_ringentry) % sizeof(u_int32_t)) == 0, 1664 ("PE ring entry not 32-bit aligned!")); 1665 dwords = sizeof(struct safe_ringentry) / sizeof(u_int32_t); 1666 WRITE_REG(sc, SAFE_PE_RINGCFG, 1667 (dwords << SAFE_PE_RINGCFG_OFFSET_S) | SAFE_MAX_NQUEUE); 1668 WRITE_REG(sc, SAFE_PE_RINGPOLL, 0); /* disable polling */ 1669 1670 WRITE_REG(sc, SAFE_PE_GRNGBASE, sc->sc_spalloc.dma_paddr); 1671 WRITE_REG(sc, SAFE_PE_SRNGBASE, sc->sc_dpalloc.dma_paddr); 1672 WRITE_REG(sc, SAFE_PE_PARTSIZE, 1673 (SAFE_TOTAL_DPART<<16) | SAFE_TOTAL_SPART); 1674 /* 1675 * NB: destination particles are fixed size. We use 1676 * an mbuf cluster and require all results go to 1677 * clusters or smaller. 1678 */ 1679 WRITE_REG(sc, SAFE_PE_PARTCFG, SAFE_MAX_DSIZE); 1680 1681 /* it's now safe to enable PE mode, do it */ 1682 WRITE_REG(sc, SAFE_PE_DMACFG, v | SAFE_PE_DMACFG_PEMODE); 1683 1684 /* 1685 * Configure hardware to use level-triggered interrupts and 1686 * to interrupt after each descriptor is processed. 1687 */ 1688 WRITE_REG(sc, SAFE_HI_CFG, SAFE_HI_CFG_LEVEL); 1689 WRITE_REG(sc, SAFE_HI_DESC_CNT, 1); 1690 WRITE_REG(sc, SAFE_HI_MASK, SAFE_INT_PE_DDONE | SAFE_INT_PE_ERROR); 1691 } 1692 1693 /* 1694 * Init PCI registers 1695 */ 1696 static void 1697 safe_init_pciregs(device_t dev) 1698 { 1699 } 1700 1701 /* 1702 * Clean up after a chip crash. 1703 * It is assumed that the caller in splimp() 1704 */ 1705 static void 1706 safe_cleanchip(struct safe_softc *sc) 1707 { 1708 1709 if (sc->sc_nqchip != 0) { 1710 struct safe_ringentry *re = sc->sc_back; 1711 1712 while (re != sc->sc_front) { 1713 if (re->re_desc.d_csr != 0) 1714 safe_free_entry(sc, re); 1715 if (++re == sc->sc_ringtop) 1716 re = sc->sc_ring; 1717 } 1718 sc->sc_back = re; 1719 sc->sc_nqchip = 0; 1720 } 1721 } 1722 1723 /* 1724 * free a safe_q 1725 * It is assumed that the caller is within splimp(). 1726 */ 1727 static int 1728 safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re) 1729 { 1730 struct cryptop *crp; 1731 1732 /* 1733 * Free header MCR 1734 */ 1735 if (re->re_dst_m != NULL) 1736 m_freem(re->re_dst_m); 1737 1738 crp = (struct cryptop *)re->re_crp; 1739 1740 re->re_desc.d_csr = 0; 1741 1742 crp->crp_etype = EFAULT; 1743 crypto_done(crp); 1744 return(0); 1745 } 1746 1747 /* 1748 * Routine to reset the chip and clean up. 1749 * It is assumed that the caller is in splimp() 1750 */ 1751 static void 1752 safe_totalreset(struct safe_softc *sc) 1753 { 1754 safe_reset_board(sc); 1755 safe_init_board(sc); 1756 safe_cleanchip(sc); 1757 } 1758 1759 /* 1760 * Is the operand suitable aligned for direct DMA. Each 1761 * segment must be aligned on a 32-bit boundary and all 1762 * but the last segment must be a multiple of 4 bytes. 1763 */ 1764 static int 1765 safe_dmamap_aligned(const struct safe_operand *op) 1766 { 1767 int i; 1768 1769 for (i = 0; i < op->nsegs; i++) { 1770 if (op->segs[i].ds_addr & 3) 1771 return (0); 1772 if (i != (op->nsegs - 1) && (op->segs[i].ds_len & 3)) 1773 return (0); 1774 } 1775 return (1); 1776 } 1777 1778 /* 1779 * Is the operand suitable for direct DMA as the destination 1780 * of an operation. The hardware requires that each ``particle'' 1781 * but the last in an operation result have the same size. We 1782 * fix that size at SAFE_MAX_DSIZE bytes. This routine returns 1783 * 0 if some segment is not a multiple of of this size, 1 if all 1784 * segments are exactly this size, or 2 if segments are at worst 1785 * a multple of this size. 1786 */ 1787 static int 1788 safe_dmamap_uniform(const struct safe_operand *op) 1789 { 1790 int result = 1; 1791 1792 if (op->nsegs > 0) { 1793 int i; 1794 1795 for (i = 0; i < op->nsegs-1; i++) { 1796 if (op->segs[i].ds_len % SAFE_MAX_DSIZE) 1797 return (0); 1798 if (op->segs[i].ds_len != SAFE_MAX_DSIZE) 1799 result = 2; 1800 } 1801 } 1802 return (result); 1803 } 1804 1805 #ifdef SAFE_DEBUG 1806 static void 1807 safe_dump_dmastatus(struct safe_softc *sc, const char *tag) 1808 { 1809 printf("%s: ENDIAN 0x%x SRC 0x%x DST 0x%x STAT 0x%x\n" 1810 , tag 1811 , READ_REG(sc, SAFE_DMA_ENDIAN) 1812 , READ_REG(sc, SAFE_DMA_SRCADDR) 1813 , READ_REG(sc, SAFE_DMA_DSTADDR) 1814 , READ_REG(sc, SAFE_DMA_STAT) 1815 ); 1816 } 1817 1818 static void 1819 safe_dump_intrstate(struct safe_softc *sc, const char *tag) 1820 { 1821 printf("%s: HI_CFG 0x%x HI_MASK 0x%x HI_DESC_CNT 0x%x HU_STAT 0x%x HM_STAT 0x%x\n" 1822 , tag 1823 , READ_REG(sc, SAFE_HI_CFG) 1824 , READ_REG(sc, SAFE_HI_MASK) 1825 , READ_REG(sc, SAFE_HI_DESC_CNT) 1826 , READ_REG(sc, SAFE_HU_STAT) 1827 , READ_REG(sc, SAFE_HM_STAT) 1828 ); 1829 } 1830 1831 static void 1832 safe_dump_ringstate(struct safe_softc *sc, const char *tag) 1833 { 1834 u_int32_t estat = READ_REG(sc, SAFE_PE_ERNGSTAT); 1835 1836 /* NB: assume caller has lock on ring */ 1837 printf("%s: ERNGSTAT %x (next %u) back %lu front %lu\n", 1838 tag, 1839 estat, (estat >> SAFE_PE_ERNGSTAT_NEXT_S), 1840 (unsigned long)(sc->sc_back - sc->sc_ring), 1841 (unsigned long)(sc->sc_front - sc->sc_ring)); 1842 } 1843 1844 static void 1845 safe_dump_request(struct safe_softc *sc, const char* tag, struct safe_ringentry *re) 1846 { 1847 int ix, nsegs; 1848 1849 ix = re - sc->sc_ring; 1850 printf("%s: %p (%u): csr %x src %x dst %x sa %x len %x\n" 1851 , tag 1852 , re, ix 1853 , re->re_desc.d_csr 1854 , re->re_desc.d_src 1855 , re->re_desc.d_dst 1856 , re->re_desc.d_sa 1857 , re->re_desc.d_len 1858 ); 1859 if (re->re_src.nsegs > 1) { 1860 ix = (re->re_desc.d_src - sc->sc_spalloc.dma_paddr) / 1861 sizeof(struct safe_pdesc); 1862 for (nsegs = re->re_src.nsegs; nsegs; nsegs--) { 1863 printf(" spd[%u] %p: %p size %u flags %x" 1864 , ix, &sc->sc_spring[ix] 1865 , (caddr_t)(uintptr_t) sc->sc_spring[ix].pd_addr 1866 , sc->sc_spring[ix].pd_size 1867 , sc->sc_spring[ix].pd_flags 1868 ); 1869 if (sc->sc_spring[ix].pd_size == 0) 1870 printf(" (zero!)"); 1871 printf("\n"); 1872 if (++ix == SAFE_TOTAL_SPART) 1873 ix = 0; 1874 } 1875 } 1876 if (re->re_dst.nsegs > 1) { 1877 ix = (re->re_desc.d_dst - sc->sc_dpalloc.dma_paddr) / 1878 sizeof(struct safe_pdesc); 1879 for (nsegs = re->re_dst.nsegs; nsegs; nsegs--) { 1880 printf(" dpd[%u] %p: %p flags %x\n" 1881 , ix, &sc->sc_dpring[ix] 1882 , (caddr_t)(uintptr_t) sc->sc_dpring[ix].pd_addr 1883 , sc->sc_dpring[ix].pd_flags 1884 ); 1885 if (++ix == SAFE_TOTAL_DPART) 1886 ix = 0; 1887 } 1888 } 1889 printf("sa: cmd0 %08x cmd1 %08x staterec %x\n", 1890 re->re_sa.sa_cmd0, re->re_sa.sa_cmd1, re->re_sa.sa_staterec); 1891 printf("sa: key %x %x %x %x %x %x %x %x\n" 1892 , re->re_sa.sa_key[0] 1893 , re->re_sa.sa_key[1] 1894 , re->re_sa.sa_key[2] 1895 , re->re_sa.sa_key[3] 1896 , re->re_sa.sa_key[4] 1897 , re->re_sa.sa_key[5] 1898 , re->re_sa.sa_key[6] 1899 , re->re_sa.sa_key[7] 1900 ); 1901 printf("sa: indigest %x %x %x %x %x\n" 1902 , re->re_sa.sa_indigest[0] 1903 , re->re_sa.sa_indigest[1] 1904 , re->re_sa.sa_indigest[2] 1905 , re->re_sa.sa_indigest[3] 1906 , re->re_sa.sa_indigest[4] 1907 ); 1908 printf("sa: outdigest %x %x %x %x %x\n" 1909 , re->re_sa.sa_outdigest[0] 1910 , re->re_sa.sa_outdigest[1] 1911 , re->re_sa.sa_outdigest[2] 1912 , re->re_sa.sa_outdigest[3] 1913 , re->re_sa.sa_outdigest[4] 1914 ); 1915 printf("sr: iv %x %x %x %x\n" 1916 , re->re_sastate.sa_saved_iv[0] 1917 , re->re_sastate.sa_saved_iv[1] 1918 , re->re_sastate.sa_saved_iv[2] 1919 , re->re_sastate.sa_saved_iv[3] 1920 ); 1921 printf("sr: hashbc %u indigest %x %x %x %x %x\n" 1922 , re->re_sastate.sa_saved_hashbc 1923 , re->re_sastate.sa_saved_indigest[0] 1924 , re->re_sastate.sa_saved_indigest[1] 1925 , re->re_sastate.sa_saved_indigest[2] 1926 , re->re_sastate.sa_saved_indigest[3] 1927 , re->re_sastate.sa_saved_indigest[4] 1928 ); 1929 } 1930 1931 static void 1932 safe_dump_ring(struct safe_softc *sc, const char *tag) 1933 { 1934 mtx_lock(&sc->sc_ringmtx); 1935 printf("\nSafeNet Ring State:\n"); 1936 safe_dump_intrstate(sc, tag); 1937 safe_dump_dmastatus(sc, tag); 1938 safe_dump_ringstate(sc, tag); 1939 if (sc->sc_nqchip) { 1940 struct safe_ringentry *re = sc->sc_back; 1941 do { 1942 safe_dump_request(sc, tag, re); 1943 if (++re == sc->sc_ringtop) 1944 re = sc->sc_ring; 1945 } while (re != sc->sc_front); 1946 } 1947 mtx_unlock(&sc->sc_ringmtx); 1948 } 1949 1950 static int 1951 sysctl_hw_safe_dump(SYSCTL_HANDLER_ARGS) 1952 { 1953 char dmode[64]; 1954 int error; 1955 1956 strncpy(dmode, "", sizeof(dmode) - 1); 1957 dmode[sizeof(dmode) - 1] = '\0'; 1958 error = sysctl_handle_string(oidp, &dmode[0], sizeof(dmode), req); 1959 1960 if (error == 0 && req->newptr != NULL) { 1961 struct safe_softc *sc = safec; 1962 1963 if (!sc) 1964 return EINVAL; 1965 if (strncmp(dmode, "dma", 3) == 0) 1966 safe_dump_dmastatus(sc, "safe0"); 1967 else if (strncmp(dmode, "int", 3) == 0) 1968 safe_dump_intrstate(sc, "safe0"); 1969 else if (strncmp(dmode, "ring", 4) == 0) 1970 safe_dump_ring(sc, "safe0"); 1971 else 1972 return EINVAL; 1973 } 1974 return error; 1975 } 1976 SYSCTL_PROC(_hw_safe, OID_AUTO, dump, 1977 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 0, 0, 1978 sysctl_hw_safe_dump, "A", 1979 "Dump driver state"); 1980 #endif /* SAFE_DEBUG */ 1981