/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2003 Sam Leffler, Errno Consulting
 * Copyright (c) 2003 Global Technology Associates, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * SafeNet SafeXcel-1141 hardware crypto accelerator
 */
#include "opt_safe.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform_auth.h>
#include <sys/random.h>
#include <sys/kobj.h>

#include "cryptodev_if.h"

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#ifdef SAFE_RNDTEST
#include <dev/rndtest/rndtest.h>
#endif
#include <dev/safe/safereg.h>
#include <dev/safe/safevar.h>

#ifndef bswap32
#define	bswap32	NTOHL
#endif

/*
 * Prototypes and count for the pci_device structure
 */
static	int safe_probe(device_t);
static	int safe_attach(device_t);
static	int safe_detach(device_t);
static	int safe_suspend(device_t);
static	int safe_resume(device_t);
static	int safe_shutdown(device_t);

static	int safe_probesession(device_t, const struct crypto_session_params *);
static	int safe_newsession(device_t, crypto_session_t,
	    const struct crypto_session_params *);
static	int safe_process(device_t, struct cryptop *, int);

static device_method_t safe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		safe_probe),
	DEVMETHOD(device_attach,	safe_attach),
	DEVMETHOD(device_detach,	safe_detach),
	DEVMETHOD(device_suspend,	safe_suspend),
	DEVMETHOD(device_resume,	safe_resume),
	DEVMETHOD(device_shutdown,	safe_shutdown),

	/* crypto device methods */
	DEVMETHOD(cryptodev_probesession, safe_probesession),
	DEVMETHOD(cryptodev_newsession,	safe_newsession),
	DEVMETHOD(cryptodev_process,	safe_process),

	DEVMETHOD_END
};
static driver_t safe_driver = {
	"safe",
	safe_methods,
	sizeof (struct safe_softc)
};
static devclass_t safe_devclass;

DRIVER_MODULE(safe, pci, safe_driver, safe_devclass, 0, 0);
MODULE_DEPEND(safe, crypto, 1, 1, 1);
#ifdef SAFE_RNDTEST
MODULE_DEPEND(safe, rndtest, 1, 1, 1);
#endif

static	void safe_intr(void *);
static	void safe_callback(struct safe_softc *, struct safe_ringentry *);
static	void safe_feed(struct safe_softc *, struct safe_ringentry *);
static	void safe_mcopy(struct mbuf *, struct mbuf *, u_int);
#ifndef SAFE_NO_RNG
static	void safe_rng_init(struct safe_softc *);
static	void safe_rng(void *);
#endif /* SAFE_NO_RNG */
static	int safe_dma_malloc(struct safe_softc *, bus_size_t,
	    struct safe_dma_alloc *, int);
#define	safe_dma_sync(_dma, _flags) \
	bus_dmamap_sync((_dma)->dma_tag, (_dma)->dma_map, (_flags))
static	void safe_dma_free(struct safe_softc *, struct safe_dma_alloc *);
static	int safe_dmamap_aligned(const struct safe_operand *);
static	int safe_dmamap_uniform(const struct safe_operand *);

static	void safe_reset_board(struct safe_softc *);
static	void safe_init_board(struct safe_softc *);
static	void safe_init_pciregs(device_t dev);
static	void safe_cleanchip(struct safe_softc *);
static	void safe_totalreset(struct safe_softc *);

static	int safe_free_entry(struct safe_softc *, struct safe_ringentry *);

static SYSCTL_NODE(_hw, OID_AUTO, safe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "SafeNet driver parameters");

#ifdef SAFE_DEBUG
static	void safe_dump_dmastatus(struct safe_softc *, const char *);
static	void safe_dump_ringstate(struct safe_softc *, const char *);
static	void safe_dump_intrstate(struct safe_softc *, const char *);
static	void safe_dump_request(struct safe_softc *, const char *,
	    struct safe_ringentry *);

static	struct safe_softc *safec;	/* for use by hw.safe.dump */

static	int safe_debug = 0;
SYSCTL_INT(_hw_safe, OID_AUTO, debug, CTLFLAG_RW, &safe_debug,
	    0, "control debugging msgs");
#define	DPRINTF(_x)	if (safe_debug) printf _x
#else
#define	DPRINTF(_x)
#endif

#define	READ_REG(sc,r) \
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (r))

#define	WRITE_REG(sc,reg,val) \
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, reg, val)

struct safe_stats safestats;
SYSCTL_STRUCT(_hw_safe, OID_AUTO, stats, CTLFLAG_RD, &safestats,
	    safe_stats, "driver statistics");
#ifndef SAFE_NO_RNG
static	int safe_rnginterval = 1;		/* poll once a second */
SYSCTL_INT(_hw_safe, OID_AUTO, rnginterval, CTLFLAG_RW, &safe_rnginterval,
	    0, "RNG polling interval (secs)");
static	int safe_rngbufsize = 16;		/* 64 bytes each poll */
SYSCTL_INT(_hw_safe, OID_AUTO, rngbufsize, CTLFLAG_RW, &safe_rngbufsize,
	    0, "RNG polling buffer size (32-bit words)");
static	int safe_rngmaxalarm = 8;		/* max alarms before reset */
SYSCTL_INT(_hw_safe, OID_AUTO, rngmaxalarm, CTLFLAG_RW, &safe_rngmaxalarm,
	    0, "RNG max alarms before reset");
#endif /* SAFE_NO_RNG */

static int
safe_probe(device_t dev)
{
	if (pci_get_vendor(dev) == PCI_VENDOR_SAFENET &&
	    pci_get_device(dev) == PCI_PRODUCT_SAFEXCEL)
		return (BUS_PROBE_DEFAULT);
	return (ENXIO);
}

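/*
 * NB: safe_probe (above) matches the single SafeXcel-1141 PCI ID and
 * returns BUS_PROBE_DEFAULT rather than a more specific probe value,
 * so a driver with a more specific match could still claim the device.
 */
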
static	const char*
safe_partname(struct safe_softc *sc)
{
	/* XXX sprintf numbers when not decoded */
	switch (pci_get_vendor(sc->sc_dev)) {
	case PCI_VENDOR_SAFENET:
		switch (pci_get_device(sc->sc_dev)) {
		case PCI_PRODUCT_SAFEXCEL: return "SafeNet SafeXcel-1141";
		}
		return "SafeNet unknown-part";
	}
	return "Unknown-vendor unknown-part";
}

#ifndef SAFE_NO_RNG
static void
default_harvest(struct rndtest_state *rsp, void *buf, u_int count)
{
	/* MarkM: FIX!! Check that this does not swamp the harvester! */
	random_harvest_queue(buf, count, RANDOM_PURE_SAFE);
}
#endif /* SAFE_NO_RNG */

static int
safe_attach(device_t dev)
{
	struct safe_softc *sc = device_get_softc(dev);
	u_int32_t raddr;
	u_int32_t i;
	int rid;

	bzero(sc, sizeof (*sc));
	sc->sc_dev = dev;

	/* XXX handle power management */

	pci_enable_busmaster(dev);

	/*
	 * Setup memory-mapping of PCI registers.
	 */
	rid = BS_BAR;
	sc->sc_sr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_sr == NULL) {
		device_printf(dev, "cannot map register space\n");
		goto bad;
	}
	sc->sc_st = rman_get_bustag(sc->sc_sr);
	sc->sc_sh = rman_get_bushandle(sc->sc_sr);

	/*
	 * Arrange interrupt line.
	 */
	rid = 0;
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE|RF_ACTIVE);
	if (sc->sc_irq == NULL) {
		device_printf(dev, "could not map interrupt\n");
		goto bad1;
	}
	/*
	 * NB: Network code assumes we are blocked with splimp()
	 *     so make sure the IRQ is mapped appropriately.
	 */
	if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, safe_intr, sc, &sc->sc_ih)) {
		device_printf(dev, "could not establish interrupt\n");
		goto bad2;
	}

	sc->sc_cid = crypto_get_driverid(dev, sizeof(struct safe_session),
	    CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0) {
		device_printf(dev, "could not get crypto driver id\n");
		goto bad3;
	}

	sc->sc_chiprev = READ_REG(sc, SAFE_DEVINFO) &
		(SAFE_DEVINFO_REV_MAJ | SAFE_DEVINFO_REV_MIN);

	/*
	 * Setup DMA descriptor area.
	 */
	if (bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
			       1,			/* alignment */
			       SAFE_DMA_BOUNDARY,	/* boundary */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       SAFE_MAX_DMA,		/* maxsize */
			       SAFE_MAX_PART,		/* nsegments */
			       SAFE_MAX_SSIZE,		/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       NULL, NULL,		/* locking */
			       &sc->sc_srcdmat)) {
		device_printf(dev, "cannot allocate DMA tag\n");
		goto bad4;
	}
	if (bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
			       1,			/* alignment */
			       SAFE_MAX_DSIZE,		/* boundary */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       SAFE_MAX_DMA,		/* maxsize */
			       SAFE_MAX_PART,		/* nsegments */
			       SAFE_MAX_DSIZE,		/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       NULL, NULL,		/* locking */
			       &sc->sc_dstdmat)) {
		device_printf(dev, "cannot allocate DMA tag\n");
		goto bad4;
	}

	/*
	 * Allocate packet engine descriptors.
	 */
	if (safe_dma_malloc(sc,
	    SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
	    &sc->sc_ringalloc, 0)) {
		device_printf(dev, "cannot allocate PE descriptor ring\n");
		bus_dma_tag_destroy(sc->sc_srcdmat);
		goto bad4;
	}
	/*
	 * Hookup the static portion of all our data structures.
	 */
	sc->sc_ring = (struct safe_ringentry *) sc->sc_ringalloc.dma_vaddr;
	sc->sc_ringtop = sc->sc_ring + SAFE_MAX_NQUEUE;
	sc->sc_front = sc->sc_ring;
	sc->sc_back = sc->sc_ring;
	raddr = sc->sc_ringalloc.dma_paddr;
	bzero(sc->sc_ring, SAFE_MAX_NQUEUE * sizeof(struct safe_ringentry));
	for (i = 0; i < SAFE_MAX_NQUEUE; i++) {
		struct safe_ringentry *re = &sc->sc_ring[i];

		re->re_desc.d_sa = raddr +
			offsetof(struct safe_ringentry, re_sa);
		re->re_sa.sa_staterec = raddr +
			offsetof(struct safe_ringentry, re_sastate);

		raddr += sizeof (struct safe_ringentry);
	}
	mtx_init(&sc->sc_ringmtx, device_get_nameunit(dev),
		"packet engine ring", MTX_DEF);

	/*
	 * Allocate scatter and gather particle descriptors.
	 */
	if (safe_dma_malloc(sc, SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
	    &sc->sc_spalloc, 0)) {
		device_printf(dev, "cannot allocate source particle "
			"descriptor ring\n");
		mtx_destroy(&sc->sc_ringmtx);
		safe_dma_free(sc, &sc->sc_ringalloc);
		bus_dma_tag_destroy(sc->sc_srcdmat);
		goto bad4;
	}
	sc->sc_spring = (struct safe_pdesc *) sc->sc_spalloc.dma_vaddr;
	sc->sc_springtop = sc->sc_spring + SAFE_TOTAL_SPART;
	sc->sc_spfree = sc->sc_spring;
	bzero(sc->sc_spring, SAFE_TOTAL_SPART * sizeof(struct safe_pdesc));

	if (safe_dma_malloc(sc, SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
	    &sc->sc_dpalloc, 0)) {
		device_printf(dev, "cannot allocate destination particle "
			"descriptor ring\n");
		mtx_destroy(&sc->sc_ringmtx);
		safe_dma_free(sc, &sc->sc_spalloc);
		safe_dma_free(sc, &sc->sc_ringalloc);
		bus_dma_tag_destroy(sc->sc_dstdmat);
		goto bad4;
	}
	sc->sc_dpring = (struct safe_pdesc *) sc->sc_dpalloc.dma_vaddr;
	sc->sc_dpringtop = sc->sc_dpring + SAFE_TOTAL_DPART;
	sc->sc_dpfree = sc->sc_dpring;
	bzero(sc->sc_dpring, SAFE_TOTAL_DPART * sizeof(struct safe_pdesc));

	device_printf(sc->sc_dev, "%s", safe_partname(sc));

	sc->sc_devinfo = READ_REG(sc, SAFE_DEVINFO);
	if (sc->sc_devinfo & SAFE_DEVINFO_RNG) {
		sc->sc_flags |= SAFE_FLAGS_RNG;
		printf(" rng");
	}
	if (sc->sc_devinfo & SAFE_DEVINFO_PKEY) {
#if 0
		printf(" key");
		sc->sc_flags |= SAFE_FLAGS_KEY;
		crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0);
		crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0);
#endif
	}
	if (sc->sc_devinfo & SAFE_DEVINFO_DES) {
		printf(" des/3des");
	}
	if (sc->sc_devinfo & SAFE_DEVINFO_AES) {
		printf(" aes");
	}
	if (sc->sc_devinfo & SAFE_DEVINFO_MD5) {
		printf(" md5");
	}
	if (sc->sc_devinfo & SAFE_DEVINFO_SHA1) {
		printf(" sha1");
	}
	/* XXX other supported algorithms */
	printf("\n");

	safe_reset_board(sc);		/* reset h/w */
	safe_init_pciregs(dev);		/* init pci settings */
	safe_init_board(sc);		/* init h/w */

#ifndef SAFE_NO_RNG
	if (sc->sc_flags & SAFE_FLAGS_RNG) {
#ifdef SAFE_RNDTEST
		sc->sc_rndtest = rndtest_attach(dev);
		if (sc->sc_rndtest)
			sc->sc_harvest = rndtest_harvest;
		else
			sc->sc_harvest = default_harvest;
#else
		sc->sc_harvest = default_harvest;
#endif
		safe_rng_init(sc);

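		/*
		 * Kick off the RNG polling loop: safe_rng (below)
		 * harvests a buffer of words and then reschedules
		 * itself to run every safe_rnginterval seconds
		 * (tunable via the hw.safe.rnginterval sysctl).
		 */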
		callout_init(&sc->sc_rngto, 1);
		callout_reset(&sc->sc_rngto, hz*safe_rnginterval, safe_rng, sc);
	}
#endif /* SAFE_NO_RNG */
#ifdef SAFE_DEBUG
	safec = sc;			/* for use by hw.safe.dump */
#endif
	return (0);
bad4:
	crypto_unregister_all(sc->sc_cid);
bad3:
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
bad2:
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
bad1:
	bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr);
bad:
	return (ENXIO);
}

/*
 * Detach a device that successfully probed.
 */
static int
safe_detach(device_t dev)
{
	struct safe_softc *sc = device_get_softc(dev);

	/* XXX wait/abort active ops */

	WRITE_REG(sc, SAFE_HI_MASK, 0);		/* disable interrupts */

	callout_stop(&sc->sc_rngto);

	crypto_unregister_all(sc->sc_cid);

#ifdef SAFE_RNDTEST
	if (sc->sc_rndtest)
		rndtest_detach(sc->sc_rndtest);
#endif

	safe_cleanchip(sc);
	safe_dma_free(sc, &sc->sc_dpalloc);
	safe_dma_free(sc, &sc->sc_spalloc);
	mtx_destroy(&sc->sc_ringmtx);
	safe_dma_free(sc, &sc->sc_ringalloc);

	bus_generic_detach(dev);
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);

	bus_dma_tag_destroy(sc->sc_srcdmat);
	bus_dma_tag_destroy(sc->sc_dstdmat);
	bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr);

	return (0);
}

/*
 * Stop all chip i/o so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
safe_shutdown(device_t dev)
{
#ifdef notyet
	safe_stop(device_get_softc(dev));
#endif
	return (0);
}

/*
 * Device suspend routine.
 */
static int
safe_suspend(device_t dev)
{
	struct safe_softc *sc = device_get_softc(dev);

#ifdef notyet
	/* XXX stop the device and save PCI settings */
#endif
	sc->sc_suspended = 1;

	return (0);
}

static int
safe_resume(device_t dev)
{
	struct safe_softc *sc = device_get_softc(dev);

#ifdef notyet
	/* XXX restore PCI settings and start the device */
#endif
	sc->sc_suspended = 0;
	return (0);
}

/*
 * SafeXcel Interrupt routine
 */
static void
safe_intr(void *arg)
{
	struct safe_softc *sc = arg;
	volatile u_int32_t stat;

	stat = READ_REG(sc, SAFE_HM_STAT);
	if (stat == 0)			/* shared irq, not for us */
		return;

	WRITE_REG(sc, SAFE_HI_CLR, stat);	/* IACK */

	if ((stat & SAFE_INT_PE_DDONE)) {
		/*
		 * Descriptor(s) done; scan the ring and
		 * process completed operations.
		 */
		mtx_lock(&sc->sc_ringmtx);
		while (sc->sc_back != sc->sc_front) {
			struct safe_ringentry *re = sc->sc_back;
#ifdef SAFE_DEBUG
			if (safe_debug) {
				safe_dump_ringstate(sc, __func__);
				safe_dump_request(sc, __func__, re);
			}
#endif
			/*
			 * safe_process marks ring entries that were allocated
			 * but not used with a csr of zero.  This ensures the
			 * ring front pointer never needs to be set backwards
			 * in the event that an entry is allocated but not used
			 * because of a setup error.
			 */
			if (re->re_desc.d_csr != 0) {
				if (!SAFE_PE_CSR_IS_DONE(re->re_desc.d_csr))
					break;
				if (!SAFE_PE_LEN_IS_DONE(re->re_desc.d_len))
					break;
				sc->sc_nqchip--;
				safe_callback(sc, re);
			}
			if (++(sc->sc_back) == sc->sc_ringtop)
				sc->sc_back = sc->sc_ring;
		}
		mtx_unlock(&sc->sc_ringmtx);
	}

	/*
	 * Check to see if we got any DMA Error
	 */
	if (stat & SAFE_INT_PE_ERROR) {
		DPRINTF(("dmaerr dmastat %08x\n",
			READ_REG(sc, SAFE_PE_DMASTAT)));
		safestats.st_dmaerr++;
		safe_totalreset(sc);
#if 0
		safe_feed(sc);
#endif
	}

	if (sc->sc_needwakeup) {		/* XXX check high watermark */
		int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
		DPRINTF(("%s: wakeup crypto %x\n", __func__,
			sc->sc_needwakeup));
		sc->sc_needwakeup &= ~wakeup;
		crypto_unblock(sc->sc_cid, wakeup);
	}
}

/*
 * safe_feed() - post a request to chip
 */
static void
safe_feed(struct safe_softc *sc, struct safe_ringentry *re)
{
	bus_dmamap_sync(sc->sc_srcdmat, re->re_src_map, BUS_DMASYNC_PREWRITE);
	if (re->re_dst_map != NULL)
		bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map,
			BUS_DMASYNC_PREREAD);
	/* XXX have no smaller granularity */
	safe_dma_sync(&sc->sc_ringalloc,
		BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	safe_dma_sync(&sc->sc_spalloc, BUS_DMASYNC_PREWRITE);
	safe_dma_sync(&sc->sc_dpalloc, BUS_DMASYNC_PREWRITE);

#ifdef SAFE_DEBUG
	if (safe_debug) {
		safe_dump_ringstate(sc, __func__);
		safe_dump_request(sc, __func__, re);
	}
#endif
	sc->sc_nqchip++;
	if (sc->sc_nqchip > safestats.st_maxqchip)
		safestats.st_maxqchip = sc->sc_nqchip;
	/* poke h/w to check descriptor ring, any value can be written */
	WRITE_REG(sc, SAFE_HI_RD_DESCR, 0);
}

#define	N(a)	(sizeof(a) / sizeof (a[0]))
static void
safe_setup_enckey(struct safe_session *ses, const void *key)
{
	int i;

	bcopy(key, ses->ses_key, ses->ses_klen);

	/* PE is little-endian, ensure proper byte order */
	for (i = 0; i < N(ses->ses_key); i++)
		ses->ses_key[i] = htole32(ses->ses_key[i]);
}

static void
safe_setup_mackey(struct safe_session *ses, int algo, const uint8_t *key,
    int klen)
{
	SHA1_CTX sha1ctx;
	int i;

	hmac_init_ipad(&auth_hash_hmac_sha1, key, klen, &sha1ctx);
	bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32));

	hmac_init_opad(&auth_hash_hmac_sha1, key, klen, &sha1ctx);
	bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32));

	explicit_bzero(&sha1ctx, sizeof(sha1ctx));

	/* PE is little-endian, ensure proper byte order */
	for (i = 0; i < N(ses->ses_hminner); i++) {
		ses->ses_hminner[i] = htole32(ses->ses_hminner[i]);
		ses->ses_hmouter[i] = htole32(ses->ses_hmouter[i]);
	}
}
#undef N

static bool
safe_auth_supported(struct safe_softc *sc,
    const struct crypto_session_params *csp)
{

	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
		if ((sc->sc_devinfo & SAFE_DEVINFO_SHA1) == 0)
			return (false);
		break;
	default:
		return (false);
	}
	return (true);
}

static bool
safe_cipher_supported(struct safe_softc *sc,
    const struct crypto_session_params *csp)
{

	switch (csp->csp_cipher_alg) {
	case CRYPTO_AES_CBC:
		if ((sc->sc_devinfo & SAFE_DEVINFO_AES) == 0)
			return (false);
		if (csp->csp_ivlen != 16)
			return (false);
		if (csp->csp_cipher_klen != 16 &&
		    csp->csp_cipher_klen != 24 &&
		    csp->csp_cipher_klen != 32)
			return (false);
		break;
	}
	return (true);
}

static int
safe_probesession(device_t dev, const struct crypto_session_params *csp)
{
	struct safe_softc *sc = device_get_softc(dev);

	if (csp->csp_flags != 0)
		return (EINVAL);
	switch (csp->csp_mode) {
	case CSP_MODE_DIGEST:
		if (!safe_auth_supported(sc, csp))
			return (EINVAL);
		break;
	case CSP_MODE_CIPHER:
		if (!safe_cipher_supported(sc, csp))
			return (EINVAL);
		break;
	case CSP_MODE_ETA:
		if (!safe_auth_supported(sc, csp) ||
		    !safe_cipher_supported(sc, csp))
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	return (CRYPTODEV_PROBE_HARDWARE);
}

/*
 * Allocate a new 'session'.
 */
static int
safe_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
	struct safe_session *ses;

	ses = crypto_get_driver_session(cses);
	if (csp->csp_cipher_alg != 0) {
		ses->ses_klen = csp->csp_cipher_klen;
		if (csp->csp_cipher_key != NULL)
			safe_setup_enckey(ses, csp->csp_cipher_key);
	}

	if (csp->csp_auth_alg != 0) {
		ses->ses_mlen = csp->csp_auth_mlen;
		if (ses->ses_mlen == 0) {
			ses->ses_mlen = SHA1_HASH_LEN;
		}

		if (csp->csp_auth_key != NULL) {
			safe_setup_mackey(ses, csp->csp_auth_alg,
			    csp->csp_auth_key, csp->csp_auth_klen);
		}
	}

	return (0);
}

static bus_size_t
safe_crp_length(struct cryptop *crp)
{

	switch (crp->crp_buf_type) {
	case CRYPTO_BUF_MBUF:
		return (crp->crp_mbuf->m_pkthdr.len);
	case CRYPTO_BUF_UIO:
		return (crp->crp_uio->uio_resid);
	case CRYPTO_BUF_CONTIG:
		return (crp->crp_ilen);
	default:
		panic("bad crp buffer type");
	}
}

static void
safe_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, int error)
{
	struct safe_operand *op = arg;

	DPRINTF(("%s: nsegs %d error %d\n", __func__,
		nsegs, error));
	if (error != 0)
		return;
	op->nsegs = nsegs;
	bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
}

static int
safe_process(device_t dev, struct cryptop *crp, int hint)
{
	struct safe_softc *sc = device_get_softc(dev);
	const struct crypto_session_params *csp;
	int err = 0, i, nicealign, uniform;
	int bypass, oplen;
	int16_t coffset;
	struct safe_session *ses;
	struct safe_ringentry *re;
	struct safe_sarec *sa;
	struct safe_pdesc *pd;
	u_int32_t cmd0, cmd1, staterec;

	mtx_lock(&sc->sc_ringmtx);
	if (sc->sc_front == sc->sc_back && sc->sc_nqchip != 0) {
		safestats.st_ringfull++;
		sc->sc_needwakeup |= CRYPTO_SYMQ;
		mtx_unlock(&sc->sc_ringmtx);
		return (ERESTART);
	}
	re = sc->sc_front;

	staterec = re->re_sa.sa_staterec;	/* save */
	/* NB: zero everything but the PE descriptor */
	bzero(&re->re_sa, sizeof(struct safe_ringentry) - sizeof(re->re_desc));
	re->re_sa.sa_staterec = staterec;	/* restore */

	re->re_crp = crp;

	sa = &re->re_sa;
	ses = crypto_get_driver_session(crp->crp_session);
	csp = crypto_get_params(crp->crp_session);

	cmd0 = SAFE_SA_CMD0_BASIC;		/* basic group operation */
	cmd1 = 0;
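	/*
	 * cmd0/cmd1 accumulate the SA command words as the request is
	 * parsed: the switch below selects the operation class, and the
	 * cipher and auth sections that follow or in algorithm-specific
	 * bits.  Both words are stored into the SA record near the end
	 * of this function.
	 */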
	switch (csp->csp_mode) {
	case CSP_MODE_DIGEST:
		cmd0 |= SAFE_SA_CMD0_OP_HASH;
		break;
	case CSP_MODE_CIPHER:
		cmd0 |= SAFE_SA_CMD0_OP_CRYPT;
		break;
	case CSP_MODE_ETA:
		cmd0 |= SAFE_SA_CMD0_OP_BOTH;
		break;
	}

	if (csp->csp_cipher_alg != 0) {
		if (crp->crp_cipher_key != NULL)
			safe_setup_enckey(ses, crp->crp_cipher_key);

		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_CBC:
			cmd0 |= SAFE_SA_CMD0_AES;
			cmd1 |= SAFE_SA_CMD1_CBC;
			if (ses->ses_klen * 8 == 128)
				cmd1 |= SAFE_SA_CMD1_AES128;
			else if (ses->ses_klen * 8 == 192)
				cmd1 |= SAFE_SA_CMD1_AES192;
			else
				cmd1 |= SAFE_SA_CMD1_AES256;
		}

		/*
		 * Setup encrypt/decrypt state.  When using basic ops
		 * we can't use an inline IV because hash/crypt offset
		 * must be from the end of the IV to the start of the
		 * crypt data and this leaves out the preceding header
		 * from the hash calculation.  Instead we place the IV
		 * in the state record and set the hash/crypt offset to
		 * copy both the header+IV.
		 */
		crypto_read_iv(crp, re->re_sastate.sa_saved_iv);
		cmd0 |= SAFE_SA_CMD0_IVLD_STATE;

		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			cmd0 |= SAFE_SA_CMD0_OUTBOUND;

			/*
			 * XXX: I suspect we don't need this since we
			 * don't save the returned IV.
			 */
			cmd0 |= SAFE_SA_CMD0_SAVEIV;
		} else {
			cmd0 |= SAFE_SA_CMD0_INBOUND;
		}
		/*
		 * For basic encryption use the zero pad algorithm.
		 * This pads results to an 8-byte boundary and
		 * suppresses padding verification for inbound (i.e.
		 * decrypt) operations.
		 *
		 * NB: Not sure if the 8-byte pad boundary is a problem.
		 */
		cmd0 |= SAFE_SA_CMD0_PAD_ZERO;

		/* XXX assert key bufs have the same size */
		bcopy(ses->ses_key, sa->sa_key, sizeof(sa->sa_key));
	}

	if (csp->csp_auth_alg != 0) {
		if (crp->crp_auth_key != NULL) {
			safe_setup_mackey(ses, csp->csp_auth_alg,
			    crp->crp_auth_key, csp->csp_auth_klen);
		}

		switch (csp->csp_auth_alg) {
		case CRYPTO_SHA1_HMAC:
			cmd0 |= SAFE_SA_CMD0_SHA1;
			cmd1 |= SAFE_SA_CMD1_HMAC;	/* NB: enable HMAC */
			break;
		}

		/*
		 * Digest data is loaded from the SA and the hash
		 * result is saved to the state block where we
		 * retrieve it for return to the caller.
		 */
		/* XXX assert digest bufs have the same size */
		bcopy(ses->ses_hminner, sa->sa_indigest,
			sizeof(sa->sa_indigest));
		bcopy(ses->ses_hmouter, sa->sa_outdigest,
			sizeof(sa->sa_outdigest));

		cmd0 |= SAFE_SA_CMD0_HSLD_SA | SAFE_SA_CMD0_SAVEHASH;
		re->re_flags |= SAFE_QFLAGS_COPYOUTICV;
	}

	if (csp->csp_mode == CSP_MODE_ETA) {
		/*
		 * The driver only supports ETA requests where there
		 * is no gap between the AAD and payload.
		 */
		if (crp->crp_aad_length != 0 &&
		    crp->crp_aad_start + crp->crp_aad_length !=
		    crp->crp_payload_start) {
			safestats.st_lenmismatch++;
			err = EINVAL;
			goto errout;
		}
		if (crp->crp_aad_length != 0)
			bypass = crp->crp_aad_start;
		else
			bypass = crp->crp_payload_start;
		coffset = crp->crp_aad_length;
		oplen = crp->crp_payload_start + crp->crp_payload_length;
#ifdef SAFE_DEBUG
		if (safe_debug) {
			printf("AAD: skip %d, len %d, digest %d\n",
			    crp->crp_aad_start, crp->crp_aad_length,
			    crp->crp_digest_start);
			printf("payload: skip %d, len %d, IV %d\n",
			    crp->crp_payload_start, crp->crp_payload_length,
			    crp->crp_iv_start);
			printf("bypass %d coffset %d oplen %d\n",
			    bypass, coffset, oplen);
		}
#endif
		if (coffset & 3) {	/* offset must be 32-bit aligned */
			DPRINTF(("%s: coffset %u misaligned\n",
				__func__, coffset));
			safestats.st_coffmisaligned++;
			err = EINVAL;
			goto errout;
		}
		coffset >>= 2;
		if (coffset > 255) {	/* offset must be <256 dwords */
			DPRINTF(("%s: coffset %u too big\n",
				__func__, coffset));
			safestats.st_cofftoobig++;
			err = EINVAL;
			goto errout;
		}
		/*
		 * Tell the hardware to copy the header to the output.
		 * The header is defined as the data from the end of
		 * the bypass to the start of data to be encrypted.
		 * Typically this is the inline IV.  Note that you need
		 * to do this even if src+dst are the same; it appears
		 * that w/o this bit the crypted data is written
		 * immediately after the bypass data.
		 */
		cmd1 |= SAFE_SA_CMD1_HDRCOPY;
		/*
		 * Disable IP header mutable bit handling.  This is
		 * needed to get correct HMAC calculations.
		 */
		cmd1 |= SAFE_SA_CMD1_MUTABLE;
	} else {
		bypass = crp->crp_payload_start;
		oplen = bypass + crp->crp_payload_length;
		coffset = 0;
	}
	/* XXX verify multiple of 4 when using s/g */
	if (bypass > 96) {		/* bypass offset must be <= 96 bytes */
		DPRINTF(("%s: bypass %u too big\n", __func__, bypass));
		safestats.st_bypasstoobig++;
		err = EINVAL;
		goto errout;
	}

	if (bus_dmamap_create(sc->sc_srcdmat, BUS_DMA_NOWAIT, &re->re_src_map)) {
		safestats.st_nomap++;
		err = ENOMEM;
		goto errout;
	}
	if (bus_dmamap_load_crp(sc->sc_srcdmat, re->re_src_map, crp, safe_op_cb,
	    &re->re_src, BUS_DMA_NOWAIT) != 0) {
		bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
		re->re_src_map = NULL;
		safestats.st_noload++;
		err = ENOMEM;
		goto errout;
	}
	re->re_src_mapsize = safe_crp_length(crp);
	nicealign = safe_dmamap_aligned(&re->re_src);
	uniform = safe_dmamap_uniform(&re->re_src);

	DPRINTF(("src nicealign %u uniform %u nsegs %u\n",
		nicealign, uniform, re->re_src.nsegs));
	if (re->re_src.nsegs > 1) {
		re->re_desc.d_src = sc->sc_spalloc.dma_paddr +
			((caddr_t) sc->sc_spfree - (caddr_t) sc->sc_spring);
		for (i = 0; i < re->re_src_nsegs; i++) {
			/* NB: no need to check if there's space */
			pd = sc->sc_spfree;
			if (++(sc->sc_spfree) == sc->sc_springtop)
				sc->sc_spfree = sc->sc_spring;

			KASSERT((pd->pd_flags&3) == 0 ||
				(pd->pd_flags&3) == SAFE_PD_DONE,
				("bogus source particle descriptor; flags %x",
				pd->pd_flags));
			pd->pd_addr = re->re_src_segs[i].ds_addr;
			pd->pd_size = re->re_src_segs[i].ds_len;
			pd->pd_flags = SAFE_PD_READY;
		}
		cmd0 |= SAFE_SA_CMD0_IGATHER;
	} else {
		/*
		 * No need for gather, reference the operand directly.
		 */
		re->re_desc.d_src = re->re_src_segs[0].ds_addr;
	}
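	/*
	 * NB: the gather setup above consumes source particle
	 * descriptors without checking for free slots; this presumes
	 * the particle ring is sized (SAFE_TOTAL_SPART) so that the
	 * requests queued to the chip can never outrun it (see the
	 * "no need to check" note above).
	 */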

	if (csp->csp_mode == CSP_MODE_DIGEST) {
		/*
		 * Hash op; no destination needed.
		 */
	} else {
		if (nicealign && uniform == 1) {
			/*
			 * Source layout is suitable for direct
			 * sharing of the DMA map and segment list.
			 */
			re->re_dst = re->re_src;
		} else if (nicealign && uniform == 2) {
			/*
			 * The source is properly aligned but requires a
			 * different particle list to handle DMA of the
			 * result.  Create a new map and do the load to
			 * create the segment list.  The particle
			 * descriptor setup code below will handle the
			 * rest.
			 */
			if (bus_dmamap_create(sc->sc_dstdmat, BUS_DMA_NOWAIT,
			    &re->re_dst_map)) {
				safestats.st_nomap++;
				err = ENOMEM;
				goto errout;
			}
			if (bus_dmamap_load_crp(sc->sc_dstdmat, re->re_dst_map,
			    crp, safe_op_cb, &re->re_dst, BUS_DMA_NOWAIT) !=
			    0) {
				bus_dmamap_destroy(sc->sc_dstdmat,
				    re->re_dst_map);
				re->re_dst_map = NULL;
				safestats.st_noload++;
				err = ENOMEM;
				goto errout;
			}
		} else if (crp->crp_buf_type == CRYPTO_BUF_MBUF) {
			int totlen, len;
			struct mbuf *m, *top, **mp;

			/*
			 * DMA constraints require that we allocate a
			 * new mbuf chain for the destination.  We
			 * allocate an entire new set of mbufs of
			 * optimal/required size and then tell the
			 * hardware to copy any bits that are not
			 * created as a byproduct of the operation.
			 */
			if (!nicealign)
				safestats.st_unaligned++;
			if (!uniform)
				safestats.st_notuniform++;
			totlen = re->re_src_mapsize;
			if (crp->crp_mbuf->m_flags & M_PKTHDR) {
				len = MHLEN;
				MGETHDR(m, M_NOWAIT, MT_DATA);
				if (m && !m_dup_pkthdr(m, crp->crp_mbuf,
				    M_NOWAIT)) {
					m_free(m);
					m = NULL;
				}
			} else {
				len = MLEN;
				MGET(m, M_NOWAIT, MT_DATA);
			}
			if (m == NULL) {
				safestats.st_nombuf++;
				err = sc->sc_nqchip ? ERESTART : ENOMEM;
				goto errout;
			}
			if (totlen >= MINCLSIZE) {
				if (!(MCLGET(m, M_NOWAIT))) {
					m_free(m);
					safestats.st_nomcl++;
					err = sc->sc_nqchip ?
					    ERESTART : ENOMEM;
					goto errout;
				}
				len = MCLBYTES;
			}
			m->m_len = len;
			top = NULL;
			mp = &top;

			while (totlen > 0) {
				if (top) {
					MGET(m, M_NOWAIT, MT_DATA);
					if (m == NULL) {
						m_freem(top);
						safestats.st_nombuf++;
						err = sc->sc_nqchip ?
						    ERESTART : ENOMEM;
						goto errout;
					}
					len = MLEN;
				}
				if (top && totlen >= MINCLSIZE) {
					if (!(MCLGET(m, M_NOWAIT))) {
						*mp = m;
						m_freem(top);
						safestats.st_nomcl++;
						err = sc->sc_nqchip ?
						    ERESTART : ENOMEM;
						goto errout;
					}
					len = MCLBYTES;
				}
				m->m_len = len = min(totlen, len);
				totlen -= len;
				*mp = m;
				mp = &m->m_next;
			}
			re->re_dst_m = top;
			if (bus_dmamap_create(sc->sc_dstdmat,
			    BUS_DMA_NOWAIT, &re->re_dst_map) != 0) {
				safestats.st_nomap++;
				err = ENOMEM;
				goto errout;
			}
			if (bus_dmamap_load_mbuf_sg(sc->sc_dstdmat,
			    re->re_dst_map, top, re->re_dst_segs,
			    &re->re_dst_nsegs, 0) != 0) {
				bus_dmamap_destroy(sc->sc_dstdmat,
				    re->re_dst_map);
				re->re_dst_map = NULL;
				safestats.st_noload++;
				err = ENOMEM;
				goto errout;
			}
			re->re_dst_mapsize = re->re_src_mapsize;
			if (re->re_src.mapsize > oplen) {
				/*
				 * There's data following what the
				 * hardware will copy for us.  If this
				 * isn't just the ICV (that's going to
				 * be written on completion), copy it
				 * to the new mbufs
				 */
				if (!(csp->csp_mode == CSP_MODE_ETA &&
				    (re->re_src.mapsize-oplen) == ses->ses_mlen &&
				    crp->crp_digest_start == oplen))
					safe_mcopy(crp->crp_mbuf, re->re_dst_m,
					    oplen);
				else
					safestats.st_noicvcopy++;
			}
		} else {
			if (!nicealign) {
				safestats.st_iovmisaligned++;
				err = EINVAL;
				goto errout;
			} else {
				/*
				 * There's no way to handle the DMA
				 * requirements with this uio.  We
				 * could create a separate DMA area for
				 * the result and then copy it back,
				 * but for now we just bail and return
				 * an error.  Note that uio requests
				 * > SAFE_MAX_DSIZE are handled because
				 * the DMA map and segment list for the
				 * destination will result in a
				 * destination particle list that does
				 * the necessary scatter DMA.
				 */
				safestats.st_iovnotuniform++;
				err = EINVAL;
				goto errout;
			}
		}

		if (re->re_dst.nsegs > 1) {
			re->re_desc.d_dst = sc->sc_dpalloc.dma_paddr +
			    ((caddr_t) sc->sc_dpfree - (caddr_t) sc->sc_dpring);
			for (i = 0; i < re->re_dst_nsegs; i++) {
				pd = sc->sc_dpfree;
				KASSERT((pd->pd_flags&3) == 0 ||
					(pd->pd_flags&3) == SAFE_PD_DONE,
					("bogus dest particle descriptor; flags %x",
						pd->pd_flags));
				if (++(sc->sc_dpfree) == sc->sc_dpringtop)
					sc->sc_dpfree = sc->sc_dpring;
				pd->pd_addr = re->re_dst_segs[i].ds_addr;
				pd->pd_flags = SAFE_PD_READY;
			}
			cmd0 |= SAFE_SA_CMD0_OSCATTER;
		} else {
			/*
			 * No need for scatter, reference the operand directly.
			 */
			re->re_desc.d_dst = re->re_dst_segs[0].ds_addr;
		}
	}

	/*
	 * All done with setup; fill in the SA command words
	 * and the packet engine descriptor.  The operation
	 * is now ready for submission to the hardware.
	 */
	sa->sa_cmd0 = cmd0 | SAFE_SA_CMD0_IPCI | SAFE_SA_CMD0_OPCI;
	sa->sa_cmd1 = cmd1
		    | (coffset << SAFE_SA_CMD1_OFFSET_S)
		    | SAFE_SA_CMD1_SAREV1	/* Rev 1 SA data structure */
		    | SAFE_SA_CMD1_SRPCI
		    ;
	/*
	 * NB: the order of writes is important here.  In case the
	 * chip is scanning the ring because of an outstanding request
	 * it might nab this one too.  In that case we need to make
	 * sure the setup is complete before we write the length
	 * field of the descriptor as it signals the descriptor is
	 * ready for processing.
	 */
	re->re_desc.d_csr = SAFE_PE_CSR_READY | SAFE_PE_CSR_SAPCI;
	if (csp->csp_auth_alg != 0)
		re->re_desc.d_csr |= SAFE_PE_CSR_LOADSA | SAFE_PE_CSR_HASHFINAL;
	re->re_desc.d_len = oplen
			  | SAFE_PE_LEN_READY
			  | (bypass << SAFE_PE_LEN_BYPASS_S)
			  ;

	safestats.st_ipackets++;
	safestats.st_ibytes += oplen;

	if (++(sc->sc_front) == sc->sc_ringtop)
		sc->sc_front = sc->sc_ring;

	/* XXX honor batching */
	safe_feed(sc, re);
	mtx_unlock(&sc->sc_ringmtx);
	return (0);

errout:
	if (re->re_dst_m != NULL)
		m_freem(re->re_dst_m);

	if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) {
		bus_dmamap_unload(sc->sc_dstdmat, re->re_dst_map);
		bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map);
	}
	if (re->re_src_map != NULL) {
		bus_dmamap_unload(sc->sc_srcdmat, re->re_src_map);
		bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
	}
	mtx_unlock(&sc->sc_ringmtx);
	if (err != ERESTART) {
		crp->crp_etype = err;
		crypto_done(crp);
	} else {
		sc->sc_needwakeup |= CRYPTO_SYMQ;
	}
	return (err);
}

static void
safe_callback(struct safe_softc *sc, struct safe_ringentry *re)
{
	const struct crypto_session_params *csp;
	struct cryptop *crp = (struct cryptop *)re->re_crp;
	struct safe_session *ses;
	uint8_t hash[HASH_MAX_LEN];

	ses = crypto_get_driver_session(crp->crp_session);
	csp = crypto_get_params(crp->crp_session);

	safestats.st_opackets++;
	safestats.st_obytes += re->re_dst.mapsize;

	safe_dma_sync(&sc->sc_ringalloc,
		BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	if (re->re_desc.d_csr & SAFE_PE_CSR_STATUS) {
		device_printf(sc->sc_dev, "csr 0x%x cmd0 0x%x cmd1 0x%x\n",
			re->re_desc.d_csr,
			re->re_sa.sa_cmd0, re->re_sa.sa_cmd1);
		safestats.st_peoperr++;
		crp->crp_etype = EIO;		/* something more meaningful? */
	}

	/* XXX: Should crp_mbuf be updated to re->re_dst_m if it is non-NULL? */

	if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) {
		bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dstdmat, re->re_dst_map);
		bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map);
	}
	bus_dmamap_sync(sc->sc_srcdmat, re->re_src_map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_srcdmat, re->re_src_map);
	bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);

	if (re->re_flags & SAFE_QFLAGS_COPYOUTICV) {
		if (csp->csp_auth_alg == CRYPTO_SHA1_HMAC) {
			/*
			 * SHA-1 ICV's are byte-swapped; fix 'em up
			 * before copying them to their destination.
			 */
			re->re_sastate.sa_saved_indigest[0] =
			    bswap32(re->re_sastate.sa_saved_indigest[0]);
			re->re_sastate.sa_saved_indigest[1] =
			    bswap32(re->re_sastate.sa_saved_indigest[1]);
			re->re_sastate.sa_saved_indigest[2] =
			    bswap32(re->re_sastate.sa_saved_indigest[2]);
		}

		if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
			crypto_copydata(crp, crp->crp_digest_start,
			    ses->ses_mlen, hash);
			if (timingsafe_bcmp(re->re_sastate.sa_saved_indigest,
			    hash, ses->ses_mlen) != 0)
				crp->crp_etype = EBADMSG;
		} else
			crypto_copyback(crp, crp->crp_digest_start,
			    ses->ses_mlen, re->re_sastate.sa_saved_indigest);
	}
	crypto_done(crp);
}

/*
 * Copy all data past offset from srcm to dstm.
 */
static void
safe_mcopy(struct mbuf *srcm, struct mbuf *dstm, u_int offset)
{
	u_int j, dlen, slen;
	caddr_t dptr, sptr;

	/*
	 * Advance src and dst to offset.
	 */
	j = offset;
	while (j >= srcm->m_len) {
		j -= srcm->m_len;
		srcm = srcm->m_next;
		if (srcm == NULL)
			return;
	}
	sptr = mtod(srcm, caddr_t) + j;
	slen = srcm->m_len - j;

	j = offset;
	while (j >= dstm->m_len) {
		j -= dstm->m_len;
		dstm = dstm->m_next;
		if (dstm == NULL)
			return;
	}
	dptr = mtod(dstm, caddr_t) + j;
	dlen = dstm->m_len - j;

	/*
	 * Copy everything that remains.
	 */
	for (;;) {
		j = min(slen, dlen);
		bcopy(sptr, dptr, j);
		if (slen == j) {
			srcm = srcm->m_next;
			if (srcm == NULL)
				return;
			sptr = srcm->m_data;
			slen = srcm->m_len;
		} else
			sptr += j, slen -= j;
		if (dlen == j) {
			dstm = dstm->m_next;
			if (dstm == NULL)
				return;
			dptr = dstm->m_data;
			dlen = dstm->m_len;
		} else
			dptr += j, dlen -= j;
	}
}

#ifndef SAFE_NO_RNG
#define	SAFE_RNG_MAXWAIT	1000

static void
safe_rng_init(struct safe_softc *sc)
{
	u_int32_t w, v;
	int i;

	WRITE_REG(sc, SAFE_RNG_CTRL, 0);
	/* use default value according to the manual */
	WRITE_REG(sc, SAFE_RNG_CNFG, 0x834);	/* magic from SafeNet */
	WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);

	/*
	 * There is a bug in rev 1.0 of the 1140 that when the RNG
	 * is brought out of reset the ready status flag does not
	 * work until the RNG has finished its internal initialization.
	 *
	 * So in order to determine the device is through its
	 * initialization we must read the data register, using the
	 * status reg in the read in case it is initialized.  Then read
	 * the data register until it changes from the first read.
	 * Once it changes read the data register until it changes
	 * again.  At this time the RNG is considered initialized.
	 * This could take between 750ms and 1000ms.
	 */
	i = 0;
	w = READ_REG(sc, SAFE_RNG_OUT);
	do {
		v = READ_REG(sc, SAFE_RNG_OUT);
		if (v != w) {
			w = v;
			break;
		}
		DELAY(10);
	} while (++i < SAFE_RNG_MAXWAIT);

	/* Wait Until data changes again */
	i = 0;
	do {
		v = READ_REG(sc, SAFE_RNG_OUT);
		if (v != w)
			break;
		DELAY(10);
	} while (++i < SAFE_RNG_MAXWAIT);
}

static __inline void
safe_rng_disable_short_cycle(struct safe_softc *sc)
{
	WRITE_REG(sc, SAFE_RNG_CTRL,
		READ_REG(sc, SAFE_RNG_CTRL) &~ SAFE_RNG_CTRL_SHORTEN);
}

static __inline void
safe_rng_enable_short_cycle(struct safe_softc *sc)
{
	WRITE_REG(sc, SAFE_RNG_CTRL,
		READ_REG(sc, SAFE_RNG_CTRL) | SAFE_RNG_CTRL_SHORTEN);
}

static __inline u_int32_t
safe_rng_read(struct safe_softc *sc)
{
	int i;

	i = 0;
	while (READ_REG(sc, SAFE_RNG_STAT) != 0 && ++i < SAFE_RNG_MAXWAIT)
		;
	return READ_REG(sc, SAFE_RNG_OUT);
}

static void
safe_rng(void *arg)
{
	struct safe_softc *sc = arg;
	u_int32_t buf[SAFE_RNG_MAXBUFSIZ];	/* NB: maybe move to softc */
	u_int maxwords;
	int i;

	safestats.st_rng++;
	/*
	 * Fetch the next block of data.
	 */
	maxwords = safe_rngbufsize;
	if (maxwords > SAFE_RNG_MAXBUFSIZ)
		maxwords = SAFE_RNG_MAXBUFSIZ;
retry:
	for (i = 0; i < maxwords; i++)
		buf[i] = safe_rng_read(sc);
	/*
	 * Check the comparator alarm count and reset the h/w if
	 * it exceeds our threshold.  This guards against the
	 * hardware oscillators resonating with external signals.
	 */
	if (READ_REG(sc, SAFE_RNG_ALM_CNT) > safe_rngmaxalarm) {
		u_int32_t freq_inc, w;

		DPRINTF(("%s: alarm count %u exceeds threshold %u\n", __func__,
			READ_REG(sc, SAFE_RNG_ALM_CNT), safe_rngmaxalarm));
		safestats.st_rngalarm++;
		safe_rng_enable_short_cycle(sc);
		freq_inc = 18;
		for (i = 0; i < 64; i++) {
			w = READ_REG(sc, SAFE_RNG_CNFG);
			freq_inc = ((w + freq_inc) & 0x3fL);
			w = ((w & ~0x3fL) | freq_inc);
			WRITE_REG(sc, SAFE_RNG_CNFG, w);

			WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);

			(void) safe_rng_read(sc);
			DELAY(25);

			if (READ_REG(sc, SAFE_RNG_ALM_CNT) == 0) {
				safe_rng_disable_short_cycle(sc);
				goto retry;
			}
			freq_inc = 1;
		}
		safe_rng_disable_short_cycle(sc);
	} else
		WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);

	(*sc->sc_harvest)(sc->sc_rndtest, buf, maxwords*sizeof (u_int32_t));
	callout_reset(&sc->sc_rngto,
		hz * (safe_rnginterval ? safe_rnginterval : 1), safe_rng, sc);
}
#endif /* SAFE_NO_RNG */

static void
safe_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr = (bus_addr_t*) arg;
	*paddr = segs->ds_addr;
}

static int
safe_dma_malloc(
	struct safe_softc *sc,
	bus_size_t size,
	struct safe_dma_alloc *dma,
	int mapflags
)
{
	int r;

	r = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
			       sizeof(u_int32_t), 0,	/* alignment, bounds */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       size,			/* maxsize */
			       1,			/* nsegments */
			       size,			/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       NULL, NULL,		/* locking */
			       &dma->dma_tag);
	if (r != 0) {
		device_printf(sc->sc_dev, "safe_dma_malloc: "
			"bus_dma_tag_create failed; error %u\n", r);
		goto fail_0;
	}

	r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
			     BUS_DMA_NOWAIT, &dma->dma_map);
	if (r != 0) {
		device_printf(sc->sc_dev, "safe_dma_malloc: "
			"bus_dmamem_alloc failed; size %ju, error %u\n",
			(uintmax_t)size, r);
		goto fail_1;
	}

	r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
		            size,
			    safe_dmamap_cb,
			    &dma->dma_paddr,
			    mapflags | BUS_DMA_NOWAIT);
	if (r != 0) {
		device_printf(sc->sc_dev, "safe_dma_malloc: "
			"bus_dmamap_load failed; error %u\n", r);
		goto fail_2;
	}

	dma->dma_size = size;
	return (0);

	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
fail_2:
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
fail_1:
	bus_dma_tag_destroy(dma->dma_tag);
fail_0:
	dma->dma_tag = NULL;
	return (r);
}

static void
safe_dma_free(struct safe_softc *sc, struct safe_dma_alloc *dma)
{
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
	bus_dma_tag_destroy(dma->dma_tag);
}

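/*
 * Usage sketch for safe_dma_malloc/safe_dma_free (this mirrors what
 * safe_attach does for the descriptor rings; illustrative only):
 *
 *	struct safe_dma_alloc a;
 *
 *	if (safe_dma_malloc(sc, SAFE_MAX_NQUEUE *
 *	    sizeof (struct safe_ringentry), &a, 0) == 0) {
 *		... use a.dma_vaddr (KVA) and a.dma_paddr (bus address),
 *		... bracketing device access with safe_dma_sync(&a, op);
 *		safe_dma_free(sc, &a);
 *	}
 */
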
/*
 * Resets the board.  Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 */
static void
safe_reset_board(struct safe_softc *sc)
{
	u_int32_t v;
	/*
	 * Reset the device.  The manual says no delay
	 * is needed between marking and clearing reset.
	 */
	v = READ_REG(sc, SAFE_PE_DMACFG) &~
		(SAFE_PE_DMACFG_PERESET | SAFE_PE_DMACFG_PDRRESET |
		 SAFE_PE_DMACFG_SGRESET);
	WRITE_REG(sc, SAFE_PE_DMACFG, v
				    | SAFE_PE_DMACFG_PERESET
				    | SAFE_PE_DMACFG_PDRRESET
				    | SAFE_PE_DMACFG_SGRESET);
	WRITE_REG(sc, SAFE_PE_DMACFG, v);
}

/*
 * Initialize registers we need to touch only once.
 */
static void
safe_init_board(struct safe_softc *sc)
{
	u_int32_t v, dwords;

	v = READ_REG(sc, SAFE_PE_DMACFG);
	v &=~ SAFE_PE_DMACFG_PEMODE;
	v |= SAFE_PE_DMACFG_FSENA		/* failsafe enable */
	  |  SAFE_PE_DMACFG_GPRPCI		/* gather ring on PCI */
	  |  SAFE_PE_DMACFG_SPRPCI		/* scatter ring on PCI */
	  |  SAFE_PE_DMACFG_ESDESC		/* endian-swap descriptors */
	  |  SAFE_PE_DMACFG_ESSA		/* endian-swap SA's */
	  |  SAFE_PE_DMACFG_ESPDESC		/* endian-swap part. desc's */
	  ;
	WRITE_REG(sc, SAFE_PE_DMACFG, v);
#if 0
	/* XXX select byte swap based on host byte order */
	WRITE_REG(sc, SAFE_ENDIAN, 0x1b);
#endif
	if (sc->sc_chiprev == SAFE_REV(1,0)) {
		/*
		 * Avoid large PCI DMA transfers.  Rev 1.0 has a bug where
		 * "target mode transfers" done while the chip is DMA'ing
		 * >1020 bytes cause the hardware to lockup.  To avoid this
		 * we reduce the max PCI transfer size and use small source
		 * particle descriptors (<= 256 bytes).
		 */
		WRITE_REG(sc, SAFE_DMA_CFG, 256);
		device_printf(sc->sc_dev,
			"Reduce max DMA size to %u words for rev %u.%u WAR\n",
			(READ_REG(sc, SAFE_DMA_CFG)>>2) & 0xff,
			SAFE_REV_MAJ(sc->sc_chiprev),
			SAFE_REV_MIN(sc->sc_chiprev));
	}

	/* NB: operands+results are overlaid */
	WRITE_REG(sc, SAFE_PE_PDRBASE, sc->sc_ringalloc.dma_paddr);
	WRITE_REG(sc, SAFE_PE_RDRBASE, sc->sc_ringalloc.dma_paddr);
	/*
	 * Configure ring entry size and number of items in the ring.
	 */
	KASSERT((sizeof(struct safe_ringentry) % sizeof(u_int32_t)) == 0,
		("PE ring entry not 32-bit aligned!"));
	dwords = sizeof(struct safe_ringentry) / sizeof(u_int32_t);
	WRITE_REG(sc, SAFE_PE_RINGCFG,
		(dwords << SAFE_PE_RINGCFG_OFFSET_S) | SAFE_MAX_NQUEUE);
	WRITE_REG(sc, SAFE_PE_RINGPOLL, 0);	/* disable polling */

	WRITE_REG(sc, SAFE_PE_GRNGBASE, sc->sc_spalloc.dma_paddr);
	WRITE_REG(sc, SAFE_PE_SRNGBASE, sc->sc_dpalloc.dma_paddr);
	WRITE_REG(sc, SAFE_PE_PARTSIZE,
		(SAFE_TOTAL_DPART<<16) | SAFE_TOTAL_SPART);
	/*
	 * NB: destination particles are fixed size.  We use
	 *     an mbuf cluster and require all results go to
	 *     clusters or smaller.
	 */
	WRITE_REG(sc, SAFE_PE_PARTCFG, SAFE_MAX_DSIZE);

	/* it's now safe to enable PE mode, do it */
	WRITE_REG(sc, SAFE_PE_DMACFG, v | SAFE_PE_DMACFG_PEMODE);

	/*
	 * Configure hardware to use level-triggered interrupts and
	 * to interrupt after each descriptor is processed.
	 */
	WRITE_REG(sc, SAFE_HI_CFG, SAFE_HI_CFG_LEVEL);
	WRITE_REG(sc, SAFE_HI_DESC_CNT, 1);
	WRITE_REG(sc, SAFE_HI_MASK, SAFE_INT_PE_DDONE | SAFE_INT_PE_ERROR);
}

/*
 * Init PCI registers
 */
static void
safe_init_pciregs(device_t dev)
{
}

/*
 * Clean up after a chip crash.
 * It is assumed that the caller is in splimp()
 */
static void
safe_cleanchip(struct safe_softc *sc)
{

	if (sc->sc_nqchip != 0) {
		struct safe_ringentry *re = sc->sc_back;

		while (re != sc->sc_front) {
			if (re->re_desc.d_csr != 0)
				safe_free_entry(sc, re);
			if (++re == sc->sc_ringtop)
				re = sc->sc_ring;
		}
		sc->sc_back = re;
		sc->sc_nqchip = 0;
	}
}

/*
 * free a safe_q
 * It is assumed that the caller is within splimp().
 */
static int
safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re)
{
	struct cryptop *crp;

	/*
	 * Free header MCR
	 */
	if (re->re_dst_m != NULL)
		m_freem(re->re_dst_m);

	crp = (struct cryptop *)re->re_crp;

	re->re_desc.d_csr = 0;

	crp->crp_etype = EFAULT;
	crypto_done(crp);
	return(0);
}

/*
 * Routine to reset the chip and clean up.
 * It is assumed that the caller is in splimp()
 */
static void
safe_totalreset(struct safe_softc *sc)
{
	safe_reset_board(sc);
	safe_init_board(sc);
	safe_cleanchip(sc);
}

/*
 * Is the operand suitably aligned for direct DMA.  Each
 * segment must be aligned on a 32-bit boundary and all
 * but the last segment must be a multiple of 4 bytes.
 */
static int
safe_dmamap_aligned(const struct safe_operand *op)
{
	int i;

	for (i = 0; i < op->nsegs; i++) {
		if (op->segs[i].ds_addr & 3)
			return (0);
		if (i != (op->nsegs - 1) && (op->segs[i].ds_len & 3))
			return (0);
	}
	return (1);
}

/*
 * Is the operand suitable for direct DMA as the destination
 * of an operation.  The hardware requires that each ``particle''
 * but the last in an operation result have the same size.  We
 * fix that size at SAFE_MAX_DSIZE bytes.  This routine returns
 * 0 if some segment is not a multiple of this size, 1 if all
 * segments are exactly this size, or 2 if segments are at worst
 * a multiple of this size.
 */
static int
safe_dmamap_uniform(const struct safe_operand *op)
{
	int result = 1;

	if (op->nsegs > 0) {
		int i;

		for (i = 0; i < op->nsegs-1; i++) {
			if (op->segs[i].ds_len % SAFE_MAX_DSIZE)
				return (0);
			if (op->segs[i].ds_len != SAFE_MAX_DSIZE)
				result = 2;
		}
	}
	return (result);
}

#ifdef SAFE_DEBUG
static void
safe_dump_dmastatus(struct safe_softc *sc, const char *tag)
{
	printf("%s: ENDIAN 0x%x SRC 0x%x DST 0x%x STAT 0x%x\n"
		, tag
		, READ_REG(sc, SAFE_DMA_ENDIAN)
		, READ_REG(sc, SAFE_DMA_SRCADDR)
		, READ_REG(sc, SAFE_DMA_DSTADDR)
		, READ_REG(sc, SAFE_DMA_STAT)
	);
}

static void
safe_dump_intrstate(struct safe_softc *sc, const char *tag)
{
	printf("%s: HI_CFG 0x%x HI_MASK 0x%x HI_DESC_CNT 0x%x HU_STAT 0x%x HM_STAT 0x%x\n"
		, tag
		, READ_REG(sc, SAFE_HI_CFG)
		, READ_REG(sc, SAFE_HI_MASK)
		, READ_REG(sc, SAFE_HI_DESC_CNT)
		, READ_REG(sc, SAFE_HU_STAT)
		, READ_REG(sc, SAFE_HM_STAT)
	);
}

static void
safe_dump_ringstate(struct safe_softc *sc, const char *tag)
{
	u_int32_t estat = READ_REG(sc, SAFE_PE_ERNGSTAT);

	/* NB: assume caller has lock on ring */
	printf("%s: ERNGSTAT %x (next %u) back %lu front %lu\n",
		tag,
		estat, (estat >> SAFE_PE_ERNGSTAT_NEXT_S),
		(unsigned long)(sc->sc_back - sc->sc_ring),
		(unsigned long)(sc->sc_front - sc->sc_ring));
}

static void
safe_dump_request(struct safe_softc *sc, const char* tag, struct safe_ringentry *re)
{
	int ix, nsegs;

	ix = re - sc->sc_ring;
	printf("%s: %p (%u): csr %x src %x dst %x sa %x len %x\n"
		, tag
		, re, ix
		, re->re_desc.d_csr
		, re->re_desc.d_src
		, re->re_desc.d_dst
		, re->re_desc.d_sa
		, re->re_desc.d_len
	);
	if (re->re_src.nsegs > 1) {
		ix = (re->re_desc.d_src - sc->sc_spalloc.dma_paddr) /
			sizeof(struct safe_pdesc);
		for (nsegs = re->re_src.nsegs; nsegs; nsegs--) {
			printf(" spd[%u] %p: %p size %u flags %x"
				, ix, &sc->sc_spring[ix]
				, (caddr_t)(uintptr_t) sc->sc_spring[ix].pd_addr
				, sc->sc_spring[ix].pd_size
				, sc->sc_spring[ix].pd_flags
			);
			if (sc->sc_spring[ix].pd_size == 0)
				printf(" (zero!)");
			printf("\n");
			if (++ix == SAFE_TOTAL_SPART)
				ix = 0;
		}
	}
	if (re->re_dst.nsegs > 1) {
		ix = (re->re_desc.d_dst - sc->sc_dpalloc.dma_paddr) /
			sizeof(struct safe_pdesc);
		for (nsegs = re->re_dst.nsegs; nsegs; nsegs--) {
			printf(" dpd[%u] %p: %p flags %x\n"
				, ix, &sc->sc_dpring[ix]
				, (caddr_t)(uintptr_t) sc->sc_dpring[ix].pd_addr
				, sc->sc_dpring[ix].pd_flags
			);
			if (++ix == SAFE_TOTAL_DPART)
				ix = 0;
		}
	}
	printf("sa: cmd0 %08x cmd1 %08x staterec %x\n",
		re->re_sa.sa_cmd0, re->re_sa.sa_cmd1, re->re_sa.sa_staterec);
	printf("sa: key %x %x %x %x %x %x %x %x\n"
		, re->re_sa.sa_key[0]
		, re->re_sa.sa_key[1]
		, re->re_sa.sa_key[2]
		, re->re_sa.sa_key[3]
		, re->re_sa.sa_key[4]
		, re->re_sa.sa_key[5]
		, re->re_sa.sa_key[6]
		, re->re_sa.sa_key[7]
	);
	printf("sa: indigest %x %x %x %x %x\n"
		, re->re_sa.sa_indigest[0]
		, re->re_sa.sa_indigest[1]
		, re->re_sa.sa_indigest[2]
		, re->re_sa.sa_indigest[3]
		, re->re_sa.sa_indigest[4]
	);
	printf("sa: outdigest %x %x %x %x %x\n"
		, re->re_sa.sa_outdigest[0]
		, re->re_sa.sa_outdigest[1]
		, re->re_sa.sa_outdigest[2]
		, re->re_sa.sa_outdigest[3]
		, re->re_sa.sa_outdigest[4]
	);
	printf("sr: iv %x %x %x %x\n"
		, re->re_sastate.sa_saved_iv[0]
		, re->re_sastate.sa_saved_iv[1]
		, re->re_sastate.sa_saved_iv[2]
		, re->re_sastate.sa_saved_iv[3]
	);
	printf("sr: hashbc %u indigest %x %x %x %x %x\n"
		, re->re_sastate.sa_saved_hashbc
		, re->re_sastate.sa_saved_indigest[0]
		, re->re_sastate.sa_saved_indigest[1]
		, re->re_sastate.sa_saved_indigest[2]
		, re->re_sastate.sa_saved_indigest[3]
		, re->re_sastate.sa_saved_indigest[4]
	);
}

static void
safe_dump_ring(struct safe_softc *sc, const char *tag)
{
	mtx_lock(&sc->sc_ringmtx);
	printf("\nSafeNet Ring State:\n");
	safe_dump_intrstate(sc, tag);
	safe_dump_dmastatus(sc, tag);
	safe_dump_ringstate(sc, tag);
	if (sc->sc_nqchip) {
		struct safe_ringentry *re = sc->sc_back;
		do {
			safe_dump_request(sc, tag, re);
			if (++re == sc->sc_ringtop)
				re = sc->sc_ring;
		} while (re != sc->sc_front);
	}
	mtx_unlock(&sc->sc_ringmtx);
}

static int
sysctl_hw_safe_dump(SYSCTL_HANDLER_ARGS)
{
	char dmode[64];
	int error;

	strncpy(dmode, "", sizeof(dmode) - 1);
	dmode[sizeof(dmode) - 1] = '\0';
	error = sysctl_handle_string(oidp, &dmode[0], sizeof(dmode), req);

	if (error == 0 && req->newptr != NULL) {
		struct safe_softc *sc = safec;

		if (!sc)
			return EINVAL;
		if (strncmp(dmode, "dma", 3) == 0)
			safe_dump_dmastatus(sc, "safe0");
		else if (strncmp(dmode, "int", 3) == 0)
			safe_dump_intrstate(sc, "safe0");
		else if (strncmp(dmode, "ring", 4) == 0)
			safe_dump_ring(sc, "safe0");
		else
			return EINVAL;
	}
	return error;
}
SYSCTL_PROC(_hw_safe, OID_AUTO, dump,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 0, 0,
    sysctl_hw_safe_dump, "A",
    "Dump driver state");
#endif /* SAFE_DEBUG */