/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2003 Sam Leffler, Errno Consulting
 * Copyright (c) 2003 Global Technology Associates, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * SafeNet SafeXcel-1141 hardware crypto accelerator
 */
#include "opt_safe.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform_auth.h>
#include <sys/random.h>
#include <sys/kobj.h>

#include "cryptodev_if.h"

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#ifdef SAFE_RNDTEST
#include <dev/rndtest/rndtest.h>
#endif
#include <dev/safe/safereg.h>
#include <dev/safe/safevar.h>

#ifndef bswap32
#define	bswap32	NTOHL
#endif

/*
 * Prototypes and count for the pci_device structure
 */
static	int safe_probe(device_t);
static	int safe_attach(device_t);
static	int safe_detach(device_t);
static	int safe_suspend(device_t);
static	int safe_resume(device_t);
static	int safe_shutdown(device_t);

static	int safe_probesession(device_t, const struct crypto_session_params *);
static	int safe_newsession(device_t, crypto_session_t,
	    const struct crypto_session_params *);
static	int safe_process(device_t, struct cryptop *, int);

static device_method_t safe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		safe_probe),
	DEVMETHOD(device_attach,	safe_attach),
	DEVMETHOD(device_detach,	safe_detach),
	DEVMETHOD(device_suspend,	safe_suspend),
	DEVMETHOD(device_resume,	safe_resume),
	DEVMETHOD(device_shutdown,	safe_shutdown),

	/* crypto device methods */
	DEVMETHOD(cryptodev_probesession, safe_probesession),
	DEVMETHOD(cryptodev_newsession,	safe_newsession),
	DEVMETHOD(cryptodev_process,	safe_process),

	DEVMETHOD_END
};
static driver_t safe_driver = {
	"safe",
	safe_methods,
	sizeof (struct safe_softc)
};
static devclass_t safe_devclass;

DRIVER_MODULE(safe, pci, safe_driver, safe_devclass, 0, 0);
MODULE_DEPEND(safe, crypto, 1, 1, 1);
#ifdef SAFE_RNDTEST
MODULE_DEPEND(safe, rndtest, 1, 1, 1);
#endif

static	void safe_intr(void *);
static	void safe_callback(struct safe_softc *, struct safe_ringentry *);
static	void safe_feed(struct safe_softc *, struct safe_ringentry *);
static	void safe_mcopy(struct mbuf *, struct mbuf *, u_int);
#ifndef SAFE_NO_RNG
static	void safe_rng_init(struct safe_softc *);
static	void safe_rng(void *);
#endif /* SAFE_NO_RNG */
static	int safe_dma_malloc(struct safe_softc *, bus_size_t,
	    struct safe_dma_alloc *, int);
#define	safe_dma_sync(_dma, _flags) \
	bus_dmamap_sync((_dma)->dma_tag, (_dma)->dma_map, (_flags))
static	void safe_dma_free(struct safe_softc *, struct safe_dma_alloc *);
static	int safe_dmamap_aligned(const struct safe_operand *);
static	int safe_dmamap_uniform(const struct safe_operand *);

static	void safe_reset_board(struct safe_softc *);
static	void safe_init_board(struct safe_softc *);
static	void safe_init_pciregs(device_t dev);
static	void safe_cleanchip(struct safe_softc *);
static	void safe_totalreset(struct safe_softc *);

static	int safe_free_entry(struct safe_softc *, struct safe_ringentry *);

static SYSCTL_NODE(_hw, OID_AUTO, safe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "SafeNet driver parameters");

#ifdef SAFE_DEBUG
static	void safe_dump_dmastatus(struct safe_softc *, const char *);
static	void safe_dump_ringstate(struct safe_softc *, const char *);
static	void safe_dump_intrstate(struct safe_softc *, const char *);
static	void safe_dump_request(struct safe_softc *, const char *,
	    struct safe_ringentry *);

static	struct safe_softc *safec;	/* for use by hw.safe.dump */

static	int safe_debug = 0;
SYSCTL_INT(_hw_safe, OID_AUTO, debug, CTLFLAG_RW, &safe_debug,
    0, "control debugging msgs");
#define	DPRINTF(_x)	if (safe_debug) printf _x
#else
#define	DPRINTF(_x)
#endif

#define	READ_REG(sc,r) \
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (r))

#define	WRITE_REG(sc,reg,val) \
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, reg, val)

struct safe_stats safestats;
SYSCTL_STRUCT(_hw_safe, OID_AUTO, stats, CTLFLAG_RD, &safestats,
    safe_stats, "driver statistics");
#ifndef SAFE_NO_RNG
static	int safe_rnginterval = 1;		/* poll once a second */
SYSCTL_INT(_hw_safe, OID_AUTO, rnginterval, CTLFLAG_RW, &safe_rnginterval,
    0, "RNG polling interval (secs)");
static	int safe_rngbufsize = 16;		/* 64 bytes each poll */
SYSCTL_INT(_hw_safe, OID_AUTO, rngbufsize, CTLFLAG_RW, &safe_rngbufsize,
    0, "RNG polling buffer size (32-bit words)");
static	int safe_rngmaxalarm = 8;		/* max alarms before reset */
SYSCTL_INT(_hw_safe, OID_AUTO, rngmaxalarm, CTLFLAG_RW, &safe_rngmaxalarm,
    0, "RNG max alarms before reset");
#endif /* SAFE_NO_RNG */
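
/*
 * Usage sketch (added note, not from the original source): the knobs
 * above are ordinary sysctls, so from userland something like
 *
 *	sysctl hw.safe.rnginterval=2
 *	sysctl -x hw.safe.stats
 *
 * would slow RNG polling to every two seconds and hex-dump the opaque
 * statistics structure, respectively.
 */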

static int
safe_probe(device_t dev)
{
	if (pci_get_vendor(dev) == PCI_VENDOR_SAFENET &&
	    pci_get_device(dev) == PCI_PRODUCT_SAFEXCEL)
		return (BUS_PROBE_DEFAULT);
	return (ENXIO);
}

static const char*
safe_partname(struct safe_softc *sc)
{
	/* XXX sprintf numbers when not decoded */
	switch (pci_get_vendor(sc->sc_dev)) {
	case PCI_VENDOR_SAFENET:
		switch (pci_get_device(sc->sc_dev)) {
		case PCI_PRODUCT_SAFEXCEL: return "SafeNet SafeXcel-1141";
		}
		return "SafeNet unknown-part";
	}
	return "Unknown-vendor unknown-part";
}

#ifndef SAFE_NO_RNG
static void
default_harvest(struct rndtest_state *rsp, void *buf, u_int count)
{
	/* MarkM: FIX!! Check that this does not swamp the harvester! */
	random_harvest_queue(buf, count, RANDOM_PURE_SAFE);
}
#endif /* SAFE_NO_RNG */

static int
safe_attach(device_t dev)
{
	struct safe_softc *sc = device_get_softc(dev);
	u_int32_t raddr;
	u_int32_t i;
	int rid;

	bzero(sc, sizeof (*sc));
	sc->sc_dev = dev;

	/* XXX handle power management */

	pci_enable_busmaster(dev);

	/*
	 * Setup memory-mapping of PCI registers.
	 */
	rid = BS_BAR;
	sc->sc_sr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_sr == NULL) {
		device_printf(dev, "cannot map register space\n");
		goto bad;
	}
	sc->sc_st = rman_get_bustag(sc->sc_sr);
	sc->sc_sh = rman_get_bushandle(sc->sc_sr);

	/*
	 * Arrange interrupt line.
	 */
	rid = 0;
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE|RF_ACTIVE);
	if (sc->sc_irq == NULL) {
		device_printf(dev, "could not map interrupt\n");
		goto bad1;
	}
	/*
	 * NB: Network code assumes we are blocked with splimp()
	 *     so make sure the IRQ is mapped appropriately.
	 */
	if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, safe_intr, sc, &sc->sc_ih)) {
		device_printf(dev, "could not establish interrupt\n");
		goto bad2;
	}

	sc->sc_cid = crypto_get_driverid(dev, sizeof(struct safe_session),
	    CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0) {
		device_printf(dev, "could not get crypto driver id\n");
		goto bad3;
	}

	sc->sc_chiprev = READ_REG(sc, SAFE_DEVINFO) &
	    (SAFE_DEVINFO_REV_MAJ | SAFE_DEVINFO_REV_MIN);

	/*
	 * Setup DMA descriptor area.
	 */
	if (bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
			       1,			/* alignment */
			       SAFE_DMA_BOUNDARY,	/* boundary */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       SAFE_MAX_DMA,		/* maxsize */
			       SAFE_MAX_PART,		/* nsegments */
			       SAFE_MAX_SSIZE,		/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       NULL, NULL,		/* locking */
			       &sc->sc_srcdmat)) {
		device_printf(dev, "cannot allocate DMA tag\n");
		goto bad4;
	}
	if (bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
			       1,			/* alignment */
			       SAFE_MAX_DSIZE,		/* boundary */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       SAFE_MAX_DMA,		/* maxsize */
			       SAFE_MAX_PART,		/* nsegments */
			       SAFE_MAX_DSIZE,		/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       NULL, NULL,		/* locking */
			       &sc->sc_dstdmat)) {
		device_printf(dev, "cannot allocate DMA tag\n");
		goto bad4;
	}
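
	/*
	 * NB (added note): the two tags above differ only in boundary and
	 * maxsegsize.  Source segments may be up to SAFE_MAX_SSIZE bytes,
	 * while destination segments are capped at SAFE_MAX_DSIZE because
	 * the engine requires every scatter particle but the last to be
	 * exactly that size (see safe_dmamap_uniform() below).
	 */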

	/*
	 * Allocate packet engine descriptors.
	 */
	if (safe_dma_malloc(sc,
	    SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
	    &sc->sc_ringalloc, 0)) {
		device_printf(dev, "cannot allocate PE descriptor ring\n");
		bus_dma_tag_destroy(sc->sc_srcdmat);
		goto bad4;
	}
	/*
	 * Hookup the static portion of all our data structures.
	 */
	sc->sc_ring = (struct safe_ringentry *) sc->sc_ringalloc.dma_vaddr;
	sc->sc_ringtop = sc->sc_ring + SAFE_MAX_NQUEUE;
	sc->sc_front = sc->sc_ring;
	sc->sc_back = sc->sc_ring;
	raddr = sc->sc_ringalloc.dma_paddr;
	bzero(sc->sc_ring, SAFE_MAX_NQUEUE * sizeof(struct safe_ringentry));
	for (i = 0; i < SAFE_MAX_NQUEUE; i++) {
		struct safe_ringentry *re = &sc->sc_ring[i];

		re->re_desc.d_sa = raddr +
			offsetof(struct safe_ringentry, re_sa);
		re->re_sa.sa_staterec = raddr +
			offsetof(struct safe_ringentry, re_sastate);

		raddr += sizeof (struct safe_ringentry);
	}
	mtx_init(&sc->sc_ringmtx, device_get_nameunit(dev),
	    "packet engine ring", MTX_DEF);

	/*
	 * Allocate scatter and gather particle descriptors.
	 */
	if (safe_dma_malloc(sc, SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
	    &sc->sc_spalloc, 0)) {
		device_printf(dev, "cannot allocate source particle "
		    "descriptor ring\n");
		mtx_destroy(&sc->sc_ringmtx);
		safe_dma_free(sc, &sc->sc_ringalloc);
		bus_dma_tag_destroy(sc->sc_srcdmat);
		goto bad4;
	}
	sc->sc_spring = (struct safe_pdesc *) sc->sc_spalloc.dma_vaddr;
	sc->sc_springtop = sc->sc_spring + SAFE_TOTAL_SPART;
	sc->sc_spfree = sc->sc_spring;
	bzero(sc->sc_spring, SAFE_TOTAL_SPART * sizeof(struct safe_pdesc));

	if (safe_dma_malloc(sc, SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
	    &sc->sc_dpalloc, 0)) {
		device_printf(dev, "cannot allocate destination particle "
		    "descriptor ring\n");
		mtx_destroy(&sc->sc_ringmtx);
		safe_dma_free(sc, &sc->sc_spalloc);
		safe_dma_free(sc, &sc->sc_ringalloc);
		bus_dma_tag_destroy(sc->sc_dstdmat);
		goto bad4;
	}
	sc->sc_dpring = (struct safe_pdesc *) sc->sc_dpalloc.dma_vaddr;
	sc->sc_dpringtop = sc->sc_dpring + SAFE_TOTAL_DPART;
	sc->sc_dpfree = sc->sc_dpring;
	bzero(sc->sc_dpring, SAFE_TOTAL_DPART * sizeof(struct safe_pdesc));
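
	/*
	 * NB (added note): each ring entry above was primed with the bus
	 * addresses of its own SA and state record, i.e. for entry i:
	 *
	 *   d_sa        = ring_paddr + i * sizeof(struct safe_ringentry)
	 *                 + offsetof(struct safe_ringentry, re_sa)
	 *   sa_staterec = ring_paddr + i * sizeof(struct safe_ringentry)
	 *                 + offsetof(struct safe_ringentry, re_sastate)
	 *
	 * so safe_process() never has to compute bus addresses per request.
	 */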

	device_printf(sc->sc_dev, "%s", safe_partname(sc));

	sc->sc_devinfo = READ_REG(sc, SAFE_DEVINFO);
	if (sc->sc_devinfo & SAFE_DEVINFO_RNG) {
		sc->sc_flags |= SAFE_FLAGS_RNG;
		printf(" rng");
	}
	if (sc->sc_devinfo & SAFE_DEVINFO_PKEY) {
#if 0
		printf(" key");
		sc->sc_flags |= SAFE_FLAGS_KEY;
		crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0);
		crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0);
#endif
	}
	if (sc->sc_devinfo & SAFE_DEVINFO_DES) {
		printf(" des/3des");
	}
	if (sc->sc_devinfo & SAFE_DEVINFO_AES) {
		printf(" aes");
	}
	if (sc->sc_devinfo & SAFE_DEVINFO_MD5) {
		printf(" md5");
	}
	if (sc->sc_devinfo & SAFE_DEVINFO_SHA1) {
		printf(" sha1");
	}
	/* XXX other supported algorithms */
	printf("\n");

	safe_reset_board(sc);		/* reset h/w */
	safe_init_pciregs(dev);		/* init pci settings */
	safe_init_board(sc);		/* init h/w */

#ifndef SAFE_NO_RNG
	if (sc->sc_flags & SAFE_FLAGS_RNG) {
#ifdef SAFE_RNDTEST
		sc->sc_rndtest = rndtest_attach(dev);
		if (sc->sc_rndtest)
			sc->sc_harvest = rndtest_harvest;
		else
			sc->sc_harvest = default_harvest;
#else
		sc->sc_harvest = default_harvest;
#endif
		safe_rng_init(sc);

		callout_init(&sc->sc_rngto, 1);
		callout_reset(&sc->sc_rngto, hz*safe_rnginterval, safe_rng, sc);
	}
#endif /* SAFE_NO_RNG */
#ifdef SAFE_DEBUG
	safec = sc;			/* for use by hw.safe.dump */
#endif
	return (0);
bad4:
	crypto_unregister_all(sc->sc_cid);
bad3:
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
bad2:
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
bad1:
	bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr);
bad:
	return (ENXIO);
}

/*
 * Detach a device that successfully probed.
 */
static int
safe_detach(device_t dev)
{
	struct safe_softc *sc = device_get_softc(dev);

	/* XXX wait/abort active ops */

	WRITE_REG(sc, SAFE_HI_MASK, 0);		/* disable interrupts */

	callout_stop(&sc->sc_rngto);

	crypto_unregister_all(sc->sc_cid);

#ifdef SAFE_RNDTEST
	if (sc->sc_rndtest)
		rndtest_detach(sc->sc_rndtest);
#endif

	safe_cleanchip(sc);
	safe_dma_free(sc, &sc->sc_dpalloc);
	safe_dma_free(sc, &sc->sc_spalloc);
	mtx_destroy(&sc->sc_ringmtx);
	safe_dma_free(sc, &sc->sc_ringalloc);

	bus_generic_detach(dev);
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);

	bus_dma_tag_destroy(sc->sc_srcdmat);
	bus_dma_tag_destroy(sc->sc_dstdmat);
	bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr);

	return (0);
}

/*
 * Stop all chip i/o so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
safe_shutdown(device_t dev)
{
#ifdef notyet
	safe_stop(device_get_softc(dev));
#endif
	return (0);
}

/*
 * Device suspend routine.
 */
static int
safe_suspend(device_t dev)
{
	struct safe_softc *sc = device_get_softc(dev);

#ifdef notyet
	/* XXX stop the device and save PCI settings */
#endif
	sc->sc_suspended = 1;

	return (0);
}

static int
safe_resume(device_t dev)
{
	struct safe_softc *sc = device_get_softc(dev);

#ifdef notyet
	/* XXX restore PCI settings and start the device */
#endif
	sc->sc_suspended = 0;
	return (0);
}

/*
 * SafeXcel Interrupt routine
 */
static void
safe_intr(void *arg)
{
	struct safe_softc *sc = arg;
	volatile u_int32_t stat;

	stat = READ_REG(sc, SAFE_HM_STAT);
	if (stat == 0)			/* shared irq, not for us */
		return;

	WRITE_REG(sc, SAFE_HI_CLR, stat);	/* IACK */

	if ((stat & SAFE_INT_PE_DDONE)) {
		/*
		 * Descriptor(s) done; scan the ring and
		 * process completed operations.
		 */
		mtx_lock(&sc->sc_ringmtx);
		while (sc->sc_back != sc->sc_front) {
			struct safe_ringentry *re = sc->sc_back;
#ifdef SAFE_DEBUG
			if (safe_debug) {
				safe_dump_ringstate(sc, __func__);
				safe_dump_request(sc, __func__, re);
			}
#endif
			/*
			 * safe_process marks ring entries that were allocated
			 * but not used with a csr of zero.  This ensures the
			 * ring front pointer never needs to be set backwards
			 * in the event that an entry is allocated but not used
			 * because of a setup error.
			 */
			if (re->re_desc.d_csr != 0) {
				if (!SAFE_PE_CSR_IS_DONE(re->re_desc.d_csr))
					break;
				if (!SAFE_PE_LEN_IS_DONE(re->re_desc.d_len))
					break;
				sc->sc_nqchip--;
				safe_callback(sc, re);
			}
			if (++(sc->sc_back) == sc->sc_ringtop)
				sc->sc_back = sc->sc_ring;
		}
		mtx_unlock(&sc->sc_ringmtx);
	}

	/*
	 * Check to see if we got any DMA Error
	 */
	if (stat & SAFE_INT_PE_ERROR) {
		DPRINTF(("dmaerr dmastat %08x\n",
			READ_REG(sc, SAFE_PE_DMASTAT)));
		safestats.st_dmaerr++;
		safe_totalreset(sc);
#if 0
		safe_feed(sc);
#endif
	}

	if (sc->sc_needwakeup) {		/* XXX check high watermark */
		int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
		DPRINTF(("%s: wakeup crypto %x\n", __func__,
			sc->sc_needwakeup));
		sc->sc_needwakeup &= ~wakeup;
		crypto_unblock(sc->sc_cid, wakeup);
	}
}

/*
 * safe_feed() - post a request to chip
 */
static void
safe_feed(struct safe_softc *sc, struct safe_ringentry *re)
{
	bus_dmamap_sync(sc->sc_srcdmat, re->re_src_map, BUS_DMASYNC_PREWRITE);
	if (re->re_dst_map != NULL)
		bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map,
		    BUS_DMASYNC_PREREAD);
	/* XXX have no smaller granularity */
	safe_dma_sync(&sc->sc_ringalloc,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	safe_dma_sync(&sc->sc_spalloc, BUS_DMASYNC_PREWRITE);
	safe_dma_sync(&sc->sc_dpalloc, BUS_DMASYNC_PREWRITE);

#ifdef SAFE_DEBUG
	if (safe_debug) {
		safe_dump_ringstate(sc, __func__);
		safe_dump_request(sc, __func__, re);
	}
#endif
	sc->sc_nqchip++;
	if (sc->sc_nqchip > safestats.st_maxqchip)
		safestats.st_maxqchip = sc->sc_nqchip;
	/* poke h/w to check descriptor ring, any value can be written */
	WRITE_REG(sc, SAFE_HI_RD_DESCR, 0);
}

#define	N(a)	(sizeof(a) / sizeof (a[0]))
static void
safe_setup_enckey(struct safe_session *ses, const void *key)
{
	int i;

	bcopy(key, ses->ses_key, ses->ses_klen);

	/* PE is little-endian, ensure proper byte order */
	for (i = 0; i < N(ses->ses_key); i++)
		ses->ses_key[i] = htole32(ses->ses_key[i]);
}

static void
safe_setup_mackey(struct safe_session *ses, int algo, const uint8_t *key,
    int klen)
{
	SHA1_CTX sha1ctx;
	int i;

	hmac_init_ipad(&auth_hash_hmac_sha1, key, klen, &sha1ctx);
	bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32));

	hmac_init_opad(&auth_hash_hmac_sha1, key, klen, &sha1ctx);
	bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32));

	explicit_bzero(&sha1ctx, sizeof(sha1ctx));

	/* PE is little-endian, ensure proper byte order */
	for (i = 0; i < N(ses->ses_hminner); i++) {
		ses->ses_hminner[i] = htole32(ses->ses_hminner[i]);
		ses->ses_hmouter[i] = htole32(ses->ses_hmouter[i]);
	}
}
#undef N
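
/*
 * Added note: safe_setup_mackey() performs the standard HMAC
 * precomputation.  The inner and outer SHA-1 states are seeded once
 * per session with key^ipad and key^opad, so at request time the
 * engine only continues those saved states over the data (they reach
 * the SA via sa_indigest/sa_outdigest in safe_process()) instead of
 * rehashing the key material for every request.
 */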

static bool
safe_auth_supported(struct safe_softc *sc,
    const struct crypto_session_params *csp)
{

	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
		if ((sc->sc_devinfo & SAFE_DEVINFO_SHA1) == 0)
			return (false);
		break;
	default:
		return (false);
	}
	return (true);
}

static bool
safe_cipher_supported(struct safe_softc *sc,
    const struct crypto_session_params *csp)
{

	switch (csp->csp_cipher_alg) {
	case CRYPTO_AES_CBC:
		if ((sc->sc_devinfo & SAFE_DEVINFO_AES) == 0)
			return (false);
		if (csp->csp_ivlen != 16)
			return (false);
		if (csp->csp_cipher_klen != 16 &&
		    csp->csp_cipher_klen != 24 &&
		    csp->csp_cipher_klen != 32)
			return (false);
		break;
	default:
		return (false);
	}
	return (true);
}

static int
safe_probesession(device_t dev, const struct crypto_session_params *csp)
{
	struct safe_softc *sc = device_get_softc(dev);

	if (csp->csp_flags != 0)
		return (EINVAL);
	switch (csp->csp_mode) {
	case CSP_MODE_DIGEST:
		if (!safe_auth_supported(sc, csp))
			return (EINVAL);
		break;
	case CSP_MODE_CIPHER:
		if (!safe_cipher_supported(sc, csp))
			return (EINVAL);
		break;
	case CSP_MODE_ETA:
		if (!safe_auth_supported(sc, csp) ||
		    !safe_cipher_supported(sc, csp))
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	return (CRYPTODEV_PROBE_HARDWARE);
}

/*
 * Allocate a new 'session'.
 */
static int
safe_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
	struct safe_session *ses;

	ses = crypto_get_driver_session(cses);
	if (csp->csp_cipher_alg != 0) {
		ses->ses_klen = csp->csp_cipher_klen;
		if (csp->csp_cipher_key != NULL)
			safe_setup_enckey(ses, csp->csp_cipher_key);
	}

	if (csp->csp_auth_alg != 0) {
		ses->ses_mlen = csp->csp_auth_mlen;
		if (ses->ses_mlen == 0) {
			ses->ses_mlen = SHA1_HASH_LEN;
		}

		if (csp->csp_auth_key != NULL) {
			safe_setup_mackey(ses, csp->csp_auth_alg,
			    csp->csp_auth_key, csp->csp_auth_klen);
		}
	}

	return (0);
}
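
/*
 * Illustrative example (added; values are hypothetical, not from the
 * original source): an ETA session pairing AES-CBC with HMAC-SHA1
 * reaches safe_probesession() with roughly:
 *
 *	struct crypto_session_params csp = {
 *		.csp_mode = CSP_MODE_ETA,
 *		.csp_cipher_alg = CRYPTO_AES_CBC,
 *		.csp_cipher_klen = 16,		(or 24 / 32)
 *		.csp_ivlen = 16,
 *		.csp_auth_alg = CRYPTO_SHA1_HMAC,
 *	};
 *
 * Anything else (other algorithms, non-zero csp_flags) is declined
 * with EINVAL so the framework can fall back to another provider.
 */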

static void
safe_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, int error)
{
	struct safe_operand *op = arg;

	DPRINTF(("%s: nsegs %d error %d\n", __func__,
		nsegs, error));
	if (error != 0)
		return;
	op->nsegs = nsegs;
	bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
}

static int
safe_process(device_t dev, struct cryptop *crp, int hint)
{
	struct safe_softc *sc = device_get_softc(dev);
	const struct crypto_session_params *csp;
	int err = 0, i, nicealign, uniform;
	int bypass, oplen;
	int16_t coffset;
	struct safe_session *ses;
	struct safe_ringentry *re;
	struct safe_sarec *sa;
	struct safe_pdesc *pd;
	u_int32_t cmd0, cmd1, staterec;

	mtx_lock(&sc->sc_ringmtx);
	if (sc->sc_front == sc->sc_back && sc->sc_nqchip != 0) {
		safestats.st_ringfull++;
		sc->sc_needwakeup |= CRYPTO_SYMQ;
		mtx_unlock(&sc->sc_ringmtx);
		return (ERESTART);
	}
	re = sc->sc_front;

	staterec = re->re_sa.sa_staterec;	/* save */
	/* NB: zero everything but the PE descriptor */
	bzero(&re->re_sa, sizeof(struct safe_ringentry) - sizeof(re->re_desc));
	re->re_sa.sa_staterec = staterec;	/* restore */

	re->re_crp = crp;

	sa = &re->re_sa;
	ses = crypto_get_driver_session(crp->crp_session);
	csp = crypto_get_params(crp->crp_session);

	cmd0 = SAFE_SA_CMD0_BASIC;		/* basic group operation */
	cmd1 = 0;
	switch (csp->csp_mode) {
	case CSP_MODE_DIGEST:
		cmd0 |= SAFE_SA_CMD0_OP_HASH;
		break;
	case CSP_MODE_CIPHER:
		cmd0 |= SAFE_SA_CMD0_OP_CRYPT;
		break;
	case CSP_MODE_ETA:
		cmd0 |= SAFE_SA_CMD0_OP_BOTH;
		break;
	}

	if (csp->csp_cipher_alg != 0) {
		if (crp->crp_cipher_key != NULL)
			safe_setup_enckey(ses, crp->crp_cipher_key);

		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_CBC:
			cmd0 |= SAFE_SA_CMD0_AES;
			cmd1 |= SAFE_SA_CMD1_CBC;
			if (ses->ses_klen * 8 == 128)
				cmd1 |= SAFE_SA_CMD1_AES128;
			else if (ses->ses_klen * 8 == 192)
				cmd1 |= SAFE_SA_CMD1_AES192;
			else
				cmd1 |= SAFE_SA_CMD1_AES256;
		}

		/*
		 * Setup encrypt/decrypt state.  When using basic ops
		 * we can't use an inline IV because hash/crypt offset
		 * must be from the end of the IV to the start of the
		 * crypt data and this leaves out the preceding header
		 * from the hash calculation.  Instead we place the IV
		 * in the state record and set the hash/crypt offset to
		 * copy both the header+IV.
		 */
		crypto_read_iv(crp, re->re_sastate.sa_saved_iv);
		cmd0 |= SAFE_SA_CMD0_IVLD_STATE;

		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			cmd0 |= SAFE_SA_CMD0_OUTBOUND;

			/*
			 * XXX: I suspect we don't need this since we
			 * don't save the returned IV.
			 */
			cmd0 |= SAFE_SA_CMD0_SAVEIV;
		} else {
			cmd0 |= SAFE_SA_CMD0_INBOUND;
		}
		/*
		 * For basic encryption use the zero pad algorithm.
		 * This pads results to an 8-byte boundary and
		 * suppresses padding verification for inbound (i.e.
		 * decrypt) operations.
		 *
		 * NB: Not sure if the 8-byte pad boundary is a problem.
		 */
		cmd0 |= SAFE_SA_CMD0_PAD_ZERO;

		/* XXX assert key bufs have the same size */
		bcopy(ses->ses_key, sa->sa_key, sizeof(sa->sa_key));
	}

	if (csp->csp_auth_alg != 0) {
		if (crp->crp_auth_key != NULL) {
			safe_setup_mackey(ses, csp->csp_auth_alg,
			    crp->crp_auth_key, csp->csp_auth_klen);
		}

		switch (csp->csp_auth_alg) {
		case CRYPTO_SHA1_HMAC:
			cmd0 |= SAFE_SA_CMD0_SHA1;
			cmd1 |= SAFE_SA_CMD1_HMAC;	/* NB: enable HMAC */
			break;
		}

		/*
		 * Digest data is loaded from the SA and the hash
		 * result is saved to the state block where we
		 * retrieve it for return to the caller.
		 */
		/* XXX assert digest bufs have the same size */
		bcopy(ses->ses_hminner, sa->sa_indigest,
		    sizeof(sa->sa_indigest));
		bcopy(ses->ses_hmouter, sa->sa_outdigest,
		    sizeof(sa->sa_outdigest));

		cmd0 |= SAFE_SA_CMD0_HSLD_SA | SAFE_SA_CMD0_SAVEHASH;
		re->re_flags |= SAFE_QFLAGS_COPYOUTICV;
	}

	if (csp->csp_mode == CSP_MODE_ETA) {
		/*
		 * The driver only supports ETA requests where there
		 * is no gap between the AAD and payload.
		 */
		if (crp->crp_aad_length != 0 &&
		    crp->crp_aad_start + crp->crp_aad_length !=
		    crp->crp_payload_start) {
			safestats.st_lenmismatch++;
			err = EINVAL;
			goto errout;
		}
		if (crp->crp_aad_length != 0)
			bypass = crp->crp_aad_start;
		else
			bypass = crp->crp_payload_start;
		coffset = crp->crp_aad_length;
		oplen = crp->crp_payload_start + crp->crp_payload_length;
#ifdef SAFE_DEBUG
		if (safe_debug) {
			printf("AAD: skip %d, len %d, digest %d\n",
			    crp->crp_aad_start, crp->crp_aad_length,
			    crp->crp_digest_start);
			printf("payload: skip %d, len %d, IV %d\n",
			    crp->crp_payload_start, crp->crp_payload_length,
			    crp->crp_iv_start);
			printf("bypass %d coffset %d oplen %d\n",
			    bypass, coffset, oplen);
		}
#endif
		if (coffset & 3) {	/* offset must be 32-bit aligned */
			DPRINTF(("%s: coffset %u misaligned\n",
				__func__, coffset));
			safestats.st_coffmisaligned++;
			err = EINVAL;
			goto errout;
		}
		coffset >>= 2;
		if (coffset > 255) {	/* offset must be <256 dwords */
			DPRINTF(("%s: coffset %u too big\n",
				__func__, coffset));
			safestats.st_cofftoobig++;
			err = EINVAL;
			goto errout;
		}
		/*
		 * Tell the hardware to copy the header to the output.
		 * The header is defined as the data from the end of
		 * the bypass to the start of data to be encrypted.
		 * Typically this is the inline IV.  Note that you need
		 * to do this even if src+dst are the same; it appears
		 * that w/o this bit the crypted data is written
		 * immediately after the bypass data.
		 */
		cmd1 |= SAFE_SA_CMD1_HDRCOPY;
		/*
		 * Disable IP header mutable bit handling.  This is
		 * needed to get correct HMAC calculations.
		 */
		cmd1 |= SAFE_SA_CMD1_MUTABLE;
	} else {
		bypass = crp->crp_payload_start;
		oplen = bypass + crp->crp_payload_length;
		coffset = 0;
	}
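	/*
	 * Added worked example (hypothetical request): for an ETA op with
	 * crp_aad_start = 0, crp_aad_length = 16, crp_payload_start = 16
	 * and crp_payload_length = 48, the code above yields bypass = 0,
	 * oplen = 64 and coffset = 16 bytes, i.e. 4 dwords after the
	 * >>= 2: the engine hashes from byte 0 but starts the cipher 4
	 * dwords in.  For plain cipher/digest ops the else branch simply
	 * skips bypass bytes and processes through oplen with no
	 * hash/crypt offset.
	 */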
	/* XXX verify multiple of 4 when using s/g */
	if (bypass > 96) {		/* bypass offset must be <= 96 bytes */
		DPRINTF(("%s: bypass %u too big\n", __func__, bypass));
		safestats.st_bypasstoobig++;
		err = EINVAL;
		goto errout;
	}

	if (bus_dmamap_create(sc->sc_srcdmat, BUS_DMA_NOWAIT, &re->re_src_map)) {
		safestats.st_nomap++;
		err = ENOMEM;
		goto errout;
	}
	if (bus_dmamap_load_crp(sc->sc_srcdmat, re->re_src_map, crp, safe_op_cb,
	    &re->re_src, BUS_DMA_NOWAIT) != 0) {
		bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
		re->re_src_map = NULL;
		safestats.st_noload++;
		err = ENOMEM;
		goto errout;
	}
	re->re_src_mapsize = crypto_buffer_len(&crp->crp_buf);
	nicealign = safe_dmamap_aligned(&re->re_src);
	uniform = safe_dmamap_uniform(&re->re_src);

	DPRINTF(("src nicealign %u uniform %u nsegs %u\n",
		nicealign, uniform, re->re_src.nsegs));
	if (re->re_src.nsegs > 1) {
		re->re_desc.d_src = sc->sc_spalloc.dma_paddr +
			((caddr_t) sc->sc_spfree - (caddr_t) sc->sc_spring);
		for (i = 0; i < re->re_src_nsegs; i++) {
			/* NB: no need to check if there's space */
			pd = sc->sc_spfree;
			if (++(sc->sc_spfree) == sc->sc_springtop)
				sc->sc_spfree = sc->sc_spring;

			KASSERT((pd->pd_flags&3) == 0 ||
				(pd->pd_flags&3) == SAFE_PD_DONE,
				("bogus source particle descriptor; flags %x",
				pd->pd_flags));
			pd->pd_addr = re->re_src_segs[i].ds_addr;
			pd->pd_size = re->re_src_segs[i].ds_len;
			pd->pd_flags = SAFE_PD_READY;
		}
		cmd0 |= SAFE_SA_CMD0_IGATHER;
	} else {
		/*
		 * No need for gather, reference the operand directly.
		 */
		re->re_desc.d_src = re->re_src_segs[0].ds_addr;
	}

	if (csp->csp_mode == CSP_MODE_DIGEST) {
		/*
		 * Hash op; no destination needed.
		 */
	} else {
		if (nicealign && uniform == 1) {
			/*
			 * Source layout is suitable for direct
			 * sharing of the DMA map and segment list.
			 */
			re->re_dst = re->re_src;
		} else if (nicealign && uniform == 2) {
			/*
			 * The source is properly aligned but requires a
			 * different particle list to handle DMA of the
			 * result.  Create a new map and do the load to
			 * create the segment list.  The particle
			 * descriptor setup code below will handle the
			 * rest.
			 */
			if (bus_dmamap_create(sc->sc_dstdmat, BUS_DMA_NOWAIT,
			    &re->re_dst_map)) {
				safestats.st_nomap++;
				err = ENOMEM;
				goto errout;
			}
			if (bus_dmamap_load_crp(sc->sc_dstdmat, re->re_dst_map,
			    crp, safe_op_cb, &re->re_dst, BUS_DMA_NOWAIT) !=
			    0) {
				bus_dmamap_destroy(sc->sc_dstdmat,
				    re->re_dst_map);
				re->re_dst_map = NULL;
				safestats.st_noload++;
				err = ENOMEM;
				goto errout;
			}
		} else if (crp->crp_buf.cb_type == CRYPTO_BUF_MBUF) {
			int totlen, len;
			struct mbuf *m, *top, **mp;

			/*
			 * DMA constraints require that we allocate a
			 * new mbuf chain for the destination.  We
			 * allocate an entire new set of mbufs of
			 * optimal/required size and then tell the
			 * hardware to copy any bits that are not
			 * created as a byproduct of the operation.
			 */
			if (!nicealign)
				safestats.st_unaligned++;
			if (!uniform)
				safestats.st_notuniform++;
			totlen = re->re_src_mapsize;
			if (crp->crp_buf.cb_mbuf->m_flags & M_PKTHDR) {
				len = MHLEN;
				MGETHDR(m, M_NOWAIT, MT_DATA);
				if (m && !m_dup_pkthdr(m, crp->crp_buf.cb_mbuf,
				    M_NOWAIT)) {
					m_free(m);
					m = NULL;
				}
			} else {
				len = MLEN;
				MGET(m, M_NOWAIT, MT_DATA);
			}
			if (m == NULL) {
				safestats.st_nombuf++;
				err = sc->sc_nqchip ? ERESTART : ENOMEM;
				goto errout;
			}
			if (totlen >= MINCLSIZE) {
				if (!(MCLGET(m, M_NOWAIT))) {
					m_free(m);
					safestats.st_nomcl++;
					err = sc->sc_nqchip ?
					    ERESTART : ENOMEM;
					goto errout;
				}
				len = MCLBYTES;
			}
			m->m_len = len;
			top = NULL;
			mp = &top;

			while (totlen > 0) {
				if (top) {
					MGET(m, M_NOWAIT, MT_DATA);
					if (m == NULL) {
						m_freem(top);
						safestats.st_nombuf++;
						err = sc->sc_nqchip ?
						    ERESTART : ENOMEM;
						goto errout;
					}
					len = MLEN;
				}
				if (top && totlen >= MINCLSIZE) {
					if (!(MCLGET(m, M_NOWAIT))) {
						*mp = m;
						m_freem(top);
						safestats.st_nomcl++;
						err = sc->sc_nqchip ?
						    ERESTART : ENOMEM;
						goto errout;
					}
					len = MCLBYTES;
				}
				m->m_len = len = min(totlen, len);
				totlen -= len;
				*mp = m;
				mp = &m->m_next;
			}
			re->re_dst_m = top;
			if (bus_dmamap_create(sc->sc_dstdmat,
			    BUS_DMA_NOWAIT, &re->re_dst_map) != 0) {
				safestats.st_nomap++;
				err = ENOMEM;
				goto errout;
			}
			if (bus_dmamap_load_mbuf_sg(sc->sc_dstdmat,
			    re->re_dst_map, top, re->re_dst_segs,
			    &re->re_dst_nsegs, 0) != 0) {
				bus_dmamap_destroy(sc->sc_dstdmat,
				    re->re_dst_map);
				re->re_dst_map = NULL;
				safestats.st_noload++;
				err = ENOMEM;
				goto errout;
			}
			re->re_dst_mapsize = re->re_src_mapsize;
			if (re->re_src.mapsize > oplen) {
				/*
				 * There's data following what the
				 * hardware will copy for us.  If this
				 * isn't just the ICV (that's going to
				 * be written on completion), copy it
				 * to the new mbufs
				 */
				if (!(csp->csp_mode == CSP_MODE_ETA &&
				    (re->re_src.mapsize-oplen) == ses->ses_mlen &&
				    crp->crp_digest_start == oplen))
					safe_mcopy(crp->crp_buf.cb_mbuf,
					    re->re_dst_m, oplen);
				else
					safestats.st_noicvcopy++;
			}
		} else {
			if (!nicealign) {
				safestats.st_iovmisaligned++;
				err = EINVAL;
				goto errout;
			} else {
				/*
				 * There's no way to handle the DMA
				 * requirements with this uio.  We
				 * could create a separate DMA area for
				 * the result and then copy it back,
				 * but for now we just bail and return
				 * an error.  Note that uio requests
				 * > SAFE_MAX_DSIZE are handled because
				 * the DMA map and segment list for the
				 * destination will result in a
				 * destination particle list that does
				 * the necessary scatter DMA.
				 */
				safestats.st_iovnotuniform++;
				err = EINVAL;
				goto errout;
			}
		}

		if (re->re_dst.nsegs > 1) {
			re->re_desc.d_dst = sc->sc_dpalloc.dma_paddr +
			    ((caddr_t) sc->sc_dpfree - (caddr_t) sc->sc_dpring);
			for (i = 0; i < re->re_dst_nsegs; i++) {
				pd = sc->sc_dpfree;
				KASSERT((pd->pd_flags&3) == 0 ||
					(pd->pd_flags&3) == SAFE_PD_DONE,
					("bogus dest particle descriptor; flags %x",
						pd->pd_flags));
				if (++(sc->sc_dpfree) == sc->sc_dpringtop)
					sc->sc_dpfree = sc->sc_dpring;
				pd->pd_addr = re->re_dst_segs[i].ds_addr;
				pd->pd_flags = SAFE_PD_READY;
			}
			cmd0 |= SAFE_SA_CMD0_OSCATTER;
		} else {
			/*
			 * No need for scatter, reference the operand directly.
			 */
			re->re_desc.d_dst = re->re_dst_segs[0].ds_addr;
		}
	}

	/*
	 * All done with setup; fill in the SA command words
	 * and the packet engine descriptor.  The operation
	 * is now ready for submission to the hardware.
	 */
	sa->sa_cmd0 = cmd0 | SAFE_SA_CMD0_IPCI | SAFE_SA_CMD0_OPCI;
	sa->sa_cmd1 = cmd1
		    | (coffset << SAFE_SA_CMD1_OFFSET_S)
		    | SAFE_SA_CMD1_SAREV1	/* Rev 1 SA data structure */
		    | SAFE_SA_CMD1_SRPCI
		    ;
	/*
	 * NB: the order of writes is important here.  In case the
	 * chip is scanning the ring because of an outstanding request
	 * it might nab this one too.  In that case we need to make
	 * sure the setup is complete before we write the length
	 * field of the descriptor as it signals the descriptor is
	 * ready for processing.
	 */
	re->re_desc.d_csr = SAFE_PE_CSR_READY | SAFE_PE_CSR_SAPCI;
	if (csp->csp_auth_alg != 0)
		re->re_desc.d_csr |= SAFE_PE_CSR_LOADSA | SAFE_PE_CSR_HASHFINAL;
	re->re_desc.d_len = oplen
			  | SAFE_PE_LEN_READY
			  | (bypass << SAFE_PE_LEN_BYPASS_S)
			  ;

	safestats.st_ipackets++;
	safestats.st_ibytes += oplen;

	if (++(sc->sc_front) == sc->sc_ringtop)
		sc->sc_front = sc->sc_ring;

	/* XXX honor batching */
	safe_feed(sc, re);
	mtx_unlock(&sc->sc_ringmtx);
	return (0);

errout:
	if (re->re_dst_m != NULL)
		m_freem(re->re_dst_m);

	if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) {
		bus_dmamap_unload(sc->sc_dstdmat, re->re_dst_map);
		bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map);
	}
	if (re->re_src_map != NULL) {
		bus_dmamap_unload(sc->sc_srcdmat, re->re_src_map);
		bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
	}
	mtx_unlock(&sc->sc_ringmtx);
	if (err != ERESTART) {
		crp->crp_etype = err;
		crypto_done(crp);
		err = 0;
	} else {
		sc->sc_needwakeup |= CRYPTO_SYMQ;
	}
	return (err);
}
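
/*
 * Added note on the error path above: ERESTART is reserved for
 * transient resource shortages (ring full, mbuf or cluster exhaustion
 * while requests are still queued on the chip).  It is returned to the
 * crypto framework, which holds the request and resubmits it once
 * safe_intr() calls crypto_unblock().  Any other failure completes the
 * request immediately with crp_etype set, and safe_process() itself
 * returns 0.
 */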

static void
safe_callback(struct safe_softc *sc, struct safe_ringentry *re)
{
	const struct crypto_session_params *csp;
	struct cryptop *crp = (struct cryptop *)re->re_crp;
	struct safe_session *ses;
	uint8_t hash[HASH_MAX_LEN];

	ses = crypto_get_driver_session(crp->crp_session);
	csp = crypto_get_params(crp->crp_session);

	safestats.st_opackets++;
	safestats.st_obytes += re->re_dst.mapsize;

	safe_dma_sync(&sc->sc_ringalloc,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	if (re->re_desc.d_csr & SAFE_PE_CSR_STATUS) {
		device_printf(sc->sc_dev, "csr 0x%x cmd0 0x%x cmd1 0x%x\n",
			re->re_desc.d_csr,
			re->re_sa.sa_cmd0, re->re_sa.sa_cmd1);
		safestats.st_peoperr++;
		crp->crp_etype = EIO;		/* something more meaningful? */
	}

	/*
	 * XXX: Should crp_buf.cb_mbuf be updated to re->re_dst_m if
	 * it is non-NULL?
	 */

	if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) {
		bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dstdmat, re->re_dst_map);
		bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map);
	}
	bus_dmamap_sync(sc->sc_srcdmat, re->re_src_map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_srcdmat, re->re_src_map);
	bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);

	if (re->re_flags & SAFE_QFLAGS_COPYOUTICV) {
		if (csp->csp_auth_alg == CRYPTO_SHA1_HMAC) {
			/*
			 * SHA-1 ICV's are byte-swapped; fix 'em up
			 * before copying them to their destination.
			 */
			re->re_sastate.sa_saved_indigest[0] =
			    bswap32(re->re_sastate.sa_saved_indigest[0]);
			re->re_sastate.sa_saved_indigest[1] =
			    bswap32(re->re_sastate.sa_saved_indigest[1]);
			re->re_sastate.sa_saved_indigest[2] =
			    bswap32(re->re_sastate.sa_saved_indigest[2]);
		}

		if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
			crypto_copydata(crp, crp->crp_digest_start,
			    ses->ses_mlen, hash);
			if (timingsafe_bcmp(re->re_sastate.sa_saved_indigest,
			    hash, ses->ses_mlen) != 0)
				crp->crp_etype = EBADMSG;
		} else
			crypto_copyback(crp, crp->crp_digest_start,
			    ses->ses_mlen, re->re_sastate.sa_saved_indigest);
	}
	crypto_done(crp);
}

/*
 * Copy all data past offset from srcm to dstm.
 */
static void
safe_mcopy(struct mbuf *srcm, struct mbuf *dstm, u_int offset)
{
	u_int j, dlen, slen;
	caddr_t dptr, sptr;

	/*
	 * Advance src and dst to offset.
	 */
	j = offset;
	while (j >= srcm->m_len) {
		j -= srcm->m_len;
		srcm = srcm->m_next;
		if (srcm == NULL)
			return;
	}
	sptr = mtod(srcm, caddr_t) + j;
	slen = srcm->m_len - j;

	j = offset;
	while (j >= dstm->m_len) {
		j -= dstm->m_len;
		dstm = dstm->m_next;
		if (dstm == NULL)
			return;
	}
	dptr = mtod(dstm, caddr_t) + j;
	dlen = dstm->m_len - j;

	/*
	 * Copy everything that remains.
	 */
	for (;;) {
		j = min(slen, dlen);
		bcopy(sptr, dptr, j);
		if (slen == j) {
			srcm = srcm->m_next;
			if (srcm == NULL)
				return;
			sptr = srcm->m_data;
			slen = srcm->m_len;
		} else
			sptr += j, slen -= j;
		if (dlen == j) {
			dstm = dstm->m_next;
			if (dstm == NULL)
				return;
			dptr = dstm->m_data;
			dlen = dstm->m_len;
		} else
			dptr += j, dlen -= j;
	}
}

#ifndef SAFE_NO_RNG
#define	SAFE_RNG_MAXWAIT	1000

static void
safe_rng_init(struct safe_softc *sc)
{
	u_int32_t w, v;
	int i;

	WRITE_REG(sc, SAFE_RNG_CTRL, 0);
	/* use default value according to the manual */
	WRITE_REG(sc, SAFE_RNG_CNFG, 0x834);	/* magic from SafeNet */
	WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);

	/*
	 * There is a bug in rev 1.0 of the 1140 that when the RNG
	 * is brought out of reset the ready status flag does not
	 * work until the RNG has finished its internal initialization.
	 *
	 * So in order to determine the device is through its
	 * initialization we must read the data register, using the
	 * status reg in the read in case it is initialized.  Then read
	 * the data register until it changes from the first read.
	 * Once it changes read the data register until it changes
	 * again.  At this time the RNG is considered initialized.
	 * This could take between 750 ms and 1000 ms.
	 */
	i = 0;
	w = READ_REG(sc, SAFE_RNG_OUT);
	do {
		v = READ_REG(sc, SAFE_RNG_OUT);
		if (v != w) {
			w = v;
			break;
		}
		DELAY(10);
	} while (++i < SAFE_RNG_MAXWAIT);

	/* Wait until the data changes again */
	i = 0;
	do {
		v = READ_REG(sc, SAFE_RNG_OUT);
		if (v != w)
			break;
		DELAY(10);
	} while (++i < SAFE_RNG_MAXWAIT);
}

static __inline void
safe_rng_disable_short_cycle(struct safe_softc *sc)
{
	WRITE_REG(sc, SAFE_RNG_CTRL,
		READ_REG(sc, SAFE_RNG_CTRL) &~ SAFE_RNG_CTRL_SHORTEN);
}

static __inline void
safe_rng_enable_short_cycle(struct safe_softc *sc)
{
	WRITE_REG(sc, SAFE_RNG_CTRL,
		READ_REG(sc, SAFE_RNG_CTRL) | SAFE_RNG_CTRL_SHORTEN);
}

static __inline u_int32_t
safe_rng_read(struct safe_softc *sc)
{
	int i;

	i = 0;
	while (READ_REG(sc, SAFE_RNG_STAT) != 0 && ++i < SAFE_RNG_MAXWAIT)
		;
	return READ_REG(sc, SAFE_RNG_OUT);
}

static void
safe_rng(void *arg)
{
	struct safe_softc *sc = arg;
	u_int32_t buf[SAFE_RNG_MAXBUFSIZ];	/* NB: maybe move to softc */
	u_int maxwords;
	int i;

	safestats.st_rng++;
	/*
	 * Fetch the next block of data.
	 */
	maxwords = safe_rngbufsize;
	if (maxwords > SAFE_RNG_MAXBUFSIZ)
		maxwords = SAFE_RNG_MAXBUFSIZ;
retry:
	for (i = 0; i < maxwords; i++)
		buf[i] = safe_rng_read(sc);
	/*
	 * Check the comparator alarm count and reset the h/w if
	 * it exceeds our threshold.  This guards against the
	 * hardware oscillators resonating with external signals.
	 */
	if (READ_REG(sc, SAFE_RNG_ALM_CNT) > safe_rngmaxalarm) {
		u_int32_t freq_inc, w;

		DPRINTF(("%s: alarm count %u exceeds threshold %u\n", __func__,
			READ_REG(sc, SAFE_RNG_ALM_CNT), safe_rngmaxalarm));
		safestats.st_rngalarm++;
		safe_rng_enable_short_cycle(sc);
		freq_inc = 18;
		for (i = 0; i < 64; i++) {
			w = READ_REG(sc, SAFE_RNG_CNFG);
			freq_inc = ((w + freq_inc) & 0x3fL);
			w = ((w & ~0x3fL) | freq_inc);
			WRITE_REG(sc, SAFE_RNG_CNFG, w);

			WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);

			(void) safe_rng_read(sc);
			DELAY(25);

			if (READ_REG(sc, SAFE_RNG_ALM_CNT) == 0) {
				safe_rng_disable_short_cycle(sc);
				goto retry;
			}
			freq_inc = 1;
		}
		safe_rng_disable_short_cycle(sc);
	} else
		WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);

	(*sc->sc_harvest)(sc->sc_rndtest, buf, maxwords*sizeof (u_int32_t));
	callout_reset(&sc->sc_rngto,
		hz * (safe_rnginterval ? safe_rnginterval : 1), safe_rng, sc);
}
#endif /* SAFE_NO_RNG */

static void
safe_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr = (bus_addr_t*) arg;
	*paddr = segs->ds_addr;
}

static int
safe_dma_malloc(
	struct safe_softc *sc,
	bus_size_t size,
	struct safe_dma_alloc *dma,
	int mapflags
)
{
	int r;

	r = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
			       sizeof(u_int32_t), 0,	/* alignment, bounds */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       size,			/* maxsize */
			       1,			/* nsegments */
			       size,			/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       NULL, NULL,		/* locking */
			       &dma->dma_tag);
	if (r != 0) {
		device_printf(sc->sc_dev, "safe_dma_malloc: "
			"bus_dma_tag_create failed; error %u\n", r);
		goto fail_0;
	}

	r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
			     BUS_DMA_NOWAIT, &dma->dma_map);
	if (r != 0) {
		device_printf(sc->sc_dev, "safe_dma_malloc: "
			"bus_dmamem_alloc failed; size %ju, error %u\n",
			(uintmax_t)size, r);
		goto fail_1;
	}

	r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
			    size,
			    safe_dmamap_cb,
			    &dma->dma_paddr,
			    mapflags | BUS_DMA_NOWAIT);
	if (r != 0) {
		device_printf(sc->sc_dev, "safe_dma_malloc: "
			"bus_dmamap_load failed; error %u\n", r);
		goto fail_2;
	}

	dma->dma_size = size;
	return (0);

	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
fail_2:
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
fail_1:
	bus_dma_tag_destroy(dma->dma_tag);
fail_0:
	dma->dma_tag = NULL;
	return (r);
}

static void
safe_dma_free(struct safe_softc *sc, struct safe_dma_alloc *dma)
{
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
	bus_dma_tag_destroy(dma->dma_tag);
}
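
/*
 * Usage sketch (added note): safe_dma_malloc() is the allocator behind
 * the descriptor rings set up in safe_attach(), e.g.
 *
 *	struct safe_dma_alloc a;
 *	if (safe_dma_malloc(sc, 64 * sizeof (struct safe_ringentry),
 *	    &a, 0) == 0) {
 *		// a.dma_vaddr is the KVA, a.dma_paddr the single bus
 *		// address (nsegments == 1); the map stays loaded until
 *		// safe_dma_free(sc, &a).
 *	}
 *
 * The "64" is illustrative only; the driver actually sizes the rings
 * with SAFE_MAX_NQUEUE and the SAFE_TOTAL_{S,D}PART counts.
 */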

/*
 * Resets the board.  Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 */
static void
safe_reset_board(struct safe_softc *sc)
{
	u_int32_t v;
	/*
	 * Reset the device.  The manual says no delay
	 * is needed between marking and clearing reset.
	 */
	v = READ_REG(sc, SAFE_PE_DMACFG) &~
	    (SAFE_PE_DMACFG_PERESET | SAFE_PE_DMACFG_PDRRESET |
	     SAFE_PE_DMACFG_SGRESET);
	WRITE_REG(sc, SAFE_PE_DMACFG, v
				    | SAFE_PE_DMACFG_PERESET
				    | SAFE_PE_DMACFG_PDRRESET
				    | SAFE_PE_DMACFG_SGRESET);
	WRITE_REG(sc, SAFE_PE_DMACFG, v);
}

/*
 * Initialize registers we need to touch only once.
 */
static void
safe_init_board(struct safe_softc *sc)
{
	u_int32_t v, dwords;

	v = READ_REG(sc, SAFE_PE_DMACFG);
	v &=~ SAFE_PE_DMACFG_PEMODE;
	v |= SAFE_PE_DMACFG_FSENA		/* failsafe enable */
	  |  SAFE_PE_DMACFG_GPRPCI		/* gather ring on PCI */
	  |  SAFE_PE_DMACFG_SPRPCI		/* scatter ring on PCI */
	  |  SAFE_PE_DMACFG_ESDESC		/* endian-swap descriptors */
	  |  SAFE_PE_DMACFG_ESSA		/* endian-swap SA's */
	  |  SAFE_PE_DMACFG_ESPDESC		/* endian-swap part. desc's */
	  ;
	WRITE_REG(sc, SAFE_PE_DMACFG, v);
#if 0
	/* XXX select byte swap based on host byte order */
	WRITE_REG(sc, SAFE_ENDIAN, 0x1b);
#endif
	if (sc->sc_chiprev == SAFE_REV(1,0)) {
		/*
		 * Avoid large PCI DMA transfers.  Rev 1.0 has a bug where
		 * "target mode transfers" done while the chip is DMA'ing
		 * >1020 bytes cause the hardware to lockup.  To avoid this
		 * we reduce the max PCI transfer size and use small source
		 * particle descriptors (<= 256 bytes).
		 */
		WRITE_REG(sc, SAFE_DMA_CFG, 256);
		device_printf(sc->sc_dev,
			"Reduce max DMA size to %u words for rev %u.%u WAR\n",
			(READ_REG(sc, SAFE_DMA_CFG)>>2) & 0xff,
			SAFE_REV_MAJ(sc->sc_chiprev),
			SAFE_REV_MIN(sc->sc_chiprev));
	}

	/* NB: operands+results are overlaid */
	WRITE_REG(sc, SAFE_PE_PDRBASE, sc->sc_ringalloc.dma_paddr);
	WRITE_REG(sc, SAFE_PE_RDRBASE, sc->sc_ringalloc.dma_paddr);
	/*
	 * Configure ring entry size and number of items in the ring.
	 */
	KASSERT((sizeof(struct safe_ringentry) % sizeof(u_int32_t)) == 0,
		("PE ring entry not 32-bit aligned!"));
	dwords = sizeof(struct safe_ringentry) / sizeof(u_int32_t);
	WRITE_REG(sc, SAFE_PE_RINGCFG,
		(dwords << SAFE_PE_RINGCFG_OFFSET_S) | SAFE_MAX_NQUEUE);
	WRITE_REG(sc, SAFE_PE_RINGPOLL, 0);	/* disable polling */

	WRITE_REG(sc, SAFE_PE_GRNGBASE, sc->sc_spalloc.dma_paddr);
	WRITE_REG(sc, SAFE_PE_SRNGBASE, sc->sc_dpalloc.dma_paddr);
	WRITE_REG(sc, SAFE_PE_PARTSIZE,
		(SAFE_TOTAL_DPART<<16) | SAFE_TOTAL_SPART);
	/*
	 * NB: destination particles are fixed size.  We use
	 *     an mbuf cluster and require all results go to
	 *     clusters or smaller.
	 */
	WRITE_REG(sc, SAFE_PE_PARTCFG, SAFE_MAX_DSIZE);

	/* it's now safe to enable PE mode, do it */
	WRITE_REG(sc, SAFE_PE_DMACFG, v | SAFE_PE_DMACFG_PEMODE);

	/*
	 * Configure hardware to use level-triggered interrupts and
	 * to interrupt after each descriptor is processed.
	 */
	WRITE_REG(sc, SAFE_HI_CFG, SAFE_HI_CFG_LEVEL);
	WRITE_REG(sc, SAFE_HI_DESC_CNT, 1);
	WRITE_REG(sc, SAFE_HI_MASK, SAFE_INT_PE_DDONE | SAFE_INT_PE_ERROR);
}

/*
 * Init PCI registers
 */
static void
safe_init_pciregs(device_t dev)
{
}

/*
 * Clean up after a chip crash.
 * It is assumed that the caller is in splimp().
 */
static void
safe_cleanchip(struct safe_softc *sc)
{

	if (sc->sc_nqchip != 0) {
		struct safe_ringentry *re = sc->sc_back;

		while (re != sc->sc_front) {
			if (re->re_desc.d_csr != 0)
				safe_free_entry(sc, re);
			if (++re == sc->sc_ringtop)
				re = sc->sc_ring;
		}
		sc->sc_back = re;
		sc->sc_nqchip = 0;
	}
}

/*
 * free a safe_q
 * It is assumed that the caller is within splimp().
 */
static int
safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re)
{
	struct cryptop *crp;

	/*
	 * Free header MCR
	 */
	if (re->re_dst_m != NULL)
		m_freem(re->re_dst_m);

	crp = (struct cryptop *)re->re_crp;

	re->re_desc.d_csr = 0;

	crp->crp_etype = EFAULT;
	crypto_done(crp);
	return(0);
}

/*
 * Routine to reset the chip and clean up.
 * It is assumed that the caller is in splimp().
 */
static void
safe_totalreset(struct safe_softc *sc)
{
	safe_reset_board(sc);
	safe_init_board(sc);
	safe_cleanchip(sc);
}

/*
 * Is the operand suitably aligned for direct DMA?  Each
 * segment must be aligned on a 32-bit boundary and all
 * but the last segment must be a multiple of 4 bytes.
 */
static int
safe_dmamap_aligned(const struct safe_operand *op)
{
	int i;

	for (i = 0; i < op->nsegs; i++) {
		if (op->segs[i].ds_addr & 3)
			return (0);
		if (i != (op->nsegs - 1) && (op->segs[i].ds_len & 3))
			return (0);
	}
	return (1);
}

/*
 * Is the operand suitable for direct DMA as the destination
 * of an operation.  The hardware requires that each ``particle''
 * but the last in an operation result have the same size.  We
 * fix that size at SAFE_MAX_DSIZE bytes.  This routine returns
 * 0 if some segment is not a multiple of this size, 1 if all
 * segments are exactly this size, or 2 if segments are at worst
 * a multiple of this size.
 */
static int
safe_dmamap_uniform(const struct safe_operand *op)
{
	int result = 1;

	if (op->nsegs > 0) {
		int i;

		for (i = 0; i < op->nsegs-1; i++) {
			if (op->segs[i].ds_len % SAFE_MAX_DSIZE)
				return (0);
			if (op->segs[i].ds_len != SAFE_MAX_DSIZE)
				result = 2;
		}
	}
	return (result);
}
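
/*
 * Added worked example (hypothetical segment lists, D = SAFE_MAX_DSIZE):
 *
 *	{ D, D, 37 }     -> 1  (every non-final segment exactly D)
 *	{ D, 2*D, 37 }   -> 2  (non-final segments are multiples of D)
 *	{ D, D + 4, 37 } -> 0  (a non-final segment is not a multiple)
 *
 * The final segment is never examined, so its length is unconstrained.
 * safe_process() uses 1 to share the source map, 2 to build a separate
 * destination particle list, and 0 to force a copy into fresh mbufs
 * (or, for non-mbuf buffers, to fail the request).
 */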

#ifdef SAFE_DEBUG
static void
safe_dump_dmastatus(struct safe_softc *sc, const char *tag)
{
	printf("%s: ENDIAN 0x%x SRC 0x%x DST 0x%x STAT 0x%x\n"
		, tag
		, READ_REG(sc, SAFE_DMA_ENDIAN)
		, READ_REG(sc, SAFE_DMA_SRCADDR)
		, READ_REG(sc, SAFE_DMA_DSTADDR)
		, READ_REG(sc, SAFE_DMA_STAT)
	);
}

static void
safe_dump_intrstate(struct safe_softc *sc, const char *tag)
{
	printf("%s: HI_CFG 0x%x HI_MASK 0x%x HI_DESC_CNT 0x%x HU_STAT 0x%x HM_STAT 0x%x\n"
		, tag
		, READ_REG(sc, SAFE_HI_CFG)
		, READ_REG(sc, SAFE_HI_MASK)
		, READ_REG(sc, SAFE_HI_DESC_CNT)
		, READ_REG(sc, SAFE_HU_STAT)
		, READ_REG(sc, SAFE_HM_STAT)
	);
}

static void
safe_dump_ringstate(struct safe_softc *sc, const char *tag)
{
	u_int32_t estat = READ_REG(sc, SAFE_PE_ERNGSTAT);

	/* NB: assume caller has lock on ring */
	printf("%s: ERNGSTAT %x (next %u) back %lu front %lu\n",
		tag,
		estat, (estat >> SAFE_PE_ERNGSTAT_NEXT_S),
		(unsigned long)(sc->sc_back - sc->sc_ring),
		(unsigned long)(sc->sc_front - sc->sc_ring));
}

static void
safe_dump_request(struct safe_softc *sc, const char* tag, struct safe_ringentry *re)
{
	int ix, nsegs;

	ix = re - sc->sc_ring;
	printf("%s: %p (%u): csr %x src %x dst %x sa %x len %x\n"
		, tag
		, re, ix
		, re->re_desc.d_csr
		, re->re_desc.d_src
		, re->re_desc.d_dst
		, re->re_desc.d_sa
		, re->re_desc.d_len
	);
	if (re->re_src.nsegs > 1) {
		ix = (re->re_desc.d_src - sc->sc_spalloc.dma_paddr) /
			sizeof(struct safe_pdesc);
		for (nsegs = re->re_src.nsegs; nsegs; nsegs--) {
			printf(" spd[%u] %p: %p size %u flags %x"
				, ix, &sc->sc_spring[ix]
				, (caddr_t)(uintptr_t) sc->sc_spring[ix].pd_addr
				, sc->sc_spring[ix].pd_size
				, sc->sc_spring[ix].pd_flags
			);
			if (sc->sc_spring[ix].pd_size == 0)
				printf(" (zero!)");
			printf("\n");
			if (++ix == SAFE_TOTAL_SPART)
				ix = 0;
		}
	}
	if (re->re_dst.nsegs > 1) {
		ix = (re->re_desc.d_dst - sc->sc_dpalloc.dma_paddr) /
			sizeof(struct safe_pdesc);
		for (nsegs = re->re_dst.nsegs; nsegs; nsegs--) {
			printf(" dpd[%u] %p: %p flags %x\n"
				, ix, &sc->sc_dpring[ix]
				, (caddr_t)(uintptr_t) sc->sc_dpring[ix].pd_addr
				, sc->sc_dpring[ix].pd_flags
			);
			if (++ix == SAFE_TOTAL_DPART)
				ix = 0;
		}
	}
	printf("sa: cmd0 %08x cmd1 %08x staterec %x\n",
		re->re_sa.sa_cmd0, re->re_sa.sa_cmd1, re->re_sa.sa_staterec);
	printf("sa: key %x %x %x %x %x %x %x %x\n"
		, re->re_sa.sa_key[0]
		, re->re_sa.sa_key[1]
		, re->re_sa.sa_key[2]
		, re->re_sa.sa_key[3]
		, re->re_sa.sa_key[4]
		, re->re_sa.sa_key[5]
		, re->re_sa.sa_key[6]
		, re->re_sa.sa_key[7]
	);
	printf("sa: indigest %x %x %x %x %x\n"
		, re->re_sa.sa_indigest[0]
		, re->re_sa.sa_indigest[1]
		, re->re_sa.sa_indigest[2]
		, re->re_sa.sa_indigest[3]
		, re->re_sa.sa_indigest[4]
	);
	printf("sa: outdigest %x %x %x %x %x\n"
		, re->re_sa.sa_outdigest[0]
		, re->re_sa.sa_outdigest[1]
		, re->re_sa.sa_outdigest[2]
		, re->re_sa.sa_outdigest[3]
		, re->re_sa.sa_outdigest[4]
	);
	printf("sr: iv %x %x %x %x\n"
		, re->re_sastate.sa_saved_iv[0]
		, re->re_sastate.sa_saved_iv[1]
		, re->re_sastate.sa_saved_iv[2]
		, re->re_sastate.sa_saved_iv[3]
	);
	printf("sr: hashbc %u indigest %x %x %x %x %x\n"
		, re->re_sastate.sa_saved_hashbc
		, re->re_sastate.sa_saved_indigest[0]
		, re->re_sastate.sa_saved_indigest[1]
		, re->re_sastate.sa_saved_indigest[2]
		, re->re_sastate.sa_saved_indigest[3]
		, re->re_sastate.sa_saved_indigest[4]
	);
}

static void
safe_dump_ring(struct safe_softc *sc, const char *tag)
{
	mtx_lock(&sc->sc_ringmtx);
	printf("\nSafeNet Ring State:\n");
	safe_dump_intrstate(sc, tag);
	safe_dump_dmastatus(sc, tag);
	safe_dump_ringstate(sc, tag);
	if (sc->sc_nqchip) {
		struct safe_ringentry *re = sc->sc_back;
		do {
			safe_dump_request(sc, tag, re);
			if (++re == sc->sc_ringtop)
				re = sc->sc_ring;
		} while (re != sc->sc_front);
	}
	mtx_unlock(&sc->sc_ringmtx);
}

static int
sysctl_hw_safe_dump(SYSCTL_HANDLER_ARGS)
{
	char dmode[64];
	int error;

	strncpy(dmode, "", sizeof(dmode) - 1);
	dmode[sizeof(dmode) - 1] = '\0';
	error = sysctl_handle_string(oidp, &dmode[0], sizeof(dmode), req);

	if (error == 0 && req->newptr != NULL) {
		struct safe_softc *sc = safec;

		if (!sc)
			return EINVAL;
		if (strncmp(dmode, "dma", 3) == 0)
			safe_dump_dmastatus(sc, "safe0");
		else if (strncmp(dmode, "int", 3) == 0)
			safe_dump_intrstate(sc, "safe0");
		else if (strncmp(dmode, "ring", 4) == 0)
			safe_dump_ring(sc, "safe0");
		else
			return EINVAL;
	}
	return error;
}
SYSCTL_PROC(_hw_safe, OID_AUTO, dump,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 0, 0,
    sysctl_hw_safe_dump, "A",
    "Dump driver state");
#endif /* SAFE_DEBUG */
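
/*
 * Usage sketch (added note): with SAFE_DEBUG compiled in, the dump
 * handler above is driven from userland, e.g.
 *
 *	sysctl hw.safe.dump=ring	(interrupt + DMA + ring state)
 *	sysctl hw.safe.dump=int		(interrupt state only)
 *	sysctl hw.safe.dump=dma		(DMA engine state only)
 *
 * Output goes to the console via printf(); only the first unit (the
 * softc cached in `safec') is dumped.
 */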