/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2003 Sam Leffler, Errno Consulting
 * Copyright (c) 2003 Global Technology Associates, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
/*
 * SafeNet SafeXcel-1141 hardware crypto accelerator
 */
#include "opt_safe.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform_auth.h>
#include <sys/random.h>
#include <sys/kobj.h>

#include "cryptodev_if.h"

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#ifdef SAFE_RNDTEST
#include <dev/rndtest/rndtest.h>
#endif
#include <dev/safe/safereg.h>
#include <dev/safe/safevar.h>

#ifndef bswap32
#define	bswap32	NTOHL
#endif

/*
 * Prototypes for the device interface and crypto methods.
 */
static	int safe_probe(device_t);
static	int safe_attach(device_t);
static	int safe_detach(device_t);
static	int safe_suspend(device_t);
static	int safe_resume(device_t);
static	int safe_shutdown(device_t);

static	int safe_probesession(device_t, const struct crypto_session_params *);
static	int safe_newsession(device_t, crypto_session_t,
	    const struct crypto_session_params *);
static	int safe_process(device_t, struct cryptop *, int);
static device_method_t safe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		safe_probe),
	DEVMETHOD(device_attach,	safe_attach),
	DEVMETHOD(device_detach,	safe_detach),
	DEVMETHOD(device_suspend,	safe_suspend),
	DEVMETHOD(device_resume,	safe_resume),
	DEVMETHOD(device_shutdown,	safe_shutdown),

	/* crypto device methods */
	DEVMETHOD(cryptodev_probesession, safe_probesession),
	DEVMETHOD(cryptodev_newsession,	safe_newsession),
	DEVMETHOD(cryptodev_process,	safe_process),

	DEVMETHOD_END
};

static driver_t safe_driver = {
	"safe",
	safe_methods,
	sizeof (struct safe_softc)
};

DRIVER_MODULE(safe, pci, safe_driver, 0, 0);
MODULE_DEPEND(safe, crypto, 1, 1, 1);
#ifdef SAFE_RNDTEST
MODULE_DEPEND(safe, rndtest, 1, 1, 1);
#endif

static	void safe_intr(void *);
static	void safe_callback(struct safe_softc *, struct safe_ringentry *);
static	void safe_feed(struct safe_softc *, struct safe_ringentry *);
static	void safe_mcopy(struct mbuf *, struct mbuf *, u_int);
#ifndef SAFE_NO_RNG
static	void safe_rng_init(struct safe_softc *);
static	void safe_rng(void *);
#endif /* SAFE_NO_RNG */
static	int safe_dma_malloc(struct safe_softc *, bus_size_t,
	    struct safe_dma_alloc *, int);
#define	safe_dma_sync(_dma, _flags) \
	bus_dmamap_sync((_dma)->dma_tag, (_dma)->dma_map, (_flags))
static	void safe_dma_free(struct safe_softc *, struct safe_dma_alloc *);
static	int safe_dmamap_aligned(const struct safe_operand *);
static	int safe_dmamap_uniform(const struct safe_operand *);

static	void safe_reset_board(struct safe_softc *);
static	void safe_init_board(struct safe_softc *);
static	void safe_init_pciregs(device_t dev);
static	void safe_cleanchip(struct safe_softc *);
static	void safe_totalreset(struct safe_softc *);

static	int safe_free_entry(struct safe_softc *, struct safe_ringentry *);

static SYSCTL_NODE(_hw, OID_AUTO, safe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "SafeNet driver parameters");

#ifdef SAFE_DEBUG
static	void safe_dump_dmastatus(struct safe_softc *, const char *);
static	void safe_dump_ringstate(struct safe_softc *, const char *);
static	void safe_dump_intrstate(struct safe_softc *, const char *);
static	void safe_dump_request(struct safe_softc *, const char *,
	    struct safe_ringentry *);

static	struct safe_softc *safec;	/* for use by hw.safe.dump */

static	int safe_debug = 0;
SYSCTL_INT(_hw_safe, OID_AUTO, debug, CTLFLAG_RW, &safe_debug,
    0, "control debugging msgs");
#define	DPRINTF(_x)	if (safe_debug) printf _x
#else
#define	DPRINTF(_x)
#endif

#define	READ_REG(sc,r) \
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (r))

#define	WRITE_REG(sc,reg,val) \
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, reg, val)

struct safe_stats safestats;
SYSCTL_STRUCT(_hw_safe, OID_AUTO, stats, CTLFLAG_RD, &safestats,
    safe_stats, "driver statistics");
#ifndef SAFE_NO_RNG
static	int safe_rnginterval = 1;		/* poll once a second */
SYSCTL_INT(_hw_safe, OID_AUTO, rnginterval, CTLFLAG_RW, &safe_rnginterval,
    0, "RNG polling interval (secs)");
static	int safe_rngbufsize = 16;		/* 64 bytes each poll */
SYSCTL_INT(_hw_safe, OID_AUTO, rngbufsize, CTLFLAG_RW, &safe_rngbufsize,
    0, "RNG polling buffer size (32-bit words)");
static	int safe_rngmaxalarm = 8;		/* max alarms before reset */
SYSCTL_INT(_hw_safe, OID_AUTO, rngmaxalarm, CTLFLAG_RW, &safe_rngmaxalarm,
    0, "RNG max alarms before reset");
#endif /* SAFE_NO_RNG */
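/*
 * Usage note (not driver code): the knobs above can be inspected and
 * tuned at runtime, e.g.
 *
 *	sysctl hw.safe.rnginterval=2
 *	sysctl hw.safe.stats
 *
 * and, when built with SAFE_DEBUG, hw.safe.debug and hw.safe.dump.
 */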
static int
safe_probe(device_t dev)
{
	if (pci_get_vendor(dev) == PCI_VENDOR_SAFENET &&
	    pci_get_device(dev) == PCI_PRODUCT_SAFEXCEL)
		return (BUS_PROBE_DEFAULT);
	return (ENXIO);
}

static const char*
safe_partname(struct safe_softc *sc)
{
	/* XXX sprintf numbers when not decoded */
	switch (pci_get_vendor(sc->sc_dev)) {
	case PCI_VENDOR_SAFENET:
		switch (pci_get_device(sc->sc_dev)) {
		case PCI_PRODUCT_SAFEXCEL: return "SafeNet SafeXcel-1141";
		}
		return "SafeNet unknown-part";
	}
	return "Unknown-vendor unknown-part";
}

#ifndef SAFE_NO_RNG
static void
default_harvest(struct rndtest_state *rsp, void *buf, u_int count)
{
	/* MarkM: FIX!! Check that this does not swamp the harvester! */
	random_harvest_queue(buf, count, RANDOM_PURE_SAFE);
}
#endif /* SAFE_NO_RNG */

static int
safe_attach(device_t dev)
{
	struct safe_softc *sc = device_get_softc(dev);
	u_int32_t raddr;
	u_int32_t i;
	int rid;

	bzero(sc, sizeof (*sc));
	sc->sc_dev = dev;

	/* XXX handle power management */

	pci_enable_busmaster(dev);

	/*
	 * Setup memory-mapping of PCI registers.
	 */
	rid = BS_BAR;
	sc->sc_sr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_sr == NULL) {
		device_printf(dev, "cannot map register space\n");
		goto bad;
	}
	sc->sc_st = rman_get_bustag(sc->sc_sr);
	sc->sc_sh = rman_get_bushandle(sc->sc_sr);

	/*
	 * Arrange interrupt line.
	 */
	rid = 0;
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE|RF_ACTIVE);
	if (sc->sc_irq == NULL) {
		device_printf(dev, "could not map interrupt\n");
		goto bad1;
	}
	/*
	 * NB: Network code assumes we are blocked with splimp()
	 *     so make sure the IRQ is mapped appropriately.
	 */
	if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, safe_intr, sc, &sc->sc_ih)) {
		device_printf(dev, "could not establish interrupt\n");
		goto bad2;
	}

	sc->sc_cid = crypto_get_driverid(dev, sizeof(struct safe_session),
	    CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0) {
		device_printf(dev, "could not get crypto driver id\n");
		goto bad3;
	}

	sc->sc_chiprev = READ_REG(sc, SAFE_DEVINFO) &
	    (SAFE_DEVINFO_REV_MAJ | SAFE_DEVINFO_REV_MIN);

	/*
	 * Setup DMA descriptor area.
	 */
	if (bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
			       1,			/* alignment */
			       SAFE_DMA_BOUNDARY,	/* boundary */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       SAFE_MAX_DMA,		/* maxsize */
			       SAFE_MAX_PART,		/* nsegments */
			       SAFE_MAX_SSIZE,		/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       NULL, NULL,		/* locking */
			       &sc->sc_srcdmat)) {
		device_printf(dev, "cannot allocate DMA tag\n");
		goto bad4;
	}
	if (bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
			       1,			/* alignment */
			       SAFE_MAX_DSIZE,		/* boundary */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       SAFE_MAX_DMA,		/* maxsize */
			       SAFE_MAX_PART,		/* nsegments */
			       SAFE_MAX_DSIZE,		/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       NULL, NULL,		/* locking */
			       &sc->sc_dstdmat)) {
		device_printf(dev, "cannot allocate DMA tag\n");
		goto bad4;
	}
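	/*
	 * NB: the two tags differ on purpose.  Source segments may be
	 * any size up to SAFE_MAX_SSIZE, but destination segments are
	 * bounded to SAFE_MAX_DSIZE because results are delivered
	 * through fixed-size scatter particles; see the
	 * SAFE_PE_PARTCFG setup in safe_init_board below.
	 */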
	/*
	 * Allocate packet engine descriptors.
	 */
	if (safe_dma_malloc(sc,
	    SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
	    &sc->sc_ringalloc, 0)) {
		device_printf(dev, "cannot allocate PE descriptor ring\n");
		bus_dma_tag_destroy(sc->sc_srcdmat);
		goto bad4;
	}
	/*
	 * Hookup the static portion of all our data structures.
	 */
	sc->sc_ring = (struct safe_ringentry *) sc->sc_ringalloc.dma_vaddr;
	sc->sc_ringtop = sc->sc_ring + SAFE_MAX_NQUEUE;
	sc->sc_front = sc->sc_ring;
	sc->sc_back = sc->sc_ring;
	raddr = sc->sc_ringalloc.dma_paddr;
	bzero(sc->sc_ring, SAFE_MAX_NQUEUE * sizeof(struct safe_ringentry));
	for (i = 0; i < SAFE_MAX_NQUEUE; i++) {
		struct safe_ringentry *re = &sc->sc_ring[i];

		re->re_desc.d_sa = raddr +
		    offsetof(struct safe_ringentry, re_sa);
		re->re_sa.sa_staterec = raddr +
		    offsetof(struct safe_ringentry, re_sastate);

		raddr += sizeof (struct safe_ringentry);
	}
	mtx_init(&sc->sc_ringmtx, device_get_nameunit(dev),
	    "packet engine ring", MTX_DEF);
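	/*
	 * The loop above precomputes, once, the bus addresses the
	 * hardware will use.  A minimal sketch of the arithmetic for
	 * ring slot i (all fields live in one contiguous DMA block):
	 *
	 *	base = sc->sc_ringalloc.dma_paddr;
	 *	sc->sc_ring[i].re_desc.d_sa ==
	 *	    base + i * sizeof(struct safe_ringentry) +
	 *	    offsetof(struct safe_ringentry, re_sa);
	 *
	 * so each descriptor points at the SA record packed beside it
	 * and never needs fixing up per request.
	 */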
	/*
	 * Allocate scatter and gather particle descriptors.
	 */
	if (safe_dma_malloc(sc, SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
	    &sc->sc_spalloc, 0)) {
		device_printf(dev, "cannot allocate source particle "
		    "descriptor ring\n");
		mtx_destroy(&sc->sc_ringmtx);
		safe_dma_free(sc, &sc->sc_ringalloc);
		bus_dma_tag_destroy(sc->sc_srcdmat);
		goto bad4;
	}
	sc->sc_spring = (struct safe_pdesc *) sc->sc_spalloc.dma_vaddr;
	sc->sc_springtop = sc->sc_spring + SAFE_TOTAL_SPART;
	sc->sc_spfree = sc->sc_spring;
	bzero(sc->sc_spring, SAFE_TOTAL_SPART * sizeof(struct safe_pdesc));

	if (safe_dma_malloc(sc, SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
	    &sc->sc_dpalloc, 0)) {
		device_printf(dev, "cannot allocate destination particle "
		    "descriptor ring\n");
		mtx_destroy(&sc->sc_ringmtx);
		safe_dma_free(sc, &sc->sc_spalloc);
		safe_dma_free(sc, &sc->sc_ringalloc);
		bus_dma_tag_destroy(sc->sc_dstdmat);
		goto bad4;
	}
	sc->sc_dpring = (struct safe_pdesc *) sc->sc_dpalloc.dma_vaddr;
	sc->sc_dpringtop = sc->sc_dpring + SAFE_TOTAL_DPART;
	sc->sc_dpfree = sc->sc_dpring;
	bzero(sc->sc_dpring, SAFE_TOTAL_DPART * sizeof(struct safe_pdesc));

	device_printf(sc->sc_dev, "%s", safe_partname(sc));

	sc->sc_devinfo = READ_REG(sc, SAFE_DEVINFO);
	if (sc->sc_devinfo & SAFE_DEVINFO_RNG) {
		sc->sc_flags |= SAFE_FLAGS_RNG;
		printf(" rng");
	}
	if (sc->sc_devinfo & SAFE_DEVINFO_PKEY) {
#if 0
		printf(" key");
		sc->sc_flags |= SAFE_FLAGS_KEY;
#endif
	}
	if (sc->sc_devinfo & SAFE_DEVINFO_DES) {
		printf(" des/3des");
	}
	if (sc->sc_devinfo & SAFE_DEVINFO_AES) {
		printf(" aes");
	}
	if (sc->sc_devinfo & SAFE_DEVINFO_MD5) {
		printf(" md5");
	}
	if (sc->sc_devinfo & SAFE_DEVINFO_SHA1) {
		printf(" sha1");
	}
	/* XXX other supported algorithms */
	printf("\n");

	safe_reset_board(sc);		/* reset h/w */
	safe_init_pciregs(dev);		/* init pci settings */
	safe_init_board(sc);		/* init h/w */

#ifndef SAFE_NO_RNG
	if (sc->sc_flags & SAFE_FLAGS_RNG) {
#ifdef SAFE_RNDTEST
		sc->sc_rndtest = rndtest_attach(dev);
		if (sc->sc_rndtest)
			sc->sc_harvest = rndtest_harvest;
		else
			sc->sc_harvest = default_harvest;
#else
		sc->sc_harvest = default_harvest;
#endif
		safe_rng_init(sc);

		callout_init(&sc->sc_rngto, 1);
		callout_reset(&sc->sc_rngto, hz*safe_rnginterval, safe_rng, sc);
	}
#endif /* SAFE_NO_RNG */
#ifdef SAFE_DEBUG
	safec = sc;			/* for use by hw.safe.dump */
#endif
	gone_in(16, "%s(4) is deprecated in 15.0 and removed in 16.0\n",
	    safe_driver.name);
	return (0);
bad4:
	crypto_unregister_all(sc->sc_cid);
bad3:
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
bad2:
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
bad1:
	bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr);
bad:
	return (ENXIO);
}
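/*
 * NB: the bad* labels in safe_attach() unwind in reverse order of
 * acquisition; resources acquired later (the DMA tags and the
 * descriptor/particle rings) are instead released explicitly at the
 * individual goto sites before jumping to bad4.
 */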
/*
 * Detach a device that successfully probed.
 */
static int
safe_detach(device_t dev)
{
	struct safe_softc *sc = device_get_softc(dev);

	/* XXX wait/abort active ops */

	WRITE_REG(sc, SAFE_HI_MASK, 0);		/* disable interrupts */

	callout_stop(&sc->sc_rngto);

	crypto_unregister_all(sc->sc_cid);

#ifdef SAFE_RNDTEST
	if (sc->sc_rndtest)
		rndtest_detach(sc->sc_rndtest);
#endif

	safe_cleanchip(sc);
	safe_dma_free(sc, &sc->sc_dpalloc);
	safe_dma_free(sc, &sc->sc_spalloc);
	mtx_destroy(&sc->sc_ringmtx);
	safe_dma_free(sc, &sc->sc_ringalloc);

	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);

	bus_dma_tag_destroy(sc->sc_srcdmat);
	bus_dma_tag_destroy(sc->sc_dstdmat);
	bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr);

	return (0);
}

/*
 * Stop all chip i/o so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
safe_shutdown(device_t dev)
{
#ifdef notyet
	safe_stop(device_get_softc(dev));
#endif
	return (0);
}

/*
 * Device suspend routine.
 */
static int
safe_suspend(device_t dev)
{
	struct safe_softc *sc = device_get_softc(dev);

#ifdef notyet
	/* XXX stop the device and save PCI settings */
#endif
	sc->sc_suspended = 1;

	return (0);
}

static int
safe_resume(device_t dev)
{
	struct safe_softc *sc = device_get_softc(dev);

#ifdef notyet
	/* XXX restore PCI settings and start the device */
#endif
	sc->sc_suspended = 0;
	return (0);
}

/*
 * SafeXcel Interrupt routine
 */
static void
safe_intr(void *arg)
{
	struct safe_softc *sc = arg;
	volatile u_int32_t stat;

	stat = READ_REG(sc, SAFE_HM_STAT);
	if (stat == 0)			/* shared irq, not for us */
		return;

	WRITE_REG(sc, SAFE_HI_CLR, stat);	/* IACK */

	if ((stat & SAFE_INT_PE_DDONE)) {
		/*
		 * Descriptor(s) done; scan the ring and
		 * process completed operations.
		 */
		mtx_lock(&sc->sc_ringmtx);
		while (sc->sc_back != sc->sc_front) {
			struct safe_ringentry *re = sc->sc_back;
#ifdef SAFE_DEBUG
			if (safe_debug) {
				safe_dump_ringstate(sc, __func__);
				safe_dump_request(sc, __func__, re);
			}
#endif
			/*
			 * safe_process marks ring entries that were allocated
			 * but not used with a csr of zero.  This ensures the
			 * ring front pointer never needs to be set backwards
			 * in the event that an entry is allocated but not used
			 * because of a setup error.
			 */
			if (re->re_desc.d_csr != 0) {
				if (!SAFE_PE_CSR_IS_DONE(re->re_desc.d_csr))
					break;
				if (!SAFE_PE_LEN_IS_DONE(re->re_desc.d_len))
					break;
				sc->sc_nqchip--;
				safe_callback(sc, re);
			}
			if (++(sc->sc_back) == sc->sc_ringtop)
				sc->sc_back = sc->sc_ring;
		}
		mtx_unlock(&sc->sc_ringmtx);
	}

	/*
	 * Check to see if we got any DMA Error
	 */
	if (stat & SAFE_INT_PE_ERROR) {
		DPRINTF(("dmaerr dmastat %08x\n",
		    READ_REG(sc, SAFE_PE_DMASTAT)));
		safestats.st_dmaerr++;
		safe_totalreset(sc);
#if 0
		safe_feed(sc);
#endif
	}

	if (sc->sc_needwakeup) {		/* XXX check high watermark */
		int wakeup = sc->sc_needwakeup & CRYPTO_SYMQ;
		DPRINTF(("%s: wakeup crypto %x\n", __func__,
		    sc->sc_needwakeup));
		sc->sc_needwakeup &= ~wakeup;
		crypto_unblock(sc->sc_cid, wakeup);
	}
}
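/*
 * Ring discipline, for reference: sc_front is advanced only by
 * safe_process() when a request is queued and sc_back only by the
 * scan above as completions are reaped, both under sc_ringmtx; the
 * ring is empty when the pointers meet and full when advancing
 * sc_front would make them meet while sc_nqchip != 0.
 */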
/*
 * safe_feed() - post a request to the chip
 */
static void
safe_feed(struct safe_softc *sc, struct safe_ringentry *re)
{
	bus_dmamap_sync(sc->sc_srcdmat, re->re_src_map, BUS_DMASYNC_PREWRITE);
	if (re->re_dst_map != NULL)
		bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map,
		    BUS_DMASYNC_PREREAD);
	/* XXX have no smaller granularity */
	safe_dma_sync(&sc->sc_ringalloc,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	safe_dma_sync(&sc->sc_spalloc, BUS_DMASYNC_PREWRITE);
	safe_dma_sync(&sc->sc_dpalloc, BUS_DMASYNC_PREWRITE);

#ifdef SAFE_DEBUG
	if (safe_debug) {
		safe_dump_ringstate(sc, __func__);
		safe_dump_request(sc, __func__, re);
	}
#endif
	sc->sc_nqchip++;
	if (sc->sc_nqchip > safestats.st_maxqchip)
		safestats.st_maxqchip = sc->sc_nqchip;
	/* poke h/w to check descriptor ring, any value can be written */
	WRITE_REG(sc, SAFE_HI_RD_DESCR, 0);
}

#define	N(a)	(sizeof(a) / sizeof (a[0]))
static void
safe_setup_enckey(struct safe_session *ses, const void *key)
{
	int i;

	bcopy(key, ses->ses_key, ses->ses_klen);

	/* PE is little-endian, ensure proper byte order */
	for (i = 0; i < N(ses->ses_key); i++)
		ses->ses_key[i] = htole32(ses->ses_key[i]);
}

static void
safe_setup_mackey(struct safe_session *ses, int algo, const uint8_t *key,
    int klen)
{
	SHA1_CTX sha1ctx;
	int i;

	hmac_init_ipad(&auth_hash_hmac_sha1, key, klen, &sha1ctx);
	bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32));

	hmac_init_opad(&auth_hash_hmac_sha1, key, klen, &sha1ctx);
	bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32));

	explicit_bzero(&sha1ctx, sizeof(sha1ctx));

	/* PE is little-endian, ensure proper byte order */
	for (i = 0; i < N(ses->ses_hminner); i++) {
		ses->ses_hminner[i] = htole32(ses->ses_hminner[i]);
		ses->ses_hmouter[i] = htole32(ses->ses_hmouter[i]);
	}
}
#undef N
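/*
 * Background on the precompute above: HMAC(K, m) is defined as
 * H((K ^ opad) || H((K ^ ipad) || m)).  hmac_init_ipad() and
 * hmac_init_opad() run SHA-1 over just the padded-key block, and the
 * saved intermediate states (ses_hminner/ses_hmouter) are what the
 * packet engine resumes from for each request, so the raw key never
 * has to be handed to the hardware per-packet.
 */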
static bool
safe_auth_supported(struct safe_softc *sc,
    const struct crypto_session_params *csp)
{

	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
		if ((sc->sc_devinfo & SAFE_DEVINFO_SHA1) == 0)
			return (false);
		break;
	default:
		return (false);
	}
	return (true);
}

static bool
safe_cipher_supported(struct safe_softc *sc,
    const struct crypto_session_params *csp)
{

	switch (csp->csp_cipher_alg) {
	case CRYPTO_AES_CBC:
		if ((sc->sc_devinfo & SAFE_DEVINFO_AES) == 0)
			return (false);
		if (csp->csp_ivlen != 16)
			return (false);
		if (csp->csp_cipher_klen != 16 &&
		    csp->csp_cipher_klen != 24 &&
		    csp->csp_cipher_klen != 32)
			return (false);
		break;
	default:
		/* NB: mirror safe_auth_supported; reject unknown ciphers */
		return (false);
	}
	return (true);
}

static int
safe_probesession(device_t dev, const struct crypto_session_params *csp)
{
	struct safe_softc *sc = device_get_softc(dev);

	if (csp->csp_flags != 0)
		return (EINVAL);
	switch (csp->csp_mode) {
	case CSP_MODE_DIGEST:
		if (!safe_auth_supported(sc, csp))
			return (EINVAL);
		break;
	case CSP_MODE_CIPHER:
		if (!safe_cipher_supported(sc, csp))
			return (EINVAL);
		break;
	case CSP_MODE_ETA:
		if (!safe_auth_supported(sc, csp) ||
		    !safe_cipher_supported(sc, csp))
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	return (CRYPTODEV_PROBE_HARDWARE);
}

/*
 * Allocate a new 'session'.
 */
static int
safe_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
	struct safe_session *ses;

	ses = crypto_get_driver_session(cses);
	if (csp->csp_cipher_alg != 0) {
		ses->ses_klen = csp->csp_cipher_klen;
		if (csp->csp_cipher_key != NULL)
			safe_setup_enckey(ses, csp->csp_cipher_key);
	}

	if (csp->csp_auth_alg != 0) {
		ses->ses_mlen = csp->csp_auth_mlen;
		if (ses->ses_mlen == 0) {
			ses->ses_mlen = SHA1_HASH_LEN;
		}

		if (csp->csp_auth_key != NULL) {
			safe_setup_mackey(ses, csp->csp_auth_alg,
			    csp->csp_auth_key, csp->csp_auth_klen);
		}
	}

	return (0);
}
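/*
 * A minimal sketch (not driver code) of the session parameters this
 * driver accepts, assuming an opencrypto consumer:
 *
 *	struct crypto_session_params csp = {
 *		.csp_mode = CSP_MODE_ETA,
 *		.csp_cipher_alg = CRYPTO_AES_CBC,
 *		.csp_cipher_klen = 16,		(24 and 32 also accepted)
 *		.csp_ivlen = 16,
 *		.csp_auth_alg = CRYPTO_SHA1_HMAC,
 *	};
 *
 * crypto_newsession(&cses, &csp, CRYPTOCAP_F_HARDWARE) would then
 * reach safe_probesession()/safe_newsession() above.
 */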
static void
safe_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, int error)
{
	struct safe_operand *op = arg;

	DPRINTF(("%s: nsegs %d error %d\n", __func__,
	    nsegs, error));
	if (error != 0)
		return;
	op->nsegs = nsegs;
	bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
}

static int
safe_process(device_t dev, struct cryptop *crp, int hint)
{
	struct safe_softc *sc = device_get_softc(dev);
	const struct crypto_session_params *csp;
	int err = 0, i, nicealign, uniform;
	int bypass, oplen;
	int16_t coffset;
	struct safe_session *ses;
	struct safe_ringentry *re;
	struct safe_sarec *sa;
	struct safe_pdesc *pd;
	u_int32_t cmd0, cmd1, staterec;

	mtx_lock(&sc->sc_ringmtx);
	if (sc->sc_front == sc->sc_back && sc->sc_nqchip != 0) {
		safestats.st_ringfull++;
		sc->sc_needwakeup |= CRYPTO_SYMQ;
		mtx_unlock(&sc->sc_ringmtx);
		return (ERESTART);
	}
	re = sc->sc_front;

	staterec = re->re_sa.sa_staterec;	/* save */
	/* NB: zero everything but the PE descriptor */
	bzero(&re->re_sa, sizeof(struct safe_ringentry) - sizeof(re->re_desc));
	re->re_sa.sa_staterec = staterec;	/* restore */

	re->re_crp = crp;

	sa = &re->re_sa;
	ses = crypto_get_driver_session(crp->crp_session);
	csp = crypto_get_params(crp->crp_session);

	cmd0 = SAFE_SA_CMD0_BASIC;		/* basic group operation */
	cmd1 = 0;
	switch (csp->csp_mode) {
	case CSP_MODE_DIGEST:
		cmd0 |= SAFE_SA_CMD0_OP_HASH;
		break;
	case CSP_MODE_CIPHER:
		cmd0 |= SAFE_SA_CMD0_OP_CRYPT;
		break;
	case CSP_MODE_ETA:
		cmd0 |= SAFE_SA_CMD0_OP_BOTH;
		break;
	}

	if (csp->csp_cipher_alg != 0) {
		if (crp->crp_cipher_key != NULL)
			safe_setup_enckey(ses, crp->crp_cipher_key);

		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_CBC:
			cmd0 |= SAFE_SA_CMD0_AES;
			cmd1 |= SAFE_SA_CMD1_CBC;
			if (ses->ses_klen * 8 == 128)
				cmd1 |= SAFE_SA_CMD1_AES128;
			else if (ses->ses_klen * 8 == 192)
				cmd1 |= SAFE_SA_CMD1_AES192;
			else
				cmd1 |= SAFE_SA_CMD1_AES256;
		}

		/*
		 * Setup encrypt/decrypt state.  When using basic ops
		 * we can't use an inline IV because hash/crypt offset
		 * must be from the end of the IV to the start of the
		 * crypt data and this leaves out the preceding header
		 * from the hash calculation.  Instead we place the IV
		 * in the state record and set the hash/crypt offset to
		 * copy both the header+IV.
		 */
		crypto_read_iv(crp, re->re_sastate.sa_saved_iv);
		cmd0 |= SAFE_SA_CMD0_IVLD_STATE;

		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			cmd0 |= SAFE_SA_CMD0_OUTBOUND;

			/*
			 * XXX: I suspect we don't need this since we
			 * don't save the returned IV.
			 */
			cmd0 |= SAFE_SA_CMD0_SAVEIV;
		} else {
			cmd0 |= SAFE_SA_CMD0_INBOUND;
		}
		/*
		 * For basic encryption use the zero pad algorithm.
		 * This pads results to an 8-byte boundary and
		 * suppresses padding verification for inbound (i.e.
		 * decrypt) operations.
		 *
		 * NB: Not sure if the 8-byte pad boundary is a problem.
		 */
		cmd0 |= SAFE_SA_CMD0_PAD_ZERO;

		/* XXX assert key bufs have the same size */
		bcopy(ses->ses_key, sa->sa_key, sizeof(sa->sa_key));
	}

	if (csp->csp_auth_alg != 0) {
		if (crp->crp_auth_key != NULL) {
			safe_setup_mackey(ses, csp->csp_auth_alg,
			    crp->crp_auth_key, csp->csp_auth_klen);
		}

		switch (csp->csp_auth_alg) {
		case CRYPTO_SHA1_HMAC:
			cmd0 |= SAFE_SA_CMD0_SHA1;
			cmd1 |= SAFE_SA_CMD1_HMAC;	/* NB: enable HMAC */
			break;
		}

		/*
		 * Digest data is loaded from the SA and the hash
		 * result is saved to the state block where we
		 * retrieve it for return to the caller.
		 */
		/* XXX assert digest bufs have the same size */
		bcopy(ses->ses_hminner, sa->sa_indigest,
		    sizeof(sa->sa_indigest));
		bcopy(ses->ses_hmouter, sa->sa_outdigest,
		    sizeof(sa->sa_outdigest));

		cmd0 |= SAFE_SA_CMD0_HSLD_SA | SAFE_SA_CMD0_SAVEHASH;
		re->re_flags |= SAFE_QFLAGS_COPYOUTICV;
	}

	if (csp->csp_mode == CSP_MODE_ETA) {
		/*
		 * The driver only supports ETA requests where there
		 * is no gap between the AAD and payload.
		 */
		if (crp->crp_aad_length != 0 &&
		    crp->crp_aad_start + crp->crp_aad_length !=
		    crp->crp_payload_start) {
			safestats.st_lenmismatch++;
			err = EINVAL;
			goto errout;
		}
		if (crp->crp_aad_length != 0)
			bypass = crp->crp_aad_start;
		else
			bypass = crp->crp_payload_start;
		coffset = crp->crp_aad_length;
		oplen = crp->crp_payload_start + crp->crp_payload_length;
#ifdef SAFE_DEBUG
		if (safe_debug) {
			printf("AAD: skip %d, len %d, digest %d\n",
			    crp->crp_aad_start, crp->crp_aad_length,
			    crp->crp_digest_start);
			printf("payload: skip %d, len %d, IV %d\n",
			    crp->crp_payload_start, crp->crp_payload_length,
			    crp->crp_iv_start);
			printf("bypass %d coffset %d oplen %d\n",
			    bypass, coffset, oplen);
		}
#endif
		if (coffset & 3) {	/* offset must be 32-bit aligned */
			DPRINTF(("%s: coffset %u misaligned\n",
			    __func__, coffset));
			safestats.st_coffmisaligned++;
			err = EINVAL;
			goto errout;
		}
		coffset >>= 2;
		if (coffset > 255) {	/* offset must be <256 dwords */
			DPRINTF(("%s: coffset %u too big\n",
			    __func__, coffset));
			safestats.st_cofftoobig++;
			err = EINVAL;
			goto errout;
		}
		/*
		 * Tell the hardware to copy the header to the output.
		 * The header is defined as the data from the end of
		 * the bypass to the start of data to be encrypted.
		 * Typically this is the inline IV.  Note that you need
		 * to do this even if src+dst are the same; it appears
		 * that w/o this bit the crypted data is written
		 * immediately after the bypass data.
		 */
		cmd1 |= SAFE_SA_CMD1_HDRCOPY;
		/*
		 * Disable IP header mutable bit handling.  This is
		 * needed to get correct HMAC calculations.
		 */
		cmd1 |= SAFE_SA_CMD1_MUTABLE;
	} else {
		bypass = crp->crp_payload_start;
		oplen = bypass + crp->crp_payload_length;
		coffset = 0;
	}
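	/*
	 * Worked example of the arithmetic above, assuming an ETA
	 * request with 16 bytes of AAD at offset 0 followed by a
	 * 64-byte payload: bypass = 0 (DMA starts at the AAD),
	 * coffset = 16 >> 2 = 4 dwords (crypt starts that far past
	 * the hash start), and oplen = 16 + 64 = 80 bytes processed.
	 */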
	/* XXX verify multiple of 4 when using s/g */
	if (bypass > 96) {		/* bypass offset must be <= 96 bytes */
		DPRINTF(("%s: bypass %u too big\n", __func__, bypass));
		safestats.st_bypasstoobig++;
		err = EINVAL;
		goto errout;
	}

	if (bus_dmamap_create(sc->sc_srcdmat, BUS_DMA_NOWAIT, &re->re_src_map)) {
		safestats.st_nomap++;
		err = ENOMEM;
		goto errout;
	}
	if (bus_dmamap_load_crp(sc->sc_srcdmat, re->re_src_map, crp, safe_op_cb,
	    &re->re_src, BUS_DMA_NOWAIT) != 0) {
		bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
		re->re_src_map = NULL;
		safestats.st_noload++;
		err = ENOMEM;
		goto errout;
	}
	re->re_src_mapsize = crypto_buffer_len(&crp->crp_buf);
	nicealign = safe_dmamap_aligned(&re->re_src);
	uniform = safe_dmamap_uniform(&re->re_src);

	DPRINTF(("src nicealign %u uniform %u nsegs %u\n",
	    nicealign, uniform, re->re_src.nsegs));
	if (re->re_src.nsegs > 1) {
		re->re_desc.d_src = sc->sc_spalloc.dma_paddr +
		    ((caddr_t) sc->sc_spfree - (caddr_t) sc->sc_spring);
		for (i = 0; i < re->re_src_nsegs; i++) {
			/* NB: no need to check if there's space */
			pd = sc->sc_spfree;
			if (++(sc->sc_spfree) == sc->sc_springtop)
				sc->sc_spfree = sc->sc_spring;

			KASSERT((pd->pd_flags&3) == 0 ||
			    (pd->pd_flags&3) == SAFE_PD_DONE,
			    ("bogus source particle descriptor; flags %x",
			    pd->pd_flags));
			pd->pd_addr = re->re_src_segs[i].ds_addr;
			pd->pd_size = re->re_src_segs[i].ds_len;
			pd->pd_flags = SAFE_PD_READY;
		}
		cmd0 |= SAFE_SA_CMD0_IGATHER;
	} else {
		/*
		 * No need for gather, reference the operand directly.
		 */
		re->re_desc.d_src = re->re_src_segs[0].ds_addr;
	}
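	/*
	 * NB: the "no need to check if there's space" claim above holds
	 * because the gather ring is sized for the worst case
	 * (presumably SAFE_TOTAL_SPART provides SAFE_MAX_PART particles
	 * per ring entry), so a free ring slot implies free particles.
	 */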
	if (csp->csp_mode == CSP_MODE_DIGEST) {
		/*
		 * Hash op; no destination needed.
		 */
	} else {
		if (nicealign && uniform == 1) {
			/*
			 * Source layout is suitable for direct
			 * sharing of the DMA map and segment list.
			 */
			re->re_dst = re->re_src;
		} else if (nicealign && uniform == 2) {
			/*
			 * The source is properly aligned but requires a
			 * different particle list to handle DMA of the
			 * result.  Create a new map and do the load to
			 * create the segment list.  The particle
			 * descriptor setup code below will handle the
			 * rest.
			 */
			if (bus_dmamap_create(sc->sc_dstdmat, BUS_DMA_NOWAIT,
			    &re->re_dst_map)) {
				safestats.st_nomap++;
				err = ENOMEM;
				goto errout;
			}
			if (bus_dmamap_load_crp(sc->sc_dstdmat, re->re_dst_map,
			    crp, safe_op_cb, &re->re_dst, BUS_DMA_NOWAIT) !=
			    0) {
				bus_dmamap_destroy(sc->sc_dstdmat,
				    re->re_dst_map);
				re->re_dst_map = NULL;
				safestats.st_noload++;
				err = ENOMEM;
				goto errout;
			}
		} else if (crp->crp_buf.cb_type == CRYPTO_BUF_MBUF) {
			int totlen, len;
			struct mbuf *m, *top, **mp;

			/*
			 * DMA constraints require that we allocate a
			 * new mbuf chain for the destination.  We
			 * allocate an entire new set of mbufs of
			 * optimal/required size and then tell the
			 * hardware to copy any bits that are not
			 * created as a byproduct of the operation.
			 */
			if (!nicealign)
				safestats.st_unaligned++;
			if (!uniform)
				safestats.st_notuniform++;
			totlen = re->re_src_mapsize;
			if (crp->crp_buf.cb_mbuf->m_flags & M_PKTHDR) {
				len = MHLEN;
				MGETHDR(m, M_NOWAIT, MT_DATA);
				if (m && !m_dup_pkthdr(m, crp->crp_buf.cb_mbuf,
				    M_NOWAIT)) {
					m_free(m);
					m = NULL;
				}
			} else {
				len = MLEN;
				MGET(m, M_NOWAIT, MT_DATA);
			}
			if (m == NULL) {
				safestats.st_nombuf++;
				err = sc->sc_nqchip ? ERESTART : ENOMEM;
				goto errout;
			}
			if (totlen >= MINCLSIZE) {
				if (!(MCLGET(m, M_NOWAIT))) {
					m_free(m);
					safestats.st_nomcl++;
					err = sc->sc_nqchip ?
					    ERESTART : ENOMEM;
					goto errout;
				}
				len = MCLBYTES;
			}
			m->m_len = len;
			top = NULL;
			mp = &top;

			while (totlen > 0) {
				if (top) {
					MGET(m, M_NOWAIT, MT_DATA);
					if (m == NULL) {
						m_freem(top);
						safestats.st_nombuf++;
						err = sc->sc_nqchip ?
						    ERESTART : ENOMEM;
						goto errout;
					}
					len = MLEN;
				}
				if (top && totlen >= MINCLSIZE) {
					if (!(MCLGET(m, M_NOWAIT))) {
						*mp = m;
						m_freem(top);
						safestats.st_nomcl++;
						err = sc->sc_nqchip ?
						    ERESTART : ENOMEM;
						goto errout;
					}
					len = MCLBYTES;
				}
				m->m_len = len = min(totlen, len);
				totlen -= len;
				*mp = m;
				mp = &m->m_next;
			}
			re->re_dst_m = top;
			if (bus_dmamap_create(sc->sc_dstdmat,
			    BUS_DMA_NOWAIT, &re->re_dst_map) != 0) {
				safestats.st_nomap++;
				err = ENOMEM;
				goto errout;
			}
			if (bus_dmamap_load_mbuf_sg(sc->sc_dstdmat,
			    re->re_dst_map, top, re->re_dst_segs,
			    &re->re_dst_nsegs, 0) != 0) {
				bus_dmamap_destroy(sc->sc_dstdmat,
				    re->re_dst_map);
				re->re_dst_map = NULL;
				safestats.st_noload++;
				err = ENOMEM;
				goto errout;
			}
			re->re_dst_mapsize = re->re_src_mapsize;
			if (re->re_src.mapsize > oplen) {
				/*
				 * There's data following what the
				 * hardware will copy for us.  If this
				 * isn't just the ICV (that's going to
				 * be written on completion), copy it
				 * to the new mbufs.
				 */
				if (!(csp->csp_mode == CSP_MODE_ETA &&
				    (re->re_src.mapsize-oplen) == ses->ses_mlen &&
				    crp->crp_digest_start == oplen))
					safe_mcopy(crp->crp_buf.cb_mbuf,
					    re->re_dst_m, oplen);
				else
					safestats.st_noicvcopy++;
			}
		} else {
			if (!nicealign) {
				safestats.st_iovmisaligned++;
				err = EINVAL;
				goto errout;
			} else {
				/*
				 * There's no way to handle the DMA
				 * requirements with this uio.  We
				 * could create a separate DMA area for
				 * the result and then copy it back,
				 * but for now we just bail and return
				 * an error.  Note that uio requests
				 * > SAFE_MAX_DSIZE are handled because
				 * the DMA map and segment list for the
				 * destination will result in a
				 * destination particle list that does
				 * the necessary scatter DMA.
				 */
				safestats.st_iovnotuniform++;
				err = EINVAL;
				goto errout;
			}
		}

		if (re->re_dst.nsegs > 1) {
			re->re_desc.d_dst = sc->sc_dpalloc.dma_paddr +
			    ((caddr_t) sc->sc_dpfree - (caddr_t) sc->sc_dpring);
			for (i = 0; i < re->re_dst_nsegs; i++) {
				pd = sc->sc_dpfree;
				KASSERT((pd->pd_flags&3) == 0 ||
				    (pd->pd_flags&3) == SAFE_PD_DONE,
				    ("bogus dest particle descriptor; flags %x",
				    pd->pd_flags));
				if (++(sc->sc_dpfree) == sc->sc_dpringtop)
					sc->sc_dpfree = sc->sc_dpring;
				pd->pd_addr = re->re_dst_segs[i].ds_addr;
				pd->pd_flags = SAFE_PD_READY;
			}
			cmd0 |= SAFE_SA_CMD0_OSCATTER;
		} else {
			/*
			 * No need for scatter, reference the operand directly.
			 */
			re->re_desc.d_dst = re->re_dst_segs[0].ds_addr;
		}
	}
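	/*
	 * NB: in the uniform == 1 case above re_dst aliases re_src,
	 * which is why the completion and error paths only unload and
	 * destroy re_dst_map when it differs from re_src_map.
	 */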
	/*
	 * All done with setup; fill in the SA command words
	 * and the packet engine descriptor.  The operation
	 * is now ready for submission to the hardware.
	 */
	sa->sa_cmd0 = cmd0 | SAFE_SA_CMD0_IPCI | SAFE_SA_CMD0_OPCI;
	sa->sa_cmd1 = cmd1
		    | (coffset << SAFE_SA_CMD1_OFFSET_S)
		    | SAFE_SA_CMD1_SAREV1	/* Rev 1 SA data structure */
		    | SAFE_SA_CMD1_SRPCI
		    ;
	/*
	 * NB: the order of writes is important here.  In case the
	 * chip is scanning the ring because of an outstanding request
	 * it might nab this one too.  In that case we need to make
	 * sure the setup is complete before we write the length
	 * field of the descriptor as it signals the descriptor is
	 * ready for processing.
	 */
	re->re_desc.d_csr = SAFE_PE_CSR_READY | SAFE_PE_CSR_SAPCI;
	if (csp->csp_auth_alg != 0)
		re->re_desc.d_csr |= SAFE_PE_CSR_LOADSA | SAFE_PE_CSR_HASHFINAL;
	re->re_desc.d_len = oplen
			  | SAFE_PE_LEN_READY
			  | (bypass << SAFE_PE_LEN_BYPASS_S)
			  ;

	safestats.st_ipackets++;
	safestats.st_ibytes += oplen;

	if (++(sc->sc_front) == sc->sc_ringtop)
		sc->sc_front = sc->sc_ring;

	/* XXX honor batching */
	safe_feed(sc, re);
	mtx_unlock(&sc->sc_ringmtx);
	return (0);

errout:
	if (re->re_dst_m != NULL)
		m_freem(re->re_dst_m);

	if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) {
		bus_dmamap_unload(sc->sc_dstdmat, re->re_dst_map);
		bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map);
	}
	if (re->re_src_map != NULL) {
		bus_dmamap_unload(sc->sc_srcdmat, re->re_src_map);
		bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
	}
	mtx_unlock(&sc->sc_ringmtx);
	if (err != ERESTART) {
		crp->crp_etype = err;
		crypto_done(crp);
		err = 0;
	} else {
		sc->sc_needwakeup |= CRYPTO_SYMQ;
	}
	return (err);
}
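/*
 * Request lifecycle, for reference: safe_process() builds the ring
 * entry and calls safe_feed() to hand it to the chip; the completion
 * interrupt lands in safe_intr(), which reaps finished entries and
 * calls safe_callback() below to sync/unload the DMA maps and hand
 * the result back to opencrypto via crypto_done().
 */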
static void
safe_callback(struct safe_softc *sc, struct safe_ringentry *re)
{
	const struct crypto_session_params *csp;
	struct cryptop *crp = (struct cryptop *)re->re_crp;
	struct safe_session *ses;
	uint8_t hash[HASH_MAX_LEN];

	ses = crypto_get_driver_session(crp->crp_session);
	csp = crypto_get_params(crp->crp_session);

	safestats.st_opackets++;
	safestats.st_obytes += re->re_dst.mapsize;

	safe_dma_sync(&sc->sc_ringalloc,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	if (re->re_desc.d_csr & SAFE_PE_CSR_STATUS) {
		device_printf(sc->sc_dev, "csr 0x%x cmd0 0x%x cmd1 0x%x\n",
		    re->re_desc.d_csr,
		    re->re_sa.sa_cmd0, re->re_sa.sa_cmd1);
		safestats.st_peoperr++;
		crp->crp_etype = EIO;		/* something more meaningful? */
	}

	/*
	 * XXX: Should crp_buf.cb_mbuf be updated to re->re_dst_m if
	 * it is non-NULL?
	 */

	if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) {
		bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dstdmat, re->re_dst_map);
		bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map);
	}
	bus_dmamap_sync(sc->sc_srcdmat, re->re_src_map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_srcdmat, re->re_src_map);
	bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);

	if (re->re_flags & SAFE_QFLAGS_COPYOUTICV) {
		if (csp->csp_auth_alg == CRYPTO_SHA1_HMAC) {
			/*
			 * SHA-1 ICVs are byte-swapped; fix 'em up
			 * before copying them to their destination.
			 */
			re->re_sastate.sa_saved_indigest[0] =
			    bswap32(re->re_sastate.sa_saved_indigest[0]);
			re->re_sastate.sa_saved_indigest[1] =
			    bswap32(re->re_sastate.sa_saved_indigest[1]);
			re->re_sastate.sa_saved_indigest[2] =
			    bswap32(re->re_sastate.sa_saved_indigest[2]);
		}

		if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
			crypto_copydata(crp, crp->crp_digest_start,
			    ses->ses_mlen, hash);
			if (timingsafe_bcmp(re->re_sastate.sa_saved_indigest,
			    hash, ses->ses_mlen) != 0)
				crp->crp_etype = EBADMSG;
		} else
			crypto_copyback(crp, crp->crp_digest_start,
			    ses->ses_mlen, re->re_sastate.sa_saved_indigest);
	}
	crypto_done(crp);
}
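/*
 * NB: the PE stores hash state little-endian while SHA-1 digests are
 * defined big-endian, hence the bswap32() fixups above (bswap32 may
 * fall back to NTOHL, see the top of this file).  Only the first 96
 * bits are swapped, which appears to match the truncated 96-bit ICV
 * usage common in IPsec.
 */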
/*
 * Copy all data past offset from srcm to dstm.
 */
static void
safe_mcopy(struct mbuf *srcm, struct mbuf *dstm, u_int offset)
{
	u_int j, dlen, slen;
	caddr_t dptr, sptr;

	/*
	 * Advance src and dst to offset.
	 */
	j = offset;
	while (j >= srcm->m_len) {
		j -= srcm->m_len;
		srcm = srcm->m_next;
		if (srcm == NULL)
			return;
	}
	sptr = mtod(srcm, caddr_t) + j;
	slen = srcm->m_len - j;

	j = offset;
	while (j >= dstm->m_len) {
		j -= dstm->m_len;
		dstm = dstm->m_next;
		if (dstm == NULL)
			return;
	}
	dptr = mtod(dstm, caddr_t) + j;
	dlen = dstm->m_len - j;

	/*
	 * Copy everything that remains.
	 */
	for (;;) {
		j = min(slen, dlen);
		bcopy(sptr, dptr, j);
		if (slen == j) {
			srcm = srcm->m_next;
			if (srcm == NULL)
				return;
			sptr = srcm->m_data;
			slen = srcm->m_len;
		} else
			sptr += j, slen -= j;
		if (dlen == j) {
			dstm = dstm->m_next;
			if (dstm == NULL)
				return;
			dptr = dstm->m_data;
			dlen = dstm->m_len;
		} else
			dptr += j, dlen -= j;
	}
}

#ifndef SAFE_NO_RNG
#define	SAFE_RNG_MAXWAIT	1000

static void
safe_rng_init(struct safe_softc *sc)
{
	u_int32_t w, v;
	int i;

	WRITE_REG(sc, SAFE_RNG_CTRL, 0);
	/* use default value according to the manual */
	WRITE_REG(sc, SAFE_RNG_CNFG, 0x834);	/* magic from SafeNet */
	WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);

	/*
	 * There is a bug in rev 1.0 of the 1140 that when the RNG
	 * is brought out of reset the ready status flag does not
	 * work until the RNG has finished its internal initialization.
	 *
	 * So in order to determine the device is through its
	 * initialization we must read the data register, using the
	 * status reg in the read in case it is initialized.  Then read
	 * the data register until it changes from the first read.
	 * Once it changes read the data register until it changes
	 * again.  At this time the RNG is considered initialized.
	 * This could take between 750ms - 1000ms in time.
	 */
	i = 0;
	w = READ_REG(sc, SAFE_RNG_OUT);
	do {
		v = READ_REG(sc, SAFE_RNG_OUT);
		if (v != w) {
			w = v;
			break;
		}
		DELAY(10);
	} while (++i < SAFE_RNG_MAXWAIT);

	/* Wait until data changes again */
	i = 0;
	do {
		v = READ_REG(sc, SAFE_RNG_OUT);
		if (v != w)
			break;
		DELAY(10);
	} while (++i < SAFE_RNG_MAXWAIT);
}
static __inline void
safe_rng_disable_short_cycle(struct safe_softc *sc)
{
	WRITE_REG(sc, SAFE_RNG_CTRL,
	    READ_REG(sc, SAFE_RNG_CTRL) &~ SAFE_RNG_CTRL_SHORTEN);
}

static __inline void
safe_rng_enable_short_cycle(struct safe_softc *sc)
{
	WRITE_REG(sc, SAFE_RNG_CTRL,
	    READ_REG(sc, SAFE_RNG_CTRL) | SAFE_RNG_CTRL_SHORTEN);
}

static __inline u_int32_t
safe_rng_read(struct safe_softc *sc)
{
	int i;

	i = 0;
	while (READ_REG(sc, SAFE_RNG_STAT) != 0 && ++i < SAFE_RNG_MAXWAIT)
		;
	return READ_REG(sc, SAFE_RNG_OUT);
}

static void
safe_rng(void *arg)
{
	struct safe_softc *sc = arg;
	u_int32_t buf[SAFE_RNG_MAXBUFSIZ];	/* NB: maybe move to softc */
	u_int maxwords;
	int i;

	safestats.st_rng++;
	/*
	 * Fetch the next block of data.
	 */
	maxwords = safe_rngbufsize;
	if (maxwords > SAFE_RNG_MAXBUFSIZ)
		maxwords = SAFE_RNG_MAXBUFSIZ;
retry:
	for (i = 0; i < maxwords; i++)
		buf[i] = safe_rng_read(sc);
	/*
	 * Check the comparator alarm count and reset the h/w if
	 * it exceeds our threshold.  This guards against the
	 * hardware oscillators resonating with external signals.
	 */
	if (READ_REG(sc, SAFE_RNG_ALM_CNT) > safe_rngmaxalarm) {
		u_int32_t freq_inc, w;

		DPRINTF(("%s: alarm count %u exceeds threshold %u\n", __func__,
		    READ_REG(sc, SAFE_RNG_ALM_CNT), safe_rngmaxalarm));
		safestats.st_rngalarm++;
		safe_rng_enable_short_cycle(sc);
		freq_inc = 18;
		for (i = 0; i < 64; i++) {
			w = READ_REG(sc, SAFE_RNG_CNFG);
			freq_inc = ((w + freq_inc) & 0x3fL);
			w = ((w & ~0x3fL) | freq_inc);
			WRITE_REG(sc, SAFE_RNG_CNFG, w);

			WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);

			(void) safe_rng_read(sc);
			DELAY(25);

			if (READ_REG(sc, SAFE_RNG_ALM_CNT) == 0) {
				safe_rng_disable_short_cycle(sc);
				goto retry;
			}
			freq_inc = 1;
		}
		safe_rng_disable_short_cycle(sc);
	} else
		WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);

	(*sc->sc_harvest)(sc->sc_rndtest, buf, maxwords*sizeof (u_int32_t));
	callout_reset(&sc->sc_rngto,
	    hz * (safe_rnginterval ? safe_rnginterval : 1), safe_rng, sc);
}
#endif /* SAFE_NO_RNG */
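/*
 * Notes on the retune loop in safe_rng() above: the low 6 bits of
 * SAFE_RNG_CNFG trim the oscillator frequency.  On an alarm the code
 * shortens the self-test cycle, steps the trim (first by 18, then by
 * 1 per iteration, modulo 64), and watches the alarm counter; as soon
 * as a trim value produces no alarms it restores normal cycling and
 * refetches the whole buffer.
 */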
static void
safe_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr = (bus_addr_t*) arg;
	*paddr = segs->ds_addr;
}

static int
safe_dma_malloc(
	struct safe_softc *sc,
	bus_size_t size,
	struct safe_dma_alloc *dma,
	int mapflags
)
{
	int r;

	r = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
			       sizeof(u_int32_t), 0,	/* alignment, bounds */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       size,			/* maxsize */
			       1,			/* nsegments */
			       size,			/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       NULL, NULL,		/* locking */
			       &dma->dma_tag);
	if (r != 0) {
		device_printf(sc->sc_dev, "safe_dma_malloc: "
		    "bus_dma_tag_create failed; error %u\n", r);
		goto fail_0;
	}

	r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
	    BUS_DMA_NOWAIT, &dma->dma_map);
	if (r != 0) {
		device_printf(sc->sc_dev, "safe_dma_malloc: "
		    "bus_dmamem_alloc failed; size %ju, error %u\n",
		    (uintmax_t)size, r);
		goto fail_1;
	}

	r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
	    size,
	    safe_dmamap_cb,
	    &dma->dma_paddr,
	    mapflags | BUS_DMA_NOWAIT);
	if (r != 0) {
		device_printf(sc->sc_dev, "safe_dma_malloc: "
		    "bus_dmamap_load failed; error %u\n", r);
		goto fail_2;
	}

	dma->dma_size = size;
	return (0);

	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
fail_2:
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
fail_1:
	bus_dma_tag_destroy(dma->dma_tag);
fail_0:
	dma->dma_tag = NULL;
	return (r);
}

static void
safe_dma_free(struct safe_softc *sc, struct safe_dma_alloc *dma)
{
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
	bus_dma_tag_destroy(dma->dma_tag);
}
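/*
 * NB: safe_dma_malloc() creates its tag with nsegments == 1, so
 * dma_paddr above is the bus address of the entire allocation and the
 * rings allocated in safe_attach() are physically contiguous; that is
 * what lets ring and particle addresses be computed with plain
 * pointer arithmetic off dma_paddr.
 */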
/*
 * Resets the board.  Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 */
static void
safe_reset_board(struct safe_softc *sc)
{
	u_int32_t v;
	/*
	 * Reset the device.  The manual says no delay
	 * is needed between marking and clearing reset.
	 */
	v = READ_REG(sc, SAFE_PE_DMACFG) &~
	    (SAFE_PE_DMACFG_PERESET | SAFE_PE_DMACFG_PDRRESET |
	     SAFE_PE_DMACFG_SGRESET);
	WRITE_REG(sc, SAFE_PE_DMACFG, v
				    | SAFE_PE_DMACFG_PERESET
				    | SAFE_PE_DMACFG_PDRRESET
				    | SAFE_PE_DMACFG_SGRESET);
	WRITE_REG(sc, SAFE_PE_DMACFG, v);
}

/*
 * Initialize registers we need to touch only once.
 */
static void
safe_init_board(struct safe_softc *sc)
{
	u_int32_t v, dwords;

	v = READ_REG(sc, SAFE_PE_DMACFG);
	v &=~ SAFE_PE_DMACFG_PEMODE;
	v |= SAFE_PE_DMACFG_FSENA		/* failsafe enable */
	  |  SAFE_PE_DMACFG_GPRPCI		/* gather ring on PCI */
	  |  SAFE_PE_DMACFG_SPRPCI		/* scatter ring on PCI */
	  |  SAFE_PE_DMACFG_ESDESC		/* endian-swap descriptors */
	  |  SAFE_PE_DMACFG_ESSA		/* endian-swap SA's */
	  |  SAFE_PE_DMACFG_ESPDESC		/* endian-swap part. desc's */
	  ;
	WRITE_REG(sc, SAFE_PE_DMACFG, v);
#if 0
	/* XXX select byte swap based on host byte order */
	WRITE_REG(sc, SAFE_ENDIAN, 0x1b);
#endif
	if (sc->sc_chiprev == SAFE_REV(1,0)) {
		/*
		 * Avoid large PCI DMA transfers.  Rev 1.0 has a bug where
		 * "target mode transfers" done while the chip is DMA'ing
		 * >1020 bytes cause the hardware to lockup.  To avoid this
		 * we reduce the max PCI transfer size and use small source
		 * particle descriptors (<= 256 bytes).
		 */
		WRITE_REG(sc, SAFE_DMA_CFG, 256);
		device_printf(sc->sc_dev,
		    "Reduce max DMA size to %u words for rev %u.%u WAR\n",
		    (READ_REG(sc, SAFE_DMA_CFG)>>2) & 0xff,
		    SAFE_REV_MAJ(sc->sc_chiprev),
		    SAFE_REV_MIN(sc->sc_chiprev));
	}

	/* NB: operands+results are overlaid */
	WRITE_REG(sc, SAFE_PE_PDRBASE, sc->sc_ringalloc.dma_paddr);
	WRITE_REG(sc, SAFE_PE_RDRBASE, sc->sc_ringalloc.dma_paddr);
	/*
	 * Configure ring entry size and number of items in the ring.
	 */
	KASSERT((sizeof(struct safe_ringentry) % sizeof(u_int32_t)) == 0,
	    ("PE ring entry not 32-bit aligned!"));
	dwords = sizeof(struct safe_ringentry) / sizeof(u_int32_t);
	WRITE_REG(sc, SAFE_PE_RINGCFG,
	    (dwords << SAFE_PE_RINGCFG_OFFSET_S) | SAFE_MAX_NQUEUE);
	WRITE_REG(sc, SAFE_PE_RINGPOLL, 0);	/* disable polling */

	WRITE_REG(sc, SAFE_PE_GRNGBASE, sc->sc_spalloc.dma_paddr);
	WRITE_REG(sc, SAFE_PE_SRNGBASE, sc->sc_dpalloc.dma_paddr);
	WRITE_REG(sc, SAFE_PE_PARTSIZE,
	    (SAFE_TOTAL_DPART<<16) | SAFE_TOTAL_SPART);
	/*
	 * NB: destination particles are fixed size.  We use
	 *     an mbuf cluster and require all results go to
	 *     clusters or smaller.
	 */
	WRITE_REG(sc, SAFE_PE_PARTCFG, SAFE_MAX_DSIZE);

	/* it's now safe to enable PE mode, do it */
	WRITE_REG(sc, SAFE_PE_DMACFG, v | SAFE_PE_DMACFG_PEMODE);

	/*
	 * Configure hardware to use level-triggered interrupts and
	 * to interrupt after each descriptor is processed.
	 */
	WRITE_REG(sc, SAFE_HI_CFG, SAFE_HI_CFG_LEVEL);
	WRITE_REG(sc, SAFE_HI_DESC_CNT, 1);
	WRITE_REG(sc, SAFE_HI_MASK, SAFE_INT_PE_DDONE | SAFE_INT_PE_ERROR);
}

/*
 * Init PCI registers
 */
static void
safe_init_pciregs(device_t dev)
{
}

/*
 * Clean up after a chip crash.
 * It is assumed that the caller is in splimp().
 */
static void
safe_cleanchip(struct safe_softc *sc)
{

	if (sc->sc_nqchip != 0) {
		struct safe_ringentry *re = sc->sc_back;

		while (re != sc->sc_front) {
			if (re->re_desc.d_csr != 0)
				safe_free_entry(sc, re);
			if (++re == sc->sc_ringtop)
				re = sc->sc_ring;
		}
		sc->sc_back = re;
		sc->sc_nqchip = 0;
	}
}

/*
 * free a safe_q
 * It is assumed that the caller is within splimp().
 */
static int
safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re)
{
	struct cryptop *crp;

	/*
	 * Free header MCR
	 */
	if (re->re_dst_m != NULL)
		m_freem(re->re_dst_m);

	crp = (struct cryptop *)re->re_crp;

	re->re_desc.d_csr = 0;

	crp->crp_etype = EFAULT;
	crypto_done(crp);
	return(0);
}

/*
 * Routine to reset the chip and clean up.
 * It is assumed that the caller is in splimp().
 */
static void
safe_totalreset(struct safe_softc *sc)
{
	safe_reset_board(sc);
	safe_init_board(sc);
	safe_cleanchip(sc);
}

/*
 * Is the operand suitably aligned for direct DMA.  Each
 * segment must be aligned on a 32-bit boundary and all
 * but the last segment must be a multiple of 4 bytes.
 */
static int
safe_dmamap_aligned(const struct safe_operand *op)
{
	int i;

	for (i = 0; i < op->nsegs; i++) {
		if (op->segs[i].ds_addr & 3)
			return (0);
		if (i != (op->nsegs - 1) && (op->segs[i].ds_len & 3))
			return (0);
	}
	return (1);
}

/*
 * Is the operand suitable for direct DMA as the destination
 * of an operation.  The hardware requires that each ``particle''
 * but the last in an operation result have the same size.  We
 * fix that size at SAFE_MAX_DSIZE bytes.  This routine returns
 * 0 if some segment is not a multiple of this size, 1 if all
 * segments are exactly this size, or 2 if segments are at worst
 * a multiple of this size.
 */
static int
safe_dmamap_uniform(const struct safe_operand *op)
{
	int result = 1;

	if (op->nsegs > 0) {
		int i;

		for (i = 0; i < op->nsegs-1; i++) {
			if (op->segs[i].ds_len % SAFE_MAX_DSIZE)
				return (0);
			if (op->segs[i].ds_len != SAFE_MAX_DSIZE)
				result = 2;
		}
	}
	return (result);
}
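/*
 * Example of the return values, assuming SAFE_MAX_DSIZE is 2048 (an
 * mbuf cluster): a segment list of {2048, 2048, 100} is uniform (1);
 * {4096, 2048, 100} is a multiple of the particle size (2) and gets a
 * separate destination map in safe_process(); and {1000, 2048, 100}
 * forces a bounce to a freshly allocated mbuf chain (0).  Only the
 * trailing segment may be an odd size.
 */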
#ifdef SAFE_DEBUG
static void
safe_dump_dmastatus(struct safe_softc *sc, const char *tag)
{
	printf("%s: ENDIAN 0x%x SRC 0x%x DST 0x%x STAT 0x%x\n"
		, tag
		, READ_REG(sc, SAFE_DMA_ENDIAN)
		, READ_REG(sc, SAFE_DMA_SRCADDR)
		, READ_REG(sc, SAFE_DMA_DSTADDR)
		, READ_REG(sc, SAFE_DMA_STAT)
	);
}

static void
safe_dump_intrstate(struct safe_softc *sc, const char *tag)
{
	printf("%s: HI_CFG 0x%x HI_MASK 0x%x HI_DESC_CNT 0x%x HU_STAT 0x%x HM_STAT 0x%x\n"
		, tag
		, READ_REG(sc, SAFE_HI_CFG)
		, READ_REG(sc, SAFE_HI_MASK)
		, READ_REG(sc, SAFE_HI_DESC_CNT)
		, READ_REG(sc, SAFE_HU_STAT)
		, READ_REG(sc, SAFE_HM_STAT)
	);
}

static void
safe_dump_ringstate(struct safe_softc *sc, const char *tag)
{
	u_int32_t estat = READ_REG(sc, SAFE_PE_ERNGSTAT);

	/* NB: assume caller has lock on ring */
	printf("%s: ERNGSTAT %x (next %u) back %lu front %lu\n",
		tag,
		estat, (estat >> SAFE_PE_ERNGSTAT_NEXT_S),
		(unsigned long)(sc->sc_back - sc->sc_ring),
		(unsigned long)(sc->sc_front - sc->sc_ring));
}

static void
safe_dump_request(struct safe_softc *sc, const char* tag, struct safe_ringentry *re)
{
	int ix, nsegs;

	ix = re - sc->sc_ring;
	printf("%s: %p (%u): csr %x src %x dst %x sa %x len %x\n"
		, tag
		, re, ix
		, re->re_desc.d_csr
		, re->re_desc.d_src
		, re->re_desc.d_dst
		, re->re_desc.d_sa
		, re->re_desc.d_len
	);
	if (re->re_src.nsegs > 1) {
		ix = (re->re_desc.d_src - sc->sc_spalloc.dma_paddr) /
			sizeof(struct safe_pdesc);
		for (nsegs = re->re_src.nsegs; nsegs; nsegs--) {
			printf(" spd[%u] %p: %p size %u flags %x"
				, ix, &sc->sc_spring[ix]
				, (caddr_t)(uintptr_t) sc->sc_spring[ix].pd_addr
				, sc->sc_spring[ix].pd_size
				, sc->sc_spring[ix].pd_flags
			);
			if (sc->sc_spring[ix].pd_size == 0)
				printf(" (zero!)");
			printf("\n");
			if (++ix == SAFE_TOTAL_SPART)
				ix = 0;
		}
	}
	if (re->re_dst.nsegs > 1) {
		ix = (re->re_desc.d_dst - sc->sc_dpalloc.dma_paddr) /
			sizeof(struct safe_pdesc);
		for (nsegs = re->re_dst.nsegs; nsegs; nsegs--) {
			printf(" dpd[%u] %p: %p flags %x\n"
				, ix, &sc->sc_dpring[ix]
				, (caddr_t)(uintptr_t) sc->sc_dpring[ix].pd_addr
				, sc->sc_dpring[ix].pd_flags
			);
			if (++ix == SAFE_TOTAL_DPART)
				ix = 0;
		}
	}
	printf("sa: cmd0 %08x cmd1 %08x staterec %x\n",
		re->re_sa.sa_cmd0, re->re_sa.sa_cmd1, re->re_sa.sa_staterec);
	printf("sa: key %x %x %x %x %x %x %x %x\n"
		, re->re_sa.sa_key[0]
		, re->re_sa.sa_key[1]
		, re->re_sa.sa_key[2]
		, re->re_sa.sa_key[3]
		, re->re_sa.sa_key[4]
		, re->re_sa.sa_key[5]
		, re->re_sa.sa_key[6]
		, re->re_sa.sa_key[7]
	);
	printf("sa: indigest %x %x %x %x %x\n"
		, re->re_sa.sa_indigest[0]
		, re->re_sa.sa_indigest[1]
		, re->re_sa.sa_indigest[2]
		, re->re_sa.sa_indigest[3]
		, re->re_sa.sa_indigest[4]
	);
	printf("sa: outdigest %x %x %x %x %x\n"
		, re->re_sa.sa_outdigest[0]
		, re->re_sa.sa_outdigest[1]
		, re->re_sa.sa_outdigest[2]
		, re->re_sa.sa_outdigest[3]
		, re->re_sa.sa_outdigest[4]
	);
	printf("sr: iv %x %x %x %x\n"
		, re->re_sastate.sa_saved_iv[0]
		, re->re_sastate.sa_saved_iv[1]
		, re->re_sastate.sa_saved_iv[2]
		, re->re_sastate.sa_saved_iv[3]
	);
	printf("sr: hashbc %u indigest %x %x %x %x %x\n"
		, re->re_sastate.sa_saved_hashbc
		, re->re_sastate.sa_saved_indigest[0]
		, re->re_sastate.sa_saved_indigest[1]
		, re->re_sastate.sa_saved_indigest[2]
		, re->re_sastate.sa_saved_indigest[3]
		, re->re_sastate.sa_saved_indigest[4]
	);
}

static void
safe_dump_ring(struct safe_softc *sc, const char *tag)
{
	mtx_lock(&sc->sc_ringmtx);
	printf("\nSafeNet Ring State:\n");
	safe_dump_intrstate(sc, tag);
	safe_dump_dmastatus(sc, tag);
	safe_dump_ringstate(sc, tag);
	if (sc->sc_nqchip) {
		struct safe_ringentry *re = sc->sc_back;
		do {
			safe_dump_request(sc, tag, re);
			if (++re == sc->sc_ringtop)
				re = sc->sc_ring;
		} while (re != sc->sc_front);
	}
	mtx_unlock(&sc->sc_ringmtx);
}

static int
sysctl_hw_safe_dump(SYSCTL_HANDLER_ARGS)
{
	char dmode[64];
	int error;

	strncpy(dmode, "", sizeof(dmode) - 1);
	dmode[sizeof(dmode) - 1] = '\0';
	error = sysctl_handle_string(oidp, &dmode[0], sizeof(dmode), req);

	if (error == 0 && req->newptr != NULL) {
		struct safe_softc *sc = safec;

		if (!sc)
			return EINVAL;
		if (strncmp(dmode, "dma", 3) == 0)
			safe_dump_dmastatus(sc, "safe0");
		else if (strncmp(dmode, "int", 3) == 0)
			safe_dump_intrstate(sc, "safe0");
		else if (strncmp(dmode, "ring", 4) == 0)
			safe_dump_ring(sc, "safe0");
		else
			return EINVAL;
	}
	return error;
}
SYSCTL_PROC(_hw_safe, OID_AUTO, dump,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 0, 0,
    sysctl_hw_safe_dump, "A",
    "Dump driver state");
#endif /* SAFE_DEBUG */