/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2003 Sam Leffler, Errno Consulting
 * Copyright (c) 2003 Global Technology Associates, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
/*
 * SafeNet SafeXcel-1141 hardware crypto accelerator
 */
#include "opt_safe.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform_auth.h>
#include <sys/random.h>
#include <sys/kobj.h>

#include "cryptodev_if.h"

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#ifdef SAFE_RNDTEST
#include <dev/rndtest/rndtest.h>
#endif
#include <dev/safe/safereg.h>
#include <dev/safe/safevar.h>

#ifndef bswap32
#define	bswap32	NTOHL
#endif

/*
 * Prototypes and count for the pci_device structure
 */
static	int safe_probe(device_t);
static	int safe_attach(device_t);
static	int safe_detach(device_t);
static	int safe_suspend(device_t);
static	int safe_resume(device_t);
static	int safe_shutdown(device_t);

static	int safe_probesession(device_t, const struct crypto_session_params *);
static	int safe_newsession(device_t, crypto_session_t,
	    const struct crypto_session_params *);
static	int safe_process(device_t, struct cryptop *, int);

static device_method_t safe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		safe_probe),
	DEVMETHOD(device_attach,	safe_attach),
	DEVMETHOD(device_detach,	safe_detach),
	DEVMETHOD(device_suspend,	safe_suspend),
	DEVMETHOD(device_resume,	safe_resume),
	DEVMETHOD(device_shutdown,	safe_shutdown),

	/* crypto device methods */
	DEVMETHOD(cryptodev_probesession, safe_probesession),
	DEVMETHOD(cryptodev_newsession,	safe_newsession),
	DEVMETHOD(cryptodev_process,	safe_process),

	DEVMETHOD_END
};

static driver_t safe_driver = {
	"safe",
	safe_methods,
	sizeof (struct safe_softc)
};

DRIVER_MODULE(safe, pci, safe_driver, 0, 0);
MODULE_DEPEND(safe, crypto, 1, 1, 1);
#ifdef SAFE_RNDTEST
MODULE_DEPEND(safe, rndtest, 1, 1, 1);
#endif

static	void safe_intr(void *);
static	void safe_callback(struct safe_softc *, struct safe_ringentry *);
static	void safe_feed(struct safe_softc *, struct safe_ringentry *);
static	void safe_mcopy(struct mbuf *, struct mbuf *, u_int);
#ifndef SAFE_NO_RNG
static	void safe_rng_init(struct safe_softc *);
static	void safe_rng(void *);
#endif /* SAFE_NO_RNG */
static	int safe_dma_malloc(struct safe_softc *, bus_size_t,
	    struct safe_dma_alloc *, int);
#define	safe_dma_sync(_dma, _flags) \
	bus_dmamap_sync((_dma)->dma_tag, (_dma)->dma_map, (_flags))
static	void safe_dma_free(struct safe_softc *, struct safe_dma_alloc *);
static	int safe_dmamap_aligned(const struct safe_operand *);
static	int safe_dmamap_uniform(const struct safe_operand *);

static	void safe_reset_board(struct safe_softc *);
static	void safe_init_board(struct safe_softc *);
static	void safe_init_pciregs(device_t dev);
static	void safe_cleanchip(struct safe_softc *);
static	void safe_totalreset(struct safe_softc *);

static	int safe_free_entry(struct safe_softc *, struct safe_ringentry *);

static SYSCTL_NODE(_hw, OID_AUTO, safe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "SafeNet driver parameters");

#ifdef SAFE_DEBUG
static	void safe_dump_dmastatus(struct safe_softc *, const char *);
static	void safe_dump_ringstate(struct safe_softc *, const char *);
static	void safe_dump_intrstate(struct safe_softc *, const char *);
static	void safe_dump_request(struct safe_softc *, const char *,
	    struct safe_ringentry *);

static	struct safe_softc *safec;	/* for use by hw.safe.dump */

static	int safe_debug = 0;
SYSCTL_INT(_hw_safe, OID_AUTO, debug, CTLFLAG_RW, &safe_debug,
    0, "control debugging msgs");
#define	DPRINTF(_x)	if (safe_debug) printf _x
#else
#define	DPRINTF(_x)
#endif

#define	READ_REG(sc,r) \
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (r))

#define WRITE_REG(sc,reg,val) \
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, reg, val)

struct safe_stats safestats;
SYSCTL_STRUCT(_hw_safe, OID_AUTO, stats, CTLFLAG_RD, &safestats,
    safe_stats, "driver statistics");
#ifndef SAFE_NO_RNG
static	int safe_rnginterval = 1;	/* poll once a second */
SYSCTL_INT(_hw_safe, OID_AUTO, rnginterval, CTLFLAG_RW, &safe_rnginterval,
    0, "RNG polling interval (secs)");
static	int safe_rngbufsize = 16;	/* 64 bytes each poll */
SYSCTL_INT(_hw_safe, OID_AUTO, rngbufsize, CTLFLAG_RW, &safe_rngbufsize,
    0, "RNG polling buffer size (32-bit words)");
static	int safe_rngmaxalarm = 8;	/* max alarms before reset */
SYSCTL_INT(_hw_safe, OID_AUTO, rngmaxalarm, CTLFLAG_RW, &safe_rngmaxalarm,
    0, "RNG max alarms before reset");
#endif /* SAFE_NO_RNG */

static int
safe_probe(device_t dev)
{
	if (pci_get_vendor(dev) == PCI_VENDOR_SAFENET &&
	    pci_get_device(dev) == PCI_PRODUCT_SAFEXCEL)
		return (BUS_PROBE_DEFAULT);
	return (ENXIO);
}

static const char*
safe_partname(struct safe_softc *sc)
{
	/* XXX sprintf numbers when not decoded */
	switch (pci_get_vendor(sc->sc_dev)) {
	case PCI_VENDOR_SAFENET:
		switch (pci_get_device(sc->sc_dev)) {
		case PCI_PRODUCT_SAFEXCEL: return "SafeNet SafeXcel-1141";
		}
		return "SafeNet unknown-part";
	}
	return "Unknown-vendor unknown-part";
}

#ifndef SAFE_NO_RNG
static void
default_harvest(struct rndtest_state *rsp, void *buf, u_int count)
{
	/* MarkM: FIX!! Check that this does not swamp the harvester! */
	random_harvest_queue(buf, count, RANDOM_PURE_SAFE);
}
#endif /* SAFE_NO_RNG */

static int
safe_attach(device_t dev)
{
	struct safe_softc *sc = device_get_softc(dev);
	u_int32_t raddr;
	u_int32_t i;
	int rid;

	bzero(sc, sizeof (*sc));
	sc->sc_dev = dev;

	/* XXX handle power management */

	pci_enable_busmaster(dev);

	/*
	 * Setup memory-mapping of PCI registers.
	 */
	rid = BS_BAR;
	sc->sc_sr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_sr == NULL) {
		device_printf(dev, "cannot map register space\n");
		goto bad;
	}
	sc->sc_st = rman_get_bustag(sc->sc_sr);
	sc->sc_sh = rman_get_bushandle(sc->sc_sr);

	/*
	 * Arrange interrupt line.
	 */
	rid = 0;
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE|RF_ACTIVE);
	if (sc->sc_irq == NULL) {
		device_printf(dev, "could not map interrupt\n");
		goto bad1;
	}
	/*
	 * NB: Network code assumes we are blocked with splimp()
	 *     so make sure the IRQ is mapped appropriately.
	 */
	if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, safe_intr, sc, &sc->sc_ih)) {
		device_printf(dev, "could not establish interrupt\n");
		goto bad2;
	}

	sc->sc_cid = crypto_get_driverid(dev, sizeof(struct safe_session),
	    CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0) {
		device_printf(dev, "could not get crypto driver id\n");
		goto bad3;
	}

	sc->sc_chiprev = READ_REG(sc, SAFE_DEVINFO) &
	    (SAFE_DEVINFO_REV_MAJ | SAFE_DEVINFO_REV_MIN);

	/*
	 * Setup DMA descriptor area.
	 */
	if (bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
	    1,				/* alignment */
	    SAFE_DMA_BOUNDARY,		/* boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    SAFE_MAX_DMA,		/* maxsize */
	    SAFE_MAX_PART,		/* nsegments */
	    SAFE_MAX_SSIZE,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW,		/* flags */
	    NULL, NULL,			/* locking */
	    &sc->sc_srcdmat)) {
		device_printf(dev, "cannot allocate DMA tag\n");
		goto bad4;
	}
	if (bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
	    1,				/* alignment */
	    SAFE_MAX_DSIZE,		/* boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    SAFE_MAX_DMA,		/* maxsize */
	    SAFE_MAX_PART,		/* nsegments */
	    SAFE_MAX_DSIZE,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW,		/* flags */
	    NULL, NULL,			/* locking */
	    &sc->sc_dstdmat)) {
		device_printf(dev, "cannot allocate DMA tag\n");
		goto bad4;
	}

	/*
	 * Allocate packet engine descriptors.
	 */
	if (safe_dma_malloc(sc,
	    SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
	    &sc->sc_ringalloc, 0)) {
		device_printf(dev, "cannot allocate PE descriptor ring\n");
		bus_dma_tag_destroy(sc->sc_srcdmat);
		goto bad4;
	}
	/*
	 * Hookup the static portion of all our data structures.
	 */
	sc->sc_ring = (struct safe_ringentry *) sc->sc_ringalloc.dma_vaddr;
	sc->sc_ringtop = sc->sc_ring + SAFE_MAX_NQUEUE;
	sc->sc_front = sc->sc_ring;
	sc->sc_back = sc->sc_ring;
	raddr = sc->sc_ringalloc.dma_paddr;
	bzero(sc->sc_ring, SAFE_MAX_NQUEUE * sizeof(struct safe_ringentry));
	for (i = 0; i < SAFE_MAX_NQUEUE; i++) {
		struct safe_ringentry *re = &sc->sc_ring[i];

		re->re_desc.d_sa = raddr +
		    offsetof(struct safe_ringentry, re_sa);
		re->re_sa.sa_staterec = raddr +
		    offsetof(struct safe_ringentry, re_sastate);

		raddr += sizeof (struct safe_ringentry);
	}
	mtx_init(&sc->sc_ringmtx, device_get_nameunit(dev),
	    "packet engine ring", MTX_DEF);
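
	/*
	 * Illustrative note (derived from the loop above, not the
	 * manual): each ring entry is self-describing for the packet
	 * engine.  With the ring based at physical address P, entry i
	 * sits at P + i * sizeof(struct safe_ringentry); the loop
	 * points the descriptor at its embedded SA (d_sa) and the SA
	 * at its embedded state record (sa_staterec) via offsetof(),
	 * so the chip can chase physical pointers within a single
	 * entry without any further translation.
	 */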

	/*
	 * Allocate scatter and gather particle descriptors.
	 */
	if (safe_dma_malloc(sc, SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
	    &sc->sc_spalloc, 0)) {
		device_printf(dev, "cannot allocate source particle "
		    "descriptor ring\n");
		mtx_destroy(&sc->sc_ringmtx);
		safe_dma_free(sc, &sc->sc_ringalloc);
		bus_dma_tag_destroy(sc->sc_srcdmat);
		goto bad4;
	}
	sc->sc_spring = (struct safe_pdesc *) sc->sc_spalloc.dma_vaddr;
	sc->sc_springtop = sc->sc_spring + SAFE_TOTAL_SPART;
	sc->sc_spfree = sc->sc_spring;
	bzero(sc->sc_spring, SAFE_TOTAL_SPART * sizeof(struct safe_pdesc));

	if (safe_dma_malloc(sc, SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
	    &sc->sc_dpalloc, 0)) {
		device_printf(dev, "cannot allocate destination particle "
		    "descriptor ring\n");
		mtx_destroy(&sc->sc_ringmtx);
		safe_dma_free(sc, &sc->sc_spalloc);
		safe_dma_free(sc, &sc->sc_ringalloc);
		bus_dma_tag_destroy(sc->sc_dstdmat);
		goto bad4;
	}
	sc->sc_dpring = (struct safe_pdesc *) sc->sc_dpalloc.dma_vaddr;
	sc->sc_dpringtop = sc->sc_dpring + SAFE_TOTAL_DPART;
	sc->sc_dpfree = sc->sc_dpring;
	bzero(sc->sc_dpring, SAFE_TOTAL_DPART * sizeof(struct safe_pdesc));

	device_printf(sc->sc_dev, "%s", safe_partname(sc));

	sc->sc_devinfo = READ_REG(sc, SAFE_DEVINFO);
	if (sc->sc_devinfo & SAFE_DEVINFO_RNG) {
		sc->sc_flags |= SAFE_FLAGS_RNG;
		printf(" rng");
	}
	if (sc->sc_devinfo & SAFE_DEVINFO_PKEY) {
#if 0
		printf(" key");
		sc->sc_flags |= SAFE_FLAGS_KEY;
#endif
	}
	if (sc->sc_devinfo & SAFE_DEVINFO_DES) {
		printf(" des/3des");
	}
	if (sc->sc_devinfo & SAFE_DEVINFO_AES) {
		printf(" aes");
	}
	if (sc->sc_devinfo & SAFE_DEVINFO_MD5) {
		printf(" md5");
	}
	if (sc->sc_devinfo & SAFE_DEVINFO_SHA1) {
		printf(" sha1");
	}
	/* XXX other supported algorithms */
	printf("\n");

	safe_reset_board(sc);		/* reset h/w */
	safe_init_pciregs(dev);		/* init pci settings */
	safe_init_board(sc);		/* init h/w */

#ifndef SAFE_NO_RNG
	if (sc->sc_flags & SAFE_FLAGS_RNG) {
#ifdef SAFE_RNDTEST
		sc->sc_rndtest = rndtest_attach(dev);
		if (sc->sc_rndtest)
			sc->sc_harvest = rndtest_harvest;
		else
			sc->sc_harvest = default_harvest;
#else
		sc->sc_harvest = default_harvest;
#endif
		safe_rng_init(sc);

		callout_init(&sc->sc_rngto, 1);
		callout_reset(&sc->sc_rngto, hz*safe_rnginterval, safe_rng, sc);
	}
#endif /* SAFE_NO_RNG */
#ifdef SAFE_DEBUG
	safec = sc;			/* for use by hw.safe.dump */
#endif
	return (0);
bad4:
	crypto_unregister_all(sc->sc_cid);
bad3:
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
bad2:
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
bad1:
	bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr);
bad:
	return (ENXIO);
}

/*
 * Detach a device that successfully probed.
 */
static int
safe_detach(device_t dev)
{
	struct safe_softc *sc = device_get_softc(dev);

	/* XXX wait/abort active ops */

	WRITE_REG(sc, SAFE_HI_MASK, 0);		/* disable interrupts */

	callout_stop(&sc->sc_rngto);

	crypto_unregister_all(sc->sc_cid);

#ifdef SAFE_RNDTEST
	if (sc->sc_rndtest)
		rndtest_detach(sc->sc_rndtest);
#endif

	safe_cleanchip(sc);
	safe_dma_free(sc, &sc->sc_dpalloc);
	safe_dma_free(sc, &sc->sc_spalloc);
	mtx_destroy(&sc->sc_ringmtx);
	safe_dma_free(sc, &sc->sc_ringalloc);

	bus_generic_detach(dev);
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);

	bus_dma_tag_destroy(sc->sc_srcdmat);
	bus_dma_tag_destroy(sc->sc_dstdmat);
	bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr);

	return (0);
}

/*
 * Stop all chip i/o so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
safe_shutdown(device_t dev)
{
#ifdef notyet
	safe_stop(device_get_softc(dev));
#endif
	return (0);
}

/*
 * Device suspend routine.
 */
static int
safe_suspend(device_t dev)
{
	struct safe_softc *sc = device_get_softc(dev);

#ifdef notyet
	/* XXX stop the device and save PCI settings */
#endif
	sc->sc_suspended = 1;

	return (0);
}

static int
safe_resume(device_t dev)
{
	struct safe_softc *sc = device_get_softc(dev);

#ifdef notyet
	/* XXX restore PCI settings and start the device */
#endif
	sc->sc_suspended = 0;
	return (0);
}
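
/*
 * Illustrative overview of the interrupt path below: the handler
 * acks by writing the pending status bits back to SAFE_HI_CLR, then
 * drains completed requests from sc_back toward sc_front.  The chip
 * completes descriptors in order, so the scan stops at the first
 * entry whose CSR/len DONE bits are still clear.
 */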

/*
 * SafeXcel Interrupt routine
 */
static void
safe_intr(void *arg)
{
	struct safe_softc *sc = arg;
	volatile u_int32_t stat;

	stat = READ_REG(sc, SAFE_HM_STAT);
	if (stat == 0)			/* shared irq, not for us */
		return;

	WRITE_REG(sc, SAFE_HI_CLR, stat);	/* IACK */

	if ((stat & SAFE_INT_PE_DDONE)) {
		/*
		 * Descriptor(s) done; scan the ring and
		 * process completed operations.
		 */
		mtx_lock(&sc->sc_ringmtx);
		while (sc->sc_back != sc->sc_front) {
			struct safe_ringentry *re = sc->sc_back;
#ifdef SAFE_DEBUG
			if (safe_debug) {
				safe_dump_ringstate(sc, __func__);
				safe_dump_request(sc, __func__, re);
			}
#endif
			/*
			 * safe_process marks ring entries that were
			 * allocated but not used with a csr of zero.
			 * This ensures the ring front pointer never
			 * needs to be set backwards in the event that
			 * an entry is allocated but not used because
			 * of a setup error.
			 */
			if (re->re_desc.d_csr != 0) {
				if (!SAFE_PE_CSR_IS_DONE(re->re_desc.d_csr))
					break;
				if (!SAFE_PE_LEN_IS_DONE(re->re_desc.d_len))
					break;
				sc->sc_nqchip--;
				safe_callback(sc, re);
			}
			if (++(sc->sc_back) == sc->sc_ringtop)
				sc->sc_back = sc->sc_ring;
		}
		mtx_unlock(&sc->sc_ringmtx);
	}

	/*
	 * Check to see if we got any DMA error.
	 */
	if (stat & SAFE_INT_PE_ERROR) {
		DPRINTF(("dmaerr dmastat %08x\n",
		    READ_REG(sc, SAFE_PE_DMASTAT)));
		safestats.st_dmaerr++;
		safe_totalreset(sc);
#if 0
		safe_feed(sc);
#endif
	}

	if (sc->sc_needwakeup) {	/* XXX check high watermark */
		int wakeup = sc->sc_needwakeup & CRYPTO_SYMQ;
		DPRINTF(("%s: wakeup crypto %x\n", __func__,
		    sc->sc_needwakeup));
		sc->sc_needwakeup &= ~wakeup;
		crypto_unblock(sc->sc_cid, wakeup);
	}
}

/*
 * safe_feed() - post a request to chip
 */
static void
safe_feed(struct safe_softc *sc, struct safe_ringentry *re)
{
	bus_dmamap_sync(sc->sc_srcdmat, re->re_src_map, BUS_DMASYNC_PREWRITE);
	if (re->re_dst_map != NULL)
		bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map,
		    BUS_DMASYNC_PREREAD);
	/* XXX have no smaller granularity */
	safe_dma_sync(&sc->sc_ringalloc,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	safe_dma_sync(&sc->sc_spalloc, BUS_DMASYNC_PREWRITE);
	safe_dma_sync(&sc->sc_dpalloc, BUS_DMASYNC_PREWRITE);

#ifdef SAFE_DEBUG
	if (safe_debug) {
		safe_dump_ringstate(sc, __func__);
		safe_dump_request(sc, __func__, re);
	}
#endif
	sc->sc_nqchip++;
	if (sc->sc_nqchip > safestats.st_maxqchip)
		safestats.st_maxqchip = sc->sc_nqchip;
	/* poke h/w to check descriptor ring, any value can be written */
	WRITE_REG(sc, SAFE_HI_RD_DESCR, 0);
}

#define	N(a)	(sizeof(a) / sizeof (a[0]))
static void
safe_setup_enckey(struct safe_session *ses, const void *key)
{
	int i;

	bcopy(key, ses->ses_key, ses->ses_klen);

	/* PE is little-endian, ensure proper byte order */
	for (i = 0; i < N(ses->ses_key); i++)
		ses->ses_key[i] = htole32(ses->ses_key[i]);
}

static void
safe_setup_mackey(struct safe_session *ses, int algo, const uint8_t *key,
    int klen)
{
	SHA1_CTX sha1ctx;
	int i;

	hmac_init_ipad(&auth_hash_hmac_sha1, key, klen, &sha1ctx);
	bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32));

	hmac_init_opad(&auth_hash_hmac_sha1, key, klen, &sha1ctx);
	bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32));

	explicit_bzero(&sha1ctx, sizeof(sha1ctx));

	/* PE is little-endian, ensure proper byte order */
	for (i = 0; i < N(ses->ses_hminner); i++) {
		ses->ses_hminner[i] = htole32(ses->ses_hminner[i]);
		ses->ses_hmouter[i] = htole32(ses->ses_hmouter[i]);
	}
}
#undef N
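
/*
 * NB (illustrative; this is the standard HMAC construction):
 *
 *	HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))
 *
 * hmac_init_ipad()/hmac_init_opad() above run the first SHA-1 block
 * over K^ipad and K^opad, and safe_setup_mackey() stashes the two
 * resulting midstates in the session.  The packet engine is later
 * loaded with these midstates (SAFE_SA_CMD0_HSLD_SA in safe_process)
 * so only the per-request data needs to be hashed on each operation.
 */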

static bool
safe_auth_supported(struct safe_softc *sc,
    const struct crypto_session_params *csp)
{

	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
		if ((sc->sc_devinfo & SAFE_DEVINFO_SHA1) == 0)
			return (false);
		break;
	default:
		return (false);
	}
	return (true);
}

static bool
safe_cipher_supported(struct safe_softc *sc,
    const struct crypto_session_params *csp)
{

	switch (csp->csp_cipher_alg) {
	case CRYPTO_AES_CBC:
		if ((sc->sc_devinfo & SAFE_DEVINFO_AES) == 0)
			return (false);
		if (csp->csp_ivlen != 16)
			return (false);
		if (csp->csp_cipher_klen != 16 &&
		    csp->csp_cipher_klen != 24 &&
		    csp->csp_cipher_klen != 32)
			return (false);
		break;
	}
	return (true);
}

static int
safe_probesession(device_t dev, const struct crypto_session_params *csp)
{
	struct safe_softc *sc = device_get_softc(dev);

	if (csp->csp_flags != 0)
		return (EINVAL);
	switch (csp->csp_mode) {
	case CSP_MODE_DIGEST:
		if (!safe_auth_supported(sc, csp))
			return (EINVAL);
		break;
	case CSP_MODE_CIPHER:
		if (!safe_cipher_supported(sc, csp))
			return (EINVAL);
		break;
	case CSP_MODE_ETA:
		if (!safe_auth_supported(sc, csp) ||
		    !safe_cipher_supported(sc, csp))
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	return (CRYPTODEV_PROBE_HARDWARE);
}

/*
 * Allocate a new 'session'.
 */
static int
safe_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
	struct safe_session *ses;

	ses = crypto_get_driver_session(cses);
	if (csp->csp_cipher_alg != 0) {
		ses->ses_klen = csp->csp_cipher_klen;
		if (csp->csp_cipher_key != NULL)
			safe_setup_enckey(ses, csp->csp_cipher_key);
	}

	if (csp->csp_auth_alg != 0) {
		ses->ses_mlen = csp->csp_auth_mlen;
		if (ses->ses_mlen == 0) {
			ses->ses_mlen = SHA1_HASH_LEN;
		}

		if (csp->csp_auth_key != NULL) {
			safe_setup_mackey(ses, csp->csp_auth_alg,
			    csp->csp_auth_key, csp->csp_auth_klen);
		}
	}

	return (0);
}

static void
safe_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, int error)
{
	struct safe_operand *op = arg;

	DPRINTF(("%s: nsegs %d error %d\n", __func__,
	    nsegs, error));
	if (error != 0)
		return;
	op->nsegs = nsegs;
	bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
}
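
/*
 * Illustrative note: safe_op_cb is the usual busdma load callback.
 * bus_dmamap_load_crp() invokes it synchronously (we pass
 * BUS_DMA_NOWAIT) with the segment array describing the request's
 * buffer; the callback simply copies nsegs and the bus_dma_segment_t
 * list into the safe_operand so safe_process() can build gather and
 * scatter particle lists from them.  For example, an mbuf chain of
 * three clusters typically loads as three segments.
 */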

static int
safe_process(device_t dev, struct cryptop *crp, int hint)
{
	struct safe_softc *sc = device_get_softc(dev);
	const struct crypto_session_params *csp;
	int err = 0, i, nicealign, uniform;
	int bypass, oplen;
	int16_t coffset;
	struct safe_session *ses;
	struct safe_ringentry *re;
	struct safe_sarec *sa;
	struct safe_pdesc *pd;
	u_int32_t cmd0, cmd1, staterec;

	mtx_lock(&sc->sc_ringmtx);
	if (sc->sc_front == sc->sc_back && sc->sc_nqchip != 0) {
		safestats.st_ringfull++;
		sc->sc_needwakeup |= CRYPTO_SYMQ;
		mtx_unlock(&sc->sc_ringmtx);
		return (ERESTART);
	}
	re = sc->sc_front;

	staterec = re->re_sa.sa_staterec;	/* save */
	/* NB: zero everything but the PE descriptor */
	bzero(&re->re_sa, sizeof(struct safe_ringentry) - sizeof(re->re_desc));
	re->re_sa.sa_staterec = staterec;	/* restore */

	re->re_crp = crp;

	sa = &re->re_sa;
	ses = crypto_get_driver_session(crp->crp_session);
	csp = crypto_get_params(crp->crp_session);

	cmd0 = SAFE_SA_CMD0_BASIC;	/* basic group operation */
	cmd1 = 0;
	switch (csp->csp_mode) {
	case CSP_MODE_DIGEST:
		cmd0 |= SAFE_SA_CMD0_OP_HASH;
		break;
	case CSP_MODE_CIPHER:
		cmd0 |= SAFE_SA_CMD0_OP_CRYPT;
		break;
	case CSP_MODE_ETA:
		cmd0 |= SAFE_SA_CMD0_OP_BOTH;
		break;
	}

	if (csp->csp_cipher_alg != 0) {
		if (crp->crp_cipher_key != NULL)
			safe_setup_enckey(ses, crp->crp_cipher_key);

		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_CBC:
			cmd0 |= SAFE_SA_CMD0_AES;
			cmd1 |= SAFE_SA_CMD1_CBC;
			if (ses->ses_klen * 8 == 128)
				cmd1 |= SAFE_SA_CMD1_AES128;
			else if (ses->ses_klen * 8 == 192)
				cmd1 |= SAFE_SA_CMD1_AES192;
			else
				cmd1 |= SAFE_SA_CMD1_AES256;
		}

		/*
		 * Setup encrypt/decrypt state.  When using basic ops
		 * we can't use an inline IV because hash/crypt offset
		 * must be from the end of the IV to the start of the
		 * crypt data and this leaves out the preceding header
		 * from the hash calculation.  Instead we place the IV
		 * in the state record and set the hash/crypt offset to
		 * copy both the header+IV.
		 */
		crypto_read_iv(crp, re->re_sastate.sa_saved_iv);
		cmd0 |= SAFE_SA_CMD0_IVLD_STATE;

		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			cmd0 |= SAFE_SA_CMD0_OUTBOUND;

			/*
			 * XXX: I suspect we don't need this since we
			 * don't save the returned IV.
			 */
			cmd0 |= SAFE_SA_CMD0_SAVEIV;
		} else {
			cmd0 |= SAFE_SA_CMD0_INBOUND;
		}
		/*
		 * For basic encryption use the zero pad algorithm.
		 * This pads results to an 8-byte boundary and
		 * suppresses padding verification for inbound (i.e.
		 * decrypt) operations.
		 *
		 * NB: Not sure if the 8-byte pad boundary is a problem.
		 */
		cmd0 |= SAFE_SA_CMD0_PAD_ZERO;

		/* XXX assert key bufs have the same size */
		bcopy(ses->ses_key, sa->sa_key, sizeof(sa->sa_key));
	}

	if (csp->csp_auth_alg != 0) {
		if (crp->crp_auth_key != NULL) {
			safe_setup_mackey(ses, csp->csp_auth_alg,
			    crp->crp_auth_key, csp->csp_auth_klen);
		}

		switch (csp->csp_auth_alg) {
		case CRYPTO_SHA1_HMAC:
			cmd0 |= SAFE_SA_CMD0_SHA1;
			cmd1 |= SAFE_SA_CMD1_HMAC;	/* NB: enable HMAC */
			break;
		}

		/*
		 * Digest data is loaded from the SA and the hash
		 * result is saved to the state block where we
		 * retrieve it for return to the caller.
		 */
		/* XXX assert digest bufs have the same size */
		bcopy(ses->ses_hminner, sa->sa_indigest,
		    sizeof(sa->sa_indigest));
		bcopy(ses->ses_hmouter, sa->sa_outdigest,
		    sizeof(sa->sa_outdigest));

		cmd0 |= SAFE_SA_CMD0_HSLD_SA | SAFE_SA_CMD0_SAVEHASH;
		re->re_flags |= SAFE_QFLAGS_COPYOUTICV;
	}

	if (csp->csp_mode == CSP_MODE_ETA) {
		/*
		 * The driver only supports ETA requests where there
		 * is no gap between the AAD and payload.
		 */
		if (crp->crp_aad_length != 0 &&
		    crp->crp_aad_start + crp->crp_aad_length !=
		    crp->crp_payload_start) {
			safestats.st_lenmismatch++;
			err = EINVAL;
			goto errout;
		}
		if (crp->crp_aad_length != 0)
			bypass = crp->crp_aad_start;
		else
			bypass = crp->crp_payload_start;
		coffset = crp->crp_aad_length;
		oplen = crp->crp_payload_start + crp->crp_payload_length;
#ifdef SAFE_DEBUG
		if (safe_debug) {
			printf("AAD: skip %d, len %d, digest %d\n",
			    crp->crp_aad_start, crp->crp_aad_length,
			    crp->crp_digest_start);
			printf("payload: skip %d, len %d, IV %d\n",
			    crp->crp_payload_start, crp->crp_payload_length,
			    crp->crp_iv_start);
			printf("bypass %d coffset %d oplen %d\n",
			    bypass, coffset, oplen);
		}
#endif
		if (coffset & 3) {	/* offset must be 32-bit aligned */
			DPRINTF(("%s: coffset %u misaligned\n",
			    __func__, coffset));
			safestats.st_coffmisaligned++;
			err = EINVAL;
			goto errout;
		}
		coffset >>= 2;
		if (coffset > 255) {	/* offset must be <256 dwords */
			DPRINTF(("%s: coffset %u too big\n",
			    __func__, coffset));
			safestats.st_cofftoobig++;
			err = EINVAL;
			goto errout;
		}
		/*
		 * Tell the hardware to copy the header to the output.
		 * The header is defined as the data from the end of
		 * the bypass to the start of data to be encrypted.
		 * Typically this is the inline IV.  Note that you need
		 * to do this even if src+dst are the same; it appears
		 * that w/o this bit the crypted data is written
		 * immediately after the bypass data.
		 */
		cmd1 |= SAFE_SA_CMD1_HDRCOPY;
		/*
		 * Disable IP header mutable bit handling.  This is
		 * needed to get correct HMAC calculations.
		 */
		cmd1 |= SAFE_SA_CMD1_MUTABLE;
	} else {
		bypass = crp->crp_payload_start;
		oplen = bypass + crp->crp_payload_length;
		coffset = 0;
	}
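	/*
	 * Worked example (hypothetical ETA request): with aad_start 0,
	 * aad_length 8, payload_start 8 and payload_length 64, the
	 * code above yields bypass = 0 (DMA starts at the AAD),
	 * coffset = 8 bytes = 2 dwords (encryption starts 8 bytes
	 * after hashing) and oplen = 72 (AAD + payload are handed to
	 * the engine).
	 */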
	/* XXX verify multiple of 4 when using s/g */
	if (bypass > 96) {	/* bypass offset must be <= 96 bytes */
		DPRINTF(("%s: bypass %u too big\n", __func__, bypass));
		safestats.st_bypasstoobig++;
		err = EINVAL;
		goto errout;
	}

	if (bus_dmamap_create(sc->sc_srcdmat, BUS_DMA_NOWAIT, &re->re_src_map)) {
		safestats.st_nomap++;
		err = ENOMEM;
		goto errout;
	}
	if (bus_dmamap_load_crp(sc->sc_srcdmat, re->re_src_map, crp, safe_op_cb,
	    &re->re_src, BUS_DMA_NOWAIT) != 0) {
		bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
		re->re_src_map = NULL;
		safestats.st_noload++;
		err = ENOMEM;
		goto errout;
	}
	re->re_src_mapsize = crypto_buffer_len(&crp->crp_buf);
	nicealign = safe_dmamap_aligned(&re->re_src);
	uniform = safe_dmamap_uniform(&re->re_src);

	DPRINTF(("src nicealign %u uniform %u nsegs %u\n",
	    nicealign, uniform, re->re_src.nsegs));
	if (re->re_src.nsegs > 1) {
		re->re_desc.d_src = sc->sc_spalloc.dma_paddr +
		    ((caddr_t) sc->sc_spfree - (caddr_t) sc->sc_spring);
		for (i = 0; i < re->re_src_nsegs; i++) {
			/* NB: no need to check if there's space */
			pd = sc->sc_spfree;
			if (++(sc->sc_spfree) == sc->sc_springtop)
				sc->sc_spfree = sc->sc_spring;

			KASSERT((pd->pd_flags&3) == 0 ||
			    (pd->pd_flags&3) == SAFE_PD_DONE,
			    ("bogus source particle descriptor; flags %x",
			    pd->pd_flags));
			pd->pd_addr = re->re_src_segs[i].ds_addr;
			pd->pd_size = re->re_src_segs[i].ds_len;
			pd->pd_flags = SAFE_PD_READY;
		}
		cmd0 |= SAFE_SA_CMD0_IGATHER;
	} else {
		/*
		 * No need for gather, reference the operand directly.
		 */
		re->re_desc.d_src = re->re_src_segs[0].ds_addr;
	}
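
	/*
	 * Destination setup.  Summary of the cases handled below
	 * (descriptive, matches the code rather than the manual):
	 *   1. hash-only op: no destination at all;
	 *   2. source aligned and uniform: share the source map and
	 *      segment list;
	 *   3. source aligned but not uniform: reload through the dst
	 *      tag to get a particle list the engine can scatter to;
	 *   4. misaligned mbuf: allocate a fresh chain and let the
	 *      hardware (plus safe_mcopy) produce/copy the result;
	 *   5. misaligned or non-uniform uio: not supported, EINVAL.
	 */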

	if (csp->csp_mode == CSP_MODE_DIGEST) {
		/*
		 * Hash op; no destination needed.
		 */
	} else {
		if (nicealign && uniform == 1) {
			/*
			 * Source layout is suitable for direct
			 * sharing of the DMA map and segment list.
			 */
			re->re_dst = re->re_src;
		} else if (nicealign && uniform == 2) {
			/*
			 * The source is properly aligned but requires a
			 * different particle list to handle DMA of the
			 * result.  Create a new map and do the load to
			 * create the segment list.  The particle
			 * descriptor setup code below will handle the
			 * rest.
			 */
			if (bus_dmamap_create(sc->sc_dstdmat, BUS_DMA_NOWAIT,
			    &re->re_dst_map)) {
				safestats.st_nomap++;
				err = ENOMEM;
				goto errout;
			}
			if (bus_dmamap_load_crp(sc->sc_dstdmat, re->re_dst_map,
			    crp, safe_op_cb, &re->re_dst, BUS_DMA_NOWAIT) !=
			    0) {
				bus_dmamap_destroy(sc->sc_dstdmat,
				    re->re_dst_map);
				re->re_dst_map = NULL;
				safestats.st_noload++;
				err = ENOMEM;
				goto errout;
			}
		} else if (crp->crp_buf.cb_type == CRYPTO_BUF_MBUF) {
			int totlen, len;
			struct mbuf *m, *top, **mp;

			/*
			 * DMA constraints require that we allocate a
			 * new mbuf chain for the destination.  We
			 * allocate an entire new set of mbufs of
			 * optimal/required size and then tell the
			 * hardware to copy any bits that are not
			 * created as a byproduct of the operation.
			 */
			if (!nicealign)
				safestats.st_unaligned++;
			if (!uniform)
				safestats.st_notuniform++;
			totlen = re->re_src_mapsize;
			if (crp->crp_buf.cb_mbuf->m_flags & M_PKTHDR) {
				len = MHLEN;
				MGETHDR(m, M_NOWAIT, MT_DATA);
				if (m && !m_dup_pkthdr(m, crp->crp_buf.cb_mbuf,
				    M_NOWAIT)) {
					m_free(m);
					m = NULL;
				}
			} else {
				len = MLEN;
				MGET(m, M_NOWAIT, MT_DATA);
			}
			if (m == NULL) {
				safestats.st_nombuf++;
				err = sc->sc_nqchip ? ERESTART : ENOMEM;
				goto errout;
			}
			if (totlen >= MINCLSIZE) {
				if (!(MCLGET(m, M_NOWAIT))) {
					m_free(m);
					safestats.st_nomcl++;
					err = sc->sc_nqchip ?
					    ERESTART : ENOMEM;
					goto errout;
				}
				len = MCLBYTES;
			}
			m->m_len = len;
			top = NULL;
			mp = &top;

			while (totlen > 0) {
				if (top) {
					MGET(m, M_NOWAIT, MT_DATA);
					if (m == NULL) {
						m_freem(top);
						safestats.st_nombuf++;
						err = sc->sc_nqchip ?
						    ERESTART : ENOMEM;
						goto errout;
					}
					len = MLEN;
				}
				if (top && totlen >= MINCLSIZE) {
					if (!(MCLGET(m, M_NOWAIT))) {
						*mp = m;
						m_freem(top);
						safestats.st_nomcl++;
						err = sc->sc_nqchip ?
						    ERESTART : ENOMEM;
						goto errout;
					}
					len = MCLBYTES;
				}
				m->m_len = len = min(totlen, len);
				totlen -= len;
				*mp = m;
				mp = &m->m_next;
			}
			re->re_dst_m = top;
			if (bus_dmamap_create(sc->sc_dstdmat,
			    BUS_DMA_NOWAIT, &re->re_dst_map) != 0) {
				safestats.st_nomap++;
				err = ENOMEM;
				goto errout;
			}
			if (bus_dmamap_load_mbuf_sg(sc->sc_dstdmat,
			    re->re_dst_map, top, re->re_dst_segs,
			    &re->re_dst_nsegs, 0) != 0) {
				bus_dmamap_destroy(sc->sc_dstdmat,
				    re->re_dst_map);
				re->re_dst_map = NULL;
				safestats.st_noload++;
				err = ENOMEM;
				goto errout;
			}
			re->re_dst_mapsize = re->re_src_mapsize;
			if (re->re_src.mapsize > oplen) {
				/*
				 * There's data following what the
				 * hardware will copy for us.  If this
				 * isn't just the ICV (that's going to
				 * be written on completion), copy it
				 * to the new mbufs
				 */
				if (!(csp->csp_mode == CSP_MODE_ETA &&
				    (re->re_src.mapsize-oplen) == ses->ses_mlen &&
				    crp->crp_digest_start == oplen))
					safe_mcopy(crp->crp_buf.cb_mbuf,
					    re->re_dst_m, oplen);
				else
					safestats.st_noicvcopy++;
			}
		} else {
			if (!nicealign) {
				safestats.st_iovmisaligned++;
				err = EINVAL;
				goto errout;
			} else {
				/*
				 * There's no way to handle the DMA
				 * requirements with this uio.  We
				 * could create a separate DMA area for
				 * the result and then copy it back,
				 * but for now we just bail and return
				 * an error.  Note that uio requests
				 * > SAFE_MAX_DSIZE are handled because
				 * the DMA map and segment list for the
				 * destination will result in a
				 * destination particle list that does
				 * the necessary scatter DMA.
				 */
				safestats.st_iovnotuniform++;
				err = EINVAL;
				goto errout;
			}
		}

		if (re->re_dst.nsegs > 1) {
			re->re_desc.d_dst = sc->sc_dpalloc.dma_paddr +
			    ((caddr_t) sc->sc_dpfree - (caddr_t) sc->sc_dpring);
			for (i = 0; i < re->re_dst_nsegs; i++) {
				pd = sc->sc_dpfree;
				KASSERT((pd->pd_flags&3) == 0 ||
				    (pd->pd_flags&3) == SAFE_PD_DONE,
				    ("bogus dest particle descriptor; flags %x",
				    pd->pd_flags));
				if (++(sc->sc_dpfree) == sc->sc_dpringtop)
					sc->sc_dpfree = sc->sc_dpring;
				pd->pd_addr = re->re_dst_segs[i].ds_addr;
				pd->pd_flags = SAFE_PD_READY;
			}
			cmd0 |= SAFE_SA_CMD0_OSCATTER;
		} else {
			/*
			 * No need for scatter, reference the operand
			 * directly.
			 */
			re->re_desc.d_dst = re->re_dst_segs[0].ds_addr;
		}
	}

	/*
	 * All done with setup; fillin the SA command words
	 * and the packet engine descriptor.  The operation
	 * is now ready for submission to the hardware.
	 */
	sa->sa_cmd0 = cmd0 | SAFE_SA_CMD0_IPCI | SAFE_SA_CMD0_OPCI;
	sa->sa_cmd1 = cmd1
		    | (coffset << SAFE_SA_CMD1_OFFSET_S)
		    | SAFE_SA_CMD1_SAREV1	/* Rev 1 SA data structure */
		    | SAFE_SA_CMD1_SRPCI
		    ;
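
	/*
	 * Descriptive note (field meanings inferred from the register
	 * names in safereg.h): cmd0 selects the operation class, the
	 * cipher/hash algorithms, pad mode, IV/hash-state load sources
	 * and gather/scatter; cmd1 carries the modifiers (CBC, HMAC,
	 * AES key size, header copy) plus the crypt offset in 32-bit
	 * words.  The IPCI/OPCI/SRPCI bits mark input, output and
	 * state record as residing in PCI (host) memory.
	 */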
	/*
	 * NB: the order of writes is important here.  In case the
	 * chip is scanning the ring because of an outstanding request
	 * it might nab this one too.  In that case we need to make
	 * sure the setup is complete before we write the length
	 * field of the descriptor as it signals the descriptor is
	 * ready for processing.
	 */
	re->re_desc.d_csr = SAFE_PE_CSR_READY | SAFE_PE_CSR_SAPCI;
	if (csp->csp_auth_alg != 0)
		re->re_desc.d_csr |= SAFE_PE_CSR_LOADSA | SAFE_PE_CSR_HASHFINAL;
	re->re_desc.d_len = oplen
			  | SAFE_PE_LEN_READY
			  | (bypass << SAFE_PE_LEN_BYPASS_S)
			  ;

	safestats.st_ipackets++;
	safestats.st_ibytes += oplen;

	if (++(sc->sc_front) == sc->sc_ringtop)
		sc->sc_front = sc->sc_ring;

	/* XXX honor batching */
	safe_feed(sc, re);
	mtx_unlock(&sc->sc_ringmtx);
	return (0);

errout:
	if (re->re_dst_m != NULL)
		m_freem(re->re_dst_m);

	if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) {
		bus_dmamap_unload(sc->sc_dstdmat, re->re_dst_map);
		bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map);
	}
	if (re->re_src_map != NULL) {
		bus_dmamap_unload(sc->sc_srcdmat, re->re_src_map);
		bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
	}
	mtx_unlock(&sc->sc_ringmtx);
	if (err != ERESTART) {
		crp->crp_etype = err;
		crypto_done(crp);
		err = 0;
	} else {
		sc->sc_needwakeup |= CRYPTO_SYMQ;
	}
	return (err);
}
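
/*
 * Completion handler; called from safe_intr() with the ring lock held
 * for each descriptor the packet engine has finished.  It tears down
 * the DMA state, fixes up the saved ICV when one was requested, and
 * hands the request back to the opencrypto framework via crypto_done().
 */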
static void
safe_callback(struct safe_softc *sc, struct safe_ringentry *re)
{
	const struct crypto_session_params *csp;
	struct cryptop *crp = (struct cryptop *)re->re_crp;
	struct safe_session *ses;
	uint8_t hash[HASH_MAX_LEN];

	ses = crypto_get_driver_session(crp->crp_session);
	csp = crypto_get_params(crp->crp_session);

	safestats.st_opackets++;
	safestats.st_obytes += re->re_dst.mapsize;

	safe_dma_sync(&sc->sc_ringalloc,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	if (re->re_desc.d_csr & SAFE_PE_CSR_STATUS) {
		device_printf(sc->sc_dev, "csr 0x%x cmd0 0x%x cmd1 0x%x\n",
		    re->re_desc.d_csr,
		    re->re_sa.sa_cmd0, re->re_sa.sa_cmd1);
		safestats.st_peoperr++;
		crp->crp_etype = EIO;	/* something more meaningful? */
	}

	/*
	 * XXX: Should crp_buf.cb_mbuf be updated to re->re_dst_m if
	 * it is non-NULL?
	 */

	if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) {
		bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dstdmat, re->re_dst_map);
		bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map);
	}
	bus_dmamap_sync(sc->sc_srcdmat, re->re_src_map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_srcdmat, re->re_src_map);
	bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);

	if (re->re_flags & SAFE_QFLAGS_COPYOUTICV) {
		if (csp->csp_auth_alg == CRYPTO_SHA1_HMAC) {
			/*
			 * SHA-1 ICV's are byte-swapped; fix 'em up
			 * before copying them to their destination.
			 */
			re->re_sastate.sa_saved_indigest[0] =
			    bswap32(re->re_sastate.sa_saved_indigest[0]);
			re->re_sastate.sa_saved_indigest[1] =
			    bswap32(re->re_sastate.sa_saved_indigest[1]);
			re->re_sastate.sa_saved_indigest[2] =
			    bswap32(re->re_sastate.sa_saved_indigest[2]);
		}

		if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
			crypto_copydata(crp, crp->crp_digest_start,
			    ses->ses_mlen, hash);
			if (timingsafe_bcmp(re->re_sastate.sa_saved_indigest,
			    hash, ses->ses_mlen) != 0)
				crp->crp_etype = EBADMSG;
		} else
			crypto_copyback(crp, crp->crp_digest_start,
			    ses->ses_mlen, re->re_sastate.sa_saved_indigest);
	}
	crypto_done(crp);
}

/*
 * Copy all data past offset from srcm to dstm.
 */
static void
safe_mcopy(struct mbuf *srcm, struct mbuf *dstm, u_int offset)
{
	u_int j, dlen, slen;
	caddr_t dptr, sptr;

	/*
	 * Advance src and dst to offset.
	 */
	j = offset;
	while (j >= srcm->m_len) {
		j -= srcm->m_len;
		srcm = srcm->m_next;
		if (srcm == NULL)
			return;
	}
	sptr = mtod(srcm, caddr_t) + j;
	slen = srcm->m_len - j;

	j = offset;
	while (j >= dstm->m_len) {
		j -= dstm->m_len;
		dstm = dstm->m_next;
		if (dstm == NULL)
			return;
	}
	dptr = mtod(dstm, caddr_t) + j;
	dlen = dstm->m_len - j;

	/*
	 * Copy everything that remains.
	 */
	for (;;) {
		j = min(slen, dlen);
		bcopy(sptr, dptr, j);
		if (slen == j) {
			srcm = srcm->m_next;
			if (srcm == NULL)
				return;
			sptr = srcm->m_data;
			slen = srcm->m_len;
		} else
			sptr += j, slen -= j;
		if (dlen == j) {
			dstm = dstm->m_next;
			if (dstm == NULL)
				return;
			dptr = dstm->m_data;
			dlen = dstm->m_len;
		} else
			dptr += j, dlen -= j;
	}
}

#ifndef SAFE_NO_RNG
#define	SAFE_RNG_MAXWAIT	1000

static void
safe_rng_init(struct safe_softc *sc)
{
	u_int32_t w, v;
	int i;

	WRITE_REG(sc, SAFE_RNG_CTRL, 0);
	/* use default value according to the manual */
	WRITE_REG(sc, SAFE_RNG_CNFG, 0x834);	/* magic from SafeNet */
	WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);

	/*
	 * There is a bug in rev 1.0 of the 1140 that when the RNG
	 * is brought out of reset the ready status flag does not
	 * work until the RNG has finished its internal initialization.
	 *
	 * So in order to determine that the device is through its
	 * initialization we must read the data register, using the
	 * status reg in the read in case it is initialized.  Then read
	 * the data register until it changes from the first read.
	 * Once it changes read the data register until it changes
	 * again.  At this time the RNG is considered initialized.
	 * This could take between 750ms and 1000ms.
	 */
	i = 0;
	w = READ_REG(sc, SAFE_RNG_OUT);
	do {
		v = READ_REG(sc, SAFE_RNG_OUT);
		if (v != w) {
			w = v;
			break;
		}
		DELAY(10);
	} while (++i < SAFE_RNG_MAXWAIT);

	/* Wait until data changes again */
	i = 0;
	do {
		v = READ_REG(sc, SAFE_RNG_OUT);
		if (v != w)
			break;
		DELAY(10);
	} while (++i < SAFE_RNG_MAXWAIT);
}

static __inline void
safe_rng_disable_short_cycle(struct safe_softc *sc)
{
	WRITE_REG(sc, SAFE_RNG_CTRL,
	    READ_REG(sc, SAFE_RNG_CTRL) &~ SAFE_RNG_CTRL_SHORTEN);
}

static __inline void
safe_rng_enable_short_cycle(struct safe_softc *sc)
{
	WRITE_REG(sc, SAFE_RNG_CTRL,
	    READ_REG(sc, SAFE_RNG_CTRL) | SAFE_RNG_CTRL_SHORTEN);
}

static __inline u_int32_t
safe_rng_read(struct safe_softc *sc)
{
	int i;

	i = 0;
	while (READ_REG(sc, SAFE_RNG_STAT) != 0 && ++i < SAFE_RNG_MAXWAIT)
		;
	return READ_REG(sc, SAFE_RNG_OUT);
}
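
/*
 * Periodic RNG polling (descriptive summary of the routine below):
 * fetch up to safe_rngbufsize words, then check the comparator alarm
 * counter.  If it exceeds safe_rngmaxalarm the oscillators may have
 * locked to an external signal; the recovery loop enables short-cycle
 * mode and walks the low 6 bits of SAFE_RNG_CNFG (the oscillator
 * frequency trim) until a setting produces no alarms, then refetches.
 */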
static void
safe_rng(void *arg)
{
	struct safe_softc *sc = arg;
	u_int32_t buf[SAFE_RNG_MAXBUFSIZ];	/* NB: maybe move to softc */
	u_int maxwords;
	int i;

	safestats.st_rng++;
	/*
	 * Fetch the next block of data.
	 */
	maxwords = safe_rngbufsize;
	if (maxwords > SAFE_RNG_MAXBUFSIZ)
		maxwords = SAFE_RNG_MAXBUFSIZ;
retry:
	for (i = 0; i < maxwords; i++)
		buf[i] = safe_rng_read(sc);
	/*
	 * Check the comparator alarm count and reset the h/w if
	 * it exceeds our threshold.  This guards against the
	 * hardware oscillators resonating with external signals.
	 */
	if (READ_REG(sc, SAFE_RNG_ALM_CNT) > safe_rngmaxalarm) {
		u_int32_t freq_inc, w;

		DPRINTF(("%s: alarm count %u exceeds threshold %u\n", __func__,
		    READ_REG(sc, SAFE_RNG_ALM_CNT), safe_rngmaxalarm));
		safestats.st_rngalarm++;
		safe_rng_enable_short_cycle(sc);
		freq_inc = 18;
		for (i = 0; i < 64; i++) {
			w = READ_REG(sc, SAFE_RNG_CNFG);
			freq_inc = ((w + freq_inc) & 0x3fL);
			w = ((w & ~0x3fL) | freq_inc);
			WRITE_REG(sc, SAFE_RNG_CNFG, w);

			WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);

			(void) safe_rng_read(sc);
			DELAY(25);

			if (READ_REG(sc, SAFE_RNG_ALM_CNT) == 0) {
				safe_rng_disable_short_cycle(sc);
				goto retry;
			}
			freq_inc = 1;
		}
		safe_rng_disable_short_cycle(sc);
	} else
		WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);

	(*sc->sc_harvest)(sc->sc_rndtest, buf, maxwords*sizeof (u_int32_t));
	callout_reset(&sc->sc_rngto,
	    hz * (safe_rnginterval ? safe_rnginterval : 1), safe_rng, sc);
}
#endif /* SAFE_NO_RNG */

static void
safe_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr = (bus_addr_t*) arg;
	*paddr = segs->ds_addr;
}

static int
safe_dma_malloc(
	struct safe_softc *sc,
	bus_size_t size,
	struct safe_dma_alloc *dma,
	int mapflags
)
{
	int r;

	r = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
	    sizeof(u_int32_t), 0,	/* alignment, bounds */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsize */
	    BUS_DMA_ALLOCNOW,		/* flags */
	    NULL, NULL,			/* locking */
	    &dma->dma_tag);
	if (r != 0) {
		device_printf(sc->sc_dev, "safe_dma_malloc: "
		    "bus_dma_tag_create failed; error %u\n", r);
		goto fail_0;
	}

	r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
	    BUS_DMA_NOWAIT, &dma->dma_map);
	if (r != 0) {
		device_printf(sc->sc_dev, "safe_dma_malloc: "
		    "bus_dmamem_alloc failed; size %ju, error %u\n",
		    (uintmax_t)size, r);
		goto fail_1;
	}

	r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
	    size,
	    safe_dmamap_cb,
	    &dma->dma_paddr,
	    mapflags | BUS_DMA_NOWAIT);
	if (r != 0) {
		device_printf(sc->sc_dev, "safe_dma_malloc: "
		    "bus_dmamap_load failed; error %u\n", r);
		goto fail_2;
	}

	dma->dma_size = size;
	return (0);

	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
fail_2:
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
fail_1:
	bus_dma_tag_destroy(dma->dma_tag);
fail_0:
	dma->dma_tag = NULL;
	return (r);
}

static void
safe_dma_free(struct safe_softc *sc, struct safe_dma_alloc *dma)
{
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
	bus_dma_tag_destroy(dma->dma_tag);
}

/*
 * Resets the board.  Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 */
static void
safe_reset_board(struct safe_softc *sc)
{
	u_int32_t v;
	/*
	 * Reset the device.  The manual says no delay
	 * is needed between marking and clearing reset.
	 */
	v = READ_REG(sc, SAFE_PE_DMACFG) &~
	    (SAFE_PE_DMACFG_PERESET | SAFE_PE_DMACFG_PDRRESET |
	     SAFE_PE_DMACFG_SGRESET);
	WRITE_REG(sc, SAFE_PE_DMACFG, v
	    | SAFE_PE_DMACFG_PERESET
	    | SAFE_PE_DMACFG_PDRRESET
	    | SAFE_PE_DMACFG_SGRESET);
	WRITE_REG(sc, SAFE_PE_DMACFG, v);
}

/*
 * Initialize registers we need to touch only once.
 */
static void
safe_init_board(struct safe_softc *sc)
{
	u_int32_t v, dwords;

	v = READ_REG(sc, SAFE_PE_DMACFG);
	v &=~ SAFE_PE_DMACFG_PEMODE;
	v |= SAFE_PE_DMACFG_FSENA	/* failsafe enable */
	  |  SAFE_PE_DMACFG_GPRPCI	/* gather ring on PCI */
	  |  SAFE_PE_DMACFG_SPRPCI	/* scatter ring on PCI */
	  |  SAFE_PE_DMACFG_ESDESC	/* endian-swap descriptors */
	  |  SAFE_PE_DMACFG_ESSA	/* endian-swap SA's */
	  |  SAFE_PE_DMACFG_ESPDESC	/* endian-swap part. desc's */
	  ;
	WRITE_REG(sc, SAFE_PE_DMACFG, v);
#if 0
	/* XXX select byte swap based on host byte order */
	WRITE_REG(sc, SAFE_ENDIAN, 0x1b);
#endif
	if (sc->sc_chiprev == SAFE_REV(1,0)) {
		/*
		 * Avoid large PCI DMA transfers.  Rev 1.0 has a bug where
		 * "target mode transfers" done while the chip is DMA'ing
		 * >1020 bytes cause the hardware to lockup.  To avoid this
		 * we reduce the max PCI transfer size and use small source
		 * particle descriptors (<= 256 bytes).
		 */
		WRITE_REG(sc, SAFE_DMA_CFG, 256);
		device_printf(sc->sc_dev,
		    "Reduce max DMA size to %u words for rev %u.%u WAR\n",
		    (READ_REG(sc, SAFE_DMA_CFG)>>2) & 0xff,
		    SAFE_REV_MAJ(sc->sc_chiprev),
		    SAFE_REV_MIN(sc->sc_chiprev));
	}

	/* NB: operands+results are overlaid */
	WRITE_REG(sc, SAFE_PE_PDRBASE, sc->sc_ringalloc.dma_paddr);
	WRITE_REG(sc, SAFE_PE_RDRBASE, sc->sc_ringalloc.dma_paddr);
	/*
	 * Configure ring entry size and number of items in the ring.
	 */
	KASSERT((sizeof(struct safe_ringentry) % sizeof(u_int32_t)) == 0,
	    ("PE ring entry not 32-bit aligned!"));
	dwords = sizeof(struct safe_ringentry) / sizeof(u_int32_t);
	WRITE_REG(sc, SAFE_PE_RINGCFG,
	    (dwords << SAFE_PE_RINGCFG_OFFSET_S) | SAFE_MAX_NQUEUE);
	WRITE_REG(sc, SAFE_PE_RINGPOLL, 0);	/* disable polling */

	WRITE_REG(sc, SAFE_PE_GRNGBASE, sc->sc_spalloc.dma_paddr);
	WRITE_REG(sc, SAFE_PE_SRNGBASE, sc->sc_dpalloc.dma_paddr);
	WRITE_REG(sc, SAFE_PE_PARTSIZE,
	    (SAFE_TOTAL_DPART<<16) | SAFE_TOTAL_SPART);
	/*
	 * NB: destination particles are fixed size.  We use
	 *     an mbuf cluster and require all results go to
	 *     clusters or smaller.
	 */
	WRITE_REG(sc, SAFE_PE_PARTCFG, SAFE_MAX_DSIZE);

	/* it's now safe to enable PE mode, do it */
	WRITE_REG(sc, SAFE_PE_DMACFG, v | SAFE_PE_DMACFG_PEMODE);

	/*
	 * Configure hardware to use level-triggered interrupts and
	 * to interrupt after each descriptor is processed.
	 */
	WRITE_REG(sc, SAFE_HI_CFG, SAFE_HI_CFG_LEVEL);
	WRITE_REG(sc, SAFE_HI_DESC_CNT, 1);
	WRITE_REG(sc, SAFE_HI_MASK, SAFE_INT_PE_DDONE | SAFE_INT_PE_ERROR);
}

/*
 * Init PCI registers
 */
static void
safe_init_pciregs(device_t dev)
{
}

/*
 * Clean up after a chip crash.
 * It is assumed that the caller is in splimp().
 */
static void
safe_cleanchip(struct safe_softc *sc)
{

	if (sc->sc_nqchip != 0) {
		struct safe_ringentry *re = sc->sc_back;

		while (re != sc->sc_front) {
			if (re->re_desc.d_csr != 0)
				safe_free_entry(sc, re);
			if (++re == sc->sc_ringtop)
				re = sc->sc_ring;
		}
		sc->sc_back = re;
		sc->sc_nqchip = 0;
	}
}

/*
 * free a safe_q
 * It is assumed that the caller is within splimp().
 */
static int
safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re)
{
	struct cryptop *crp;

	/*
	 * Free header MCR
	 */
	if (re->re_dst_m != NULL)
		m_freem(re->re_dst_m);

	crp = (struct cryptop *)re->re_crp;

	re->re_desc.d_csr = 0;

	crp->crp_etype = EFAULT;
	crypto_done(crp);
	return(0);
}

/*
 * Routine to reset the chip and clean up.
 * It is assumed that the caller is in splimp().
 */
static void
safe_totalreset(struct safe_softc *sc)
{
	safe_reset_board(sc);
	safe_init_board(sc);
	safe_cleanchip(sc);
}

/*
 * Is the operand suitably aligned for direct DMA?  Each
 * segment must be aligned on a 32-bit boundary and all
 * but the last segment must be a multiple of 4 bytes.
 */
static int
safe_dmamap_aligned(const struct safe_operand *op)
{
	int i;

	for (i = 0; i < op->nsegs; i++) {
		if (op->segs[i].ds_addr & 3)
			return (0);
		if (i != (op->nsegs - 1) && (op->segs[i].ds_len & 3))
			return (0);
	}
	return (1);
}

/*
 * Is the operand suitable for direct DMA as the destination
 * of an operation.  The hardware requires that each ``particle''
 * but the last in an operation result have the same size.  We
 * fix that size at SAFE_MAX_DSIZE bytes.  This routine returns
 * 0 if some segment is not a multiple of this size, 1 if all
 * segments are exactly this size, or 2 if segments are at worst
 * a multiple of this size.
 */
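/*
 * Example (illustrative; assumes SAFE_MAX_DSIZE is the mbuf cluster
 * size, 2048 bytes on common configurations):
 *	segment lengths {2048, 2048, 100} -> 1 (all exactly the size)
 *	segment lengths {4096, 2048, 100} -> 2 (multiples, not exact)
 *	segment lengths {1500, 2048, 100} -> 0 (1500 not a multiple)
 * The last segment is never examined, so its length is irrelevant.
 */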
static int
safe_dmamap_uniform(const struct safe_operand *op)
{
	int result = 1;

	if (op->nsegs > 0) {
		int i;

		for (i = 0; i < op->nsegs-1; i++) {
			if (op->segs[i].ds_len % SAFE_MAX_DSIZE)
				return (0);
			if (op->segs[i].ds_len != SAFE_MAX_DSIZE)
				result = 2;
		}
	}
	return (result);
}

#ifdef SAFE_DEBUG
static void
safe_dump_dmastatus(struct safe_softc *sc, const char *tag)
{
	printf("%s: ENDIAN 0x%x SRC 0x%x DST 0x%x STAT 0x%x\n"
		, tag
		, READ_REG(sc, SAFE_DMA_ENDIAN)
		, READ_REG(sc, SAFE_DMA_SRCADDR)
		, READ_REG(sc, SAFE_DMA_DSTADDR)
		, READ_REG(sc, SAFE_DMA_STAT)
	);
}

static void
safe_dump_intrstate(struct safe_softc *sc, const char *tag)
{
	printf("%s: HI_CFG 0x%x HI_MASK 0x%x HI_DESC_CNT 0x%x HU_STAT 0x%x HM_STAT 0x%x\n"
		, tag
		, READ_REG(sc, SAFE_HI_CFG)
		, READ_REG(sc, SAFE_HI_MASK)
		, READ_REG(sc, SAFE_HI_DESC_CNT)
		, READ_REG(sc, SAFE_HU_STAT)
		, READ_REG(sc, SAFE_HM_STAT)
	);
}

static void
safe_dump_ringstate(struct safe_softc *sc, const char *tag)
{
	u_int32_t estat = READ_REG(sc, SAFE_PE_ERNGSTAT);

	/* NB: assume caller has lock on ring */
	printf("%s: ERNGSTAT %x (next %u) back %lu front %lu\n",
	    tag,
	    estat, (estat >> SAFE_PE_ERNGSTAT_NEXT_S),
	    (unsigned long)(sc->sc_back - sc->sc_ring),
	    (unsigned long)(sc->sc_front - sc->sc_ring));
}

static void
safe_dump_request(struct safe_softc *sc, const char* tag, struct safe_ringentry *re)
{
	int ix, nsegs;

	ix = re - sc->sc_ring;
	printf("%s: %p (%u): csr %x src %x dst %x sa %x len %x\n"
		, tag
		, re, ix
		, re->re_desc.d_csr
		, re->re_desc.d_src
		, re->re_desc.d_dst
		, re->re_desc.d_sa
		, re->re_desc.d_len
	);
	if (re->re_src.nsegs > 1) {
		ix = (re->re_desc.d_src - sc->sc_spalloc.dma_paddr) /
		    sizeof(struct safe_pdesc);
		for (nsegs = re->re_src.nsegs; nsegs; nsegs--) {
			printf(" spd[%u] %p: %p size %u flags %x"
				, ix, &sc->sc_spring[ix]
				, (caddr_t)(uintptr_t) sc->sc_spring[ix].pd_addr
				, sc->sc_spring[ix].pd_size
				, sc->sc_spring[ix].pd_flags
			);
			if (sc->sc_spring[ix].pd_size == 0)
				printf(" (zero!)");
			printf("\n");
			if (++ix == SAFE_TOTAL_SPART)
				ix = 0;
		}
	}
	if (re->re_dst.nsegs > 1) {
		ix = (re->re_desc.d_dst - sc->sc_dpalloc.dma_paddr) /
		    sizeof(struct safe_pdesc);
		for (nsegs = re->re_dst.nsegs; nsegs; nsegs--) {
			printf(" dpd[%u] %p: %p flags %x\n"
				, ix, &sc->sc_dpring[ix]
				, (caddr_t)(uintptr_t) sc->sc_dpring[ix].pd_addr
				, sc->sc_dpring[ix].pd_flags
			);
			if (++ix == SAFE_TOTAL_DPART)
				ix = 0;
		}
	}
	printf("sa: cmd0 %08x cmd1 %08x staterec %x\n",
	    re->re_sa.sa_cmd0, re->re_sa.sa_cmd1, re->re_sa.sa_staterec);
	printf("sa: key %x %x %x %x %x %x %x %x\n"
		, re->re_sa.sa_key[0]
		, re->re_sa.sa_key[1]
		, re->re_sa.sa_key[2]
		, re->re_sa.sa_key[3]
		, re->re_sa.sa_key[4]
		, re->re_sa.sa_key[5]
		, re->re_sa.sa_key[6]
		, re->re_sa.sa_key[7]
	);
	printf("sa: indigest %x %x %x %x %x\n"
		, re->re_sa.sa_indigest[0]
		, re->re_sa.sa_indigest[1]
		, re->re_sa.sa_indigest[2]
		, re->re_sa.sa_indigest[3]
		, re->re_sa.sa_indigest[4]
	);
	printf("sa: outdigest %x %x %x %x %x\n"
		, re->re_sa.sa_outdigest[0]
		, re->re_sa.sa_outdigest[1]
		, re->re_sa.sa_outdigest[2]
		, re->re_sa.sa_outdigest[3]
		, re->re_sa.sa_outdigest[4]
	);
	printf("sr: iv %x %x %x %x\n"
		, re->re_sastate.sa_saved_iv[0]
		, re->re_sastate.sa_saved_iv[1]
		, re->re_sastate.sa_saved_iv[2]
		, re->re_sastate.sa_saved_iv[3]
	);
	printf("sr: hashbc %u indigest %x %x %x %x %x\n"
		, re->re_sastate.sa_saved_hashbc
		, re->re_sastate.sa_saved_indigest[0]
		, re->re_sastate.sa_saved_indigest[1]
		, re->re_sastate.sa_saved_indigest[2]
		, re->re_sastate.sa_saved_indigest[3]
		, re->re_sastate.sa_saved_indigest[4]
	);
}

static void
safe_dump_ring(struct safe_softc *sc, const char *tag)
{
	mtx_lock(&sc->sc_ringmtx);
	printf("\nSafeNet Ring State:\n");
	safe_dump_intrstate(sc, tag);
	safe_dump_dmastatus(sc, tag);
	safe_dump_ringstate(sc, tag);
	if (sc->sc_nqchip) {
		struct safe_ringentry *re = sc->sc_back;
		do {
			safe_dump_request(sc, tag, re);
			if (++re == sc->sc_ringtop)
				re = sc->sc_ring;
		} while (re != sc->sc_front);
	}
	mtx_unlock(&sc->sc_ringmtx);
}

static int
sysctl_hw_safe_dump(SYSCTL_HANDLER_ARGS)
{
	char dmode[64];
	int error;

	strncpy(dmode, "", sizeof(dmode) - 1);
	dmode[sizeof(dmode) - 1] = '\0';
	error = sysctl_handle_string(oidp, &dmode[0], sizeof(dmode), req);

	if (error == 0 && req->newptr != NULL) {
		struct safe_softc *sc = safec;

		if (!sc)
			return EINVAL;
		if (strncmp(dmode, "dma", 3) == 0)
			safe_dump_dmastatus(sc, "safe0");
		else if (strncmp(dmode, "int", 3) == 0)
			safe_dump_intrstate(sc, "safe0");
		else if (strncmp(dmode, "ring", 4) == 0)
			safe_dump_ring(sc, "safe0");
		else
			return EINVAL;
	}
	return error;
}
SYSCTL_PROC(_hw_safe, OID_AUTO, dump,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 0, 0,
    sysctl_hw_safe_dump, "A",
    "Dump driver state");
#endif /* SAFE_DEBUG */