/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2003 Sam Leffler, Errno Consulting
 * Copyright (c) 2003 Global Technology Associates, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * SafeNet SafeXcel-1141 hardware crypto accelerator
 */
#include "opt_safe.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform_auth.h>
#include <sys/random.h>
#include <sys/kobj.h>

#include "cryptodev_if.h"

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#ifdef SAFE_RNDTEST
#include <dev/rndtest/rndtest.h>
#endif
#include <dev/safe/safereg.h>
#include <dev/safe/safevar.h>

#ifndef bswap32
#define	bswap32	NTOHL
#endif

/*
 * Prototypes for the device and crypto driver methods.
 */
static	int safe_probe(device_t);
static	int safe_attach(device_t);
static	int safe_detach(device_t);
static	int safe_suspend(device_t);
static	int safe_resume(device_t);
static	int safe_shutdown(device_t);

static	int safe_probesession(device_t, const struct crypto_session_params *);
static	int safe_newsession(device_t, crypto_session_t,
	    const struct crypto_session_params *);
static	int safe_process(device_t, struct cryptop *, int);

static device_method_t safe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		safe_probe),
	DEVMETHOD(device_attach,	safe_attach),
	DEVMETHOD(device_detach,	safe_detach),
	DEVMETHOD(device_suspend,	safe_suspend),
	DEVMETHOD(device_resume,	safe_resume),
	DEVMETHOD(device_shutdown,	safe_shutdown),

	/* crypto device methods */
	DEVMETHOD(cryptodev_probesession, safe_probesession),
	DEVMETHOD(cryptodev_newsession,	safe_newsession),
	DEVMETHOD(cryptodev_process,	safe_process),

	DEVMETHOD_END
};
static driver_t safe_driver = {
	"safe",
	safe_methods,
	sizeof (struct safe_softc)
};
static devclass_t safe_devclass;

DRIVER_MODULE(safe, pci, safe_driver, safe_devclass, 0, 0);
MODULE_DEPEND(safe, crypto, 1, 1, 1);
#ifdef SAFE_RNDTEST
MODULE_DEPEND(safe, rndtest, 1, 1, 1);
#endif

static	void safe_intr(void *);
static	void safe_callback(struct safe_softc *, struct safe_ringentry *);
static	void safe_feed(struct safe_softc *, struct safe_ringentry *);
static	void safe_mcopy(struct mbuf *, struct mbuf *, u_int);
#ifndef SAFE_NO_RNG
static	void safe_rng_init(struct safe_softc *);
static	void safe_rng(void *);
#endif /* SAFE_NO_RNG */
static	int safe_dma_malloc(struct safe_softc *, bus_size_t,
	        struct safe_dma_alloc *, int);
#define	safe_dma_sync(_dma, _flags) \
	bus_dmamap_sync((_dma)->dma_tag, (_dma)->dma_map, (_flags))
static	void safe_dma_free(struct safe_softc *, struct safe_dma_alloc *);
static	int safe_dmamap_aligned(const struct safe_operand *);
static	int safe_dmamap_uniform(const struct safe_operand *);

static	void safe_reset_board(struct safe_softc *);
static	void safe_init_board(struct safe_softc *);
static	void safe_init_pciregs(device_t dev);
static	void safe_cleanchip(struct safe_softc *);
static	void safe_totalreset(struct safe_softc *);

static	int safe_free_entry(struct safe_softc *, struct safe_ringentry *);

static SYSCTL_NODE(_hw, OID_AUTO, safe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "SafeNet driver parameters");

#ifdef SAFE_DEBUG
static	void safe_dump_dmastatus(struct safe_softc *, const char *);
static	void safe_dump_ringstate(struct safe_softc *, const char *);
static	void safe_dump_intrstate(struct safe_softc *, const char *);
static	void safe_dump_request(struct safe_softc *, const char *,
	    struct safe_ringentry *);

static	struct safe_softc *safec;	/* for use by hw.safe.dump */

static	int safe_debug = 0;
SYSCTL_INT(_hw_safe, OID_AUTO, debug, CTLFLAG_RW, &safe_debug,
    0, "control debugging msgs");
#define	DPRINTF(_x)	if (safe_debug) printf _x
#else
#define	DPRINTF(_x)
#endif

#define	READ_REG(sc,r) \
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (r))

#define	WRITE_REG(sc,reg,val) \
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, reg, val)

struct safe_stats safestats;
SYSCTL_STRUCT(_hw_safe, OID_AUTO, stats, CTLFLAG_RD, &safestats,
    safe_stats, "driver statistics");
#ifndef SAFE_NO_RNG
static	int safe_rnginterval = 1;		/* poll once a second */
SYSCTL_INT(_hw_safe, OID_AUTO, rnginterval, CTLFLAG_RW, &safe_rnginterval,
    0, "RNG polling interval (secs)");
static	int safe_rngbufsize = 16;		/* 64 bytes each poll */
SYSCTL_INT(_hw_safe, OID_AUTO, rngbufsize, CTLFLAG_RW, &safe_rngbufsize,
    0, "RNG polling buffer size (32-bit words)");
static	int safe_rngmaxalarm = 8;		/* max alarms before reset */
SYSCTL_INT(_hw_safe, OID_AUTO, rngmaxalarm, CTLFLAG_RW, &safe_rngmaxalarm,
    0, "RNG max alarms before reset");
#endif /* SAFE_NO_RNG */

static int
safe_probe(device_t dev)
{
	if (pci_get_vendor(dev) == PCI_VENDOR_SAFENET &&
	    pci_get_device(dev) == PCI_PRODUCT_SAFEXCEL)
		return (BUS_PROBE_DEFAULT);
	return (ENXIO);
}

static	const char *
safe_partname(struct safe_softc *sc)
{
	/* XXX sprintf numbers when not decoded */
	switch (pci_get_vendor(sc->sc_dev)) {
	case PCI_VENDOR_SAFENET:
		switch (pci_get_device(sc->sc_dev)) {
		case PCI_PRODUCT_SAFEXCEL: return "SafeNet SafeXcel-1141";
		}
		return "SafeNet unknown-part";
	}
	return "Unknown-vendor unknown-part";
}

#ifndef SAFE_NO_RNG
static void
default_harvest(struct rndtest_state *rsp, void *buf, u_int count)
{
	/* MarkM: FIX!! Check that this does not swamp the harvester! */
	random_harvest_queue(buf, count, RANDOM_PURE_SAFE);
}
#endif /* SAFE_NO_RNG */

static int
safe_attach(device_t dev)
{
	struct safe_softc *sc = device_get_softc(dev);
	u_int32_t raddr;
	u_int32_t i;
	int rid;

	bzero(sc, sizeof (*sc));
	sc->sc_dev = dev;

	/* XXX handle power management */

	pci_enable_busmaster(dev);

	/*
	 * Setup memory-mapping of PCI registers.
	 */
	rid = BS_BAR;
	sc->sc_sr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_sr == NULL) {
		device_printf(dev, "cannot map register space\n");
		goto bad;
	}
	sc->sc_st = rman_get_bustag(sc->sc_sr);
	sc->sc_sh = rman_get_bushandle(sc->sc_sr);

	/*
	 * Arrange interrupt line.
	 */
	rid = 0;
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE|RF_ACTIVE);
	if (sc->sc_irq == NULL) {
		device_printf(dev, "could not map interrupt\n");
		goto bad1;
	}
	/*
	 * NB: Network code assumes we are blocked with splimp()
	 *     so make sure the IRQ is mapped appropriately.
	 */
	if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, safe_intr, sc, &sc->sc_ih)) {
		device_printf(dev, "could not establish interrupt\n");
		goto bad2;
	}

	sc->sc_cid = crypto_get_driverid(dev, sizeof(struct safe_session),
	    CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0) {
		device_printf(dev, "could not get crypto driver id\n");
		goto bad3;
	}

	sc->sc_chiprev = READ_REG(sc, SAFE_DEVINFO) &
		(SAFE_DEVINFO_REV_MAJ | SAFE_DEVINFO_REV_MIN);

	/*
	 * Setup DMA descriptor area.
	 */
	if (bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
			       1,			/* alignment */
			       SAFE_DMA_BOUNDARY,	/* boundary */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       SAFE_MAX_DMA,		/* maxsize */
			       SAFE_MAX_PART,		/* nsegments */
			       SAFE_MAX_SSIZE,		/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       NULL, NULL,		/* locking */
			       &sc->sc_srcdmat)) {
		device_printf(dev, "cannot allocate DMA tag\n");
		goto bad4;
	}
	if (bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
			       1,			/* alignment */
			       SAFE_MAX_DSIZE,		/* boundary */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       SAFE_MAX_DMA,		/* maxsize */
			       SAFE_MAX_PART,		/* nsegments */
			       SAFE_MAX_DSIZE,		/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       NULL, NULL,		/* locking */
			       &sc->sc_dstdmat)) {
		device_printf(dev, "cannot allocate DMA tag\n");
		goto bad4;
	}
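	/*
	 * NB: the destination tag deliberately uses SAFE_MAX_DSIZE for
	 *     both the boundary and the max segment size.  Destination
	 *     ("scatter") particles are fixed size on this part (see
	 *     the SAFE_PE_PARTCFG setup in safe_init_board), so every
	 *     result segment must fit within, and must not cross, a
	 *     SAFE_MAX_DSIZE window; safe_dmamap_uniform() classifies
	 *     segment lists against the same constant.
	 */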
	/*
	 * Allocate packet engine descriptors.
	 */
	if (safe_dma_malloc(sc,
	    SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
	    &sc->sc_ringalloc, 0)) {
		device_printf(dev, "cannot allocate PE descriptor ring\n");
		bus_dma_tag_destroy(sc->sc_srcdmat);
		goto bad4;
	}
	/*
	 * Hookup the static portion of all our data structures.
	 */
	sc->sc_ring = (struct safe_ringentry *) sc->sc_ringalloc.dma_vaddr;
	sc->sc_ringtop = sc->sc_ring + SAFE_MAX_NQUEUE;
	sc->sc_front = sc->sc_ring;
	sc->sc_back = sc->sc_ring;
	raddr = sc->sc_ringalloc.dma_paddr;
	bzero(sc->sc_ring, SAFE_MAX_NQUEUE * sizeof(struct safe_ringentry));
	for (i = 0; i < SAFE_MAX_NQUEUE; i++) {
		struct safe_ringentry *re = &sc->sc_ring[i];

		re->re_desc.d_sa = raddr +
			offsetof(struct safe_ringentry, re_sa);
		re->re_sa.sa_staterec = raddr +
			offsetof(struct safe_ringentry, re_sastate);

		raddr += sizeof (struct safe_ringentry);
	}
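	/*
	 * NB: each ring entry is self-contained within the single
	 *     contiguous DMA area: the loop above links the PE
	 *     descriptor to the SA embedded in the same entry and the
	 *     SA to its state record.  safe_process() depends on this
	 *     and preserves sa_staterec when it zeroes an entry for
	 *     reuse.
	 */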
	mtx_init(&sc->sc_ringmtx, device_get_nameunit(dev),
		"packet engine ring", MTX_DEF);

	/*
	 * Allocate scatter and gather particle descriptors.
	 */
	if (safe_dma_malloc(sc, SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
	    &sc->sc_spalloc, 0)) {
		device_printf(dev, "cannot allocate source particle "
			"descriptor ring\n");
		mtx_destroy(&sc->sc_ringmtx);
		safe_dma_free(sc, &sc->sc_ringalloc);
		bus_dma_tag_destroy(sc->sc_srcdmat);
		goto bad4;
	}
	sc->sc_spring = (struct safe_pdesc *) sc->sc_spalloc.dma_vaddr;
	sc->sc_springtop = sc->sc_spring + SAFE_TOTAL_SPART;
	sc->sc_spfree = sc->sc_spring;
	bzero(sc->sc_spring, SAFE_TOTAL_SPART * sizeof(struct safe_pdesc));

	if (safe_dma_malloc(sc, SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
	    &sc->sc_dpalloc, 0)) {
		device_printf(dev, "cannot allocate destination particle "
			"descriptor ring\n");
		mtx_destroy(&sc->sc_ringmtx);
		safe_dma_free(sc, &sc->sc_spalloc);
		safe_dma_free(sc, &sc->sc_ringalloc);
		bus_dma_tag_destroy(sc->sc_dstdmat);
		goto bad4;
	}
	sc->sc_dpring = (struct safe_pdesc *) sc->sc_dpalloc.dma_vaddr;
	sc->sc_dpringtop = sc->sc_dpring + SAFE_TOTAL_DPART;
	sc->sc_dpfree = sc->sc_dpring;
	bzero(sc->sc_dpring, SAFE_TOTAL_DPART * sizeof(struct safe_pdesc));

	device_printf(sc->sc_dev, "%s", safe_partname(sc));

	sc->sc_devinfo = READ_REG(sc, SAFE_DEVINFO);
	if (sc->sc_devinfo & SAFE_DEVINFO_RNG) {
		sc->sc_flags |= SAFE_FLAGS_RNG;
		printf(" rng");
	}
	if (sc->sc_devinfo & SAFE_DEVINFO_PKEY) {
#if 0
		printf(" key");
		sc->sc_flags |= SAFE_FLAGS_KEY;
		crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0);
		crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0);
#endif
	}
	if (sc->sc_devinfo & SAFE_DEVINFO_DES) {
		printf(" des/3des");
	}
	if (sc->sc_devinfo & SAFE_DEVINFO_AES) {
		printf(" aes");
	}
	if (sc->sc_devinfo & SAFE_DEVINFO_MD5) {
		printf(" md5");
	}
	if (sc->sc_devinfo & SAFE_DEVINFO_SHA1) {
		printf(" sha1");
	}
	/* XXX other supported algorithms */
	printf("\n");

	safe_reset_board(sc);		/* reset h/w */
	safe_init_pciregs(dev);		/* init pci settings */
	safe_init_board(sc);		/* init h/w */

#ifndef SAFE_NO_RNG
	if (sc->sc_flags & SAFE_FLAGS_RNG) {
#ifdef SAFE_RNDTEST
		sc->sc_rndtest = rndtest_attach(dev);
		if (sc->sc_rndtest)
			sc->sc_harvest = rndtest_harvest;
		else
			sc->sc_harvest = default_harvest;
#else
		sc->sc_harvest = default_harvest;
#endif
		safe_rng_init(sc);

		callout_init(&sc->sc_rngto, 1);
		callout_reset(&sc->sc_rngto, hz*safe_rnginterval, safe_rng, sc);
	}
#endif /* SAFE_NO_RNG */
#ifdef SAFE_DEBUG
	safec = sc;			/* for use by hw.safe.dump */
#endif
	return (0);
bad4:
	crypto_unregister_all(sc->sc_cid);
bad3:
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
bad2:
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
bad1:
	bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr);
bad:
	return (ENXIO);
}

/*
 * Detach a device that successfully probed.
 */
static int
safe_detach(device_t dev)
{
	struct safe_softc *sc = device_get_softc(dev);

	/* XXX wait/abort active ops */

	WRITE_REG(sc, SAFE_HI_MASK, 0);		/* disable interrupts */

	callout_stop(&sc->sc_rngto);

	crypto_unregister_all(sc->sc_cid);

#ifdef SAFE_RNDTEST
	if (sc->sc_rndtest)
		rndtest_detach(sc->sc_rndtest);
#endif

	safe_cleanchip(sc);
	safe_dma_free(sc, &sc->sc_dpalloc);
	safe_dma_free(sc, &sc->sc_spalloc);
	mtx_destroy(&sc->sc_ringmtx);
	safe_dma_free(sc, &sc->sc_ringalloc);

	bus_generic_detach(dev);
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);

	bus_dma_tag_destroy(sc->sc_srcdmat);
	bus_dma_tag_destroy(sc->sc_dstdmat);
	bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr);

	return (0);
}

/*
 * Stop all chip i/o so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
safe_shutdown(device_t dev)
{
#ifdef notyet
	safe_stop(device_get_softc(dev));
#endif
	return (0);
}

/*
 * Device suspend routine.
 */
static int
safe_suspend(device_t dev)
{
	struct safe_softc *sc = device_get_softc(dev);

#ifdef notyet
	/* XXX stop the device and save PCI settings */
#endif
	sc->sc_suspended = 1;

	return (0);
}

static int
safe_resume(device_t dev)
{
	struct safe_softc *sc = device_get_softc(dev);

#ifdef notyet
	/* XXX restore PCI settings and start the device */
#endif
	sc->sc_suspended = 0;
	return (0);
}

/*
 * SafeXcel Interrupt routine
 */
static void
safe_intr(void *arg)
{
	struct safe_softc *sc = arg;
	volatile u_int32_t stat;

	stat = READ_REG(sc, SAFE_HM_STAT);
	if (stat == 0)			/* shared irq, not for us */
		return;

	WRITE_REG(sc, SAFE_HI_CLR, stat);	/* IACK */

	if ((stat & SAFE_INT_PE_DDONE)) {
		/*
		 * Descriptor(s) done; scan the ring and
		 * process completed operations.
		 */
		mtx_lock(&sc->sc_ringmtx);
		while (sc->sc_back != sc->sc_front) {
			struct safe_ringentry *re = sc->sc_back;
#ifdef SAFE_DEBUG
			if (safe_debug) {
				safe_dump_ringstate(sc, __func__);
				safe_dump_request(sc, __func__, re);
			}
#endif
			/*
			 * safe_process marks ring entries that were allocated
			 * but not used with a csr of zero.  This ensures the
			 * ring front pointer never needs to be set backwards
			 * in the event that an entry is allocated but not used
			 * because of a setup error.
			 */
			if (re->re_desc.d_csr != 0) {
				if (!SAFE_PE_CSR_IS_DONE(re->re_desc.d_csr))
					break;
				if (!SAFE_PE_LEN_IS_DONE(re->re_desc.d_len))
					break;
				sc->sc_nqchip--;
				safe_callback(sc, re);
			}
			if (++(sc->sc_back) == sc->sc_ringtop)
				sc->sc_back = sc->sc_ring;
		}
		mtx_unlock(&sc->sc_ringmtx);
	}

	/*
	 * Check to see if we got any DMA Error
	 */
	if (stat & SAFE_INT_PE_ERROR) {
		DPRINTF(("dmaerr dmastat %08x\n",
			READ_REG(sc, SAFE_PE_DMASTAT)));
		safestats.st_dmaerr++;
		safe_totalreset(sc);
#if 0
		safe_feed(sc);
#endif
	}

	if (sc->sc_needwakeup) {		/* XXX check high watermark */
		int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
		DPRINTF(("%s: wakeup crypto %x\n", __func__,
			sc->sc_needwakeup));
		sc->sc_needwakeup &= ~wakeup;
		crypto_unblock(sc->sc_cid, wakeup);
	}
}
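/*
 * NB: the needwakeup handling above is the back half of the driver's
 * flow control.  When the ring fills, safe_process returns ERESTART
 * and sets CRYPTO_SYMQ in sc_needwakeup so the opencrypto framework
 * holds further requests; once descriptors have been retired, the
 * interrupt handler calls crypto_unblock to restart the feed.
 */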
/*
 * safe_feed() - post a request to chip
 */
static void
safe_feed(struct safe_softc *sc, struct safe_ringentry *re)
{
	bus_dmamap_sync(sc->sc_srcdmat, re->re_src_map, BUS_DMASYNC_PREWRITE);
	if (re->re_dst_map != NULL)
		bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map,
			BUS_DMASYNC_PREREAD);
	/* XXX have no smaller granularity */
	safe_dma_sync(&sc->sc_ringalloc,
		BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	safe_dma_sync(&sc->sc_spalloc, BUS_DMASYNC_PREWRITE);
	safe_dma_sync(&sc->sc_dpalloc, BUS_DMASYNC_PREWRITE);

#ifdef SAFE_DEBUG
	if (safe_debug) {
		safe_dump_ringstate(sc, __func__);
		safe_dump_request(sc, __func__, re);
	}
#endif
	sc->sc_nqchip++;
	if (sc->sc_nqchip > safestats.st_maxqchip)
		safestats.st_maxqchip = sc->sc_nqchip;
	/* poke h/w to check descriptor ring, any value can be written */
	WRITE_REG(sc, SAFE_HI_RD_DESCR, 0);
}

#define	N(a)	(sizeof(a) / sizeof (a[0]))
static void
safe_setup_enckey(struct safe_session *ses, const void *key)
{
	int i;

	bcopy(key, ses->ses_key, ses->ses_klen);

	/* PE is little-endian, ensure proper byte order */
	for (i = 0; i < N(ses->ses_key); i++)
		ses->ses_key[i] = htole32(ses->ses_key[i]);
}

static void
safe_setup_mackey(struct safe_session *ses, int algo, const uint8_t *key,
    int klen)
{
	MD5_CTX md5ctx;
	SHA1_CTX sha1ctx;
	int i;

	if (algo == CRYPTO_MD5_HMAC) {
		hmac_init_ipad(&auth_hash_hmac_md5, key, klen, &md5ctx);
		bcopy(md5ctx.state, ses->ses_hminner, sizeof(md5ctx.state));

		hmac_init_opad(&auth_hash_hmac_md5, key, klen, &md5ctx);
		bcopy(md5ctx.state, ses->ses_hmouter, sizeof(md5ctx.state));

		explicit_bzero(&md5ctx, sizeof(md5ctx));
	} else {
		hmac_init_ipad(&auth_hash_hmac_sha1, key, klen, &sha1ctx);
		bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32));

		hmac_init_opad(&auth_hash_hmac_sha1, key, klen, &sha1ctx);
		bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32));

		explicit_bzero(&sha1ctx, sizeof(sha1ctx));
	}

	/* PE is little-endian, ensure proper byte order */
	for (i = 0; i < N(ses->ses_hminner); i++) {
		ses->ses_hminner[i] = htole32(ses->ses_hminner[i]);
		ses->ses_hmouter[i] = htole32(ses->ses_hmouter[i]);
	}
}
#undef N

static bool
safe_auth_supported(struct safe_softc *sc,
    const struct crypto_session_params *csp)
{

	switch (csp->csp_auth_alg) {
	case CRYPTO_MD5_HMAC:
		if ((sc->sc_devinfo & SAFE_DEVINFO_MD5) == 0)
			return (false);
		break;
	case CRYPTO_SHA1_HMAC:
		if ((sc->sc_devinfo & SAFE_DEVINFO_SHA1) == 0)
			return (false);
		break;
	default:
		return (false);
	}
	return (true);
}

static bool
safe_cipher_supported(struct safe_softc *sc,
    const struct crypto_session_params *csp)
{

	switch (csp->csp_cipher_alg) {
	case CRYPTO_DES_CBC:
	case CRYPTO_3DES_CBC:
		if ((sc->sc_devinfo & SAFE_DEVINFO_DES) == 0)
			return (false);
		if (csp->csp_ivlen != 8)
			return (false);
		if (csp->csp_cipher_alg == CRYPTO_DES_CBC) {
			if (csp->csp_cipher_klen != 8)
				return (false);
		} else {
			if (csp->csp_cipher_klen != 24)
				return (false);
		}
		break;
	case CRYPTO_AES_CBC:
		if ((sc->sc_devinfo & SAFE_DEVINFO_AES) == 0)
			return (false);
		if (csp->csp_ivlen != 16)
			return (false);
		if (csp->csp_cipher_klen != 16 &&
		    csp->csp_cipher_klen != 24 &&
		    csp->csp_cipher_klen != 32)
			return (false);
		break;
	}
	return (true);
}

static int
safe_probesession(device_t dev, const struct crypto_session_params *csp)
{
	struct safe_softc *sc = device_get_softc(dev);

	if (csp->csp_flags != 0)
		return (EINVAL);
	switch (csp->csp_mode) {
	case CSP_MODE_DIGEST:
		if (!safe_auth_supported(sc, csp))
			return (EINVAL);
		break;
	case CSP_MODE_CIPHER:
		if (!safe_cipher_supported(sc, csp))
			return (EINVAL);
		break;
	case CSP_MODE_ETA:
		if (!safe_auth_supported(sc, csp) ||
		    !safe_cipher_supported(sc, csp))
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	return (CRYPTODEV_PROBE_HARDWARE);
}

/*
 * Allocate a new 'session'.
 */
static int
safe_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
	struct safe_session *ses;

	ses = crypto_get_driver_session(cses);
	if (csp->csp_cipher_alg != 0) {
		ses->ses_klen = csp->csp_cipher_klen;
		if (csp->csp_cipher_key != NULL)
			safe_setup_enckey(ses, csp->csp_cipher_key);
	}

	if (csp->csp_auth_alg != 0) {
		ses->ses_mlen = csp->csp_auth_mlen;
		if (ses->ses_mlen == 0) {
			if (csp->csp_auth_alg == CRYPTO_MD5_HMAC)
				ses->ses_mlen = MD5_HASH_LEN;
			else
				ses->ses_mlen = SHA1_HASH_LEN;
		}

		if (csp->csp_auth_key != NULL) {
			safe_setup_mackey(ses, csp->csp_auth_alg,
			    csp->csp_auth_key, csp->csp_auth_klen);
		}
	}

	return (0);
}

static bus_size_t
safe_crp_length(struct cryptop *crp)
{

	switch (crp->crp_buf_type) {
	case CRYPTO_BUF_MBUF:
		return (crp->crp_mbuf->m_pkthdr.len);
	case CRYPTO_BUF_UIO:
		return (crp->crp_uio->uio_resid);
	case CRYPTO_BUF_CONTIG:
		return (crp->crp_ilen);
	default:
		panic("bad crp buffer type");
	}
}

static void
safe_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, int error)
{
	struct safe_operand *op = arg;

	DPRINTF(("%s: nsegs %d error %d\n", __func__,
		nsegs, error));
	if (error != 0)
		return;
	op->nsegs = nsegs;
	bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
}
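/*
 * Process a symmetric crypto request: claim the next free ring entry
 * (returning ERESTART when the ring is full so the framework holds
 * requests until safe_intr unblocks the queue), build the SA command
 * words from the session parameters, load the source/destination DMA
 * maps, and finally mark the PE descriptor ready via safe_feed.
 */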
static int
safe_process(device_t dev, struct cryptop *crp, int hint)
{
	struct safe_softc *sc = device_get_softc(dev);
	const struct crypto_session_params *csp;
	int err = 0, i, nicealign, uniform;
	int bypass, oplen;
	int16_t coffset;
	struct safe_session *ses;
	struct safe_ringentry *re;
	struct safe_sarec *sa;
	struct safe_pdesc *pd;
	u_int32_t cmd0, cmd1, staterec;

	mtx_lock(&sc->sc_ringmtx);
	if (sc->sc_front == sc->sc_back && sc->sc_nqchip != 0) {
		safestats.st_ringfull++;
		sc->sc_needwakeup |= CRYPTO_SYMQ;
		mtx_unlock(&sc->sc_ringmtx);
		return (ERESTART);
	}
	re = sc->sc_front;

	staterec = re->re_sa.sa_staterec;	/* save */
	/* NB: zero everything but the PE descriptor */
	bzero(&re->re_sa, sizeof(struct safe_ringentry) - sizeof(re->re_desc));
	re->re_sa.sa_staterec = staterec;	/* restore */

	re->re_crp = crp;

	sa = &re->re_sa;
	ses = crypto_get_driver_session(crp->crp_session);
	csp = crypto_get_params(crp->crp_session);

	cmd0 = SAFE_SA_CMD0_BASIC;		/* basic group operation */
	cmd1 = 0;
	switch (csp->csp_mode) {
	case CSP_MODE_DIGEST:
		cmd0 |= SAFE_SA_CMD0_OP_HASH;
		break;
	case CSP_MODE_CIPHER:
		cmd0 |= SAFE_SA_CMD0_OP_CRYPT;
		break;
	case CSP_MODE_ETA:
		cmd0 |= SAFE_SA_CMD0_OP_BOTH;
		break;
	}

	if (csp->csp_cipher_alg != 0) {
		if (crp->crp_cipher_key != NULL)
			safe_setup_enckey(ses, crp->crp_cipher_key);

		switch (csp->csp_cipher_alg) {
		case CRYPTO_DES_CBC:
			cmd0 |= SAFE_SA_CMD0_DES;
			cmd1 |= SAFE_SA_CMD1_CBC;
			break;
		case CRYPTO_3DES_CBC:
			cmd0 |= SAFE_SA_CMD0_3DES;
			cmd1 |= SAFE_SA_CMD1_CBC;
			break;
		case CRYPTO_AES_CBC:
			cmd0 |= SAFE_SA_CMD0_AES;
			cmd1 |= SAFE_SA_CMD1_CBC;
			if (ses->ses_klen * 8 == 128)
				cmd1 |= SAFE_SA_CMD1_AES128;
			else if (ses->ses_klen * 8 == 192)
				cmd1 |= SAFE_SA_CMD1_AES192;
			else
				cmd1 |= SAFE_SA_CMD1_AES256;
		}

		/*
		 * Setup encrypt/decrypt state.  When using basic ops
		 * we can't use an inline IV because hash/crypt offset
		 * must be from the end of the IV to the start of the
		 * crypt data and this leaves out the preceding header
		 * from the hash calculation.  Instead we place the IV
		 * in the state record and set the hash/crypt offset to
		 * copy both the header+IV.
		 */
		crypto_read_iv(crp, re->re_sastate.sa_saved_iv);
		cmd0 |= SAFE_SA_CMD0_IVLD_STATE;

		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			cmd0 |= SAFE_SA_CMD0_OUTBOUND;

			/*
			 * XXX: I suspect we don't need this since we
			 * don't save the returned IV.
			 */
			cmd0 |= SAFE_SA_CMD0_SAVEIV;
		} else {
			cmd0 |= SAFE_SA_CMD0_INBOUND;
		}
		/*
		 * For basic encryption use the zero pad algorithm.
		 * This pads results to an 8-byte boundary and
		 * suppresses padding verification for inbound (i.e.
		 * decrypt) operations.
		 *
		 * NB: Not sure if the 8-byte pad boundary is a problem.
		 */
		cmd0 |= SAFE_SA_CMD0_PAD_ZERO;

		/* XXX assert key bufs have the same size */
		bcopy(ses->ses_key, sa->sa_key, sizeof(sa->sa_key));
	}

	if (csp->csp_auth_alg != 0) {
		if (crp->crp_auth_key != NULL) {
			safe_setup_mackey(ses, csp->csp_auth_alg,
			    crp->crp_auth_key, csp->csp_auth_klen);
		}

		switch (csp->csp_auth_alg) {
		case CRYPTO_MD5_HMAC:
			cmd0 |= SAFE_SA_CMD0_MD5;
			cmd1 |= SAFE_SA_CMD1_HMAC;	/* NB: enable HMAC */
			break;
		case CRYPTO_SHA1_HMAC:
			cmd0 |= SAFE_SA_CMD0_SHA1;
			cmd1 |= SAFE_SA_CMD1_HMAC;	/* NB: enable HMAC */
			break;
		}

		/*
		 * Digest data is loaded from the SA and the hash
		 * result is saved to the state block where we
		 * retrieve it for return to the caller.
		 */
		/* XXX assert digest bufs have the same size */
		bcopy(ses->ses_hminner, sa->sa_indigest,
			sizeof(sa->sa_indigest));
		bcopy(ses->ses_hmouter, sa->sa_outdigest,
			sizeof(sa->sa_outdigest));

		cmd0 |= SAFE_SA_CMD0_HSLD_SA | SAFE_SA_CMD0_SAVEHASH;
		re->re_flags |= SAFE_QFLAGS_COPYOUTICV;
	}

	if (csp->csp_mode == CSP_MODE_ETA) {
		/*
		 * The driver only supports ETA requests where there
		 * is no gap between the AAD and payload.
		 */
		if (crp->crp_aad_length != 0 &&
		    crp->crp_aad_start + crp->crp_aad_length !=
		    crp->crp_payload_start) {
			safestats.st_lenmismatch++;
			err = EINVAL;
			goto errout;
		}
		if (crp->crp_aad_length != 0)
			bypass = crp->crp_aad_start;
		else
			bypass = crp->crp_payload_start;
		coffset = crp->crp_aad_length;
		oplen = crp->crp_payload_start + crp->crp_payload_length;
#ifdef SAFE_DEBUG
		if (safe_debug) {
			printf("AAD: skip %d, len %d, digest %d\n",
			    crp->crp_aad_start, crp->crp_aad_length,
			    crp->crp_digest_start);
			printf("payload: skip %d, len %d, IV %d\n",
			    crp->crp_payload_start, crp->crp_payload_length,
			    crp->crp_iv_start);
			printf("bypass %d coffset %d oplen %d\n",
			    bypass, coffset, oplen);
		}
#endif
		if (coffset & 3) {	/* offset must be 32-bit aligned */
			DPRINTF(("%s: coffset %u misaligned\n",
				__func__, coffset));
			safestats.st_coffmisaligned++;
			err = EINVAL;
			goto errout;
		}
		coffset >>= 2;
		if (coffset > 255) {	/* offset must be <256 dwords */
			DPRINTF(("%s: coffset %u too big\n",
				__func__, coffset));
			safestats.st_cofftoobig++;
			err = EINVAL;
			goto errout;
		}
		/*
		 * Tell the hardware to copy the header to the output.
		 * The header is defined as the data from the end of
		 * the bypass to the start of data to be encrypted.
		 * Typically this is the inline IV.  Note that you need
		 * to do this even if src+dst are the same; it appears
		 * that w/o this bit the crypted data is written
		 * immediately after the bypass data.
		 */
		cmd1 |= SAFE_SA_CMD1_HDRCOPY;
		/*
		 * Disable IP header mutable bit handling.  This is
		 * needed to get correct HMAC calculations.
		 */
		cmd1 |= SAFE_SA_CMD1_MUTABLE;
	} else {
		bypass = crp->crp_payload_start;
		oplen = bypass + crp->crp_payload_length;
		coffset = 0;
	}
	/* XXX verify multiple of 4 when using s/g */
	if (bypass > 96) {		/* bypass offset must be <= 96 bytes */
		DPRINTF(("%s: bypass %u too big\n", __func__, bypass));
		safestats.st_bypasstoobig++;
		err = EINVAL;
		goto errout;
	}

	if (bus_dmamap_create(sc->sc_srcdmat, BUS_DMA_NOWAIT, &re->re_src_map)) {
		safestats.st_nomap++;
		err = ENOMEM;
		goto errout;
	}
	if (bus_dmamap_load_crp(sc->sc_srcdmat, re->re_src_map, crp, safe_op_cb,
	    &re->re_src, BUS_DMA_NOWAIT) != 0) {
		bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
		re->re_src_map = NULL;
		safestats.st_noload++;
		err = ENOMEM;
		goto errout;
	}
	re->re_src_mapsize = safe_crp_length(crp);
	nicealign = safe_dmamap_aligned(&re->re_src);
	uniform = safe_dmamap_uniform(&re->re_src);

	DPRINTF(("src nicealign %u uniform %u nsegs %u\n",
		nicealign, uniform, re->re_src.nsegs));
	if (re->re_src.nsegs > 1) {
		re->re_desc.d_src = sc->sc_spalloc.dma_paddr +
			((caddr_t) sc->sc_spfree - (caddr_t) sc->sc_spring);
		for (i = 0; i < re->re_src_nsegs; i++) {
			/* NB: no need to check if there's space */
			pd = sc->sc_spfree;
			if (++(sc->sc_spfree) == sc->sc_springtop)
				sc->sc_spfree = sc->sc_spring;

			KASSERT((pd->pd_flags&3) == 0 ||
				(pd->pd_flags&3) == SAFE_PD_DONE,
				("bogus source particle descriptor; flags %x",
				pd->pd_flags));
			pd->pd_addr = re->re_src_segs[i].ds_addr;
			pd->pd_size = re->re_src_segs[i].ds_len;
			pd->pd_flags = SAFE_PD_READY;
		}
		cmd0 |= SAFE_SA_CMD0_IGATHER;
	} else {
		/*
		 * No need for gather, reference the operand directly.
		 */
		re->re_desc.d_src = re->re_src_segs[0].ds_addr;
	}

	if (csp->csp_mode == CSP_MODE_DIGEST) {
		/*
		 * Hash op; no destination needed.
		 */
	} else {
		if (nicealign && uniform == 1) {
			/*
			 * Source layout is suitable for direct
			 * sharing of the DMA map and segment list.
			 */
			re->re_dst = re->re_src;
		} else if (nicealign && uniform == 2) {
			/*
			 * The source is properly aligned but requires a
			 * different particle list to handle DMA of the
			 * result.  Create a new map and do the load to
			 * create the segment list.  The particle
			 * descriptor setup code below will handle the
			 * rest.
			 */
			if (bus_dmamap_create(sc->sc_dstdmat, BUS_DMA_NOWAIT,
			    &re->re_dst_map)) {
				safestats.st_nomap++;
				err = ENOMEM;
				goto errout;
			}
			if (bus_dmamap_load_crp(sc->sc_dstdmat, re->re_dst_map,
			    crp, safe_op_cb, &re->re_dst, BUS_DMA_NOWAIT) !=
			    0) {
				bus_dmamap_destroy(sc->sc_dstdmat,
				    re->re_dst_map);
				re->re_dst_map = NULL;
				safestats.st_noload++;
				err = ENOMEM;
				goto errout;
			}
		} else if (crp->crp_buf_type == CRYPTO_BUF_MBUF) {
			int totlen, len;
			struct mbuf *m, *top, **mp;

			/*
			 * DMA constraints require that we allocate a
			 * new mbuf chain for the destination.  We
			 * allocate an entire new set of mbufs of
			 * optimal/required size and then tell the
			 * hardware to copy any bits that are not
			 * created as a byproduct of the operation.
			 */
			if (!nicealign)
				safestats.st_unaligned++;
			if (!uniform)
				safestats.st_notuniform++;
			totlen = re->re_src_mapsize;
			if (crp->crp_mbuf->m_flags & M_PKTHDR) {
				len = MHLEN;
				MGETHDR(m, M_NOWAIT, MT_DATA);
				if (m && !m_dup_pkthdr(m, crp->crp_mbuf,
				    M_NOWAIT)) {
					m_free(m);
					m = NULL;
				}
			} else {
				len = MLEN;
				MGET(m, M_NOWAIT, MT_DATA);
			}
			if (m == NULL) {
				safestats.st_nombuf++;
				err = sc->sc_nqchip ? ERESTART : ENOMEM;
				goto errout;
			}
			if (totlen >= MINCLSIZE) {
				if (!(MCLGET(m, M_NOWAIT))) {
					m_free(m);
					safestats.st_nomcl++;
					err = sc->sc_nqchip ?
					    ERESTART : ENOMEM;
					goto errout;
				}
				len = MCLBYTES;
			}
			m->m_len = len;
			top = NULL;
			mp = &top;

			while (totlen > 0) {
				if (top) {
					MGET(m, M_NOWAIT, MT_DATA);
					if (m == NULL) {
						m_freem(top);
						safestats.st_nombuf++;
						err = sc->sc_nqchip ?
						    ERESTART : ENOMEM;
						goto errout;
					}
					len = MLEN;
				}
				if (top && totlen >= MINCLSIZE) {
					if (!(MCLGET(m, M_NOWAIT))) {
						*mp = m;
						m_freem(top);
						safestats.st_nomcl++;
						err = sc->sc_nqchip ?
						    ERESTART : ENOMEM;
						goto errout;
					}
					len = MCLBYTES;
				}
				m->m_len = len = min(totlen, len);
				totlen -= len;
				*mp = m;
				mp = &m->m_next;
			}
			re->re_dst_m = top;
			if (bus_dmamap_create(sc->sc_dstdmat,
			    BUS_DMA_NOWAIT, &re->re_dst_map) != 0) {
				safestats.st_nomap++;
				err = ENOMEM;
				goto errout;
			}
			if (bus_dmamap_load_mbuf_sg(sc->sc_dstdmat,
			    re->re_dst_map, top, re->re_dst_segs,
			    &re->re_dst_nsegs, 0) != 0) {
				bus_dmamap_destroy(sc->sc_dstdmat,
				    re->re_dst_map);
				re->re_dst_map = NULL;
				safestats.st_noload++;
				err = ENOMEM;
				goto errout;
			}
			re->re_dst_mapsize = re->re_src_mapsize;
			if (re->re_src.mapsize > oplen) {
				/*
				 * There's data following what the
				 * hardware will copy for us.  If this
				 * isn't just the ICV (that's going to
				 * be written on completion), copy it
				 * to the new mbufs
				 */
				if (!(csp->csp_mode == CSP_MODE_ETA &&
				    (re->re_src.mapsize-oplen) == ses->ses_mlen &&
				    crp->crp_digest_start == oplen))
					safe_mcopy(crp->crp_mbuf, re->re_dst_m,
					    oplen);
				else
					safestats.st_noicvcopy++;
			}
		} else {
			if (!nicealign) {
				safestats.st_iovmisaligned++;
				err = EINVAL;
				goto errout;
			} else {
				/*
				 * There's no way to handle the DMA
				 * requirements with this uio.  We
				 * could create a separate DMA area for
				 * the result and then copy it back,
				 * but for now we just bail and return
				 * an error.  Note that uio requests
				 * > SAFE_MAX_DSIZE are handled because
				 * the DMA map and segment list for the
				 * destination will result in a
				 * destination particle list that does
				 * the necessary scatter DMA.
				 */
				safestats.st_iovnotuniform++;
				err = EINVAL;
				goto errout;
			}
		}

		if (re->re_dst.nsegs > 1) {
			re->re_desc.d_dst = sc->sc_dpalloc.dma_paddr +
			    ((caddr_t) sc->sc_dpfree - (caddr_t) sc->sc_dpring);
			for (i = 0; i < re->re_dst_nsegs; i++) {
				pd = sc->sc_dpfree;
				KASSERT((pd->pd_flags&3) == 0 ||
					(pd->pd_flags&3) == SAFE_PD_DONE,
					("bogus dest particle descriptor; flags %x",
						pd->pd_flags));
				if (++(sc->sc_dpfree) == sc->sc_dpringtop)
					sc->sc_dpfree = sc->sc_dpring;
				pd->pd_addr = re->re_dst_segs[i].ds_addr;
				pd->pd_flags = SAFE_PD_READY;
			}
			cmd0 |= SAFE_SA_CMD0_OSCATTER;
		} else {
			/*
			 * No need for scatter, reference the operand directly.
			 */
			re->re_desc.d_dst = re->re_dst_segs[0].ds_addr;
		}
	}

	/*
	 * All done with setup; fill in the SA command words
	 * and the packet engine descriptor.  The operation
	 * is now ready for submission to the hardware.
	 */
	sa->sa_cmd0 = cmd0 | SAFE_SA_CMD0_IPCI | SAFE_SA_CMD0_OPCI;
	sa->sa_cmd1 = cmd1
		    | (coffset << SAFE_SA_CMD1_OFFSET_S)
		    | SAFE_SA_CMD1_SAREV1	/* Rev 1 SA data structure */
		    | SAFE_SA_CMD1_SRPCI
		    ;
	/*
	 * NB: the order of writes is important here.  In case the
	 * chip is scanning the ring because of an outstanding request
	 * it might nab this one too.  In that case we need to make
	 * sure the setup is complete before we write the length
	 * field of the descriptor as it signals the descriptor is
	 * ready for processing.
	 */
	re->re_desc.d_csr = SAFE_PE_CSR_READY | SAFE_PE_CSR_SAPCI;
	if (csp->csp_auth_alg != 0)
		re->re_desc.d_csr |= SAFE_PE_CSR_LOADSA | SAFE_PE_CSR_HASHFINAL;
	re->re_desc.d_len = oplen
			  | SAFE_PE_LEN_READY
			  | (bypass << SAFE_PE_LEN_BYPASS_S)
			  ;
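	/*
	 * NB: SAFE_PE_LEN_READY in d_len is the commit point; it is
	 *     the last store the chip needs to see before it may
	 *     process the entry.  safe_intr mirrors this by requiring
	 *     both the csr and len DONE bits before retiring a
	 *     descriptor.
	 */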
	safestats.st_ipackets++;
	safestats.st_ibytes += oplen;

	if (++(sc->sc_front) == sc->sc_ringtop)
		sc->sc_front = sc->sc_ring;

	/* XXX honor batching */
	safe_feed(sc, re);
	mtx_unlock(&sc->sc_ringmtx);
	return (0);

errout:
	if (re->re_dst_m != NULL)
		m_freem(re->re_dst_m);

	if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) {
		bus_dmamap_unload(sc->sc_dstdmat, re->re_dst_map);
		bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map);
	}
	if (re->re_src_map != NULL) {
		bus_dmamap_unload(sc->sc_srcdmat, re->re_src_map);
		bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
	}
	mtx_unlock(&sc->sc_ringmtx);
	if (err != ERESTART) {
		crp->crp_etype = err;
		crypto_done(crp);
	} else {
		sc->sc_needwakeup |= CRYPTO_SYMQ;
	}
	return (err);
}

static void
safe_callback(struct safe_softc *sc, struct safe_ringentry *re)
{
	const struct crypto_session_params *csp;
	struct cryptop *crp = (struct cryptop *)re->re_crp;
	struct safe_session *ses;
	uint8_t hash[HASH_MAX_LEN];

	ses = crypto_get_driver_session(crp->crp_session);
	csp = crypto_get_params(crp->crp_session);

	safestats.st_opackets++;
	safestats.st_obytes += re->re_dst.mapsize;

	safe_dma_sync(&sc->sc_ringalloc,
		BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	if (re->re_desc.d_csr & SAFE_PE_CSR_STATUS) {
		device_printf(sc->sc_dev, "csr 0x%x cmd0 0x%x cmd1 0x%x\n",
			re->re_desc.d_csr,
			re->re_sa.sa_cmd0, re->re_sa.sa_cmd1);
		safestats.st_peoperr++;
		crp->crp_etype = EIO;		/* something more meaningful? */
	}

	/* XXX: Should crp_mbuf be updated to re->re_dst_m if it is non-NULL? */

	if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) {
		bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dstdmat, re->re_dst_map);
		bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map);
	}
	bus_dmamap_sync(sc->sc_srcdmat, re->re_src_map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_srcdmat, re->re_src_map);
	bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);

	if (re->re_flags & SAFE_QFLAGS_COPYOUTICV) {
		if (csp->csp_auth_alg == CRYPTO_SHA1_HMAC) {
			/*
			 * SHA-1 ICV's are byte-swapped; fix 'em up
			 * before copying them to their destination.
			 */
			re->re_sastate.sa_saved_indigest[0] =
			    bswap32(re->re_sastate.sa_saved_indigest[0]);
			re->re_sastate.sa_saved_indigest[1] =
			    bswap32(re->re_sastate.sa_saved_indigest[1]);
			re->re_sastate.sa_saved_indigest[2] =
			    bswap32(re->re_sastate.sa_saved_indigest[2]);
		}

		if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
			crypto_copydata(crp, crp->crp_digest_start,
			    ses->ses_mlen, hash);
			if (timingsafe_bcmp(re->re_sastate.sa_saved_indigest,
			    hash, ses->ses_mlen) != 0)
				crp->crp_etype = EBADMSG;
		} else
			crypto_copyback(crp, crp->crp_digest_start,
			    ses->ses_mlen, re->re_sastate.sa_saved_indigest);
	}
	crypto_done(crp);
}

/*
 * Copy all data past offset from srcm to dstm.
 */
static void
safe_mcopy(struct mbuf *srcm, struct mbuf *dstm, u_int offset)
{
	u_int j, dlen, slen;
	caddr_t dptr, sptr;

	/*
	 * Advance src and dst to offset.
	 */
	j = offset;
	while (j >= srcm->m_len) {
		j -= srcm->m_len;
		srcm = srcm->m_next;
		if (srcm == NULL)
			return;
	}
	sptr = mtod(srcm, caddr_t) + j;
	slen = srcm->m_len - j;

	j = offset;
	while (j >= dstm->m_len) {
		j -= dstm->m_len;
		dstm = dstm->m_next;
		if (dstm == NULL)
			return;
	}
	dptr = mtod(dstm, caddr_t) + j;
	dlen = dstm->m_len - j;

	/*
	 * Copy everything that remains.
	 */
	for (;;) {
		j = min(slen, dlen);
		bcopy(sptr, dptr, j);
		if (slen == j) {
			srcm = srcm->m_next;
			if (srcm == NULL)
				return;
			sptr = srcm->m_data;
			slen = srcm->m_len;
		} else
			sptr += j, slen -= j;
		if (dlen == j) {
			dstm = dstm->m_next;
			if (dstm == NULL)
				return;
			dptr = dstm->m_data;
			dlen = dstm->m_len;
		} else
			dptr += j, dlen -= j;
	}
}

#ifndef SAFE_NO_RNG
#define	SAFE_RNG_MAXWAIT	1000

static void
safe_rng_init(struct safe_softc *sc)
{
	u_int32_t w, v;
	int i;

	WRITE_REG(sc, SAFE_RNG_CTRL, 0);
	/* use default value according to the manual */
	WRITE_REG(sc, SAFE_RNG_CNFG, 0x834);	/* magic from SafeNet */
	WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);

	/*
	 * There is a bug in rev 1.0 of the 1140: when the RNG is
	 * brought out of reset the ready status flag does not work
	 * until the RNG has finished its internal initialization.
	 *
	 * So to determine that the device is through its
	 * initialization we read the data register (using the status
	 * reg in the read in case it is already initialized), then
	 * poll the data register until its value changes from that
	 * first read.  Once it changes, poll again until it changes a
	 * second time; at that point the RNG is considered
	 * initialized.  This can take between 750ms and 1000ms.
	 */
	i = 0;
	w = READ_REG(sc, SAFE_RNG_OUT);
	do {
		v = READ_REG(sc, SAFE_RNG_OUT);
		if (v != w) {
			w = v;
			break;
		}
		DELAY(10);
	} while (++i < SAFE_RNG_MAXWAIT);

	/* Wait until the data changes again */
	i = 0;
	do {
		v = READ_REG(sc, SAFE_RNG_OUT);
		if (v != w)
			break;
		DELAY(10);
	} while (++i < SAFE_RNG_MAXWAIT);
}

static __inline void
safe_rng_disable_short_cycle(struct safe_softc *sc)
{
	WRITE_REG(sc, SAFE_RNG_CTRL,
		READ_REG(sc, SAFE_RNG_CTRL) &~ SAFE_RNG_CTRL_SHORTEN);
}

static __inline void
safe_rng_enable_short_cycle(struct safe_softc *sc)
{
	WRITE_REG(sc, SAFE_RNG_CTRL,
		READ_REG(sc, SAFE_RNG_CTRL) | SAFE_RNG_CTRL_SHORTEN);
}

static __inline u_int32_t
safe_rng_read(struct safe_softc *sc)
{
	int i;

	i = 0;
	while (READ_REG(sc, SAFE_RNG_STAT) != 0 && ++i < SAFE_RNG_MAXWAIT)
		;
	return READ_REG(sc, SAFE_RNG_OUT);
}

static void
safe_rng(void *arg)
{
	struct safe_softc *sc = arg;
	u_int32_t buf[SAFE_RNG_MAXBUFSIZ];	/* NB: maybe move to softc */
	u_int maxwords;
	int i;

	safestats.st_rng++;
	/*
	 * Fetch the next block of data.
	 */
	maxwords = safe_rngbufsize;
	if (maxwords > SAFE_RNG_MAXBUFSIZ)
		maxwords = SAFE_RNG_MAXBUFSIZ;
retry:
	for (i = 0; i < maxwords; i++)
		buf[i] = safe_rng_read(sc);
	/*
	 * Check the comparator alarm count and reset the h/w if
	 * it exceeds our threshold.  This guards against the
	 * hardware oscillators resonating with external signals.
	 */
	if (READ_REG(sc, SAFE_RNG_ALM_CNT) > safe_rngmaxalarm) {
		u_int32_t freq_inc, w;

		DPRINTF(("%s: alarm count %u exceeds threshold %u\n", __func__,
			READ_REG(sc, SAFE_RNG_ALM_CNT), safe_rngmaxalarm));
		safestats.st_rngalarm++;
		safe_rng_enable_short_cycle(sc);
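		/*
		 * NB: the loop below walks the low 6 bits of
		 *     SAFE_RNG_CNFG, which act here as an oscillator
		 *     frequency trim.  Each trial setting clears the
		 *     alarm counter, discards a sample, and waits
		 *     25us; the first setting that produces no new
		 *     alarms is kept before resuming long-cycle
		 *     operation.
		 */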
		freq_inc = 18;
		for (i = 0; i < 64; i++) {
			w = READ_REG(sc, SAFE_RNG_CNFG);
			freq_inc = ((w + freq_inc) & 0x3fL);
			w = ((w & ~0x3fL) | freq_inc);
			WRITE_REG(sc, SAFE_RNG_CNFG, w);

			WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);

			(void) safe_rng_read(sc);
			DELAY(25);

			if (READ_REG(sc, SAFE_RNG_ALM_CNT) == 0) {
				safe_rng_disable_short_cycle(sc);
				goto retry;
			}
			freq_inc = 1;
		}
		safe_rng_disable_short_cycle(sc);
	} else
		WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);

	(*sc->sc_harvest)(sc->sc_rndtest, buf, maxwords*sizeof (u_int32_t));
	callout_reset(&sc->sc_rngto,
		hz * (safe_rnginterval ? safe_rnginterval : 1), safe_rng, sc);
}
#endif /* SAFE_NO_RNG */
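/*
 * NB: safe_dma_malloc below creates its tag with nsegments = 1, so
 * each allocation is physically contiguous and safe_dmamap_cb need
 * only record the address of the single segment in dma_paddr.
 */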
static void
safe_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr = (bus_addr_t*) arg;
	*paddr = segs->ds_addr;
}

static int
safe_dma_malloc(
	struct safe_softc *sc,
	bus_size_t size,
	struct safe_dma_alloc *dma,
	int mapflags
)
{
	int r;

	r = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
			       sizeof(u_int32_t), 0,	/* alignment, bounds */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       size,			/* maxsize */
			       1,			/* nsegments */
			       size,			/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       NULL, NULL,		/* locking */
			       &dma->dma_tag);
	if (r != 0) {
		device_printf(sc->sc_dev, "safe_dma_malloc: "
			"bus_dma_tag_create failed; error %u\n", r);
		goto fail_0;
	}

	r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
			     BUS_DMA_NOWAIT, &dma->dma_map);
	if (r != 0) {
		device_printf(sc->sc_dev, "safe_dma_malloc: "
			"bus_dmamem_alloc failed; size %ju, error %u\n",
			(uintmax_t)size, r);
		goto fail_1;
	}

	r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
		            size,
			    safe_dmamap_cb,
			    &dma->dma_paddr,
			    mapflags | BUS_DMA_NOWAIT);
	if (r != 0) {
		device_printf(sc->sc_dev, "safe_dma_malloc: "
			"bus_dmamap_load failed; error %u\n", r);
		goto fail_2;
	}

	dma->dma_size = size;
	return (0);

fail_2:
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
fail_1:
	bus_dma_tag_destroy(dma->dma_tag);
fail_0:
	dma->dma_tag = NULL;
	return (r);
}

static void
safe_dma_free(struct safe_softc *sc, struct safe_dma_alloc *dma)
{
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
	bus_dma_tag_destroy(dma->dma_tag);
}

/*
 * Resets the board.  Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 */
static void
safe_reset_board(struct safe_softc *sc)
{
	u_int32_t v;
	/*
	 * Reset the device.  The manual says no delay
	 * is needed between marking and clearing reset.
	 */
	v = READ_REG(sc, SAFE_PE_DMACFG) &~
		(SAFE_PE_DMACFG_PERESET | SAFE_PE_DMACFG_PDRRESET |
		 SAFE_PE_DMACFG_SGRESET);
	WRITE_REG(sc, SAFE_PE_DMACFG, v
				    | SAFE_PE_DMACFG_PERESET
				    | SAFE_PE_DMACFG_PDRRESET
				    | SAFE_PE_DMACFG_SGRESET);
	WRITE_REG(sc, SAFE_PE_DMACFG, v);
}

/*
 * Initialize registers we need to touch only once.
 */
static void
safe_init_board(struct safe_softc *sc)
{
	u_int32_t v, dwords;

	v = READ_REG(sc, SAFE_PE_DMACFG);
	v &=~ SAFE_PE_DMACFG_PEMODE;
	v |= SAFE_PE_DMACFG_FSENA		/* failsafe enable */
	  |  SAFE_PE_DMACFG_GPRPCI		/* gather ring on PCI */
	  |  SAFE_PE_DMACFG_SPRPCI		/* scatter ring on PCI */
	  |  SAFE_PE_DMACFG_ESDESC		/* endian-swap descriptors */
	  |  SAFE_PE_DMACFG_ESSA		/* endian-swap SA's */
	  |  SAFE_PE_DMACFG_ESPDESC		/* endian-swap part. desc's */
	  ;
	WRITE_REG(sc, SAFE_PE_DMACFG, v);
#if 0
	/* XXX select byte swap based on host byte order */
	WRITE_REG(sc, SAFE_ENDIAN, 0x1b);
#endif
	if (sc->sc_chiprev == SAFE_REV(1,0)) {
		/*
		 * Avoid large PCI DMA transfers.  Rev 1.0 has a bug where
		 * "target mode transfers" done while the chip is DMA'ing
		 * >1020 bytes cause the hardware to lockup.  To avoid this
		 * we reduce the max PCI transfer size and use small source
		 * particle descriptors (<= 256 bytes).
		 */
		WRITE_REG(sc, SAFE_DMA_CFG, 256);
		device_printf(sc->sc_dev,
			"Reduce max DMA size to %u words for rev %u.%u WAR\n",
			(READ_REG(sc, SAFE_DMA_CFG)>>2) & 0xff,
			SAFE_REV_MAJ(sc->sc_chiprev),
			SAFE_REV_MIN(sc->sc_chiprev));
	}

	/* NB: operands+results are overlaid */
	WRITE_REG(sc, SAFE_PE_PDRBASE, sc->sc_ringalloc.dma_paddr);
	WRITE_REG(sc, SAFE_PE_RDRBASE, sc->sc_ringalloc.dma_paddr);
	/*
	 * Configure ring entry size and number of items in the ring.
	 */
	KASSERT((sizeof(struct safe_ringentry) % sizeof(u_int32_t)) == 0,
		("PE ring entry not 32-bit aligned!"));
	dwords = sizeof(struct safe_ringentry) / sizeof(u_int32_t);
	WRITE_REG(sc, SAFE_PE_RINGCFG,
		(dwords << SAFE_PE_RINGCFG_OFFSET_S) | SAFE_MAX_NQUEUE);
	WRITE_REG(sc, SAFE_PE_RINGPOLL, 0);	/* disable polling */

	WRITE_REG(sc, SAFE_PE_GRNGBASE, sc->sc_spalloc.dma_paddr);
	WRITE_REG(sc, SAFE_PE_SRNGBASE, sc->sc_dpalloc.dma_paddr);
	WRITE_REG(sc, SAFE_PE_PARTSIZE,
		(SAFE_TOTAL_DPART<<16) | SAFE_TOTAL_SPART);
	/*
	 * NB: destination particles are fixed size.  We use
	 *     an mbuf cluster and require all results go to
	 *     clusters or smaller.
	 */
	WRITE_REG(sc, SAFE_PE_PARTCFG, SAFE_MAX_DSIZE);

	/* it's now safe to enable PE mode, do it */
	WRITE_REG(sc, SAFE_PE_DMACFG, v | SAFE_PE_DMACFG_PEMODE);

	/*
	 * Configure hardware to use level-triggered interrupts and
	 * to interrupt after each descriptor is processed.
	 */
	WRITE_REG(sc, SAFE_HI_CFG, SAFE_HI_CFG_LEVEL);
	WRITE_REG(sc, SAFE_HI_DESC_CNT, 1);
	WRITE_REG(sc, SAFE_HI_MASK, SAFE_INT_PE_DDONE | SAFE_INT_PE_ERROR);
}

/*
 * Init PCI registers
 */
static void
safe_init_pciregs(device_t dev)
{
}

/*
 * Clean up after a chip crash.
 * It is assumed that the caller is in splimp().
 */
static void
safe_cleanchip(struct safe_softc *sc)
{

	if (sc->sc_nqchip != 0) {
		struct safe_ringentry *re = sc->sc_back;

		while (re != sc->sc_front) {
			if (re->re_desc.d_csr != 0)
				safe_free_entry(sc, re);
			if (++re == sc->sc_ringtop)
				re = sc->sc_ring;
		}
		sc->sc_back = re;
		sc->sc_nqchip = 0;
	}
}

/*
 * Free a ring entry, completing its request with EFAULT.
 * It is assumed that the caller is within splimp().
 */
static int
safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re)
{
	struct cryptop *crp;

	/*
	 * Free any allocated destination mbufs.
	 */
	if (re->re_dst_m != NULL)
		m_freem(re->re_dst_m);

	crp = (struct cryptop *)re->re_crp;

	re->re_desc.d_csr = 0;

	crp->crp_etype = EFAULT;
	crypto_done(crp);
	return(0);
}

/*
 * Routine to reset the chip and clean up.
 * It is assumed that the caller is in splimp().
 */
static void
safe_totalreset(struct safe_softc *sc)
{
	safe_reset_board(sc);
	safe_init_board(sc);
	safe_cleanchip(sc);
}

/*
 * Is the operand suitably aligned for direct DMA?  Each
 * segment must be aligned on a 32-bit boundary and all
 * but the last segment must be a multiple of 4 bytes.
 */
static int
safe_dmamap_aligned(const struct safe_operand *op)
{
	int i;

	for (i = 0; i < op->nsegs; i++) {
		if (op->segs[i].ds_addr & 3)
			return (0);
		if (i != (op->nsegs - 1) && (op->segs[i].ds_len & 3))
			return (0);
	}
	return (1);
}

/*
 * Is the operand suitable for direct DMA as the destination
 * of an operation?  The hardware requires that each ``particle''
 * but the last in an operation result have the same size.  We
 * fix that size at SAFE_MAX_DSIZE bytes.  This routine returns
 * 0 if some segment is not a multiple of this size, 1 if all
 * segments are exactly this size, or 2 if segments are at worst
 * a multiple of this size.
 */
static int
safe_dmamap_uniform(const struct safe_operand *op)
{
	int result = 1;

	if (op->nsegs > 0) {
		int i;

		for (i = 0; i < op->nsegs-1; i++) {
			if (op->segs[i].ds_len % SAFE_MAX_DSIZE)
				return (0);
			if (op->segs[i].ds_len != SAFE_MAX_DSIZE)
				result = 2;
		}
	}
	return (result);
}
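/*
 * NB: safe_process keys off these classifications: an aligned operand
 * with uniform == 1 can share the source DMA map for the destination
 * outright; uniform == 2 is aligned but needs a separate destination
 * map and particle list; anything else forces a copy through a fresh
 * mbuf chain (or fails for uio requests).
 */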
#ifdef SAFE_DEBUG
static void
safe_dump_dmastatus(struct safe_softc *sc, const char *tag)
{
	printf("%s: ENDIAN 0x%x SRC 0x%x DST 0x%x STAT 0x%x\n"
		, tag
		, READ_REG(sc, SAFE_DMA_ENDIAN)
		, READ_REG(sc, SAFE_DMA_SRCADDR)
		, READ_REG(sc, SAFE_DMA_DSTADDR)
		, READ_REG(sc, SAFE_DMA_STAT)
	);
}

static void
safe_dump_intrstate(struct safe_softc *sc, const char *tag)
{
	printf("%s: HI_CFG 0x%x HI_MASK 0x%x HI_DESC_CNT 0x%x HU_STAT 0x%x HM_STAT 0x%x\n"
		, tag
		, READ_REG(sc, SAFE_HI_CFG)
		, READ_REG(sc, SAFE_HI_MASK)
		, READ_REG(sc, SAFE_HI_DESC_CNT)
		, READ_REG(sc, SAFE_HU_STAT)
		, READ_REG(sc, SAFE_HM_STAT)
	);
}

static void
safe_dump_ringstate(struct safe_softc *sc, const char *tag)
{
	u_int32_t estat = READ_REG(sc, SAFE_PE_ERNGSTAT);

	/* NB: assume caller has lock on ring */
	printf("%s: ERNGSTAT %x (next %u) back %lu front %lu\n",
		tag,
		estat, (estat >> SAFE_PE_ERNGSTAT_NEXT_S),
		(unsigned long)(sc->sc_back - sc->sc_ring),
		(unsigned long)(sc->sc_front - sc->sc_ring));
}

static void
safe_dump_request(struct safe_softc *sc, const char* tag, struct safe_ringentry *re)
{
	int ix, nsegs;

	ix = re - sc->sc_ring;
	printf("%s: %p (%u): csr %x src %x dst %x sa %x len %x\n"
		, tag
		, re, ix
		, re->re_desc.d_csr
		, re->re_desc.d_src
		, re->re_desc.d_dst
		, re->re_desc.d_sa
		, re->re_desc.d_len
	);
	if (re->re_src.nsegs > 1) {
		ix = (re->re_desc.d_src - sc->sc_spalloc.dma_paddr) /
			sizeof(struct safe_pdesc);
		for (nsegs = re->re_src.nsegs; nsegs; nsegs--) {
			printf(" spd[%u] %p: %p size %u flags %x"
				, ix, &sc->sc_spring[ix]
				, (caddr_t)(uintptr_t) sc->sc_spring[ix].pd_addr
				, sc->sc_spring[ix].pd_size
				, sc->sc_spring[ix].pd_flags
			);
			if (sc->sc_spring[ix].pd_size == 0)
				printf(" (zero!)");
			printf("\n");
			if (++ix == SAFE_TOTAL_SPART)
				ix = 0;
		}
	}
	if (re->re_dst.nsegs > 1) {
		ix = (re->re_desc.d_dst - sc->sc_dpalloc.dma_paddr) /
			sizeof(struct safe_pdesc);
		for (nsegs = re->re_dst.nsegs; nsegs; nsegs--) {
			printf(" dpd[%u] %p: %p flags %x\n"
				, ix, &sc->sc_dpring[ix]
				, (caddr_t)(uintptr_t) sc->sc_dpring[ix].pd_addr
				, sc->sc_dpring[ix].pd_flags
			);
			if (++ix == SAFE_TOTAL_DPART)
				ix = 0;
		}
	}
	printf("sa: cmd0 %08x cmd1 %08x staterec %x\n",
		re->re_sa.sa_cmd0, re->re_sa.sa_cmd1, re->re_sa.sa_staterec);
	printf("sa: key %x %x %x %x %x %x %x %x\n"
		, re->re_sa.sa_key[0]
		, re->re_sa.sa_key[1]
		, re->re_sa.sa_key[2]
		, re->re_sa.sa_key[3]
		, re->re_sa.sa_key[4]
		, re->re_sa.sa_key[5]
		, re->re_sa.sa_key[6]
		, re->re_sa.sa_key[7]
	);
	printf("sa: indigest %x %x %x %x %x\n"
		, re->re_sa.sa_indigest[0]
		, re->re_sa.sa_indigest[1]
		, re->re_sa.sa_indigest[2]
		, re->re_sa.sa_indigest[3]
		, re->re_sa.sa_indigest[4]
	);
	printf("sa: outdigest %x %x %x %x %x\n"
		, re->re_sa.sa_outdigest[0]
		, re->re_sa.sa_outdigest[1]
		, re->re_sa.sa_outdigest[2]
		, re->re_sa.sa_outdigest[3]
		, re->re_sa.sa_outdigest[4]
	);
	printf("sr: iv %x %x %x %x\n"
		, re->re_sastate.sa_saved_iv[0]
		, re->re_sastate.sa_saved_iv[1]
		, re->re_sastate.sa_saved_iv[2]
		, re->re_sastate.sa_saved_iv[3]
	);
	printf("sr: hashbc %u indigest %x %x %x %x %x\n"
		, re->re_sastate.sa_saved_hashbc
		, re->re_sastate.sa_saved_indigest[0]
		, re->re_sastate.sa_saved_indigest[1]
		, re->re_sastate.sa_saved_indigest[2]
		, re->re_sastate.sa_saved_indigest[3]
		, re->re_sastate.sa_saved_indigest[4]
	);
}

static void
safe_dump_ring(struct safe_softc *sc, const char *tag)
{
	mtx_lock(&sc->sc_ringmtx);
	printf("\nSafeNet Ring State:\n");
	safe_dump_intrstate(sc, tag);
	safe_dump_dmastatus(sc, tag);
	safe_dump_ringstate(sc, tag);
	if (sc->sc_nqchip) {
		struct safe_ringentry *re = sc->sc_back;
		do {
			safe_dump_request(sc, tag, re);
			if (++re == sc->sc_ringtop)
				re = sc->sc_ring;
		} while (re != sc->sc_front);
	}
	mtx_unlock(&sc->sc_ringmtx);
}

static int
sysctl_hw_safe_dump(SYSCTL_HANDLER_ARGS)
{
	char dmode[64];
	int error;

	strncpy(dmode, "", sizeof(dmode) - 1);
	dmode[sizeof(dmode) - 1] = '\0';
	error = sysctl_handle_string(oidp, &dmode[0], sizeof(dmode), req);

	if (error == 0 && req->newptr != NULL) {
		struct safe_softc *sc = safec;

		if (!sc)
			return EINVAL;
		if (strncmp(dmode, "dma", 3) == 0)
			safe_dump_dmastatus(sc, "safe0");
		else if (strncmp(dmode, "int", 3) == 0)
			safe_dump_intrstate(sc, "safe0");
		else if (strncmp(dmode, "ring", 4) == 0)
			safe_dump_ring(sc, "safe0");
		else
			return EINVAL;
	}
	return error;
}
SYSCTL_PROC(_hw_safe, OID_AUTO, dump,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 0, 0,
    sysctl_hw_safe_dump, "A",
    "Dump driver state");
#endif /* SAFE_DEBUG */