/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2003 Sam Leffler, Errno Consulting
 * Copyright (c) 2003 Global Technology Associates, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * SafeNet SafeXcel-1141 hardware crypto accelerator
 */
#include "opt_safe.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <sys/uio.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform_auth.h>
#include <sys/random.h>
#include <sys/kobj.h>

#include "cryptodev_if.h"

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#ifdef SAFE_RNDTEST
#include <dev/rndtest/rndtest.h>
#endif
#include <dev/safe/safereg.h>
#include <dev/safe/safevar.h>

#ifndef bswap32
#define	bswap32	NTOHL
#endif

/*
 * Prototypes and count for the pci_device structure
 */
static	int safe_probe(device_t);
static	int safe_attach(device_t);
static	int safe_detach(device_t);
static	int safe_suspend(device_t);
static	int safe_resume(device_t);
static	int safe_shutdown(device_t);

static	int safe_probesession(device_t, const struct crypto_session_params *);
static	int safe_newsession(device_t, crypto_session_t,
	    const struct crypto_session_params *);
static	int safe_process(device_t, struct cryptop *, int);

static device_method_t safe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		safe_probe),
	DEVMETHOD(device_attach,	safe_attach),
	DEVMETHOD(device_detach,	safe_detach),
	DEVMETHOD(device_suspend,	safe_suspend),
	DEVMETHOD(device_resume,	safe_resume),
	DEVMETHOD(device_shutdown,	safe_shutdown),

	/* crypto device methods */
	DEVMETHOD(cryptodev_probesession, safe_probesession),
	DEVMETHOD(cryptodev_newsession,	safe_newsession),
	DEVMETHOD(cryptodev_process,	safe_process),

	DEVMETHOD_END
};
static driver_t safe_driver = {
	"safe",
	safe_methods,
	sizeof (struct safe_softc)
};
static devclass_t safe_devclass;

DRIVER_MODULE(safe, pci, safe_driver, safe_devclass, 0, 0);
MODULE_DEPEND(safe, crypto, 1, 1, 1);
#ifdef SAFE_RNDTEST
MODULE_DEPEND(safe, rndtest, 1, 1, 1);
#endif

static	void safe_intr(void *);
static	void safe_callback(struct safe_softc *, struct safe_ringentry *);
static	void safe_feed(struct safe_softc *, struct safe_ringentry *);
static	void safe_mcopy(struct mbuf *, struct mbuf *, u_int);
#ifndef SAFE_NO_RNG
static	void safe_rng_init(struct safe_softc *);
static	void safe_rng(void *);
#endif /* SAFE_NO_RNG */
static	int safe_dma_malloc(struct safe_softc *, bus_size_t,
	    struct safe_dma_alloc *, int);
#define	safe_dma_sync(_dma, _flags) \
	bus_dmamap_sync((_dma)->dma_tag, (_dma)->dma_map, (_flags))
static	void safe_dma_free(struct safe_softc *, struct safe_dma_alloc *);
static	int safe_dmamap_aligned(const struct safe_operand *);
static	int safe_dmamap_uniform(const struct safe_operand *);

static	void safe_reset_board(struct safe_softc *);
static	void safe_init_board(struct safe_softc *);
static	void safe_init_pciregs(device_t dev);
static	void safe_cleanchip(struct safe_softc *);
static	void safe_totalreset(struct safe_softc *);

static	int safe_free_entry(struct safe_softc *, struct safe_ringentry *);

static SYSCTL_NODE(_hw, OID_AUTO, safe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "SafeNet driver parameters");

#ifdef SAFE_DEBUG
static	void safe_dump_dmastatus(struct safe_softc *, const char *);
static	void safe_dump_ringstate(struct safe_softc *, const char *);
static	void safe_dump_intrstate(struct safe_softc *, const char *);
static	void safe_dump_request(struct safe_softc *, const char *,
	    struct safe_ringentry *);

static	struct safe_softc *safec;	/* for use by hw.safe.dump */

static	int safe_debug = 0;
SYSCTL_INT(_hw_safe, OID_AUTO, debug, CTLFLAG_RW, &safe_debug,
    0, "control debugging msgs");
#define	DPRINTF(_x)	if (safe_debug) printf _x
#else
#define	DPRINTF(_x)
#endif

#define	READ_REG(sc,r) \
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (r))

#define	WRITE_REG(sc,reg,val) \
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, reg, val)

struct safe_stats safestats;
SYSCTL_STRUCT(_hw_safe, OID_AUTO, stats, CTLFLAG_RD, &safestats,
    safe_stats, "driver statistics");
#ifndef SAFE_NO_RNG
static	int safe_rnginterval = 1;	/* poll once a second */
SYSCTL_INT(_hw_safe, OID_AUTO, rnginterval, CTLFLAG_RW, &safe_rnginterval,
    0, "RNG polling interval (secs)");
static	int safe_rngbufsize = 16;	/* 64 bytes each poll */
SYSCTL_INT(_hw_safe, OID_AUTO, rngbufsize, CTLFLAG_RW, &safe_rngbufsize,
    0, "RNG polling buffer size (32-bit words)");
static	int safe_rngmaxalarm = 8;	/* max alarms before reset */
SYSCTL_INT(_hw_safe, OID_AUTO, rngmaxalarm, CTLFLAG_RW, &safe_rngmaxalarm,
    0, "RNG max alarms before reset");
#endif /* SAFE_NO_RNG */

static int
safe_probe(device_t dev)
{
	if (pci_get_vendor(dev) == PCI_VENDOR_SAFENET &&
	    pci_get_device(dev) == PCI_PRODUCT_SAFEXCEL)
		return (BUS_PROBE_DEFAULT);
	return (ENXIO);
}

static	const char *
safe_partname(struct safe_softc *sc)
{
	/* XXX sprintf numbers when not decoded */
	switch (pci_get_vendor(sc->sc_dev)) {
	case PCI_VENDOR_SAFENET:
		switch (pci_get_device(sc->sc_dev)) {
		case PCI_PRODUCT_SAFEXCEL: return "SafeNet SafeXcel-1141";
		}
		return "SafeNet unknown-part";
	}
	return "Unknown-vendor unknown-part";
}

#ifndef SAFE_NO_RNG
static void
default_harvest(struct rndtest_state *rsp, void *buf, u_int count)
{
	/* MarkM: FIX!! Check that this does not swamp the harvester! */
	random_harvest_queue(buf, count, RANDOM_PURE_SAFE);
}
#endif /* SAFE_NO_RNG */

static int
safe_attach(device_t dev)
{
	struct safe_softc *sc = device_get_softc(dev);
	u_int32_t raddr;
	u_int32_t i;
	int rid;

	bzero(sc, sizeof (*sc));
	sc->sc_dev = dev;

	/* XXX handle power management */

	pci_enable_busmaster(dev);

	/*
	 * Setup memory-mapping of PCI registers.
	 */
	rid = BS_BAR;
	sc->sc_sr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_sr == NULL) {
		device_printf(dev, "cannot map register space\n");
		goto bad;
	}
	sc->sc_st = rman_get_bustag(sc->sc_sr);
	sc->sc_sh = rman_get_bushandle(sc->sc_sr);

	/*
	 * Arrange interrupt line.
	 */
	rid = 0;
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE|RF_ACTIVE);
	if (sc->sc_irq == NULL) {
		device_printf(dev, "could not map interrupt\n");
		goto bad1;
	}
	/*
	 * NB: Network code assumes we are blocked with splimp()
	 *     so make sure the IRQ is mapped appropriately.
	 */
	if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, safe_intr, sc, &sc->sc_ih)) {
		device_printf(dev, "could not establish interrupt\n");
		goto bad2;
	}

	sc->sc_cid = crypto_get_driverid(dev, sizeof(struct safe_session),
	    CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0) {
		device_printf(dev, "could not get crypto driver id\n");
		goto bad3;
	}

	sc->sc_chiprev = READ_REG(sc, SAFE_DEVINFO) &
	    (SAFE_DEVINFO_REV_MAJ | SAFE_DEVINFO_REV_MIN);

	/*
	 * Setup DMA descriptor area.
	 */
	if (bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
	    1,				/* alignment */
	    SAFE_DMA_BOUNDARY,		/* boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    SAFE_MAX_DMA,		/* maxsize */
	    SAFE_MAX_PART,		/* nsegments */
	    SAFE_MAX_SSIZE,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW,		/* flags */
	    NULL, NULL,			/* locking */
	    &sc->sc_srcdmat)) {
		device_printf(dev, "cannot allocate DMA tag\n");
		goto bad4;
	}
	if (bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
	    1,				/* alignment */
	    SAFE_MAX_DSIZE,		/* boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    SAFE_MAX_DMA,		/* maxsize */
	    SAFE_MAX_PART,		/* nsegments */
	    SAFE_MAX_DSIZE,		/* maxsegsize */
	    BUS_DMA_ALLOCNOW,		/* flags */
	    NULL, NULL,			/* locking */
	    &sc->sc_dstdmat)) {
		device_printf(dev, "cannot allocate DMA tag\n");
		goto bad4;
	}
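
	/*
	 * NB: two tags because the constraints differ: source data may
	 * be gathered from arbitrarily sized segments, while destination
	 * particles are fixed size (SAFE_MAX_DSIZE; see
	 * safe_dmamap_uniform and the SAFE_PE_PARTCFG setup in
	 * safe_init_board), hence the boundary/maxsegsize on sc_dstdmat.
	 */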
	/*
	 * Allocate packet engine descriptors.
	 */
	if (safe_dma_malloc(sc,
	    SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
	    &sc->sc_ringalloc, 0)) {
		device_printf(dev, "cannot allocate PE descriptor ring\n");
		bus_dma_tag_destroy(sc->sc_srcdmat);
		goto bad4;
	}
	/*
	 * Hookup the static portion of all our data structures.
	 */
	sc->sc_ring = (struct safe_ringentry *) sc->sc_ringalloc.dma_vaddr;
	sc->sc_ringtop = sc->sc_ring + SAFE_MAX_NQUEUE;
	sc->sc_front = sc->sc_ring;
	sc->sc_back = sc->sc_ring;
	raddr = sc->sc_ringalloc.dma_paddr;
	bzero(sc->sc_ring, SAFE_MAX_NQUEUE * sizeof(struct safe_ringentry));
	for (i = 0; i < SAFE_MAX_NQUEUE; i++) {
		struct safe_ringentry *re = &sc->sc_ring[i];

		re->re_desc.d_sa = raddr +
		    offsetof(struct safe_ringentry, re_sa);
		re->re_sa.sa_staterec = raddr +
		    offsetof(struct safe_ringentry, re_sastate);

		raddr += sizeof (struct safe_ringentry);
	}
	mtx_init(&sc->sc_ringmtx, device_get_nameunit(dev),
	    "packet engine ring", MTX_DEF);

	/*
	 * Allocate scatter and gather particle descriptors.
	 */
	if (safe_dma_malloc(sc, SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
	    &sc->sc_spalloc, 0)) {
		device_printf(dev, "cannot allocate source particle "
		    "descriptor ring\n");
		mtx_destroy(&sc->sc_ringmtx);
		safe_dma_free(sc, &sc->sc_ringalloc);
		bus_dma_tag_destroy(sc->sc_srcdmat);
		goto bad4;
	}
	sc->sc_spring = (struct safe_pdesc *) sc->sc_spalloc.dma_vaddr;
	sc->sc_springtop = sc->sc_spring + SAFE_TOTAL_SPART;
	sc->sc_spfree = sc->sc_spring;
	bzero(sc->sc_spring, SAFE_TOTAL_SPART * sizeof(struct safe_pdesc));

	if (safe_dma_malloc(sc, SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
	    &sc->sc_dpalloc, 0)) {
		device_printf(dev, "cannot allocate destination particle "
		    "descriptor ring\n");
		mtx_destroy(&sc->sc_ringmtx);
		safe_dma_free(sc, &sc->sc_spalloc);
		safe_dma_free(sc, &sc->sc_ringalloc);
		bus_dma_tag_destroy(sc->sc_dstdmat);
		goto bad4;
	}
	sc->sc_dpring = (struct safe_pdesc *) sc->sc_dpalloc.dma_vaddr;
	sc->sc_dpringtop = sc->sc_dpring + SAFE_TOTAL_DPART;
	sc->sc_dpfree = sc->sc_dpring;
	bzero(sc->sc_dpring, SAFE_TOTAL_DPART * sizeof(struct safe_pdesc));

	device_printf(sc->sc_dev, "%s", safe_partname(sc));

	sc->sc_devinfo = READ_REG(sc, SAFE_DEVINFO);
	if (sc->sc_devinfo & SAFE_DEVINFO_RNG) {
		sc->sc_flags |= SAFE_FLAGS_RNG;
		printf(" rng");
	}
	if (sc->sc_devinfo & SAFE_DEVINFO_PKEY) {
#if 0
		printf(" key");
		sc->sc_flags |= SAFE_FLAGS_KEY;
		crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0);
		crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0);
#endif
	}
	if (sc->sc_devinfo & SAFE_DEVINFO_DES) {
		printf(" des/3des");
	}
	if (sc->sc_devinfo & SAFE_DEVINFO_AES) {
		printf(" aes");
	}
	if (sc->sc_devinfo & SAFE_DEVINFO_MD5) {
		printf(" md5");
	}
	if (sc->sc_devinfo & SAFE_DEVINFO_SHA1) {
		printf(" sha1");
	}
	/* XXX other supported algorithms */
	printf("\n");

	safe_reset_board(sc);		/* reset h/w */
	safe_init_pciregs(dev);		/* init pci settings */
	safe_init_board(sc);		/* init h/w */

#ifndef SAFE_NO_RNG
	if (sc->sc_flags & SAFE_FLAGS_RNG) {
#ifdef SAFE_RNDTEST
		sc->sc_rndtest = rndtest_attach(dev);
		if (sc->sc_rndtest)
			sc->sc_harvest = rndtest_harvest;
		else
			sc->sc_harvest = default_harvest;
#else
		sc->sc_harvest = default_harvest;
#endif
		safe_rng_init(sc);
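
		/*
		 * Poll the RNG from a callout rather than an interrupt;
		 * hw.safe.rnginterval (seconds) governs how often
		 * safe_rng() harvests data from the device.
		 */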
		callout_init(&sc->sc_rngto, 1);
		callout_reset(&sc->sc_rngto, hz*safe_rnginterval, safe_rng, sc);
	}
#endif /* SAFE_NO_RNG */
#ifdef SAFE_DEBUG
	safec = sc;			/* for use by hw.safe.dump */
#endif
	return (0);
bad4:
	crypto_unregister_all(sc->sc_cid);
bad3:
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
bad2:
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
bad1:
	bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr);
bad:
	return (ENXIO);
}

/*
 * Detach a device that successfully probed.
 */
static int
safe_detach(device_t dev)
{
	struct safe_softc *sc = device_get_softc(dev);

	/* XXX wait/abort active ops */

	WRITE_REG(sc, SAFE_HI_MASK, 0);		/* disable interrupts */

	callout_stop(&sc->sc_rngto);

	crypto_unregister_all(sc->sc_cid);

#ifdef SAFE_RNDTEST
	if (sc->sc_rndtest)
		rndtest_detach(sc->sc_rndtest);
#endif

	safe_cleanchip(sc);
	safe_dma_free(sc, &sc->sc_dpalloc);
	safe_dma_free(sc, &sc->sc_spalloc);
	mtx_destroy(&sc->sc_ringmtx);
	safe_dma_free(sc, &sc->sc_ringalloc);

	bus_generic_detach(dev);
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);

	bus_dma_tag_destroy(sc->sc_srcdmat);
	bus_dma_tag_destroy(sc->sc_dstdmat);
	bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr);

	return (0);
}

/*
 * Stop all chip i/o so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
safe_shutdown(device_t dev)
{
#ifdef notyet
	safe_stop(device_get_softc(dev));
#endif
	return (0);
}

/*
 * Device suspend routine.
 */
static int
safe_suspend(device_t dev)
{
	struct safe_softc *sc = device_get_softc(dev);

#ifdef notyet
	/* XXX stop the device and save PCI settings */
#endif
	sc->sc_suspended = 1;

	return (0);
}

static int
safe_resume(device_t dev)
{
	struct safe_softc *sc = device_get_softc(dev);

#ifdef notyet
	/* XXX restore PCI settings and start the device */
#endif
	sc->sc_suspended = 0;
	return (0);
}
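
/*
 * NB: sc_front and sc_back delimit the requests outstanding to the
 * chip: safe_process() adds entries at sc_front and the interrupt
 * handler retires them from sc_back.  The ring is considered full
 * when the pointers meet while sc_nqchip is non-zero; safe_process()
 * uses that test to push back with ERESTART.
 */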

/*
 * SafeXcel Interrupt routine
 */
static void
safe_intr(void *arg)
{
	struct safe_softc *sc = arg;
	volatile u_int32_t stat;

	stat = READ_REG(sc, SAFE_HM_STAT);
	if (stat == 0)			/* shared irq, not for us */
		return;

	WRITE_REG(sc, SAFE_HI_CLR, stat);	/* IACK */

	if ((stat & SAFE_INT_PE_DDONE)) {
		/*
		 * Descriptor(s) done; scan the ring and
		 * process completed operations.
		 */
		mtx_lock(&sc->sc_ringmtx);
		while (sc->sc_back != sc->sc_front) {
			struct safe_ringentry *re = sc->sc_back;
#ifdef SAFE_DEBUG
			if (safe_debug) {
				safe_dump_ringstate(sc, __func__);
				safe_dump_request(sc, __func__, re);
			}
#endif
			/*
			 * safe_process marks ring entries that were allocated
			 * but not used with a csr of zero.  This ensures the
			 * ring front pointer never needs to be set backwards
			 * in the event that an entry is allocated but not used
			 * because of a setup error.
			 */
			if (re->re_desc.d_csr != 0) {
				if (!SAFE_PE_CSR_IS_DONE(re->re_desc.d_csr))
					break;
				if (!SAFE_PE_LEN_IS_DONE(re->re_desc.d_len))
					break;
				sc->sc_nqchip--;
				safe_callback(sc, re);
			}
			if (++(sc->sc_back) == sc->sc_ringtop)
				sc->sc_back = sc->sc_ring;
		}
		mtx_unlock(&sc->sc_ringmtx);
	}

	/*
	 * Check to see if we got any DMA Error
	 */
	if (stat & SAFE_INT_PE_ERROR) {
		DPRINTF(("dmaerr dmastat %08x\n",
		    READ_REG(sc, SAFE_PE_DMASTAT)));
		safestats.st_dmaerr++;
		safe_totalreset(sc);
#if 0
		safe_feed(sc);
#endif
	}

	if (sc->sc_needwakeup) {		/* XXX check high watermark */
		int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
		DPRINTF(("%s: wakeup crypto %x\n", __func__,
		    sc->sc_needwakeup));
		sc->sc_needwakeup &= ~wakeup;
		crypto_unblock(sc->sc_cid, wakeup);
	}
}

/*
 * safe_feed() - post a request to chip
 */
static void
safe_feed(struct safe_softc *sc, struct safe_ringentry *re)
{
	bus_dmamap_sync(sc->sc_srcdmat, re->re_src_map, BUS_DMASYNC_PREWRITE);
	if (re->re_dst_map != NULL)
		bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map,
		    BUS_DMASYNC_PREREAD);
	/* XXX have no smaller granularity */
	safe_dma_sync(&sc->sc_ringalloc,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	safe_dma_sync(&sc->sc_spalloc, BUS_DMASYNC_PREWRITE);
	safe_dma_sync(&sc->sc_dpalloc, BUS_DMASYNC_PREWRITE);

#ifdef SAFE_DEBUG
	if (safe_debug) {
		safe_dump_ringstate(sc, __func__);
		safe_dump_request(sc, __func__, re);
	}
#endif
	sc->sc_nqchip++;
	if (sc->sc_nqchip > safestats.st_maxqchip)
		safestats.st_maxqchip = sc->sc_nqchip;
	/* poke h/w to check descriptor ring, any value can be written */
	WRITE_REG(sc, SAFE_HI_RD_DESCR, 0);
}

#define	N(a)	(sizeof(a) / sizeof (a[0]))
static void
safe_setup_enckey(struct safe_session *ses, const void *key)
{
	int i;

	bcopy(key, ses->ses_key, ses->ses_klen);

	/* PE is little-endian, ensure proper byte order */
	for (i = 0; i < N(ses->ses_key); i++)
		ses->ses_key[i] = htole32(ses->ses_key[i]);
}

static void
safe_setup_mackey(struct safe_session *ses, int algo, const uint8_t *key,
    int klen)
{
	MD5_CTX md5ctx;
	SHA1_CTX sha1ctx;
	int i;

	if (algo == CRYPTO_MD5_HMAC) {
		hmac_init_ipad(&auth_hash_hmac_md5, key, klen, &md5ctx);
		bcopy(md5ctx.state, ses->ses_hminner, sizeof(md5ctx.state));

		hmac_init_opad(&auth_hash_hmac_md5, key, klen, &md5ctx);
		bcopy(md5ctx.state, ses->ses_hmouter, sizeof(md5ctx.state));

		explicit_bzero(&md5ctx, sizeof(md5ctx));
	} else {
		hmac_init_ipad(&auth_hash_hmac_sha1, key, klen, &sha1ctx);
		bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32));

		hmac_init_opad(&auth_hash_hmac_sha1, key, klen, &sha1ctx);
		bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32));

		explicit_bzero(&sha1ctx, sizeof(sha1ctx));
	}

	/* PE is little-endian, ensure proper byte order */
	for (i = 0; i < N(ses->ses_hminner); i++) {
		ses->ses_hminner[i] = htole32(ses->ses_hminner[i]);
		ses->ses_hmouter[i] = htole32(ses->ses_hmouter[i]);
	}
}
#undef N
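
/*
 * Capability predicates used by safe_probesession below; these
 * mirror the SAFE_DEVINFO_* feature bits read (and announced) at
 * attach time so we only claim algorithms the part implements.
 */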
static bool
safe_auth_supported(struct safe_softc *sc,
    const struct crypto_session_params *csp)
{

	switch (csp->csp_auth_alg) {
	case CRYPTO_MD5_HMAC:
		if ((sc->sc_devinfo & SAFE_DEVINFO_MD5) == 0)
			return (false);
		break;
	case CRYPTO_SHA1_HMAC:
		if ((sc->sc_devinfo & SAFE_DEVINFO_SHA1) == 0)
			return (false);
		break;
	default:
		return (false);
	}
	return (true);
}

static bool
safe_cipher_supported(struct safe_softc *sc,
    const struct crypto_session_params *csp)
{

	switch (csp->csp_cipher_alg) {
	case CRYPTO_DES_CBC:
	case CRYPTO_3DES_CBC:
		if ((sc->sc_devinfo & SAFE_DEVINFO_DES) == 0)
			return (false);
		if (csp->csp_ivlen != 8)
			return (false);
		if (csp->csp_cipher_alg == CRYPTO_DES_CBC) {
			if (csp->csp_cipher_klen != 8)
				return (false);
		} else {
			if (csp->csp_cipher_klen != 24)
				return (false);
		}
		break;
	case CRYPTO_AES_CBC:
		if ((sc->sc_devinfo & SAFE_DEVINFO_AES) == 0)
			return (false);
		if (csp->csp_ivlen != 16)
			return (false);
		if (csp->csp_cipher_klen != 16 &&
		    csp->csp_cipher_klen != 24 &&
		    csp->csp_cipher_klen != 32)
			return (false);
		break;
	}
	return (true);
}

static int
safe_probesession(device_t dev, const struct crypto_session_params *csp)
{
	struct safe_softc *sc = device_get_softc(dev);

	if (csp->csp_flags != 0)
		return (EINVAL);
	switch (csp->csp_mode) {
	case CSP_MODE_DIGEST:
		if (!safe_auth_supported(sc, csp))
			return (EINVAL);
		break;
	case CSP_MODE_CIPHER:
		if (!safe_cipher_supported(sc, csp))
			return (EINVAL);
		break;
	case CSP_MODE_ETA:
		if (!safe_auth_supported(sc, csp) ||
		    !safe_cipher_supported(sc, csp))
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	return (CRYPTODEV_PROBE_HARDWARE);
}

/*
 * Allocate a new 'session'.
 */
static int
safe_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
	struct safe_session *ses;

	ses = crypto_get_driver_session(cses);
	if (csp->csp_cipher_alg != 0) {
		ses->ses_klen = csp->csp_cipher_klen;
		if (csp->csp_cipher_key != NULL)
			safe_setup_enckey(ses, csp->csp_cipher_key);
	}

	if (csp->csp_auth_alg != 0) {
		ses->ses_mlen = csp->csp_auth_mlen;
		if (ses->ses_mlen == 0) {
			if (csp->csp_auth_alg == CRYPTO_MD5_HMAC)
				ses->ses_mlen = MD5_HASH_LEN;
			else
				ses->ses_mlen = SHA1_HASH_LEN;
		}

		if (csp->csp_auth_key != NULL) {
			safe_setup_mackey(ses, csp->csp_auth_alg,
			    csp->csp_auth_key, csp->csp_auth_klen);
		}
	}

	return (0);
}

static bus_size_t
safe_crp_length(struct cryptop *crp)
{

	switch (crp->crp_buf_type) {
	case CRYPTO_BUF_MBUF:
		return (crp->crp_mbuf->m_pkthdr.len);
	case CRYPTO_BUF_UIO:
		return (crp->crp_uio->uio_resid);
	case CRYPTO_BUF_CONTIG:
		return (crp->crp_ilen);
	default:
		panic("bad crp buffer type");
	}
}

static void
safe_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, int error)
{
	struct safe_operand *op = arg;

	DPRINTF(("%s: nsegs %d error %d\n", __func__,
	    nsegs, error));
	if (error != 0)
		return;
	op->nsegs = nsegs;
	bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
}
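
/*
 * Process a request: claim a ring entry, build the SA command words
 * from the session parameters, DMA-map the operand(s), and hand the
 * descriptor to the packet engine via safe_feed().  May return
 * ERESTART to have the crypto framework requeue the request when
 * the ring is full.
 */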
static int
safe_process(device_t dev, struct cryptop *crp, int hint)
{
	struct safe_softc *sc = device_get_softc(dev);
	const struct crypto_session_params *csp;
	int err = 0, i, nicealign, uniform;
	int bypass, oplen;
	int16_t coffset;
	struct safe_session *ses;
	struct safe_ringentry *re;
	struct safe_sarec *sa;
	struct safe_pdesc *pd;
	u_int32_t cmd0, cmd1, staterec;

	mtx_lock(&sc->sc_ringmtx);
	if (sc->sc_front == sc->sc_back && sc->sc_nqchip != 0) {
		safestats.st_ringfull++;
		sc->sc_needwakeup |= CRYPTO_SYMQ;
		mtx_unlock(&sc->sc_ringmtx);
		return (ERESTART);
	}
	re = sc->sc_front;

	staterec = re->re_sa.sa_staterec;	/* save */
	/* NB: zero everything but the PE descriptor */
	bzero(&re->re_sa, sizeof(struct safe_ringentry) - sizeof(re->re_desc));
	re->re_sa.sa_staterec = staterec;	/* restore */

	re->re_crp = crp;

	sa = &re->re_sa;
	ses = crypto_get_driver_session(crp->crp_session);
	csp = crypto_get_params(crp->crp_session);

	cmd0 = SAFE_SA_CMD0_BASIC;	/* basic group operation */
	cmd1 = 0;
	switch (csp->csp_mode) {
	case CSP_MODE_DIGEST:
		cmd0 |= SAFE_SA_CMD0_OP_HASH;
		break;
	case CSP_MODE_CIPHER:
		cmd0 |= SAFE_SA_CMD0_OP_CRYPT;
		break;
	case CSP_MODE_ETA:
		cmd0 |= SAFE_SA_CMD0_OP_BOTH;
		break;
	}

	if (csp->csp_cipher_alg != 0) {
		if (crp->crp_cipher_key != NULL)
			safe_setup_enckey(ses, crp->crp_cipher_key);

		switch (csp->csp_cipher_alg) {
		case CRYPTO_DES_CBC:
			cmd0 |= SAFE_SA_CMD0_DES;
			cmd1 |= SAFE_SA_CMD1_CBC;
			break;
		case CRYPTO_3DES_CBC:
			cmd0 |= SAFE_SA_CMD0_3DES;
			cmd1 |= SAFE_SA_CMD1_CBC;
			break;
		case CRYPTO_AES_CBC:
			cmd0 |= SAFE_SA_CMD0_AES;
			cmd1 |= SAFE_SA_CMD1_CBC;
			if (ses->ses_klen * 8 == 128)
				cmd1 |= SAFE_SA_CMD1_AES128;
			else if (ses->ses_klen * 8 == 192)
				cmd1 |= SAFE_SA_CMD1_AES192;
			else
				cmd1 |= SAFE_SA_CMD1_AES256;
		}

		/*
		 * Setup encrypt/decrypt state.  When using basic ops
		 * we can't use an inline IV because hash/crypt offset
		 * must be from the end of the IV to the start of the
		 * crypt data and this leaves out the preceding header
		 * from the hash calculation.  Instead we place the IV
		 * in the state record and set the hash/crypt offset to
		 * copy both the header+IV.
		 */
		if (crp->crp_flags & CRYPTO_F_IV_GENERATE) {
			arc4rand(re->re_sastate.sa_saved_iv, csp->csp_ivlen, 0);
			crypto_copyback(crp, crp->crp_iv_start, csp->csp_ivlen,
			    re->re_sastate.sa_saved_iv);
		} else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE)
			memcpy(re->re_sastate.sa_saved_iv, crp->crp_iv,
			    csp->csp_ivlen);
		else
			crypto_copydata(crp, crp->crp_iv_start, csp->csp_ivlen,
			    re->re_sastate.sa_saved_iv);
		cmd0 |= SAFE_SA_CMD0_IVLD_STATE;

		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			cmd0 |= SAFE_SA_CMD0_OUTBOUND;

			/*
			 * XXX: I suspect we don't need this since we
			 * don't save the returned IV.
			 */
			cmd0 |= SAFE_SA_CMD0_SAVEIV;
		} else {
			cmd0 |= SAFE_SA_CMD0_INBOUND;
		}
		/*
		 * For basic encryption use the zero pad algorithm.
		 * This pads results to an 8-byte boundary and
		 * suppresses padding verification for inbound (i.e.
		 * decrypt) operations.
		 *
		 * NB: Not sure if the 8-byte pad boundary is a problem.
		 */
		cmd0 |= SAFE_SA_CMD0_PAD_ZERO;

		/* XXX assert key bufs have the same size */
		bcopy(ses->ses_key, sa->sa_key, sizeof(sa->sa_key));
	}

	if (csp->csp_auth_alg != 0) {
		if (crp->crp_auth_key != NULL) {
			safe_setup_mackey(ses, csp->csp_auth_alg,
			    crp->crp_auth_key, csp->csp_auth_klen);
		}

		switch (csp->csp_auth_alg) {
		case CRYPTO_MD5_HMAC:
			cmd0 |= SAFE_SA_CMD0_MD5;
			cmd1 |= SAFE_SA_CMD1_HMAC;	/* NB: enable HMAC */
			break;
		case CRYPTO_SHA1_HMAC:
			cmd0 |= SAFE_SA_CMD0_SHA1;
			cmd1 |= SAFE_SA_CMD1_HMAC;	/* NB: enable HMAC */
			break;
		}

		/*
		 * Digest data is loaded from the SA and the hash
		 * result is saved to the state block where we
		 * retrieve it for return to the caller.
		 */
		/* XXX assert digest bufs have the same size */
		bcopy(ses->ses_hminner, sa->sa_indigest,
		    sizeof(sa->sa_indigest));
		bcopy(ses->ses_hmouter, sa->sa_outdigest,
		    sizeof(sa->sa_outdigest));

		cmd0 |= SAFE_SA_CMD0_HSLD_SA | SAFE_SA_CMD0_SAVEHASH;
		re->re_flags |= SAFE_QFLAGS_COPYOUTICV;
	}
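
	/*
	 * The engine's view of the request (a sketch; offsets are all
	 * relative to the start of the buffer):
	 *
	 *   0 ...... bypass ...... bypass+AAD len ......... oplen
	 *   | skipped |  hashed only (AAD)  |  hashed+crypted |
	 *
	 * bypass is data passed through untouched, coffset is the
	 * hash-to-crypt offset (the AAD length, converted to 32-bit
	 * words below), and oplen is the absolute end of the data
	 * the engine processes.
	 */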

	if (csp->csp_mode == CSP_MODE_ETA) {
		/*
		 * The driver only supports ETA requests where there
		 * is no gap between the AAD and payload.
		 */
		if (crp->crp_aad_length != 0 &&
		    crp->crp_aad_start + crp->crp_aad_length !=
		    crp->crp_payload_start) {
			safestats.st_lenmismatch++;
			err = EINVAL;
			goto errout;
		}
		if (crp->crp_aad_length != 0)
			bypass = crp->crp_aad_start;
		else
			bypass = crp->crp_payload_start;
		coffset = crp->crp_aad_length;
		oplen = crp->crp_payload_start + crp->crp_payload_length;
#ifdef SAFE_DEBUG
		if (safe_debug) {
			printf("AAD: skip %d, len %d, digest %d\n",
			    crp->crp_aad_start, crp->crp_aad_length,
			    crp->crp_digest_start);
			printf("payload: skip %d, len %d, IV %d\n",
			    crp->crp_payload_start, crp->crp_payload_length,
			    crp->crp_iv_start);
			printf("bypass %d coffset %d oplen %d\n",
			    bypass, coffset, oplen);
		}
#endif
		if (coffset & 3) {	/* offset must be 32-bit aligned */
			DPRINTF(("%s: coffset %u misaligned\n",
			    __func__, coffset));
			safestats.st_coffmisaligned++;
			err = EINVAL;
			goto errout;
		}
		coffset >>= 2;
		if (coffset > 255) {	/* offset must be <256 dwords */
			DPRINTF(("%s: coffset %u too big\n",
			    __func__, coffset));
			safestats.st_cofftoobig++;
			err = EINVAL;
			goto errout;
		}
		/*
		 * Tell the hardware to copy the header to the output.
		 * The header is defined as the data from the end of
		 * the bypass to the start of data to be encrypted.
		 * Typically this is the inline IV.  Note that you need
		 * to do this even if src+dst are the same; it appears
		 * that w/o this bit the crypted data is written
		 * immediately after the bypass data.
		 */
		cmd1 |= SAFE_SA_CMD1_HDRCOPY;
		/*
		 * Disable IP header mutable bit handling.  This is
		 * needed to get correct HMAC calculations.
		 */
		cmd1 |= SAFE_SA_CMD1_MUTABLE;
	} else {
		bypass = crp->crp_payload_start;
		oplen = bypass + crp->crp_payload_length;
		coffset = 0;
	}
	/* XXX verify multiple of 4 when using s/g */
	if (bypass > 96) {		/* bypass offset must be <= 96 bytes */
		DPRINTF(("%s: bypass %u too big\n", __func__, bypass));
		safestats.st_bypasstoobig++;
		err = EINVAL;
		goto errout;
	}
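
	/*
	 * Map the source buffer.  safe_op_cb() snapshots the segment
	 * list into re_src; the alignment/uniformity checks that
	 * follow decide whether the destination can share that map
	 * or must be re-mapped (or, for mbufs, reallocated) below.
	 */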
	if (bus_dmamap_create(sc->sc_srcdmat, BUS_DMA_NOWAIT, &re->re_src_map)) {
		safestats.st_nomap++;
		err = ENOMEM;
		goto errout;
	}
	if (bus_dmamap_load_crp(sc->sc_srcdmat, re->re_src_map, crp, safe_op_cb,
	    &re->re_src, BUS_DMA_NOWAIT) != 0) {
		bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
		re->re_src_map = NULL;
		safestats.st_noload++;
		err = ENOMEM;
		goto errout;
	}
	re->re_src_mapsize = safe_crp_length(crp);
	nicealign = safe_dmamap_aligned(&re->re_src);
	uniform = safe_dmamap_uniform(&re->re_src);

	DPRINTF(("src nicealign %u uniform %u nsegs %u\n",
	    nicealign, uniform, re->re_src.nsegs));
	if (re->re_src.nsegs > 1) {
		re->re_desc.d_src = sc->sc_spalloc.dma_paddr +
		    ((caddr_t) sc->sc_spfree - (caddr_t) sc->sc_spring);
		for (i = 0; i < re->re_src_nsegs; i++) {
			/* NB: no need to check if there's space */
			pd = sc->sc_spfree;
			if (++(sc->sc_spfree) == sc->sc_springtop)
				sc->sc_spfree = sc->sc_spring;

			KASSERT((pd->pd_flags&3) == 0 ||
			    (pd->pd_flags&3) == SAFE_PD_DONE,
			    ("bogus source particle descriptor; flags %x",
			    pd->pd_flags));
			pd->pd_addr = re->re_src_segs[i].ds_addr;
			pd->pd_size = re->re_src_segs[i].ds_len;
			pd->pd_flags = SAFE_PD_READY;
		}
		cmd0 |= SAFE_SA_CMD0_IGATHER;
	} else {
		/*
		 * No need for gather, reference the operand directly.
		 */
		re->re_desc.d_src = re->re_src_segs[0].ds_addr;
	}

	if (csp->csp_mode == CSP_MODE_DIGEST) {
		/*
		 * Hash op; no destination needed.
		 */
	} else {
		if (nicealign && uniform == 1) {
			/*
			 * Source layout is suitable for direct
			 * sharing of the DMA map and segment list.
			 */
			re->re_dst = re->re_src;
		} else if (nicealign && uniform == 2) {
			/*
			 * The source is properly aligned but requires a
			 * different particle list to handle DMA of the
			 * result.  Create a new map and do the load to
			 * create the segment list.  The particle
			 * descriptor setup code below will handle the
			 * rest.
			 */
			if (bus_dmamap_create(sc->sc_dstdmat, BUS_DMA_NOWAIT,
			    &re->re_dst_map)) {
				safestats.st_nomap++;
				err = ENOMEM;
				goto errout;
			}
			if (bus_dmamap_load_crp(sc->sc_dstdmat, re->re_dst_map,
			    crp, safe_op_cb, &re->re_dst, BUS_DMA_NOWAIT) !=
			    0) {
				bus_dmamap_destroy(sc->sc_dstdmat,
				    re->re_dst_map);
				re->re_dst_map = NULL;
				safestats.st_noload++;
				err = ENOMEM;
				goto errout;
			}
		} else if (crp->crp_buf_type == CRYPTO_BUF_MBUF) {
			int totlen, len;
			struct mbuf *m, *top, **mp;

			/*
			 * DMA constraints require that we allocate a
			 * new mbuf chain for the destination.  We
			 * allocate an entire new set of mbufs of
			 * optimal/required size and then tell the
			 * hardware to copy any bits that are not
			 * created as a byproduct of the operation.
			 */
			if (!nicealign)
				safestats.st_unaligned++;
			if (!uniform)
				safestats.st_notuniform++;
			totlen = re->re_src_mapsize;
			if (crp->crp_mbuf->m_flags & M_PKTHDR) {
				len = MHLEN;
				MGETHDR(m, M_NOWAIT, MT_DATA);
				if (m && !m_dup_pkthdr(m, crp->crp_mbuf,
				    M_NOWAIT)) {
					m_free(m);
					m = NULL;
				}
			} else {
				len = MLEN;
				MGET(m, M_NOWAIT, MT_DATA);
			}
			if (m == NULL) {
				safestats.st_nombuf++;
				err = sc->sc_nqchip ? ERESTART : ENOMEM;
				goto errout;
			}
			if (totlen >= MINCLSIZE) {
				if (!(MCLGET(m, M_NOWAIT))) {
					m_free(m);
					safestats.st_nomcl++;
					err = sc->sc_nqchip ?
					    ERESTART : ENOMEM;
					goto errout;
				}
				len = MCLBYTES;
			}
			m->m_len = len;
			top = NULL;
			mp = &top;

			while (totlen > 0) {
				if (top) {
					MGET(m, M_NOWAIT, MT_DATA);
					if (m == NULL) {
						m_freem(top);
						safestats.st_nombuf++;
						err = sc->sc_nqchip ?
						    ERESTART : ENOMEM;
						goto errout;
					}
					len = MLEN;
				}
				if (top && totlen >= MINCLSIZE) {
					if (!(MCLGET(m, M_NOWAIT))) {
						*mp = m;
						m_freem(top);
						safestats.st_nomcl++;
						err = sc->sc_nqchip ?
						    ERESTART : ENOMEM;
						goto errout;
					}
					len = MCLBYTES;
				}
				m->m_len = len = min(totlen, len);
				totlen -= len;
				*mp = m;
				mp = &m->m_next;
			}
			re->re_dst_m = top;
			if (bus_dmamap_create(sc->sc_dstdmat,
			    BUS_DMA_NOWAIT, &re->re_dst_map) != 0) {
				safestats.st_nomap++;
				err = ENOMEM;
				goto errout;
			}
			if (bus_dmamap_load_mbuf_sg(sc->sc_dstdmat,
			    re->re_dst_map, top, re->re_dst_segs,
			    &re->re_dst_nsegs, 0) != 0) {
				bus_dmamap_destroy(sc->sc_dstdmat,
				    re->re_dst_map);
				re->re_dst_map = NULL;
				safestats.st_noload++;
				err = ENOMEM;
				goto errout;
			}
			re->re_dst_mapsize = re->re_src_mapsize;
			if (re->re_src.mapsize > oplen) {
				/*
				 * There's data following what the
				 * hardware will copy for us.  If this
				 * isn't just the ICV (that's going to
				 * be written on completion), copy it
				 * to the new mbufs
				 */
				if (!(csp->csp_mode == CSP_MODE_ETA &&
				    (re->re_src.mapsize-oplen) == ses->ses_mlen &&
				    crp->crp_digest_start == oplen))
					safe_mcopy(crp->crp_mbuf, re->re_dst_m,
					    oplen);
				else
					safestats.st_noicvcopy++;
			}
		} else {
			if (!nicealign) {
				safestats.st_iovmisaligned++;
				err = EINVAL;
				goto errout;
			} else {
				/*
				 * There's no way to handle the DMA
				 * requirements with this uio.  We
				 * could create a separate DMA area for
				 * the result and then copy it back,
				 * but for now we just bail and return
				 * an error.  Note that uio requests
				 * > SAFE_MAX_DSIZE are handled because
				 * the DMA map and segment list for the
				 * destination will result in a
				 * destination particle list that does
				 * the necessary scatter DMA.
				 */
				safestats.st_iovnotuniform++;
				err = EINVAL;
				goto errout;
			}
		}

		if (re->re_dst.nsegs > 1) {
			re->re_desc.d_dst = sc->sc_dpalloc.dma_paddr +
			    ((caddr_t) sc->sc_dpfree - (caddr_t) sc->sc_dpring);
			for (i = 0; i < re->re_dst_nsegs; i++) {
				pd = sc->sc_dpfree;
				KASSERT((pd->pd_flags&3) == 0 ||
				    (pd->pd_flags&3) == SAFE_PD_DONE,
				    ("bogus dest particle descriptor; flags %x",
				    pd->pd_flags));
				if (++(sc->sc_dpfree) == sc->sc_dpringtop)
					sc->sc_dpfree = sc->sc_dpring;
				pd->pd_addr = re->re_dst_segs[i].ds_addr;
				pd->pd_flags = SAFE_PD_READY;
			}
			cmd0 |= SAFE_SA_CMD0_OSCATTER;
		} else {
			/*
			 * No need for scatter, reference the operand directly.
			 */
			re->re_desc.d_dst = re->re_dst_segs[0].ds_addr;
		}
	}

	/*
	 * All done with setup; fill in the SA command words
	 * and the packet engine descriptor.  The operation
	 * is now ready for submission to the hardware.
	 */
	sa->sa_cmd0 = cmd0 | SAFE_SA_CMD0_IPCI | SAFE_SA_CMD0_OPCI;
	sa->sa_cmd1 = cmd1
	    | (coffset << SAFE_SA_CMD1_OFFSET_S)
	    | SAFE_SA_CMD1_SAREV1	/* Rev 1 SA data structure */
	    | SAFE_SA_CMD1_SRPCI
	    ;
	/*
	 * NB: the order of writes is important here.  In case the
	 * chip is scanning the ring because of an outstanding request
	 * it might nab this one too.  In that case we need to make
	 * sure the setup is complete before we write the length
	 * field of the descriptor as it signals the descriptor is
	 * ready for processing.
	 */
	re->re_desc.d_csr = SAFE_PE_CSR_READY | SAFE_PE_CSR_SAPCI;
	if (csp->csp_auth_alg != 0)
		re->re_desc.d_csr |= SAFE_PE_CSR_LOADSA | SAFE_PE_CSR_HASHFINAL;
	re->re_desc.d_len = oplen
	    | SAFE_PE_LEN_READY
	    | (bypass << SAFE_PE_LEN_BYPASS_S)
	    ;

	safestats.st_ipackets++;
	safestats.st_ibytes += oplen;

	if (++(sc->sc_front) == sc->sc_ringtop)
		sc->sc_front = sc->sc_ring;

	/* XXX honor batching */
	safe_feed(sc, re);
	mtx_unlock(&sc->sc_ringmtx);
	return (0);

errout:
	if (re->re_dst_m != NULL)
		m_freem(re->re_dst_m);

	if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) {
		bus_dmamap_unload(sc->sc_dstdmat, re->re_dst_map);
		bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map);
	}
	if (re->re_src_map != NULL) {
		bus_dmamap_unload(sc->sc_srcdmat, re->re_src_map);
		bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
	}
	mtx_unlock(&sc->sc_ringmtx);
	if (err != ERESTART) {
		crp->crp_etype = err;
		crypto_done(crp);
	} else {
		sc->sc_needwakeup |= CRYPTO_SYMQ;
	}
	return (err);
}
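
/*
 * Completion handling, invoked from safe_intr() for each retired
 * ring entry: sync and release the DMA maps, fix up the saved ICV
 * byte order if needed, and hand the finished request back to the
 * crypto framework via crypto_done().
 */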
static void
safe_callback(struct safe_softc *sc, struct safe_ringentry *re)
{
	const struct crypto_session_params *csp;
	struct cryptop *crp = (struct cryptop *)re->re_crp;
	struct safe_session *ses;
	uint8_t hash[HASH_MAX_LEN];

	ses = crypto_get_driver_session(crp->crp_session);
	csp = crypto_get_params(crp->crp_session);

	safestats.st_opackets++;
	safestats.st_obytes += re->re_dst.mapsize;

	safe_dma_sync(&sc->sc_ringalloc,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	if (re->re_desc.d_csr & SAFE_PE_CSR_STATUS) {
		device_printf(sc->sc_dev, "csr 0x%x cmd0 0x%x cmd1 0x%x\n",
		    re->re_desc.d_csr,
		    re->re_sa.sa_cmd0, re->re_sa.sa_cmd1);
		safestats.st_peoperr++;
		crp->crp_etype = EIO;		/* something more meaningful? */
	}

	/* XXX: Should crp_mbuf be updated to re->re_dst_m if it is non-NULL? */

	if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) {
		bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dstdmat, re->re_dst_map);
		bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map);
	}
	bus_dmamap_sync(sc->sc_srcdmat, re->re_src_map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_srcdmat, re->re_src_map);
	bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);

	if (re->re_flags & SAFE_QFLAGS_COPYOUTICV) {
		if (csp->csp_auth_alg == CRYPTO_SHA1_HMAC) {
			/*
			 * SHA-1 ICV's are byte-swapped; fix 'em up
			 * before copying them to their destination.
			 */
			re->re_sastate.sa_saved_indigest[0] =
			    bswap32(re->re_sastate.sa_saved_indigest[0]);
			re->re_sastate.sa_saved_indigest[1] =
			    bswap32(re->re_sastate.sa_saved_indigest[1]);
			re->re_sastate.sa_saved_indigest[2] =
			    bswap32(re->re_sastate.sa_saved_indigest[2]);
		}

		if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
			crypto_copydata(crp, crp->crp_digest_start,
			    ses->ses_mlen, hash);
			if (timingsafe_bcmp(re->re_sastate.sa_saved_indigest,
			    hash, ses->ses_mlen) != 0)
				crp->crp_etype = EBADMSG;
		} else
			crypto_copyback(crp, crp->crp_digest_start,
			    ses->ses_mlen, re->re_sastate.sa_saved_indigest);
	}
	crypto_done(crp);
}

/*
 * Copy all data past offset from srcm to dstm.
 */
static void
safe_mcopy(struct mbuf *srcm, struct mbuf *dstm, u_int offset)
{
	u_int j, dlen, slen;
	caddr_t dptr, sptr;

	/*
	 * Advance src and dst to offset.
	 */
	j = offset;
	while (j >= srcm->m_len) {
		j -= srcm->m_len;
		srcm = srcm->m_next;
		if (srcm == NULL)
			return;
	}
	sptr = mtod(srcm, caddr_t) + j;
	slen = srcm->m_len - j;

	j = offset;
	while (j >= dstm->m_len) {
		j -= dstm->m_len;
		dstm = dstm->m_next;
		if (dstm == NULL)
			return;
	}
	dptr = mtod(dstm, caddr_t) + j;
	dlen = dstm->m_len - j;

	/*
	 * Copy everything that remains.
	 */
	for (;;) {
		j = min(slen, dlen);
		bcopy(sptr, dptr, j);
		if (slen == j) {
			srcm = srcm->m_next;
			if (srcm == NULL)
				return;
			sptr = srcm->m_data;
			slen = srcm->m_len;
		} else
			sptr += j, slen -= j;
		if (dlen == j) {
			dstm = dstm->m_next;
			if (dstm == NULL)
				return;
			dptr = dstm->m_data;
			dlen = dstm->m_len;
		} else
			dptr += j, dlen -= j;
	}
}
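
/*
 * RNG support: the hardware RNG is polled from a callout (safe_rng)
 * and the output is fed to random_harvest_queue(), optionally after
 * passing through the rndtest module when the driver is built with
 * SAFE_RNDTEST.
 */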
#ifndef SAFE_NO_RNG
#define	SAFE_RNG_MAXWAIT	1000

static void
safe_rng_init(struct safe_softc *sc)
{
	u_int32_t w, v;
	int i;

	WRITE_REG(sc, SAFE_RNG_CTRL, 0);
	/* use default value according to the manual */
	WRITE_REG(sc, SAFE_RNG_CNFG, 0x834);	/* magic from SafeNet */
	WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);

	/*
	 * There is a bug in rev 1.0 of the 1140 that when the RNG
	 * is brought out of reset the ready status flag does not
	 * work until the RNG has finished its internal initialization.
	 *
	 * So in order to determine the device is through its
	 * initialization we must read the data register, using the
	 * status reg in the read in case it is initialized.  Then read
	 * the data register until it changes from the first read.
	 * Once it changes read the data register until it changes
	 * again.  At this time the RNG is considered initialized.
	 * This could take between 750ms - 1000ms in time.
	 */
	i = 0;
	w = READ_REG(sc, SAFE_RNG_OUT);
	do {
		v = READ_REG(sc, SAFE_RNG_OUT);
		if (v != w) {
			w = v;
			break;
		}
		DELAY(10);
	} while (++i < SAFE_RNG_MAXWAIT);

	/* Wait until data changes again */
	i = 0;
	do {
		v = READ_REG(sc, SAFE_RNG_OUT);
		if (v != w)
			break;
		DELAY(10);
	} while (++i < SAFE_RNG_MAXWAIT);
}

static __inline void
safe_rng_disable_short_cycle(struct safe_softc *sc)
{
	WRITE_REG(sc, SAFE_RNG_CTRL,
	    READ_REG(sc, SAFE_RNG_CTRL) &~ SAFE_RNG_CTRL_SHORTEN);
}

static __inline void
safe_rng_enable_short_cycle(struct safe_softc *sc)
{
	WRITE_REG(sc, SAFE_RNG_CTRL,
	    READ_REG(sc, SAFE_RNG_CTRL) | SAFE_RNG_CTRL_SHORTEN);
}

static __inline u_int32_t
safe_rng_read(struct safe_softc *sc)
{
	int i;

	i = 0;
	while (READ_REG(sc, SAFE_RNG_STAT) != 0 && ++i < SAFE_RNG_MAXWAIT)
		;
	return READ_REG(sc, SAFE_RNG_OUT);
}

static void
safe_rng(void *arg)
{
	struct safe_softc *sc = arg;
	u_int32_t buf[SAFE_RNG_MAXBUFSIZ];	/* NB: maybe move to softc */
	u_int maxwords;
	int i;

	safestats.st_rng++;
	/*
	 * Fetch the next block of data.
	 */
	maxwords = safe_rngbufsize;
	if (maxwords > SAFE_RNG_MAXBUFSIZ)
		maxwords = SAFE_RNG_MAXBUFSIZ;
retry:
	for (i = 0; i < maxwords; i++)
		buf[i] = safe_rng_read(sc);
	/*
	 * Check the comparator alarm count and reset the h/w if
	 * it exceeds our threshold.  This guards against the
	 * hardware oscillators resonating with external signals.
	 */
	if (READ_REG(sc, SAFE_RNG_ALM_CNT) > safe_rngmaxalarm) {
		u_int32_t freq_inc, w;

		DPRINTF(("%s: alarm count %u exceeds threshold %u\n", __func__,
		    READ_REG(sc, SAFE_RNG_ALM_CNT), safe_rngmaxalarm));
		safestats.st_rngalarm++;
		safe_rng_enable_short_cycle(sc);
		freq_inc = 18;
		for (i = 0; i < 64; i++) {
			w = READ_REG(sc, SAFE_RNG_CNFG);
			freq_inc = ((w + freq_inc) & 0x3fL);
			w = ((w & ~0x3fL) | freq_inc);
			WRITE_REG(sc, SAFE_RNG_CNFG, w);

			WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);

			(void) safe_rng_read(sc);
			DELAY(25);

			if (READ_REG(sc, SAFE_RNG_ALM_CNT) == 0) {
				safe_rng_disable_short_cycle(sc);
				goto retry;
			}
			freq_inc = 1;
		}
		safe_rng_disable_short_cycle(sc);
	} else
		WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);

	(*sc->sc_harvest)(sc->sc_rndtest, buf, maxwords*sizeof (u_int32_t));
	callout_reset(&sc->sc_rngto,
	    hz * (safe_rnginterval ? safe_rnginterval : 1), safe_rng, sc);
}
#endif /* SAFE_NO_RNG */
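
/*
 * Shared-memory DMA helpers: safe_dma_malloc() allocates a single
 * contiguous, 32-bit addressable segment and records its kernel
 * virtual and bus addresses in a safe_dma_alloc so the rings above
 * can be programmed into the chip and later released symmetrically
 * by safe_dma_free().
 */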
static void
safe_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr = (bus_addr_t*) arg;
	*paddr = segs->ds_addr;
}

static int
safe_dma_malloc(
	struct safe_softc *sc,
	bus_size_t size,
	struct safe_dma_alloc *dma,
	int mapflags
)
{
	int r;

	r = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
	    sizeof(u_int32_t), 0,	/* alignment, bounds */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsize */
	    BUS_DMA_ALLOCNOW,		/* flags */
	    NULL, NULL,			/* locking */
	    &dma->dma_tag);
	if (r != 0) {
		device_printf(sc->sc_dev, "safe_dma_malloc: "
		    "bus_dma_tag_create failed; error %u\n", r);
		goto fail_0;
	}

	r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
	    BUS_DMA_NOWAIT, &dma->dma_map);
	if (r != 0) {
		device_printf(sc->sc_dev, "safe_dma_malloc: "
		    "bus_dmamem_alloc failed; size %ju, error %u\n",
		    (uintmax_t)size, r);
		goto fail_1;
	}

	r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
	    size,
	    safe_dmamap_cb,
	    &dma->dma_paddr,
	    mapflags | BUS_DMA_NOWAIT);
	if (r != 0) {
		device_printf(sc->sc_dev, "safe_dma_malloc: "
		    "bus_dmamap_load failed; error %u\n", r);
		goto fail_2;
	}

	dma->dma_size = size;
	return (0);

	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
fail_2:
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
fail_1:
	bus_dma_tag_destroy(dma->dma_tag);
fail_0:
	dma->dma_tag = NULL;
	return (r);
}

static void
safe_dma_free(struct safe_softc *sc, struct safe_dma_alloc *dma)
{
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
	bus_dma_tag_destroy(dma->dma_tag);
}

/*
 * Resets the board.  Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 */
static void
safe_reset_board(struct safe_softc *sc)
{
	u_int32_t v;
	/*
	 * Reset the device.  The manual says no delay
	 * is needed between marking and clearing reset.
	 */
	v = READ_REG(sc, SAFE_PE_DMACFG) &~
	    (SAFE_PE_DMACFG_PERESET | SAFE_PE_DMACFG_PDRRESET |
	     SAFE_PE_DMACFG_SGRESET);
	WRITE_REG(sc, SAFE_PE_DMACFG, v
	    | SAFE_PE_DMACFG_PERESET
	    | SAFE_PE_DMACFG_PDRRESET
	    | SAFE_PE_DMACFG_SGRESET);
	WRITE_REG(sc, SAFE_PE_DMACFG, v);
}

/*
 * Initialize registers we need to touch only once.
 */
static void
safe_init_board(struct safe_softc *sc)
{
	u_int32_t v, dwords;

	v = READ_REG(sc, SAFE_PE_DMACFG);
	v &=~ SAFE_PE_DMACFG_PEMODE;
	v |= SAFE_PE_DMACFG_FSENA	/* failsafe enable */
	   | SAFE_PE_DMACFG_GPRPCI	/* gather ring on PCI */
	   | SAFE_PE_DMACFG_SPRPCI	/* scatter ring on PCI */
	   | SAFE_PE_DMACFG_ESDESC	/* endian-swap descriptors */
	   | SAFE_PE_DMACFG_ESSA	/* endian-swap SA's */
	   | SAFE_PE_DMACFG_ESPDESC	/* endian-swap part. desc's */
	   ;
	WRITE_REG(sc, SAFE_PE_DMACFG, v);
#if 0
	/* XXX select byte swap based on host byte order */
	WRITE_REG(sc, SAFE_ENDIAN, 0x1b);
#endif
	if (sc->sc_chiprev == SAFE_REV(1,0)) {
		/*
		 * Avoid large PCI DMA transfers.  Rev 1.0 has a bug where
		 * "target mode transfers" done while the chip is DMA'ing
		 * >1020 bytes cause the hardware to lockup.  To avoid this
		 * we reduce the max PCI transfer size and use small source
		 * particle descriptors (<= 256 bytes).
		 */
		WRITE_REG(sc, SAFE_DMA_CFG, 256);
		device_printf(sc->sc_dev,
		    "Reduce max DMA size to %u words for rev %u.%u WAR\n",
		    (READ_REG(sc, SAFE_DMA_CFG)>>2) & 0xff,
		    SAFE_REV_MAJ(sc->sc_chiprev),
		    SAFE_REV_MIN(sc->sc_chiprev));
	}

	/* NB: operands+results are overlaid */
	WRITE_REG(sc, SAFE_PE_PDRBASE, sc->sc_ringalloc.dma_paddr);
	WRITE_REG(sc, SAFE_PE_RDRBASE, sc->sc_ringalloc.dma_paddr);
	/*
	 * Configure ring entry size and number of items in the ring.
	 */
	KASSERT((sizeof(struct safe_ringentry) % sizeof(u_int32_t)) == 0,
	    ("PE ring entry not 32-bit aligned!"));
	dwords = sizeof(struct safe_ringentry) / sizeof(u_int32_t);
	WRITE_REG(sc, SAFE_PE_RINGCFG,
	    (dwords << SAFE_PE_RINGCFG_OFFSET_S) | SAFE_MAX_NQUEUE);
	WRITE_REG(sc, SAFE_PE_RINGPOLL, 0);	/* disable polling */

	WRITE_REG(sc, SAFE_PE_GRNGBASE, sc->sc_spalloc.dma_paddr);
	WRITE_REG(sc, SAFE_PE_SRNGBASE, sc->sc_dpalloc.dma_paddr);
	WRITE_REG(sc, SAFE_PE_PARTSIZE,
	    (SAFE_TOTAL_DPART<<16) | SAFE_TOTAL_SPART);
	/*
	 * NB: destination particles are fixed size.  We use
	 *     an mbuf cluster and require all results go to
	 *     clusters or smaller.
	 */
	WRITE_REG(sc, SAFE_PE_PARTCFG, SAFE_MAX_DSIZE);

	/* it's now safe to enable PE mode, do it */
	WRITE_REG(sc, SAFE_PE_DMACFG, v | SAFE_PE_DMACFG_PEMODE);

	/*
	 * Configure hardware to use level-triggered interrupts and
	 * to interrupt after each descriptor is processed.
	 */
	WRITE_REG(sc, SAFE_HI_CFG, SAFE_HI_CFG_LEVEL);
	WRITE_REG(sc, SAFE_HI_DESC_CNT, 1);
	WRITE_REG(sc, SAFE_HI_MASK, SAFE_INT_PE_DDONE | SAFE_INT_PE_ERROR);
}

/*
 * Init PCI registers
 */
static void
safe_init_pciregs(device_t dev)
{
}

/*
 * Clean up after a chip crash.
 * It is assumed that the caller is in splimp().
 */
static void
safe_cleanchip(struct safe_softc *sc)
{

	if (sc->sc_nqchip != 0) {
		struct safe_ringentry *re = sc->sc_back;

		while (re != sc->sc_front) {
			if (re->re_desc.d_csr != 0)
				safe_free_entry(sc, re);
			if (++re == sc->sc_ringtop)
				re = sc->sc_ring;
		}
		sc->sc_back = re;
		sc->sc_nqchip = 0;
	}
}

/*
 * free a safe_q
 * It is assumed that the caller is within splimp().
 */
static int
safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re)
{
	struct cryptop *crp;

	/*
	 * Free header MCR
	 */
	if (re->re_dst_m != NULL)
		m_freem(re->re_dst_m);

	crp = (struct cryptop *)re->re_crp;

	re->re_desc.d_csr = 0;

	crp->crp_etype = EFAULT;
	crypto_done(crp);
	return(0);
}

/*
 * Routine to reset the chip and clean up.
 * It is assumed that the caller is in splimp().
 */
static void
safe_totalreset(struct safe_softc *sc)
{
	safe_reset_board(sc);
	safe_init_board(sc);
	safe_cleanchip(sc);
}

/*
 * Is the operand suitably aligned for direct DMA?  Each
 * segment must be aligned on a 32-bit boundary and all
 * but the last segment must be a multiple of 4 bytes.
 */
static int
safe_dmamap_aligned(const struct safe_operand *op)
{
	int i;

	for (i = 0; i < op->nsegs; i++) {
		if (op->segs[i].ds_addr & 3)
			return (0);
		if (i != (op->nsegs - 1) && (op->segs[i].ds_len & 3))
			return (0);
	}
	return (1);
}

/*
 * Is the operand suitable for direct DMA as the destination
 * of an operation?  The hardware requires that each ``particle''
 * but the last in an operation result have the same size.  We
 * fix that size at SAFE_MAX_DSIZE bytes.  This routine returns
 * 0 if some segment is not a multiple of this size, 1 if all
 * segments are exactly this size, or 2 if segments are at worst
 * a multiple of this size.
 */
static int
safe_dmamap_uniform(const struct safe_operand *op)
{
	int result = 1;

	if (op->nsegs > 0) {
		int i;

		for (i = 0; i < op->nsegs-1; i++) {
			if (op->segs[i].ds_len % SAFE_MAX_DSIZE)
				return (0);
			if (op->segs[i].ds_len != SAFE_MAX_DSIZE)
				result = 2;
		}
	}
	return (result);
}

#ifdef SAFE_DEBUG
static void
safe_dump_dmastatus(struct safe_softc *sc, const char *tag)
{
	printf("%s: ENDIAN 0x%x SRC 0x%x DST 0x%x STAT 0x%x\n"
	    , tag
	    , READ_REG(sc, SAFE_DMA_ENDIAN)
	    , READ_REG(sc, SAFE_DMA_SRCADDR)
	    , READ_REG(sc, SAFE_DMA_DSTADDR)
	    , READ_REG(sc, SAFE_DMA_STAT)
	);
}

static void
safe_dump_intrstate(struct safe_softc *sc, const char *tag)
{
	printf("%s: HI_CFG 0x%x HI_MASK 0x%x HI_DESC_CNT 0x%x HU_STAT 0x%x HM_STAT 0x%x\n"
	    , tag
	    , READ_REG(sc, SAFE_HI_CFG)
	    , READ_REG(sc, SAFE_HI_MASK)
	    , READ_REG(sc, SAFE_HI_DESC_CNT)
	    , READ_REG(sc, SAFE_HU_STAT)
	    , READ_REG(sc, SAFE_HM_STAT)
	);
}

static void
safe_dump_ringstate(struct safe_softc *sc, const char *tag)
{
	u_int32_t estat = READ_REG(sc, SAFE_PE_ERNGSTAT);

	/* NB: assume caller has lock on ring */
	printf("%s: ERNGSTAT %x (next %u) back %lu front %lu\n",
	    tag,
	    estat, (estat >> SAFE_PE_ERNGSTAT_NEXT_S),
	    (unsigned long)(sc->sc_back - sc->sc_ring),
	    (unsigned long)(sc->sc_front - sc->sc_ring));
}

static void
safe_dump_request(struct safe_softc *sc, const char* tag, struct safe_ringentry *re)
{
	int ix, nsegs;

	ix = re - sc->sc_ring;
	printf("%s: %p (%u): csr %x src %x dst %x sa %x len %x\n"
	    , tag
	    , re, ix
	    , re->re_desc.d_csr
	    , re->re_desc.d_src
	    , re->re_desc.d_dst
	    , re->re_desc.d_sa
	    , re->re_desc.d_len
	);
	if (re->re_src.nsegs > 1) {
		ix = (re->re_desc.d_src - sc->sc_spalloc.dma_paddr) /
		    sizeof(struct safe_pdesc);
		for (nsegs = re->re_src.nsegs; nsegs; nsegs--) {
			printf(" spd[%u] %p: %p size %u flags %x"
			    , ix, &sc->sc_spring[ix]
			    , (caddr_t)(uintptr_t) sc->sc_spring[ix].pd_addr
			    , sc->sc_spring[ix].pd_size
			    , sc->sc_spring[ix].pd_flags
			);
			if (sc->sc_spring[ix].pd_size == 0)
				printf(" (zero!)");
			printf("\n");
			if (++ix == SAFE_TOTAL_SPART)
				ix = 0;
		}
	}
	if (re->re_dst.nsegs > 1) {
		ix = (re->re_desc.d_dst - sc->sc_dpalloc.dma_paddr) /
		    sizeof(struct safe_pdesc);
		for (nsegs = re->re_dst.nsegs; nsegs; nsegs--) {
			printf(" dpd[%u] %p: %p flags %x\n"
			    , ix, &sc->sc_dpring[ix]
			    , (caddr_t)(uintptr_t) sc->sc_dpring[ix].pd_addr
			    , sc->sc_dpring[ix].pd_flags
			);
			if (++ix == SAFE_TOTAL_DPART)
				ix = 0;
		}
	}
	printf("sa: cmd0 %08x cmd1 %08x staterec %x\n",
	    re->re_sa.sa_cmd0, re->re_sa.sa_cmd1, re->re_sa.sa_staterec);
	printf("sa: key %x %x %x %x %x %x %x %x\n"
	    , re->re_sa.sa_key[0]
	    , re->re_sa.sa_key[1]
	    , re->re_sa.sa_key[2]
	    , re->re_sa.sa_key[3]
	    , re->re_sa.sa_key[4]
	    , re->re_sa.sa_key[5]
	    , re->re_sa.sa_key[6]
	    , re->re_sa.sa_key[7]
	);
	printf("sa: indigest %x %x %x %x %x\n"
	    , re->re_sa.sa_indigest[0]
	    , re->re_sa.sa_indigest[1]
	    , re->re_sa.sa_indigest[2]
	    , re->re_sa.sa_indigest[3]
	    , re->re_sa.sa_indigest[4]
	);
	printf("sa: outdigest %x %x %x %x %x\n"
	    , re->re_sa.sa_outdigest[0]
	    , re->re_sa.sa_outdigest[1]
	    , re->re_sa.sa_outdigest[2]
	    , re->re_sa.sa_outdigest[3]
	    , re->re_sa.sa_outdigest[4]
	);
	printf("sr: iv %x %x %x %x\n"
	    , re->re_sastate.sa_saved_iv[0]
	    , re->re_sastate.sa_saved_iv[1]
	    , re->re_sastate.sa_saved_iv[2]
	    , re->re_sastate.sa_saved_iv[3]
	);
	printf("sr: hashbc %u indigest %x %x %x %x %x\n"
	    , re->re_sastate.sa_saved_hashbc
	    , re->re_sastate.sa_saved_indigest[0]
	    , re->re_sastate.sa_saved_indigest[1]
	    , re->re_sastate.sa_saved_indigest[2]
	    , re->re_sastate.sa_saved_indigest[3]
	    , re->re_sastate.sa_saved_indigest[4]
	);
}

static void
safe_dump_ring(struct safe_softc *sc, const char *tag)
{
	mtx_lock(&sc->sc_ringmtx);
	printf("\nSafeNet Ring State:\n");
	safe_dump_intrstate(sc, tag);
	safe_dump_dmastatus(sc, tag);
	safe_dump_ringstate(sc, tag);
	if (sc->sc_nqchip) {
		struct safe_ringentry *re = sc->sc_back;
		do {
			safe_dump_request(sc, tag, re);
			if (++re == sc->sc_ringtop)
				re = sc->sc_ring;
		} while (re != sc->sc_front);
	}
	mtx_unlock(&sc->sc_ringmtx);
}

static int
sysctl_hw_safe_dump(SYSCTL_HANDLER_ARGS)
{
	char dmode[64];
	int error;

	strncpy(dmode, "", sizeof(dmode) - 1);
	dmode[sizeof(dmode) - 1] = '\0';
	error = sysctl_handle_string(oidp, &dmode[0], sizeof(dmode), req);

	if (error == 0 && req->newptr != NULL) {
		struct safe_softc *sc = safec;

		if (!sc)
			return EINVAL;
		if (strncmp(dmode, "dma", 3) == 0)
			safe_dump_dmastatus(sc, "safe0");
		else if (strncmp(dmode, "int", 3) == 0)
			safe_dump_intrstate(sc, "safe0");
		else if (strncmp(dmode, "ring", 4) == 0)
			safe_dump_ring(sc, "safe0");
		else
			return EINVAL;
	}
	return error;
}
SYSCTL_PROC(_hw_safe, OID_AUTO, dump,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 0, 0,
    sysctl_hw_safe_dump, "A",
    "Dump driver state");
#endif /* SAFE_DEBUG */