1 /* $OpenBSD: hifn7751.c,v 1.120 2002/05/17 00:33:34 deraadt Exp $ */ 2 3 /*- 4 * SPDX-License-Identifier: BSD-3-Clause 5 * 6 * Invertex AEON / Hifn 7751 driver 7 * Copyright (c) 1999 Invertex Inc. All rights reserved. 8 * Copyright (c) 1999 Theo de Raadt 9 * Copyright (c) 2000-2001 Network Security Technologies, Inc. 10 * http://www.netsec.net 11 * Copyright (c) 2003 Hifn Inc. 12 * 13 * This driver is based on a previous driver by Invertex, for which they 14 * requested: Please send any comments, feedback, bug-fixes, or feature 15 * requests to software@invertex.com. 16 * 17 * Redistribution and use in source and binary forms, with or without 18 * modification, are permitted provided that the following conditions 19 * are met: 20 * 21 * 1. Redistributions of source code must retain the above copyright 22 * notice, this list of conditions and the following disclaimer. 23 * 2. Redistributions in binary form must reproduce the above copyright 24 * notice, this list of conditions and the following disclaimer in the 25 * documentation and/or other materials provided with the distribution. 26 * 3. The name of the author may not be used to endorse or promote products 27 * derived from this software without specific prior written permission. 28 * 29 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 30 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 31 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
32 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 33 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 34 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 38 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 39 * 40 * Effort sponsored in part by the Defense Advanced Research Projects 41 * Agency (DARPA) and Air Force Research Laboratory, Air Force 42 * Materiel Command, USAF, under agreement number F30602-01-2-0537. 43 */ 44 45 #include <sys/cdefs.h> 46 __FBSDID("$FreeBSD$"); 47 48 /* 49 * Driver for various Hifn encryption processors. 50 */ 51 #include "opt_hifn.h" 52 53 #include <sys/param.h> 54 #include <sys/systm.h> 55 #include <sys/proc.h> 56 #include <sys/errno.h> 57 #include <sys/malloc.h> 58 #include <sys/kernel.h> 59 #include <sys/module.h> 60 #include <sys/mbuf.h> 61 #include <sys/lock.h> 62 #include <sys/mutex.h> 63 #include <sys/sysctl.h> 64 65 #include <vm/vm.h> 66 #include <vm/pmap.h> 67 68 #include <machine/bus.h> 69 #include <machine/resource.h> 70 #include <sys/bus.h> 71 #include <sys/rman.h> 72 73 #include <opencrypto/cryptodev.h> 74 #include <sys/random.h> 75 #include <sys/kobj.h> 76 77 #include "cryptodev_if.h" 78 79 #include <dev/pci/pcivar.h> 80 #include <dev/pci/pcireg.h> 81 82 #ifdef HIFN_RNDTEST 83 #include <dev/rndtest/rndtest.h> 84 #endif 85 #include <dev/hifn/hifn7751reg.h> 86 #include <dev/hifn/hifn7751var.h> 87 88 #ifdef HIFN_VULCANDEV 89 #include <sys/conf.h> 90 #include <sys/uio.h> 91 92 static struct cdevsw vulcanpk_cdevsw; /* forward declaration */ 93 #endif 94 95 /* 96 * Prototypes and count for the pci_device structure 97 */ 98 static int hifn_probe(device_t); 99 static int hifn_attach(device_t); 100 static int 
hifn_detach(device_t);
static	int hifn_suspend(device_t);
static	int hifn_resume(device_t);
static	int hifn_shutdown(device_t);

/* opencrypto (OCF) entry points */
static	int hifn_newsession(device_t, u_int32_t *, struct cryptoini *);
static	int hifn_freesession(device_t, u_int64_t);
static	int hifn_process(device_t, struct cryptop *, int);

/*
 * Newbus glue: standard device methods plus the cryptodev
 * interface methods that hook this driver into opencrypto.
 */
static device_method_t hifn_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		hifn_probe),
	DEVMETHOD(device_attach,	hifn_attach),
	DEVMETHOD(device_detach,	hifn_detach),
	DEVMETHOD(device_suspend,	hifn_suspend),
	DEVMETHOD(device_resume,	hifn_resume),
	DEVMETHOD(device_shutdown,	hifn_shutdown),

	/* crypto device methods */
	DEVMETHOD(cryptodev_newsession,	hifn_newsession),
	DEVMETHOD(cryptodev_freesession,hifn_freesession),
	DEVMETHOD(cryptodev_process,	hifn_process),

	DEVMETHOD_END
};
static driver_t hifn_driver = {
	"hifn",
	hifn_methods,
	sizeof (struct hifn_softc)
};
static devclass_t hifn_devclass;

DRIVER_MODULE(hifn, pci, hifn_driver, hifn_devclass, 0, 0);
MODULE_DEPEND(hifn, crypto, 1, 1, 1);
#ifdef HIFN_RNDTEST
MODULE_DEPEND(hifn, rndtest, 1, 1, 1);
#endif

/* Forward declarations for driver-internal helpers. */
static	void hifn_reset_board(struct hifn_softc *, int);
static	void hifn_reset_puc(struct hifn_softc *);
static	void hifn_puc_wait(struct hifn_softc *);
static	int hifn_enable_crypto(struct hifn_softc *);
static	void hifn_set_retry(struct hifn_softc *sc);
static	void hifn_init_dma(struct hifn_softc *);
static	void hifn_init_pci_registers(struct hifn_softc *);
static	int hifn_sramsize(struct hifn_softc *);
static	int hifn_dramsize(struct hifn_softc *);
static	int hifn_ramtype(struct hifn_softc *);
static	void hifn_sessions(struct hifn_softc *);
static	void hifn_intr(void *);
static	u_int hifn_write_command(struct hifn_command *, u_int8_t *);
static	u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
static	void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *);
static	int hifn_crypto(struct hifn_softc *, struct hifn_command *, struct cryptop *, int);
static	int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
static	int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
static	int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *);
static	int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *);
static	int hifn_init_pubrng(struct hifn_softc *);
static	void hifn_rng(void *);
static	void hifn_tick(void *);
static	void hifn_abort(struct hifn_softc *);
static	void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *);

static	void hifn_write_reg_0(struct hifn_softc *, bus_size_t, u_int32_t);
static	void hifn_write_reg_1(struct hifn_softc *, bus_size_t, u_int32_t);

/*
 * Read a 32-bit register in BAR0.  The sc_bar0_lastreg cache is
 * invalidated after every read so that the paired hifn_write_reg_0()
 * cannot skip the next write.  NOTE(review): hifn_write_reg_0 is
 * defined elsewhere in this file; the write-elision assumption
 * should be confirmed there.
 */
static __inline u_int32_t
READ_REG_0(struct hifn_softc *sc, bus_size_t reg)
{
	u_int32_t v = bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg);
	sc->sc_bar0_lastreg = (bus_size_t) -1;
	return (v);
}
#define	WRITE_REG_0(sc, reg, val) hifn_write_reg_0(sc, reg, val)

/* Read a 32-bit register in BAR1; same lastreg invalidation as BAR0. */
static __inline u_int32_t
READ_REG_1(struct hifn_softc *sc, bus_size_t reg)
{
	u_int32_t v = bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg);
	sc->sc_bar1_lastreg = (bus_size_t) -1;
	return (v);
}
#define	WRITE_REG_1(sc, reg, val) hifn_write_reg_1(sc, reg, val)

static SYSCTL_NODE(_hw, OID_AUTO, hifn, CTLFLAG_RD, 0,
    "Hifn driver parameters");

#ifdef HIFN_DEBUG
static	int hifn_debug = 0;
SYSCTL_INT(_hw_hifn, OID_AUTO, debug, CTLFLAG_RW, &hifn_debug,
	    0, "control debugging msgs");
#endif

static	struct hifn_stats hifnstats;
SYSCTL_STRUCT(_hw_hifn, OID_AUTO, stats, CTLFLAG_RD, &hifnstats,
	    hifn_stats, "driver statistics");
/* Max ops accepted before an interrupt is forced; tunable at runtime. */
static	int hifn_maxbatch = 1;
SYSCTL_INT(_hw_hifn, OID_AUTO, maxbatch, CTLFLAG_RW,
&hifn_maxbatch, 199 0, "max ops to batch w/o interrupt"); 200 201 /* 202 * Probe for a supported device. The PCI vendor and device 203 * IDs are used to detect devices we know how to handle. 204 */ 205 static int 206 hifn_probe(device_t dev) 207 { 208 if (pci_get_vendor(dev) == PCI_VENDOR_INVERTEX && 209 pci_get_device(dev) == PCI_PRODUCT_INVERTEX_AEON) 210 return (BUS_PROBE_DEFAULT); 211 if (pci_get_vendor(dev) == PCI_VENDOR_HIFN && 212 (pci_get_device(dev) == PCI_PRODUCT_HIFN_7751 || 213 pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 || 214 pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 || 215 pci_get_device(dev) == PCI_PRODUCT_HIFN_7956 || 216 pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)) 217 return (BUS_PROBE_DEFAULT); 218 if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC && 219 pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751) 220 return (BUS_PROBE_DEFAULT); 221 return (ENXIO); 222 } 223 224 static void 225 hifn_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 226 { 227 bus_addr_t *paddr = (bus_addr_t*) arg; 228 *paddr = segs->ds_addr; 229 } 230 231 static const char* 232 hifn_partname(struct hifn_softc *sc) 233 { 234 /* XXX sprintf numbers when not decoded */ 235 switch (pci_get_vendor(sc->sc_dev)) { 236 case PCI_VENDOR_HIFN: 237 switch (pci_get_device(sc->sc_dev)) { 238 case PCI_PRODUCT_HIFN_6500: return "Hifn 6500"; 239 case PCI_PRODUCT_HIFN_7751: return "Hifn 7751"; 240 case PCI_PRODUCT_HIFN_7811: return "Hifn 7811"; 241 case PCI_PRODUCT_HIFN_7951: return "Hifn 7951"; 242 case PCI_PRODUCT_HIFN_7955: return "Hifn 7955"; 243 case PCI_PRODUCT_HIFN_7956: return "Hifn 7956"; 244 } 245 return "Hifn unknown-part"; 246 case PCI_VENDOR_INVERTEX: 247 switch (pci_get_device(sc->sc_dev)) { 248 case PCI_PRODUCT_INVERTEX_AEON: return "Invertex AEON"; 249 } 250 return "Invertex unknown-part"; 251 case PCI_VENDOR_NETSEC: 252 switch (pci_get_device(sc->sc_dev)) { 253 case PCI_PRODUCT_NETSEC_7751: return "NetSec 7751"; 254 } 255 return "NetSec unknown-part"; 256 } 
257 return "Unknown-vendor unknown-part"; 258 } 259 260 static void 261 default_harvest(struct rndtest_state *rsp, void *buf, u_int count) 262 { 263 /* MarkM: FIX!! Check that this does not swamp the harvester! */ 264 random_harvest_queue(buf, count, count*NBBY/2, RANDOM_PURE_HIFN); 265 } 266 267 static u_int 268 checkmaxmin(device_t dev, const char *what, u_int v, u_int min, u_int max) 269 { 270 if (v > max) { 271 device_printf(dev, "Warning, %s %u out of range, " 272 "using max %u\n", what, v, max); 273 v = max; 274 } else if (v < min) { 275 device_printf(dev, "Warning, %s %u out of range, " 276 "using min %u\n", what, v, min); 277 v = min; 278 } 279 return v; 280 } 281 282 /* 283 * Select PLL configuration for 795x parts. This is complicated in 284 * that we cannot determine the optimal parameters without user input. 285 * The reference clock is derived from an external clock through a 286 * multiplier. The external clock is either the host bus (i.e. PCI) 287 * or an external clock generator. When using the PCI bus we assume 288 * the clock is either 33 or 66 MHz; for an external source we cannot 289 * tell the speed. 290 * 291 * PLL configuration is done with a string: "pci" for PCI bus, or "ext" 292 * for an external source, followed by the frequency. We calculate 293 * the appropriate multiplier and PLL register contents accordingly. 294 * When no configuration is given we default to "pci66" since that 295 * always will allow the card to work. If a card is using the PCI 296 * bus clock and in a 33MHz slot then it will be operating at half 297 * speed until the correct information is provided. 298 * 299 * We use a default setting of "ext66" because according to Mike Ham 300 * of HiFn, almost every board in existence has an external crystal 301 * populated at 66Mhz. Using PCI can be a problem on modern motherboards, 302 * because PCI33 can have clocks from 0 to 33Mhz, and some have 303 * non-PCI-compliant spread-spectrum clocks, which can confuse the pll. 
 */
static void
hifn_getpllconfig(device_t dev, u_int *pll)
{
	const char *pllspec;
	u_int freq, mul, fl, fh;
	u_int32_t pllconfig;
	char *nxt;

	/* Tunable hint hint.hifn.N.pllconfig; default "ext66" (see above). */
	if (resource_string_value("hifn", device_get_unit(dev),
	    "pllconfig", &pllspec))
		pllspec = "ext66";
	fl = 33, fh = 66;
	pllconfig = 0;
	if (strncmp(pllspec, "ext", 3) == 0) {
		pllspec += 3;
		pllconfig |= HIFN_PLL_REF_SEL;
		switch (pci_get_device(dev)) {
		case PCI_PRODUCT_HIFN_7955:
		case PCI_PRODUCT_HIFN_7956:
			fl = 20, fh = 100;
			break;
#ifdef notyet
		case PCI_PRODUCT_HIFN_7954:
			fl = 20, fh = 66;
			break;
#endif
		}
	} else if (strncmp(pllspec, "pci", 3) == 0)
		pllspec += 3;
	freq = strtoul(pllspec, &nxt, 10);
	if (nxt == pllspec)
		freq = 66;		/* no number given; assume 66 MHz */
	else
		freq = checkmaxmin(dev, "frequency", freq, fl, fh);
	/*
	 * Calculate multiplier.  We target a Fck of 266 MHz,
	 * allowing only even values, possibly rounded down.
	 * Multipliers > 8 must set the charge pump current.
	 */
	mul = checkmaxmin(dev, "PLL divisor", (266 / freq) &~ 1, 2, 12);
	pllconfig |= (mul / 2 - 1) << HIFN_PLL_ND_SHIFT;
	if (mul > 8)
		pllconfig |= HIFN_PLL_IS;
	*pll = pllconfig;
}

/*
 * Attach an interface that successfully probed.
 */
static int
hifn_attach(device_t dev)
{
	struct hifn_softc *sc = device_get_softc(dev);
	caddr_t kva;
	int rseg, rid;
	char rbase;			/* 'K' or 'M' suffix for the RAM size banner */
	u_int16_t ena, rev;

	sc->sc_dev = dev;

	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), "hifn driver", MTX_DEF);

	/* XXX handle power management */

	/*
	 * The 7951 and 795x have a random number generator and
	 * public key support; note this.
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
	    (pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7956))
		sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC;
	/*
	 * The 7811 has a random number generator and
	 * we also note it's identity 'cuz of some quirks.
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
	    pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)
		sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG;

	/*
	 * The 795x parts support AES.
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
	    (pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7956)) {
		sc->sc_flags |= HIFN_IS_7956 | HIFN_HAS_AES;
		/*
		 * Select PLL configuration.  This depends on the
		 * bus and board design and must be manually configured
		 * if the default setting is unacceptable.
		 */
		hifn_getpllconfig(dev, &sc->sc_pllconfig);
	}

	/*
	 * Setup PCI resources.  Note that we record the bus
	 * tag and handle for each register mapping, this is
	 * used by the READ_REG_0, WRITE_REG_0, READ_REG_1,
	 * and WRITE_REG_1 macros throughout the driver.
	 */
	pci_enable_busmaster(dev);

	rid = HIFN_BAR0;
	sc->sc_bar0res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
			 		       RF_ACTIVE);
	if (sc->sc_bar0res == NULL) {
		device_printf(dev, "cannot map bar%d register space\n", 0);
		goto fail_pci;
	}
	sc->sc_st0 = rman_get_bustag(sc->sc_bar0res);
	sc->sc_sh0 = rman_get_bushandle(sc->sc_bar0res);
	sc->sc_bar0_lastreg = (bus_size_t) -1;

	rid = HIFN_BAR1;
	sc->sc_bar1res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
					       RF_ACTIVE);
	if (sc->sc_bar1res == NULL) {
		device_printf(dev, "cannot map bar%d register space\n", 1);
		goto fail_io0;
	}
	sc->sc_st1 = rman_get_bustag(sc->sc_bar1res);
	sc->sc_sh1 = rman_get_bushandle(sc->sc_bar1res);
	sc->sc_bar1_lastreg = (bus_size_t) -1;

	hifn_set_retry(sc);

	/*
	 * Setup the area where the Hifn DMA's descriptors
	 * and associated data structures.
	 */
	if (bus_dma_tag_create(bus_get_dma_tag(dev),	/* PCI parent */
			       1, 0,			/* alignment,boundary */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       HIFN_MAX_DMALEN,		/* maxsize */
			       MAX_SCATTER,		/* nsegments */
			       HIFN_MAX_SEGLEN,		/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       NULL,			/* lockfunc */
			       NULL,			/* lockarg */
			       &sc->sc_dmat)) {
		device_printf(dev, "cannot allocate DMA tag\n");
		goto fail_io1;
	}
	if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
		device_printf(dev, "cannot create dma map\n");
		bus_dma_tag_destroy(sc->sc_dmat);
		goto fail_io1;
	}
	if (bus_dmamem_alloc(sc->sc_dmat, (void**) &kva, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
		device_printf(dev, "cannot alloc dma buffer\n");
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
		bus_dma_tag_destroy(sc->sc_dmat);
		goto fail_io1;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, kva,
			    sizeof (*sc->sc_dma),
			    hifn_dmamap_cb, &sc->sc_dma_physaddr,
			    BUS_DMA_NOWAIT)) {
		device_printf(dev, "cannot load dma map\n");
		bus_dmamem_free(sc->sc_dmat, kva, sc->sc_dmamap);
		bus_dma_tag_destroy(sc->sc_dmat);
		goto fail_io1;
	}
	sc->sc_dma = (struct hifn_dma *)kva;
	bzero(sc->sc_dma, sizeof(*sc->sc_dma));

	KASSERT(sc->sc_st0 != 0, ("hifn_attach: null bar0 tag!"));
	KASSERT(sc->sc_sh0 != 0, ("hifn_attach: null bar0 handle!"));
	KASSERT(sc->sc_st1 != 0, ("hifn_attach: null bar1 tag!"));
	KASSERT(sc->sc_sh1 != 0, ("hifn_attach: null bar1 handle!"));

	/*
	 * Reset the board and do the ``secret handshake''
	 * to enable the crypto support.  Then complete the
	 * initialization procedure by setting up the interrupt
	 * and hooking in to the system crypto support so we'll
	 * get used for system services like the crypto device,
	 * IPsec, RNG device, etc.
	 */
	hifn_reset_board(sc, 0);

	if (hifn_enable_crypto(sc) != 0) {
		device_printf(dev, "crypto enabling failed\n");
		goto fail_mem;
	}
	hifn_reset_puc(sc);

	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);

	/* XXX can't dynamically determine ram type for 795x; force dram */
	if (sc->sc_flags & HIFN_IS_7956)
		sc->sc_drammodel = 1;
	else if (hifn_ramtype(sc))
		goto fail_mem;

	if (sc->sc_drammodel == 0)
		hifn_sramsize(sc);
	else
		hifn_dramsize(sc);

	/*
	 * Workaround for NetSec 7751 rev A: half ram size because two
	 * of the address lines were left floating
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
	    pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751 &&
	    pci_get_revid(dev) == 0x61)	/*XXX???*/
		sc->sc_ramsize >>= 1;

	/*
	 * Arrange the interrupt line.
	 */
	rid = 0;
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
					    RF_SHAREABLE|RF_ACTIVE);
	if (sc->sc_irq == NULL) {
		device_printf(dev, "could not map interrupt\n");
		goto fail_mem;
	}
	/*
	 * NB: Network code assumes we are blocked with splimp()
	 *     so make sure the IRQ is marked appropriately.
	 */
	if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
			   NULL, hifn_intr, sc, &sc->sc_intrhand)) {
		device_printf(dev, "could not setup interrupt\n");
		goto fail_intr2;
	}

	hifn_sessions(sc);

	/*
	 * NB: Keep only the low 16 bits; this masks the chip id
	 *     from the 7951.
	 */
	rev = READ_REG_1(sc, HIFN_1_REVID) & 0xffff;

	/* Report RAM size in KB or MB, whichever reads better. */
	rseg = sc->sc_ramsize / 1024;
	rbase = 'K';
	if (sc->sc_ramsize >= (1024 * 1024)) {
		rbase = 'M';
		rseg /= 1024;
	}
	device_printf(sc->sc_dev, "%s, rev %u, %d%cB %cram",
		hifn_partname(sc), rev,
		rseg, rbase, sc->sc_drammodel ? 'd' : 's');
	if (sc->sc_flags & HIFN_IS_7956)
		printf(", pll=0x%x<%s clk, %ux mult>",
			sc->sc_pllconfig,
			sc->sc_pllconfig & HIFN_PLL_REF_SEL ? "ext" : "pci",
			2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11));
	printf("\n");

	sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0) {
		device_printf(dev, "could not get crypto driver id\n");
		goto fail_intr;
	}

	/* Read the chip-enable level to know which algorithms to register. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG,
	    READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
	ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	switch (ena) {
	case HIFN_PUSTAT_ENA_2:
		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0);
		if (sc->sc_flags & HIFN_HAS_AES)
			crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
		/*FALLTHROUGH*/
	case HIFN_PUSTAT_ENA_1:
		crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
		break;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
		hifn_init_pubrng(sc);

	callout_init(&sc->sc_tickto, 1);
	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);

	return (0);

	/* Error unwind: each label releases what was acquired above it. */
fail_intr:
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
fail_intr2:
	/* XXX don't store rid */
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
fail_mem:
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
	bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap);
	bus_dma_tag_destroy(sc->sc_dmat);

	/* Turn off DMA polling */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
fail_io1:
	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res);
fail_io0:
	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res);
fail_pci:
mtx_destroy(&sc->sc_mtx); 619 return (ENXIO); 620 } 621 622 /* 623 * Detach an interface that successfully probed. 624 */ 625 static int 626 hifn_detach(device_t dev) 627 { 628 struct hifn_softc *sc = device_get_softc(dev); 629 630 KASSERT(sc != NULL, ("hifn_detach: null software carrier!")); 631 632 /* disable interrupts */ 633 WRITE_REG_1(sc, HIFN_1_DMA_IER, 0); 634 635 /*XXX other resources */ 636 callout_stop(&sc->sc_tickto); 637 callout_stop(&sc->sc_rngto); 638 #ifdef HIFN_RNDTEST 639 if (sc->sc_rndtest) 640 rndtest_detach(sc->sc_rndtest); 641 #endif 642 643 /* Turn off DMA polling */ 644 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | 645 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); 646 647 crypto_unregister_all(sc->sc_cid); 648 649 bus_generic_detach(dev); /*XXX should be no children, right? */ 650 651 bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand); 652 /* XXX don't store rid */ 653 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); 654 655 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap); 656 bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap); 657 bus_dma_tag_destroy(sc->sc_dmat); 658 659 bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res); 660 bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res); 661 662 mtx_destroy(&sc->sc_mtx); 663 664 return (0); 665 } 666 667 /* 668 * Stop all chip I/O so that the kernel's probe routines don't 669 * get confused by errant DMAs when rebooting. 670 */ 671 static int 672 hifn_shutdown(device_t dev) 673 { 674 #ifdef notyet 675 hifn_stop(device_get_softc(dev)); 676 #endif 677 return (0); 678 } 679 680 /* 681 * Device suspend routine. Stop the interface and save some PCI 682 * settings in case the BIOS doesn't restore them properly on 683 * resume. 
 */
static int
hifn_suspend(device_t dev)
{
	struct hifn_softc *sc = device_get_softc(dev);
#ifdef notyet
	hifn_stop(sc);
#endif
	sc->sc_suspended = 1;

	return (0);
}

/*
 * Device resume routine.  Restore some PCI settings in case the BIOS
 * doesn't, re-enable busmastering, and restart the interface if
 * appropriate.
 */
static int
hifn_resume(device_t dev)
{
	struct hifn_softc *sc = device_get_softc(dev);
#ifdef notyet
	/* reinitialize interface if necessary */
	/* NOTE(review): ifp/rl_init look copied from another driver; dead
	 * code under "notyet". */
	if (ifp->if_flags & IFF_UP)
		rl_init(sc);
#endif
	sc->sc_suspended = 0;

	return (0);
}

/*
 * Initialize the public key engine and/or RNG, as advertised by
 * sc_flags.  Returns 0 on success, 1 if the public key unit fails
 * to come out of reset.
 */
static int
hifn_init_pubrng(struct hifn_softc *sc)
{
	u_int32_t r;
	int i;

#ifdef HIFN_RNDTEST
	sc->sc_rndtest = rndtest_attach(sc->sc_dev);
	if (sc->sc_rndtest)
		sc->sc_harvest = rndtest_harvest;
	else
		sc->sc_harvest = default_harvest;
#else
	sc->sc_harvest = default_harvest;
#endif
	if ((sc->sc_flags & HIFN_IS_7811) == 0) {
		/* Reset 7951 public key/rng engine */
		WRITE_REG_1(sc, HIFN_1_PUB_RESET,
		    READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);

		/* Poll up to ~100ms for the reset bit to self-clear. */
		for (i = 0; i < 100; i++) {
			DELAY(1000);
			if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
			    HIFN_PUBRST_RESET) == 0)
				break;
		}

		if (i == 100) {
			device_printf(sc->sc_dev, "public key init failed\n");
			return (1);
		}
	}

	/* Enable the rng, if available */
	if (sc->sc_flags & HIFN_HAS_RNG) {
		if (sc->sc_flags & HIFN_IS_7811) {
			/* Disable, reconfigure, then re-enable the 7811 RNG. */
			r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
			if (r & HIFN_7811_RNGENA_ENA) {
				r &= ~HIFN_7811_RNGENA_ENA;
				WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
			}
			WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
			    HIFN_7811_RNGCFG_DEFL);
			r |= HIFN_7811_RNGENA_ENA;
			WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
		} else
			WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
			    READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
			    HIFN_RNGCFG_ENA);

		sc->sc_rngfirst = 1;
		/* Harvest at ~100Hz (or every tick if hz < 100). */
		if (hz >= 100)
			sc->sc_rnghz = hz / 100;
		else
			sc->sc_rnghz = 1;
		callout_init(&sc->sc_rngto, 1);
		callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
	}

	/* Enable public key engine, if available */
	if (sc->sc_flags & HIFN_HAS_PUBLIC) {
		WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
		sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
#ifdef HIFN_VULCANDEV
		sc->sc_pkdev = make_dev(&vulcanpk_cdevsw, 0,
					UID_ROOT, GID_WHEEL, 0666,
					"vulcanpk");
		sc->sc_pkdev->si_drv1 = sc;
#endif
	}

	return (0);
}

/*
 * Periodic callout: pull data from the hardware RNG and pass it to
 * the harvest hook, then reschedule.  The very first read after
 * enabling the RNG is always discarded.
 */
static void
hifn_rng(void *vsc)
{
#define	RANDOM_BITS(n)	(n)*sizeof (u_int32_t), (n)*sizeof (u_int32_t)*NBBY, 0
	struct hifn_softc *sc = vsc;
	u_int32_t sts, num[2];
	int i;

	if (sc->sc_flags & HIFN_IS_7811) {
		/* ONLY VALID ON 7811!!!! */
		for (i = 0; i < 5; i++) {
			sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
			if (sts & HIFN_7811_RNGSTS_UFL) {
				device_printf(sc->sc_dev,
					      "RNG underflow: disabling\n");
				/* NB: no reschedule -- RNG stays disabled. */
				return;
			}
			if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
				break;

			/*
			 * There are at least two words in the RNG FIFO
			 * at this point.
			 */
			num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			/* NB: discard first data read */
			if (sc->sc_rngfirst)
				sc->sc_rngfirst = 0;
			else
				(*sc->sc_harvest)(sc->sc_rndtest,
					num, sizeof (num));
		}
	} else {
		num[0] = READ_REG_1(sc, HIFN_1_RNG_DATA);

		/* NB: discard first data read */
		if (sc->sc_rngfirst)
			sc->sc_rngfirst = 0;
		else
			(*sc->sc_harvest)(sc->sc_rndtest,
				num, sizeof (num[0]));
	}

	callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
#undef RANDOM_BITS
}

/*
 * Spin (up to 5ms) until the processing unit's reset bit clears;
 * complain on timeout.  The 7956 uses a different control register.
 */
static void
hifn_puc_wait(struct hifn_softc *sc)
{
	int i;
	int reg = HIFN_0_PUCTRL;

	if (sc->sc_flags & HIFN_IS_7956) {
		reg = HIFN_0_PUCTRL2;
	}

	for (i = 5000; i > 0; i--) {
		DELAY(1);
		if (!(READ_REG_0(sc, reg) & HIFN_PUCTRL_RESET))
			break;
	}
	if (!i)
		device_printf(sc->sc_dev, "proc unit did not reset\n");
}

/*
 * Reset the processing unit.
 */
static void
hifn_reset_puc(struct hifn_softc *sc)
{
	/* Reset processing unit */
	int reg = HIFN_0_PUCTRL;

	if (sc->sc_flags & HIFN_IS_7956) {
		reg = HIFN_0_PUCTRL2;
	}
	WRITE_REG_0(sc, reg, HIFN_PUCTRL_DMAENA);

	hifn_puc_wait(sc);
}

/*
 * Set the Retry and TRDY registers; note that we set them to
 * zero because the 7811 locks up when forced to retry (section
 * 3.6 of "Specification Update SU-0014-04".  Not clear if we
 * should do this for all Hifn parts, but it doesn't seem to hurt.
 */
static void
hifn_set_retry(struct hifn_softc *sc)
{
	/* NB: RETRY only responds to 8-bit reads/writes */
	pci_write_config(sc->sc_dev, HIFN_RETRY_TIMEOUT, 0, 1);
	pci_write_config(sc->sc_dev, HIFN_TRDY_TIMEOUT, 0, 1);
}

/*
 * Resets the board.  Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
892 */ 893 static void 894 hifn_reset_board(struct hifn_softc *sc, int full) 895 { 896 u_int32_t reg; 897 898 /* 899 * Set polling in the DMA configuration register to zero. 0x7 avoids 900 * resetting the board and zeros out the other fields. 901 */ 902 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | 903 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); 904 905 /* 906 * Now that polling has been disabled, we have to wait 1 ms 907 * before resetting the board. 908 */ 909 DELAY(1000); 910 911 /* Reset the DMA unit */ 912 if (full) { 913 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE); 914 DELAY(1000); 915 } else { 916 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, 917 HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET); 918 hifn_reset_puc(sc); 919 } 920 921 KASSERT(sc->sc_dma != NULL, ("hifn_reset_board: null DMA tag!")); 922 bzero(sc->sc_dma, sizeof(*sc->sc_dma)); 923 924 /* Bring dma unit out of reset */ 925 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | 926 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); 927 928 hifn_puc_wait(sc); 929 hifn_set_retry(sc); 930 931 if (sc->sc_flags & HIFN_IS_7811) { 932 for (reg = 0; reg < 1000; reg++) { 933 if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) & 934 HIFN_MIPSRST_CRAMINIT) 935 break; 936 DELAY(1000); 937 } 938 if (reg == 1000) 939 printf(": cram init timeout\n"); 940 } else { 941 /* set up DMA configuration register #2 */ 942 /* turn off all PK and BAR0 swaps */ 943 WRITE_REG_1(sc, HIFN_1_DMA_CNFG2, 944 (3 << HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT)| 945 (3 << HIFN_DMACNFG2_INIT_READ_BURST_SHIFT)| 946 (2 << HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT)| 947 (2 << HIFN_DMACNFG2_TGT_READ_BURST_SHIFT)); 948 } 949 950 } 951 952 static u_int32_t 953 hifn_next_signature(u_int32_t a, u_int cnt) 954 { 955 int i; 956 u_int32_t v; 957 958 for (i = 0; i < cnt; i++) { 959 960 /* get the parity */ 961 v = a & 0x80080125; 962 v ^= v >> 16; 963 v ^= v >> 8; 964 v ^= v >> 4; 965 v ^= v >> 2; 966 v ^= v >> 1; 967 968 a = (v & 1) ^ (a << 1); 969 } 970 971 return a; 972 
} 973 974 struct pci2id { 975 u_short pci_vendor; 976 u_short pci_prod; 977 char card_id[13]; 978 }; 979 static struct pci2id pci2id[] = { 980 { 981 PCI_VENDOR_HIFN, 982 PCI_PRODUCT_HIFN_7951, 983 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 984 0x00, 0x00, 0x00, 0x00, 0x00 } 985 }, { 986 PCI_VENDOR_HIFN, 987 PCI_PRODUCT_HIFN_7955, 988 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 989 0x00, 0x00, 0x00, 0x00, 0x00 } 990 }, { 991 PCI_VENDOR_HIFN, 992 PCI_PRODUCT_HIFN_7956, 993 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 994 0x00, 0x00, 0x00, 0x00, 0x00 } 995 }, { 996 PCI_VENDOR_NETSEC, 997 PCI_PRODUCT_NETSEC_7751, 998 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 999 0x00, 0x00, 0x00, 0x00, 0x00 } 1000 }, { 1001 PCI_VENDOR_INVERTEX, 1002 PCI_PRODUCT_INVERTEX_AEON, 1003 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1004 0x00, 0x00, 0x00, 0x00, 0x00 } 1005 }, { 1006 PCI_VENDOR_HIFN, 1007 PCI_PRODUCT_HIFN_7811, 1008 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1009 0x00, 0x00, 0x00, 0x00, 0x00 } 1010 }, { 1011 /* 1012 * Other vendors share this PCI ID as well, such as 1013 * http://www.powercrypt.com, and obviously they also 1014 * use the same key. 1015 */ 1016 PCI_VENDOR_HIFN, 1017 PCI_PRODUCT_HIFN_7751, 1018 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1019 0x00, 0x00, 0x00, 0x00, 0x00 } 1020 }, 1021 }; 1022 1023 /* 1024 * Checks to see if crypto is already enabled. If crypto isn't enable, 1025 * "hifn_enable_crypto" is called to enable it. The check is important, 1026 * as enabling crypto twice will lock the board. 
 */
static int
hifn_enable_crypto(struct hifn_softc *sc)
{
	u_int32_t dmacfg, ramcfg, encl, addr, i;
	char *offtbl = NULL;

	/* Find the unlock key material for this vendor/device pair. */
	for (i = 0; i < nitems(pci2id); i++) {
		if (pci2id[i].pci_vendor == pci_get_vendor(sc->sc_dev) &&
		    pci2id[i].pci_prod == pci_get_device(sc->sc_dev)) {
			offtbl = pci2id[i].card_id;
			break;
		}
	}
	if (offtbl == NULL) {
		device_printf(sc->sc_dev, "Unknown card!\n");
		return (1);
	}

	/* Save register state so it can be restored at "report" below. */
	ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
	dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);

	/*
	 * The RAM config register's encrypt level bit needs to be set before
	 * every read performed on the encryption level register.
	 */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);

	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	/*
	 * Make sure we don't re-unlock.  Two unlocks kills chip until the
	 * next reboot.
	 */
	if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev,
			    "Strong crypto already enabled!\n");
#endif
		goto report;
	}

	if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev,
			      "Unknown encryption level 0x%x\n", encl);
#endif
		return 1;
	}

	/*
	 * Perform the unlock handshake: read the secret, then write the
	 * 13-step signature sequence derived from the card id.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
	    HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
	DELAY(1000);
	addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1);
	DELAY(1000);
	WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0);
	DELAY(1000);

	for (i = 0; i <= 12; i++) {
		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
		WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr);

		DELAY(1000);
	}

	/* Re-read the enable level to see whether the unlock took. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
			device_printf(sc->sc_dev, "Engine is permanently "
				"locked until next system reset!\n");
		else
			device_printf(sc->sc_dev, "Engine enabled "
				"successfully!\n");
	}
#endif

report:
	/* Restore the register state saved on entry. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);

	switch (encl) {
	case HIFN_PUSTAT_ENA_1:
	case HIFN_PUSTAT_ENA_2:
		break;
	case HIFN_PUSTAT_ENA_0:
	default:
		device_printf(sc->sc_dev, "disabled");
		break;
	}

	return 0;
}

/*
 * Give initial values to the registers listed in the "Register Space"
 * section of the HIFN Software Development reference manual.
 */
static void
hifn_init_pci_registers(struct hifn_softc *sc)
{
	/* write fixed values needed by the Initialization registers */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
	WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);

	/* write all 4 ring address registers */
	WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, cmdr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, srcr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, dstr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, resr[0]));

	DELAY(2000);

	/* write status register */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE |
HIFN_DMACSR_R_LAST | 1156 HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER | 1157 HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST | 1158 HIFN_DMACSR_S_WAIT | 1159 HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST | 1160 HIFN_DMACSR_C_WAIT | 1161 HIFN_DMACSR_ENGINE | 1162 ((sc->sc_flags & HIFN_HAS_PUBLIC) ? 1163 HIFN_DMACSR_PUBDONE : 0) | 1164 ((sc->sc_flags & HIFN_IS_7811) ? 1165 HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0)); 1166 1167 sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0; 1168 sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT | 1169 HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER | 1170 HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT | 1171 ((sc->sc_flags & HIFN_IS_7811) ? 1172 HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0); 1173 sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT; 1174 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); 1175 1176 1177 if (sc->sc_flags & HIFN_IS_7956) { 1178 u_int32_t pll; 1179 1180 WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING | 1181 HIFN_PUCNFG_TCALLPHASES | 1182 HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32); 1183 1184 /* turn off the clocks and insure bypass is set */ 1185 pll = READ_REG_1(sc, HIFN_1_PLL); 1186 pll = (pll &~ (HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL)) 1187 | HIFN_PLL_BP | HIFN_PLL_MBSET; 1188 WRITE_REG_1(sc, HIFN_1_PLL, pll); 1189 DELAY(10*1000); /* 10ms */ 1190 1191 /* change configuration */ 1192 pll = (pll &~ HIFN_PLL_CONFIG) | sc->sc_pllconfig; 1193 WRITE_REG_1(sc, HIFN_1_PLL, pll); 1194 DELAY(10*1000); /* 10ms */ 1195 1196 /* disable bypass */ 1197 pll &= ~HIFN_PLL_BP; 1198 WRITE_REG_1(sc, HIFN_1_PLL, pll); 1199 /* enable clocks with new configuration */ 1200 pll |= HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL; 1201 WRITE_REG_1(sc, HIFN_1_PLL, pll); 1202 } else { 1203 WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING | 1204 HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES | 1205 HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 | 1206 (sc->sc_drammodel ? 
HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM)); 1207 } 1208 1209 WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER); 1210 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | 1211 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST | 1212 ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) | 1213 ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL)); 1214 } 1215 1216 /* 1217 * The maximum number of sessions supported by the card 1218 * is dependent on the amount of context ram, which 1219 * encryption algorithms are enabled, and how compression 1220 * is configured. This should be configured before this 1221 * routine is called. 1222 */ 1223 static void 1224 hifn_sessions(struct hifn_softc *sc) 1225 { 1226 u_int32_t pucnfg; 1227 int ctxsize; 1228 1229 pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG); 1230 1231 if (pucnfg & HIFN_PUCNFG_COMPSING) { 1232 if (pucnfg & HIFN_PUCNFG_ENCCNFG) 1233 ctxsize = 128; 1234 else 1235 ctxsize = 512; 1236 /* 1237 * 7955/7956 has internal context memory of 32K 1238 */ 1239 if (sc->sc_flags & HIFN_IS_7956) 1240 sc->sc_maxses = 32768 / ctxsize; 1241 else 1242 sc->sc_maxses = 1 + 1243 ((sc->sc_ramsize - 32768) / ctxsize); 1244 } else 1245 sc->sc_maxses = sc->sc_ramsize / 16384; 1246 1247 if (sc->sc_maxses > 2048) 1248 sc->sc_maxses = 2048; 1249 } 1250 1251 /* 1252 * Determine ram type (sram or dram). Board should be just out of a reset 1253 * state when this is called. 
 */
static int
hifn_ramtype(struct hifn_softc *sc)
{
	u_int8_t data[8], dataexpect[8];
	int i;

	/*
	 * Write an alternating-bit pattern (0x55) to RAM address 0 and
	 * read it back; a mismatch means the board has DRAM (which needs
	 * different PUCNFG setup) rather than SRAM.
	 */
	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = 0x55;
	if (hifn_writeramaddr(sc, 0, data))
		return (-1);
	if (hifn_readramaddr(sc, 0, data))
		return (-1);
	if (bcmp(data, dataexpect, sizeof(data)) != 0) {
		sc->sc_drammodel = 1;
		return (0);
	}

	/* Repeat with the complementary pattern (0xaa). */
	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = 0xaa;
	if (hifn_writeramaddr(sc, 0, data))
		return (-1);
	if (hifn_readramaddr(sc, 0, data))
		return (-1);
	if (bcmp(data, dataexpect, sizeof(data)) != 0) {
		sc->sc_drammodel = 1;
		return (0);
	}

	/* Both patterns read back intact: assume SRAM (sc_drammodel == 0). */
	return (0);
}

#define	HIFN_SRAM_MAX		(32 << 20)
#define	HIFN_SRAM_STEP_SIZE	16384
#define	HIFN_SRAM_GRANULARITY	(HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)

/*
 * Probe the SRAM size by stamping each 16KB step with its index
 * (descending, so aliased addresses are overwritten by lower stamps)
 * and then reading back ascending until the stamp no longer matches.
 * sc_ramsize ends up as the highest address that verified, plus one step.
 * Always returns 0.
 */
static int
hifn_sramsize(struct hifn_softc *sc)
{
	u_int32_t a;
	u_int8_t data[8];
	u_int8_t dataexpect[sizeof(data)];
	int32_t i;

	/*
	 * Fill the trailing bytes with a fixed pattern; the leading
	 * sizeof(i) bytes are overwritten with the step index below.
	 */
	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = i ^ 0x5a;

	for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
		a = i * HIFN_SRAM_STEP_SIZE;
		bcopy(&i, data, sizeof(i));
		hifn_writeramaddr(sc, a, data);
	}

	for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
		a = i * HIFN_SRAM_STEP_SIZE;
		bcopy(&i, dataexpect, sizeof(i));
		if (hifn_readramaddr(sc, a, data) < 0)
			return (0);
		if (bcmp(data, dataexpect, sizeof(data)) != 0)
			return (0);
		sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
	}

	return (0);
}

/*
 * XXX For dram boards, one should really try all of the
 * HIFN_PUCNFG_DSZ_*'s.  This just assumes that PUCNFG
 * is already set up correctly.
1324 */ 1325 static int 1326 hifn_dramsize(struct hifn_softc *sc) 1327 { 1328 u_int32_t cnfg; 1329 1330 if (sc->sc_flags & HIFN_IS_7956) { 1331 /* 1332 * 7955/7956 have a fixed internal ram of only 32K. 1333 */ 1334 sc->sc_ramsize = 32768; 1335 } else { 1336 cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) & 1337 HIFN_PUCNFG_DRAMMASK; 1338 sc->sc_ramsize = 1 << ((cnfg >> 13) + 18); 1339 } 1340 return (0); 1341 } 1342 1343 static void 1344 hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp, int *resp) 1345 { 1346 struct hifn_dma *dma = sc->sc_dma; 1347 1348 if (sc->sc_cmdi == HIFN_D_CMD_RSIZE) { 1349 sc->sc_cmdi = 0; 1350 dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID | 1351 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 1352 HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE, 1353 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1354 } 1355 *cmdp = sc->sc_cmdi++; 1356 sc->sc_cmdk = sc->sc_cmdi; 1357 1358 if (sc->sc_srci == HIFN_D_SRC_RSIZE) { 1359 sc->sc_srci = 0; 1360 dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID | 1361 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 1362 HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE, 1363 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1364 } 1365 *srcp = sc->sc_srci++; 1366 sc->sc_srck = sc->sc_srci; 1367 1368 if (sc->sc_dsti == HIFN_D_DST_RSIZE) { 1369 sc->sc_dsti = 0; 1370 dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID | 1371 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 1372 HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE, 1373 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1374 } 1375 *dstp = sc->sc_dsti++; 1376 sc->sc_dstk = sc->sc_dsti; 1377 1378 if (sc->sc_resi == HIFN_D_RES_RSIZE) { 1379 sc->sc_resi = 0; 1380 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID | 1381 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 1382 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE, 1383 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1384 } 1385 *resp = sc->sc_resi++; 1386 sc->sc_resk = sc->sc_resi; 1387 } 1388 1389 static int 1390 hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data) 1391 { 1392 struct 
hifn_dma *dma = sc->sc_dma; 1393 hifn_base_command_t wc; 1394 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ; 1395 int r, cmdi, resi, srci, dsti; 1396 1397 wc.masks = htole16(3 << 13); 1398 wc.session_num = htole16(addr >> 14); 1399 wc.total_source_count = htole16(8); 1400 wc.total_dest_count = htole16(addr & 0x3fff); 1401 1402 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi); 1403 1404 WRITE_REG_1(sc, HIFN_1_DMA_CSR, 1405 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA | 1406 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA); 1407 1408 /* build write command */ 1409 bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND); 1410 *(hifn_base_command_t *)dma->command_bufs[cmdi] = wc; 1411 bcopy(data, &dma->test_src, sizeof(dma->test_src)); 1412 1413 dma->srcr[srci].p = htole32(sc->sc_dma_physaddr 1414 + offsetof(struct hifn_dma, test_src)); 1415 dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr 1416 + offsetof(struct hifn_dma, test_dst)); 1417 1418 dma->cmdr[cmdi].l = htole32(16 | masks); 1419 dma->srcr[srci].l = htole32(8 | masks); 1420 dma->dstr[dsti].l = htole32(4 | masks); 1421 dma->resr[resi].l = htole32(4 | masks); 1422 1423 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1424 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1425 1426 for (r = 10000; r >= 0; r--) { 1427 DELAY(10); 1428 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1429 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1430 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0) 1431 break; 1432 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1433 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1434 } 1435 if (r == 0) { 1436 device_printf(sc->sc_dev, "writeramaddr -- " 1437 "result[%d](addr %d) still valid\n", resi, addr); 1438 r = -1; 1439 return (-1); 1440 } else 1441 r = 0; 1442 1443 WRITE_REG_1(sc, HIFN_1_DMA_CSR, 1444 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS | 1445 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS); 1446 1447 return (r); 1448 } 1449 1450 static int 1451 hifn_readramaddr(struct 
hifn_softc *sc, int addr, u_int8_t *data) 1452 { 1453 struct hifn_dma *dma = sc->sc_dma; 1454 hifn_base_command_t rc; 1455 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ; 1456 int r, cmdi, srci, dsti, resi; 1457 1458 rc.masks = htole16(2 << 13); 1459 rc.session_num = htole16(addr >> 14); 1460 rc.total_source_count = htole16(addr & 0x3fff); 1461 rc.total_dest_count = htole16(8); 1462 1463 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi); 1464 1465 WRITE_REG_1(sc, HIFN_1_DMA_CSR, 1466 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA | 1467 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA); 1468 1469 bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND); 1470 *(hifn_base_command_t *)dma->command_bufs[cmdi] = rc; 1471 1472 dma->srcr[srci].p = htole32(sc->sc_dma_physaddr + 1473 offsetof(struct hifn_dma, test_src)); 1474 dma->test_src = 0; 1475 dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr + 1476 offsetof(struct hifn_dma, test_dst)); 1477 dma->test_dst = 0; 1478 dma->cmdr[cmdi].l = htole32(8 | masks); 1479 dma->srcr[srci].l = htole32(8 | masks); 1480 dma->dstr[dsti].l = htole32(8 | masks); 1481 dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks); 1482 1483 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1484 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1485 1486 for (r = 10000; r >= 0; r--) { 1487 DELAY(10); 1488 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1489 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1490 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0) 1491 break; 1492 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1493 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1494 } 1495 if (r == 0) { 1496 device_printf(sc->sc_dev, "readramaddr -- " 1497 "result[%d](addr %d) still valid\n", resi, addr); 1498 r = -1; 1499 } else { 1500 r = 0; 1501 bcopy(&dma->test_dst, data, sizeof(dma->test_dst)); 1502 } 1503 1504 WRITE_REG_1(sc, HIFN_1_DMA_CSR, 1505 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS | 1506 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS); 
1507 1508 return (r); 1509 } 1510 1511 /* 1512 * Initialize the descriptor rings. 1513 */ 1514 static void 1515 hifn_init_dma(struct hifn_softc *sc) 1516 { 1517 struct hifn_dma *dma = sc->sc_dma; 1518 int i; 1519 1520 hifn_set_retry(sc); 1521 1522 /* initialize static pointer values */ 1523 for (i = 0; i < HIFN_D_CMD_RSIZE; i++) 1524 dma->cmdr[i].p = htole32(sc->sc_dma_physaddr + 1525 offsetof(struct hifn_dma, command_bufs[i][0])); 1526 for (i = 0; i < HIFN_D_RES_RSIZE; i++) 1527 dma->resr[i].p = htole32(sc->sc_dma_physaddr + 1528 offsetof(struct hifn_dma, result_bufs[i][0])); 1529 1530 dma->cmdr[HIFN_D_CMD_RSIZE].p = 1531 htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0])); 1532 dma->srcr[HIFN_D_SRC_RSIZE].p = 1533 htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0])); 1534 dma->dstr[HIFN_D_DST_RSIZE].p = 1535 htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0])); 1536 dma->resr[HIFN_D_RES_RSIZE].p = 1537 htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0])); 1538 1539 sc->sc_cmdu = sc->sc_srcu = sc->sc_dstu = sc->sc_resu = 0; 1540 sc->sc_cmdi = sc->sc_srci = sc->sc_dsti = sc->sc_resi = 0; 1541 sc->sc_cmdk = sc->sc_srck = sc->sc_dstk = sc->sc_resk = 0; 1542 } 1543 1544 /* 1545 * Writes out the raw command buffer space. Returns the 1546 * command buffer size. 
1547 */ 1548 static u_int 1549 hifn_write_command(struct hifn_command *cmd, u_int8_t *buf) 1550 { 1551 u_int8_t *buf_pos; 1552 hifn_base_command_t *base_cmd; 1553 hifn_mac_command_t *mac_cmd; 1554 hifn_crypt_command_t *cry_cmd; 1555 int using_mac, using_crypt, len, ivlen; 1556 u_int32_t dlen, slen; 1557 1558 buf_pos = buf; 1559 using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC; 1560 using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT; 1561 1562 base_cmd = (hifn_base_command_t *)buf_pos; 1563 base_cmd->masks = htole16(cmd->base_masks); 1564 slen = cmd->src_mapsize; 1565 if (cmd->sloplen) 1566 dlen = cmd->dst_mapsize - cmd->sloplen + sizeof(u_int32_t); 1567 else 1568 dlen = cmd->dst_mapsize; 1569 base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO); 1570 base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO); 1571 dlen >>= 16; 1572 slen >>= 16; 1573 base_cmd->session_num = htole16( 1574 ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) | 1575 ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M)); 1576 buf_pos += sizeof(hifn_base_command_t); 1577 1578 if (using_mac) { 1579 mac_cmd = (hifn_mac_command_t *)buf_pos; 1580 dlen = cmd->maccrd->crd_len; 1581 mac_cmd->source_count = htole16(dlen & 0xffff); 1582 dlen >>= 16; 1583 mac_cmd->masks = htole16(cmd->mac_masks | 1584 ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M)); 1585 mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip); 1586 mac_cmd->reserved = 0; 1587 buf_pos += sizeof(hifn_mac_command_t); 1588 } 1589 1590 if (using_crypt) { 1591 cry_cmd = (hifn_crypt_command_t *)buf_pos; 1592 dlen = cmd->enccrd->crd_len; 1593 cry_cmd->source_count = htole16(dlen & 0xffff); 1594 dlen >>= 16; 1595 cry_cmd->masks = htole16(cmd->cry_masks | 1596 ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M)); 1597 cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip); 1598 cry_cmd->reserved = 0; 1599 buf_pos += sizeof(hifn_crypt_command_t); 1600 } 1601 1602 if (using_mac && 
cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) { 1603 bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH); 1604 buf_pos += HIFN_MAC_KEY_LENGTH; 1605 } 1606 1607 if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) { 1608 switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) { 1609 case HIFN_CRYPT_CMD_ALG_3DES: 1610 bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH); 1611 buf_pos += HIFN_3DES_KEY_LENGTH; 1612 break; 1613 case HIFN_CRYPT_CMD_ALG_DES: 1614 bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH); 1615 buf_pos += HIFN_DES_KEY_LENGTH; 1616 break; 1617 case HIFN_CRYPT_CMD_ALG_RC4: 1618 len = 256; 1619 do { 1620 int clen; 1621 1622 clen = MIN(cmd->cklen, len); 1623 bcopy(cmd->ck, buf_pos, clen); 1624 len -= clen; 1625 buf_pos += clen; 1626 } while (len > 0); 1627 bzero(buf_pos, 4); 1628 buf_pos += 4; 1629 break; 1630 case HIFN_CRYPT_CMD_ALG_AES: 1631 /* 1632 * AES keys are variable 128, 192 and 1633 * 256 bits (16, 24 and 32 bytes). 1634 */ 1635 bcopy(cmd->ck, buf_pos, cmd->cklen); 1636 buf_pos += cmd->cklen; 1637 break; 1638 } 1639 } 1640 1641 if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) { 1642 switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) { 1643 case HIFN_CRYPT_CMD_ALG_AES: 1644 ivlen = HIFN_AES_IV_LENGTH; 1645 break; 1646 default: 1647 ivlen = HIFN_IV_LENGTH; 1648 break; 1649 } 1650 bcopy(cmd->iv, buf_pos, ivlen); 1651 buf_pos += ivlen; 1652 } 1653 1654 if ((cmd->base_masks & (HIFN_BASE_CMD_MAC|HIFN_BASE_CMD_CRYPT)) == 0) { 1655 bzero(buf_pos, 8); 1656 buf_pos += 8; 1657 } 1658 1659 return (buf_pos - buf); 1660 } 1661 1662 static int 1663 hifn_dmamap_aligned(struct hifn_operand *op) 1664 { 1665 int i; 1666 1667 for (i = 0; i < op->nsegs; i++) { 1668 if (op->segs[i].ds_addr & 3) 1669 return (0); 1670 if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3)) 1671 return (0); 1672 } 1673 return (1); 1674 } 1675 1676 static __inline int 1677 hifn_dmamap_dstwrap(struct hifn_softc *sc, int idx) 1678 { 1679 struct hifn_dma *dma = sc->sc_dma; 1680 1681 if (++idx == 
HIFN_D_DST_RSIZE) { 1682 dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP | 1683 HIFN_D_MASKDONEIRQ); 1684 HIFN_DSTR_SYNC(sc, idx, 1685 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1686 idx = 0; 1687 } 1688 return (idx); 1689 } 1690 1691 static int 1692 hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd) 1693 { 1694 struct hifn_dma *dma = sc->sc_dma; 1695 struct hifn_operand *dst = &cmd->dst; 1696 u_int32_t p, l; 1697 int idx, used = 0, i; 1698 1699 idx = sc->sc_dsti; 1700 for (i = 0; i < dst->nsegs - 1; i++) { 1701 dma->dstr[idx].p = htole32(dst->segs[i].ds_addr); 1702 dma->dstr[idx].l = htole32(HIFN_D_VALID | 1703 HIFN_D_MASKDONEIRQ | dst->segs[i].ds_len); 1704 HIFN_DSTR_SYNC(sc, idx, 1705 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1706 used++; 1707 1708 idx = hifn_dmamap_dstwrap(sc, idx); 1709 } 1710 1711 if (cmd->sloplen == 0) { 1712 p = dst->segs[i].ds_addr; 1713 l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST | 1714 dst->segs[i].ds_len; 1715 } else { 1716 p = sc->sc_dma_physaddr + 1717 offsetof(struct hifn_dma, slop[cmd->slopidx]); 1718 l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST | 1719 sizeof(u_int32_t); 1720 1721 if ((dst->segs[i].ds_len - cmd->sloplen) != 0) { 1722 dma->dstr[idx].p = htole32(dst->segs[i].ds_addr); 1723 dma->dstr[idx].l = htole32(HIFN_D_VALID | 1724 HIFN_D_MASKDONEIRQ | 1725 (dst->segs[i].ds_len - cmd->sloplen)); 1726 HIFN_DSTR_SYNC(sc, idx, 1727 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1728 used++; 1729 1730 idx = hifn_dmamap_dstwrap(sc, idx); 1731 } 1732 } 1733 dma->dstr[idx].p = htole32(p); 1734 dma->dstr[idx].l = htole32(l); 1735 HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1736 used++; 1737 1738 idx = hifn_dmamap_dstwrap(sc, idx); 1739 1740 sc->sc_dsti = idx; 1741 sc->sc_dstu += used; 1742 return (idx); 1743 } 1744 1745 static __inline int 1746 hifn_dmamap_srcwrap(struct hifn_softc *sc, int idx) 1747 { 1748 struct hifn_dma *dma = sc->sc_dma; 1749 1750 if (++idx == 
HIFN_D_SRC_RSIZE) { 1751 dma->srcr[idx].l = htole32(HIFN_D_VALID | 1752 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 1753 HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE, 1754 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1755 idx = 0; 1756 } 1757 return (idx); 1758 } 1759 1760 static int 1761 hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd) 1762 { 1763 struct hifn_dma *dma = sc->sc_dma; 1764 struct hifn_operand *src = &cmd->src; 1765 int idx, i; 1766 u_int32_t last = 0; 1767 1768 idx = sc->sc_srci; 1769 for (i = 0; i < src->nsegs; i++) { 1770 if (i == src->nsegs - 1) 1771 last = HIFN_D_LAST; 1772 1773 dma->srcr[idx].p = htole32(src->segs[i].ds_addr); 1774 dma->srcr[idx].l = htole32(src->segs[i].ds_len | 1775 HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last); 1776 HIFN_SRCR_SYNC(sc, idx, 1777 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1778 1779 idx = hifn_dmamap_srcwrap(sc, idx); 1780 } 1781 sc->sc_srci = idx; 1782 sc->sc_srcu += src->nsegs; 1783 return (idx); 1784 } 1785 1786 static void 1787 hifn_op_cb(void* arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error) 1788 { 1789 struct hifn_operand *op = arg; 1790 1791 KASSERT(nsegs <= MAX_SCATTER, 1792 ("hifn_op_cb: too many DMA segments (%u > %u) " 1793 "returned when mapping operand", nsegs, MAX_SCATTER)); 1794 op->mapsize = mapsize; 1795 op->nsegs = nsegs; 1796 bcopy(seg, op->segs, nsegs * sizeof (seg[0])); 1797 } 1798 1799 static int 1800 hifn_crypto( 1801 struct hifn_softc *sc, 1802 struct hifn_command *cmd, 1803 struct cryptop *crp, 1804 int hint) 1805 { 1806 struct hifn_dma *dma = sc->sc_dma; 1807 u_int32_t cmdlen, csr; 1808 int cmdi, resi, err = 0; 1809 1810 /* 1811 * need 1 cmd, and 1 res 1812 * 1813 * NB: check this first since it's easy. 
1814 */ 1815 HIFN_LOCK(sc); 1816 if ((sc->sc_cmdu + 1) > HIFN_D_CMD_RSIZE || 1817 (sc->sc_resu + 1) > HIFN_D_RES_RSIZE) { 1818 #ifdef HIFN_DEBUG 1819 if (hifn_debug) { 1820 device_printf(sc->sc_dev, 1821 "cmd/result exhaustion, cmdu %u resu %u\n", 1822 sc->sc_cmdu, sc->sc_resu); 1823 } 1824 #endif 1825 hifnstats.hst_nomem_cr++; 1826 HIFN_UNLOCK(sc); 1827 return (ERESTART); 1828 } 1829 1830 if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->src_map)) { 1831 hifnstats.hst_nomem_map++; 1832 HIFN_UNLOCK(sc); 1833 return (ENOMEM); 1834 } 1835 1836 if (crp->crp_flags & CRYPTO_F_IMBUF) { 1837 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map, 1838 cmd->src_m, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) { 1839 hifnstats.hst_nomem_load++; 1840 err = ENOMEM; 1841 goto err_srcmap1; 1842 } 1843 } else if (crp->crp_flags & CRYPTO_F_IOV) { 1844 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map, 1845 cmd->src_io, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) { 1846 hifnstats.hst_nomem_load++; 1847 err = ENOMEM; 1848 goto err_srcmap1; 1849 } 1850 } else { 1851 err = EINVAL; 1852 goto err_srcmap1; 1853 } 1854 1855 if (hifn_dmamap_aligned(&cmd->src)) { 1856 cmd->sloplen = cmd->src_mapsize & 3; 1857 cmd->dst = cmd->src; 1858 } else { 1859 if (crp->crp_flags & CRYPTO_F_IOV) { 1860 err = EINVAL; 1861 goto err_srcmap; 1862 } else if (crp->crp_flags & CRYPTO_F_IMBUF) { 1863 int totlen, len; 1864 struct mbuf *m, *m0, *mlast; 1865 1866 KASSERT(cmd->dst_m == cmd->src_m, 1867 ("hifn_crypto: dst_m initialized improperly")); 1868 hifnstats.hst_unaligned++; 1869 /* 1870 * Source is not aligned on a longword boundary. 1871 * Copy the data to insure alignment. If we fail 1872 * to allocate mbufs or clusters while doing this 1873 * we return ERESTART so the operation is requeued 1874 * at the crypto later, but only if there are 1875 * ops already posted to the hardware; otherwise we 1876 * have no guarantee that we'll be re-entered. 
1877 */ 1878 totlen = cmd->src_mapsize; 1879 if (cmd->src_m->m_flags & M_PKTHDR) { 1880 len = MHLEN; 1881 MGETHDR(m0, M_NOWAIT, MT_DATA); 1882 if (m0 && !m_dup_pkthdr(m0, cmd->src_m, M_NOWAIT)) { 1883 m_free(m0); 1884 m0 = NULL; 1885 } 1886 } else { 1887 len = MLEN; 1888 MGET(m0, M_NOWAIT, MT_DATA); 1889 } 1890 if (m0 == NULL) { 1891 hifnstats.hst_nomem_mbuf++; 1892 err = sc->sc_cmdu ? ERESTART : ENOMEM; 1893 goto err_srcmap; 1894 } 1895 if (totlen >= MINCLSIZE) { 1896 if (!(MCLGET(m0, M_NOWAIT))) { 1897 hifnstats.hst_nomem_mcl++; 1898 err = sc->sc_cmdu ? ERESTART : ENOMEM; 1899 m_freem(m0); 1900 goto err_srcmap; 1901 } 1902 len = MCLBYTES; 1903 } 1904 totlen -= len; 1905 m0->m_pkthdr.len = m0->m_len = len; 1906 mlast = m0; 1907 1908 while (totlen > 0) { 1909 MGET(m, M_NOWAIT, MT_DATA); 1910 if (m == NULL) { 1911 hifnstats.hst_nomem_mbuf++; 1912 err = sc->sc_cmdu ? ERESTART : ENOMEM; 1913 m_freem(m0); 1914 goto err_srcmap; 1915 } 1916 len = MLEN; 1917 if (totlen >= MINCLSIZE) { 1918 if (!(MCLGET(m, M_NOWAIT))) { 1919 hifnstats.hst_nomem_mcl++; 1920 err = sc->sc_cmdu ? 
ERESTART : ENOMEM; 1921 mlast->m_next = m; 1922 m_freem(m0); 1923 goto err_srcmap; 1924 } 1925 len = MCLBYTES; 1926 } 1927 1928 m->m_len = len; 1929 m0->m_pkthdr.len += len; 1930 totlen -= len; 1931 1932 mlast->m_next = m; 1933 mlast = m; 1934 } 1935 cmd->dst_m = m0; 1936 } 1937 } 1938 1939 if (cmd->dst_map == NULL) { 1940 if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->dst_map)) { 1941 hifnstats.hst_nomem_map++; 1942 err = ENOMEM; 1943 goto err_srcmap; 1944 } 1945 if (crp->crp_flags & CRYPTO_F_IMBUF) { 1946 if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map, 1947 cmd->dst_m, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) { 1948 hifnstats.hst_nomem_map++; 1949 err = ENOMEM; 1950 goto err_dstmap1; 1951 } 1952 } else if (crp->crp_flags & CRYPTO_F_IOV) { 1953 if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map, 1954 cmd->dst_io, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) { 1955 hifnstats.hst_nomem_load++; 1956 err = ENOMEM; 1957 goto err_dstmap1; 1958 } 1959 } 1960 } 1961 1962 #ifdef HIFN_DEBUG 1963 if (hifn_debug) { 1964 device_printf(sc->sc_dev, 1965 "Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n", 1966 READ_REG_1(sc, HIFN_1_DMA_CSR), 1967 READ_REG_1(sc, HIFN_1_DMA_IER), 1968 sc->sc_cmdu, sc->sc_srcu, sc->sc_dstu, sc->sc_resu, 1969 cmd->src_nsegs, cmd->dst_nsegs); 1970 } 1971 #endif 1972 1973 if (cmd->src_map == cmd->dst_map) { 1974 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 1975 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 1976 } else { 1977 bus_dmamap_sync(sc->sc_dmat, cmd->src_map, 1978 BUS_DMASYNC_PREWRITE); 1979 bus_dmamap_sync(sc->sc_dmat, cmd->dst_map, 1980 BUS_DMASYNC_PREREAD); 1981 } 1982 1983 /* 1984 * need N src, and N dst 1985 */ 1986 if ((sc->sc_srcu + cmd->src_nsegs) > HIFN_D_SRC_RSIZE || 1987 (sc->sc_dstu + cmd->dst_nsegs + 1) > HIFN_D_DST_RSIZE) { 1988 #ifdef HIFN_DEBUG 1989 if (hifn_debug) { 1990 device_printf(sc->sc_dev, 1991 "src/dst exhaustion, srcu %u+%u dstu %u+%u\n", 1992 sc->sc_srcu, cmd->src_nsegs, 1993 sc->sc_dstu, cmd->dst_nsegs); 1994 
} 1995 #endif 1996 hifnstats.hst_nomem_sd++; 1997 err = ERESTART; 1998 goto err_dstmap; 1999 } 2000 2001 if (sc->sc_cmdi == HIFN_D_CMD_RSIZE) { 2002 sc->sc_cmdi = 0; 2003 dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID | 2004 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 2005 HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE, 2006 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2007 } 2008 cmdi = sc->sc_cmdi++; 2009 cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]); 2010 HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE); 2011 2012 /* .p for command/result already set */ 2013 dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST | 2014 HIFN_D_MASKDONEIRQ); 2015 HIFN_CMDR_SYNC(sc, cmdi, 2016 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 2017 sc->sc_cmdu++; 2018 2019 /* 2020 * We don't worry about missing an interrupt (which a "command wait" 2021 * interrupt salvages us from), unless there is more than one command 2022 * in the queue. 2023 */ 2024 if (sc->sc_cmdu > 1) { 2025 sc->sc_dmaier |= HIFN_DMAIER_C_WAIT; 2026 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); 2027 } 2028 2029 hifnstats.hst_ipackets++; 2030 hifnstats.hst_ibytes += cmd->src_mapsize; 2031 2032 hifn_dmamap_load_src(sc, cmd); 2033 2034 /* 2035 * Unlike other descriptors, we don't mask done interrupt from 2036 * result descriptor. 
2037 */ 2038 #ifdef HIFN_DEBUG 2039 if (hifn_debug) 2040 printf("load res\n"); 2041 #endif 2042 if (sc->sc_resi == HIFN_D_RES_RSIZE) { 2043 sc->sc_resi = 0; 2044 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID | 2045 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 2046 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE, 2047 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2048 } 2049 resi = sc->sc_resi++; 2050 KASSERT(sc->sc_hifn_commands[resi] == NULL, 2051 ("hifn_crypto: command slot %u busy", resi)); 2052 sc->sc_hifn_commands[resi] = cmd; 2053 HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD); 2054 if ((hint & CRYPTO_HINT_MORE) && sc->sc_curbatch < hifn_maxbatch) { 2055 dma->resr[resi].l = htole32(HIFN_MAX_RESULT | 2056 HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ); 2057 sc->sc_curbatch++; 2058 if (sc->sc_curbatch > hifnstats.hst_maxbatch) 2059 hifnstats.hst_maxbatch = sc->sc_curbatch; 2060 hifnstats.hst_totbatch++; 2061 } else { 2062 dma->resr[resi].l = htole32(HIFN_MAX_RESULT | 2063 HIFN_D_VALID | HIFN_D_LAST); 2064 sc->sc_curbatch = 0; 2065 } 2066 HIFN_RESR_SYNC(sc, resi, 2067 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2068 sc->sc_resu++; 2069 2070 if (cmd->sloplen) 2071 cmd->slopidx = resi; 2072 2073 hifn_dmamap_load_dst(sc, cmd); 2074 2075 csr = 0; 2076 if (sc->sc_c_busy == 0) { 2077 csr |= HIFN_DMACSR_C_CTRL_ENA; 2078 sc->sc_c_busy = 1; 2079 } 2080 if (sc->sc_s_busy == 0) { 2081 csr |= HIFN_DMACSR_S_CTRL_ENA; 2082 sc->sc_s_busy = 1; 2083 } 2084 if (sc->sc_r_busy == 0) { 2085 csr |= HIFN_DMACSR_R_CTRL_ENA; 2086 sc->sc_r_busy = 1; 2087 } 2088 if (sc->sc_d_busy == 0) { 2089 csr |= HIFN_DMACSR_D_CTRL_ENA; 2090 sc->sc_d_busy = 1; 2091 } 2092 if (csr) 2093 WRITE_REG_1(sc, HIFN_1_DMA_CSR, csr); 2094 2095 #ifdef HIFN_DEBUG 2096 if (hifn_debug) { 2097 device_printf(sc->sc_dev, "command: stat %8x ier %8x\n", 2098 READ_REG_1(sc, HIFN_1_DMA_CSR), 2099 READ_REG_1(sc, HIFN_1_DMA_IER)); 2100 } 2101 #endif 2102 2103 sc->sc_active = 5; 2104 HIFN_UNLOCK(sc); 2105 KASSERT(err == 0, ("hifn_crypto: 
success with error %u", err)); 2106 return (err); /* success */ 2107 2108 err_dstmap: 2109 if (cmd->src_map != cmd->dst_map) 2110 bus_dmamap_unload(sc->sc_dmat, cmd->dst_map); 2111 err_dstmap1: 2112 if (cmd->src_map != cmd->dst_map) 2113 bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map); 2114 err_srcmap: 2115 if (crp->crp_flags & CRYPTO_F_IMBUF) { 2116 if (cmd->src_m != cmd->dst_m) 2117 m_freem(cmd->dst_m); 2118 } 2119 bus_dmamap_unload(sc->sc_dmat, cmd->src_map); 2120 err_srcmap1: 2121 bus_dmamap_destroy(sc->sc_dmat, cmd->src_map); 2122 HIFN_UNLOCK(sc); 2123 return (err); 2124 } 2125 2126 static void 2127 hifn_tick(void* vsc) 2128 { 2129 struct hifn_softc *sc = vsc; 2130 2131 HIFN_LOCK(sc); 2132 if (sc->sc_active == 0) { 2133 u_int32_t r = 0; 2134 2135 if (sc->sc_cmdu == 0 && sc->sc_c_busy) { 2136 sc->sc_c_busy = 0; 2137 r |= HIFN_DMACSR_C_CTRL_DIS; 2138 } 2139 if (sc->sc_srcu == 0 && sc->sc_s_busy) { 2140 sc->sc_s_busy = 0; 2141 r |= HIFN_DMACSR_S_CTRL_DIS; 2142 } 2143 if (sc->sc_dstu == 0 && sc->sc_d_busy) { 2144 sc->sc_d_busy = 0; 2145 r |= HIFN_DMACSR_D_CTRL_DIS; 2146 } 2147 if (sc->sc_resu == 0 && sc->sc_r_busy) { 2148 sc->sc_r_busy = 0; 2149 r |= HIFN_DMACSR_R_CTRL_DIS; 2150 } 2151 if (r) 2152 WRITE_REG_1(sc, HIFN_1_DMA_CSR, r); 2153 } else 2154 sc->sc_active--; 2155 HIFN_UNLOCK(sc); 2156 callout_reset(&sc->sc_tickto, hz, hifn_tick, sc); 2157 } 2158 2159 static void 2160 hifn_intr(void *arg) 2161 { 2162 struct hifn_softc *sc = arg; 2163 struct hifn_dma *dma; 2164 u_int32_t dmacsr, restart; 2165 int i, u; 2166 2167 dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR); 2168 2169 /* Nothing in the DMA unit interrupted */ 2170 if ((dmacsr & sc->sc_dmaier) == 0) 2171 return; 2172 2173 HIFN_LOCK(sc); 2174 2175 dma = sc->sc_dma; 2176 2177 #ifdef HIFN_DEBUG 2178 if (hifn_debug) { 2179 device_printf(sc->sc_dev, 2180 "irq: stat %08x ien %08x damier %08x i %d/%d/%d/%d k %d/%d/%d/%d u %d/%d/%d/%d\n", 2181 dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_dmaier, 2182 sc->sc_cmdi, 
		    sc->sc_srci, sc->sc_dsti, sc->sc_resi,
		    sc->sc_cmdk, sc->sc_srck, sc->sc_dstk, sc->sc_resk,
		    sc->sc_cmdu, sc->sc_srcu, sc->sc_dstu, sc->sc_resu);
	}
#endif

	/* Ack only the status bits we are watching. */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);

	if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
	    (dmacsr & HIFN_DMACSR_PUBDONE))
		WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
		    READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);

	restart = dmacsr & (HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_OVER);
	if (restart)
		device_printf(sc->sc_dev, "overrun %x\n", dmacsr);

	if (sc->sc_flags & HIFN_IS_7811) {
		if (dmacsr & HIFN_DMACSR_ILLR)
			device_printf(sc->sc_dev, "illegal read\n");
		if (dmacsr & HIFN_DMACSR_ILLW)
			device_printf(sc->sc_dev, "illegal write\n");
	}

	/* Any engine abort forces a full board reset via hifn_abort(). */
	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
	if (restart) {
		device_printf(sc->sc_dev, "abort, resetting.\n");
		hifnstats.hst_abort++;
		hifn_abort(sc);
		HIFN_UNLOCK(sc);
		return;
	}

	if ((dmacsr & HIFN_DMACSR_C_WAIT) && (sc->sc_cmdu == 0)) {
		/*
		 * If no slots to process and we receive a "waiting on
		 * command" interrupt, we disable the "waiting on command"
		 * (by clearing it).
		 */
		sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	/* clear the rings */
	i = sc->sc_resk; u = sc->sc_resu;
	while (u != 0) {
		HIFN_RESR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		/* Descriptor still owned by the chip: stop reaping here. */
		if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_RESR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}

		/* Slot HIFN_D_RES_RSIZE is the JUMP descriptor, no command. */
		if (i != HIFN_D_RES_RSIZE) {
			struct hifn_command *cmd;
			u_int8_t *macbuf = NULL;

			HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
			cmd = sc->sc_hifn_commands[i];
			KASSERT(cmd != NULL,
			    ("hifn_intr: null command slot %u", i));
			sc->sc_hifn_commands[i] = NULL;

			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
				/* MAC digest starts 12 bytes into the buf. */
				macbuf = dma->result_bufs[i];
				macbuf += 12;
			}

			hifn_callback(sc, cmd, macbuf);
			hifnstats.hst_opackets++;
			u--;
		}

		if (++i == (HIFN_D_RES_RSIZE + 1))
			i = 0;
	}
	sc->sc_resk = i; sc->sc_resu = u;

	i = sc->sc_srck; u = sc->sc_srcu;
	while (u != 0) {
		if (i == HIFN_D_SRC_RSIZE)
			i = 0;
		HIFN_SRCR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_SRCR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		i++, u--;
	}
	sc->sc_srck = i; sc->sc_srcu = u;

	i = sc->sc_cmdk; u = sc->sc_cmdu;
	while (u != 0) {
		HIFN_CMDR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_CMDR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (i != HIFN_D_CMD_RSIZE) {
			u--;
			HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
		}
		if (++i == (HIFN_D_CMD_RSIZE + 1))
			i = 0;
	}
	sc->sc_cmdk = i; sc->sc_cmdu = u;

	HIFN_UNLOCK(sc);

	/*
	 * Ring space was freed above; if hifn_crypto() previously told the
	 * crypto layer to hold off, unblock it now.
	 */
	if (sc->sc_needwakeup) {		/* XXX check high watermark */
		int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev,
			    "wakeup crypto (%x) u %d/%d/%d/%d\n",
			    sc->sc_needwakeup,
			    sc->sc_cmdu, sc->sc_srcu, sc->sc_dstu, sc->sc_resu);
#endif
		sc->sc_needwakeup &= ~wakeup;
		crypto_unblock(sc->sc_cid, wakeup);
	}
}

/*
 * Allocate a new 'session' and return an encoded session id.  'sidp'
 * contains our registration id, and should contain an encoded session
 * id on successful allocation.
 */
static int
hifn_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
{
	struct hifn_softc *sc = device_get_softc(dev);
	struct cryptoini *c;
	int mac = 0, cry = 0, sesn;
	struct hifn_session *ses = NULL;

	KASSERT(sc != NULL, ("hifn_newsession: null softc"));
	if (sidp == NULL || cri == NULL || sc == NULL)
		return (EINVAL);

	HIFN_LOCK(sc);
	if (sc->sc_sessions == NULL) {
		/* First session ever: allocate a one-entry table. */
		ses = sc->sc_sessions = (struct hifn_session *)malloc(
		    sizeof(*ses), M_DEVBUF, M_NOWAIT);
		if (ses == NULL) {
			HIFN_UNLOCK(sc);
			return (ENOMEM);
		}
		sesn = 0;
		sc->sc_nsessions = 1;
	} else {
		/* Reuse a free slot if one exists. */
		for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
			if (!sc->sc_sessions[sesn].hs_used) {
				ses = &sc->sc_sessions[sesn];
				break;
			}
		}

		if (ses == NULL) {
			/* No free slot: grow the table by one entry. */
			sesn = sc->sc_nsessions;
			ses = (struct hifn_session *)malloc((sesn + 1) *
			    sizeof(*ses), M_DEVBUF, M_NOWAIT);
			if (ses == NULL) {
				HIFN_UNLOCK(sc);
				return (ENOMEM);
			}
			bcopy(sc->sc_sessions, ses, sesn * sizeof(*ses));
			/* Scrub old table before freeing: it held IVs/state. */
			bzero(sc->sc_sessions, sesn * sizeof(*ses));
			free(sc->sc_sessions, M_DEVBUF);
			sc->sc_sessions = ses;
			ses = &sc->sc_sessions[sesn];
			sc->sc_nsessions++;
		}
	}
	HIFN_UNLOCK(sc);

	bzero(ses, sizeof(*ses));
	ses->hs_used = 1;

	/*
	 * NOTE(review): the EINVAL returns below leave hs_used set on the
	 * just-claimed slot, so a rejected cri list leaks the slot until
	 * hifn_freesession() — confirm and consider clearing hs_used on
	 * the error paths.
	 */
	for (c = cri; c != NULL; c = c->cri_next) {
		switch (c->cri_alg) {
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
			/* At most one hash descriptor per session. */
			if (mac)
				return (EINVAL);
			mac = 1;
			ses->hs_mlen = c->cri_mlen;
			if (ses->hs_mlen == 0) {
				/* Default to the full digest length. */
				switch (c->cri_alg) {
				case CRYPTO_MD5:
				case CRYPTO_MD5_HMAC:
					ses->hs_mlen = 16;
					break;
				case CRYPTO_SHA1:
				case CRYPTO_SHA1_HMAC:
					ses->hs_mlen = 20;
					break;
				}
			}
			break;
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_AES_CBC:
			/* XXX this may read fewer, does it matter? */
			read_random(ses->hs_iv,
			    c->cri_alg == CRYPTO_AES_CBC ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			/*FALLTHROUGH*/
		case CRYPTO_ARC4:
			/* At most one cipher descriptor per session. */
			if (cry)
				return (EINVAL);
			cry = 1;
			break;
		default:
			return (EINVAL);
		}
	}
	if (mac == 0 && cry == 0)
		return (EINVAL);

	*sidp = HIFN_SID(device_get_unit(sc->sc_dev), sesn);

	return (0);
}

/*
 * Deallocate a session.
 * XXX this routine should run a zero'd mac/encrypt key into context ram.
 * XXX to blow away any keys already stored there.
 */
static int
hifn_freesession(device_t dev, u_int64_t tid)
{
	struct hifn_softc *sc = device_get_softc(dev);
	int session, error;
	u_int32_t sid = CRYPTO_SESID2LID(tid);

	KASSERT(sc != NULL, ("hifn_freesession: null softc"));
	if (sc == NULL)
		return (EINVAL);

	HIFN_LOCK(sc);
	session = HIFN_SESSION(sid);
	if (session < sc->sc_nsessions) {
		/* Zeroing clears hs_used, returning the slot to the pool. */
		bzero(&sc->sc_sessions[session], sizeof(struct hifn_session));
		error = 0;
	} else
		error = EINVAL;
	HIFN_UNLOCK(sc);

	return (error);
}

/*
 * Entry point for symmetric crypto requests from the opencrypto layer.
 * Validates the descriptor chain (at most one hash and one cipher, in an
 * order the chip supports), builds a hifn_command describing the base,
 * crypt, and MAC operations, and hands it to hifn_crypto() for DMA
 * dispatch.  Returns 0 on successful submission; on error the request is
 * completed via crypto_done() with crp_etype set.
 */
static int
hifn_process(device_t dev, struct cryptop *crp, int hint)
{
	struct hifn_softc *sc = device_get_softc(dev);
	struct hifn_command *cmd = NULL;
	int session, err, ivlen;
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;

	if (crp == NULL || crp->crp_callback == NULL) {
		hifnstats.hst_invalid++;
		return (EINVAL);
	}
	session = HIFN_SESSION(crp->crp_sid);

	if (sc == NULL || session >= sc->sc_nsessions) {
		err = EINVAL;
		goto errout;
	}

	cmd = malloc(sizeof(struct hifn_command), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cmd == NULL) {
		hifnstats.hst_nomem++;
		err = ENOMEM;
		goto errout;
	}

	/* Operate in place: src and dst start as the same buffer. */
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		cmd->src_m = (struct mbuf *)crp->crp_buf;
		cmd->dst_m = (struct mbuf *)crp->crp_buf;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		cmd->src_io = (struct uio *)crp->crp_buf;
		cmd->dst_io = (struct uio *)crp->crp_buf;
	} else {
		err = EINVAL;
		goto errout;	/* XXX we don't handle contiguous buffers! */
	}

	crd1 = crp->crp_desc;
	if (crd1 == NULL) {
		err = EINVAL;
		goto errout;
	}
	crd2 = crd1->crd_next;

	if (crd2 == NULL) {
		/* Single descriptor: either hash-only or cipher-only. */
		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1 ||
		    crd1->crd_alg == CRYPTO_MD5) {
			maccrd = crd1;
			enccrd = NULL;
		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC ||
		    crd1->crd_alg == CRYPTO_ARC4) {
			if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
				cmd->base_masks |= HIFN_BASE_CMD_DECODE;
			maccrd = NULL;
			enccrd = crd1;
		} else {
			err = EINVAL;
			goto errout;
		}
	} else {
		/*
		 * Two descriptors: the chip only supports hash-then-decrypt
		 * or encrypt-then-hash orderings.
		 */
		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
		    crd1->crd_alg == CRYPTO_MD5 ||
		    crd1->crd_alg == CRYPTO_SHA1) &&
		    (crd2->crd_alg == CRYPTO_DES_CBC ||
		    crd2->crd_alg == CRYPTO_3DES_CBC ||
		    crd2->crd_alg == CRYPTO_AES_CBC ||
		    crd2->crd_alg == CRYPTO_ARC4) &&
		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
			cmd->base_masks = HIFN_BASE_CMD_DECODE;
			maccrd = crd1;
			enccrd = crd2;
		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_ARC4 ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC) &&
		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
		    crd2->crd_alg == CRYPTO_SHA1_HMAC ||
		    crd2->crd_alg == CRYPTO_MD5 ||
		    crd2->crd_alg == CRYPTO_SHA1) &&
		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
			enccrd = crd1;
			maccrd = crd2;
		} else {
			/*
			 * We cannot order the 7751 as requested
			 */
			err = EINVAL;
			goto errout;
		}
	}

	if (enccrd) {
		cmd->enccrd = enccrd;
		cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
		switch (enccrd->crd_alg) {
		case CRYPTO_ARC4:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
			break;
		case CRYPTO_DES_CBC:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
			    HIFN_CRYPT_CMD_MODE_CBC |
			    HIFN_CRYPT_CMD_NEW_IV;
			break;
		case CRYPTO_3DES_CBC:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
			    HIFN_CRYPT_CMD_MODE_CBC |
			    HIFN_CRYPT_CMD_NEW_IV;
			break;
		case CRYPTO_AES_CBC:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
			    HIFN_CRYPT_CMD_MODE_CBC |
			    HIFN_CRYPT_CMD_NEW_IV;
			break;
		default:
			err = EINVAL;
			goto errout;
		}
		/* ARC4 is a stream cipher and takes no IV. */
		if (enccrd->crd_alg != CRYPTO_ARC4) {
			ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			if (enccrd->crd_flags & CRD_F_ENCRYPT) {
				/* Use explicit IV or the session's. */
				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
					bcopy(enccrd->crd_iv, cmd->iv, ivlen);
				else
					bcopy(sc->sc_sessions[session].hs_iv,
					    cmd->iv, ivlen);

				/* Emit the IV into the buffer if absent. */
				if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
				    == 0) {
					crypto_copyback(crp->crp_flags,
					    crp->crp_buf, enccrd->crd_inject,
					    ivlen, cmd->iv);
				}
			} else {
				/* Decrypt: IV comes with the request. */
				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
					bcopy(enccrd->crd_iv, cmd->iv, ivlen);
				else {
					crypto_copydata(crp->crp_flags,
					    crp->crp_buf, enccrd->crd_inject,
					    ivlen, cmd->iv);
				}
			}
		}

		if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
			cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
		cmd->ck = enccrd->crd_key;
		cmd->cklen = enccrd->crd_klen >> 3;
		/*
		 * NOTE(review): NEW_KEY is set unconditionally here, making
		 * the CRD_F_KEY_EXPLICIT conditional above redundant —
		 * confirm whether the key is meant to be reloaded on every
		 * request.
		 */
		cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;

		/*
		 * Need to specify the size for the AES key in the masks.
		 */
		if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
		    HIFN_CRYPT_CMD_ALG_AES) {
			switch (cmd->cklen) {
			case 16:
				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
				break;
			case 24:
				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
				break;
			case 32:
				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
				break;
			default:
				err = EINVAL;
				goto errout;
			}
		}
	}

	if (maccrd) {
		cmd->maccrd = maccrd;
		cmd->base_masks |= HIFN_BASE_CMD_MAC;

		switch (maccrd->crd_alg) {
		case CRYPTO_MD5:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
			    HIFN_MAC_CMD_POS_IPSEC;
			break;
		case CRYPTO_MD5_HMAC:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
			break;
		case CRYPTO_SHA1:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
			    HIFN_MAC_CMD_POS_IPSEC;
			break;
		case CRYPTO_SHA1_HMAC:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
			break;
		}

		/* HMAC modes load the key, zero-padded to the full length. */
		if (maccrd->crd_alg == CRYPTO_SHA1_HMAC ||
		    maccrd->crd_alg == CRYPTO_MD5_HMAC) {
			cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
			bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3);
			bzero(cmd->mac + (maccrd->crd_klen >> 3),
			    HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
		}
	}

	cmd->crp = crp;
	cmd->session_num = session;
	cmd->softc = sc;

	err = hifn_crypto(sc, cmd, crp, hint);
	if (!err) {
		return 0;
	} else if (err == ERESTART) {
		/*
		 * There weren't enough resources to dispatch the request
		 * to the part.  Notify the caller so they'll requeue this
		 * request and resubmit it again soon.
		 */
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev, "requeue request\n");
#endif
		free(cmd, M_DEVBUF);
		sc->sc_needwakeup |= CRYPTO_SYMQ;
		return (err);
	}

errout:
	if (cmd != NULL)
		free(cmd, M_DEVBUF);
	if (err == EINVAL)
		hifnstats.hst_invalid++;
	else
		hifnstats.hst_nomem++;
	crp->crp_etype = err;
	crypto_done(crp);
	return (err);
}

/*
 * Recover from a DMA-engine abort: complete any result descriptors the
 * chip already finished, fail everything else with ENOMEM, then reset
 * the board and reinitialize the DMA rings and PCI registers.
 * Called from hifn_intr() with the softc lock held.
 */
static void
hifn_abort(struct hifn_softc *sc)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_command *cmd;
	struct cryptop *crp;
	int i, u;

	i = sc->sc_resk; u = sc->sc_resu;
	while (u != 0) {
		cmd = sc->sc_hifn_commands[i];
		KASSERT(cmd != NULL, ("hifn_abort: null command slot %u", i));
		sc->sc_hifn_commands[i] = NULL;
		crp = cmd->crp;

		if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
			/* Salvage what we can. */
			u_int8_t *macbuf;

			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
				macbuf = dma->result_bufs[i];
				macbuf += 12;
			} else
				macbuf = NULL;
			hifnstats.hst_opackets++;
			hifn_callback(sc, cmd, macbuf);
		} else {
			/* Request never completed: tear down its mappings. */
			if (cmd->src_map == cmd->dst_map) {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			} else {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
				    BUS_DMASYNC_POSTREAD);
			}

			if (cmd->src_m != cmd->dst_m) {
				m_freem(cmd->src_m);
				crp->crp_buf = (caddr_t)cmd->dst_m;
			}

			/* non-shared buffers cannot be restarted */
			if (cmd->src_map != cmd->dst_map) {
				/*
				 * XXX should be EAGAIN, delayed until
				 * after the reset.
				 */
				crp->crp_etype = ENOMEM;
				bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
				bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
			} else
				crp->crp_etype = ENOMEM;

			bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
			bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);

			free(cmd, M_DEVBUF);
			if (crp->crp_etype != EAGAIN)
				crypto_done(crp);
		}

		if (++i == HIFN_D_RES_RSIZE)
			i = 0;
		u--;
	}
	sc->sc_resk = i; sc->sc_resu = u;

	hifn_reset_board(sc, 1);
	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);
}

/*
 * Completion handler for a finished request.  Syncs and tears down the
 * DMA mappings, copies slop bytes and the MAC result back into the
 * caller's buffer, saves the trailing ciphertext block as the session
 * IV for chaining, reaps drained dst-ring slots, and finally notifies
 * the crypto layer via crypto_done().
 */
static void
hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct cryptop *crp = cmd->crp;
	struct cryptodesc *crd;
	struct mbuf *m;
	int totlen, i, u, ivlen;

	if (cmd->src_map == cmd->dst_map) {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    BUS_DMASYNC_POSTREAD);
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (cmd->src_m != cmd->dst_m) {
			/*
			 * A separate dst chain was allocated: trim it to the
			 * source length, hand it to the caller, and free the
			 * original chain.
			 */
			crp->crp_buf = (caddr_t)cmd->dst_m;
			totlen = cmd->src_mapsize;
			for (m = cmd->dst_m; m != NULL; m = m->m_next) {
				if (totlen < m->m_len) {
					m->m_len = totlen;
					totlen = 0;
				} else
					totlen -= m->m_len;
			}
			cmd->dst_m->m_pkthdr.len = cmd->src_m->m_pkthdr.len;
			m_freem(cmd->src_m);
		}
	}

	/* Copy back any tail bytes staged in the shared slop area. */
	if (cmd->sloplen != 0) {
		crypto_copyback(crp->crp_flags, crp->crp_buf,
		    cmd->src_mapsize - cmd->sloplen, cmd->sloplen,
		    (caddr_t)&dma->slop[cmd->slopidx]);
	}

	i = sc->sc_dstk; u = sc->sc_dstu;
	while (u != 0) {
		if (i == HIFN_D_DST_RSIZE)
			i = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		i++, u--;
	}
	sc->sc_dstk = i; sc->sc_dstu = u;

	hifnstats.hst_obytes += cmd->dst_mapsize;

	/* On encryption, save the last cipher block as the next IV. */
	if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
	    HIFN_BASE_CMD_CRYPT) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (crd->crd_alg != CRYPTO_DES_CBC &&
			    crd->crd_alg != CRYPTO_3DES_CBC &&
			    crd->crd_alg != CRYPTO_AES_CBC)
				continue;
			ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crd->crd_skip + crd->crd_len - ivlen, ivlen,
			    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			break;
		}
	}

	/* Copy the computed digest into the caller's buffer. */
	if (macbuf != NULL) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			int len;

			if (crd->crd_alg != CRYPTO_MD5 &&
			    crd->crd_alg != CRYPTO_SHA1 &&
			    crd->crd_alg != CRYPTO_MD5_HMAC &&
			    crd->crd_alg != CRYPTO_SHA1_HMAC) {
				continue;
			}
			len = cmd->softc->sc_sessions[cmd->session_num].hs_mlen;
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    crd->crd_inject, len, macbuf);
			break;
		}
	}

	if (cmd->src_map != cmd->dst_map) {
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
	}
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	free(cmd, M_DEVBUF);
	crypto_done(crp);
}

/*
 * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
 * and Group 1 registers; avoid conditions that could create
 * burst writes by doing a read in between the writes.
 *
 * NB: The read we interpose is always to the same register;
 * we do this because reading from an arbitrary (e.g.
last)
 * register may not always work.
 */
static void
hifn_write_reg_0(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
{
	/* BAR0 write; break up adjacent-register writes on 7811 parts. */
	if (sc->sc_flags & HIFN_IS_7811) {
		if (sc->sc_bar0_lastreg == reg - 4)
			bus_space_read_4(sc->sc_st0, sc->sc_sh0, HIFN_0_PUCNFG);
		sc->sc_bar0_lastreg = reg;
	}
	bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val);
}

static void
hifn_write_reg_1(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
{
	/* BAR1 write; same interposed-read workaround as hifn_write_reg_0. */
	if (sc->sc_flags & HIFN_IS_7811) {
		if (sc->sc_bar1_lastreg == reg - 4)
			bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID);
		sc->sc_bar1_lastreg = reg;
	}
	bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val);
}

#ifdef HIFN_VULCANDEV
/*
 * this code provides support for mapping the PK engine's register
 * into a userspace program.
 *
 */
static int
vulcanpk_mmap(struct cdev *dev, vm_ooffset_t offset,
	      vm_paddr_t *paddr, int nprot, vm_memattr_t *memattr)
{
	struct hifn_softc *sc;
	vm_paddr_t pd;
	void *b;

	sc = dev->si_drv1;

	pd = rman_get_start(sc->sc_bar1res);
	b = rman_get_virtual(sc->sc_bar1res);

#if 0
	printf("vpk mmap: %p(%016llx) offset=%lld\n", b,
	    (unsigned long long)pd, offset);
	hexdump(b, HIFN_1_PUB_MEMEND, "vpk", 0);
#endif

	/* Only page 0 (the start of BAR1) may be mapped. */
	if (offset == 0) {
		*paddr = pd;
		return (0);
	}
	return (-1);
}

static struct cdevsw vulcanpk_cdevsw = {
	.d_version =	D_VERSION,
	.d_mmap =	vulcanpk_mmap,
	.d_name =	"vulcanpk",
};
#endif /* HIFN_VULCANDEV */