1 /* $OpenBSD: hifn7751.c,v 1.120 2002/05/17 00:33:34 deraadt Exp $ */ 2 3 /*- 4 * Invertex AEON / Hifn 7751 driver 5 * Copyright (c) 1999 Invertex Inc. All rights reserved. 6 * Copyright (c) 1999 Theo de Raadt 7 * Copyright (c) 2000-2001 Network Security Technologies, Inc. 8 * http://www.netsec.net 9 * Copyright (c) 2003 Hifn Inc. 10 * 11 * This driver is based on a previous driver by Invertex, for which they 12 * requested: Please send any comments, feedback, bug-fixes, or feature 13 * requests to software@invertex.com. 14 * 15 * Redistribution and use in source and binary forms, with or without 16 * modification, are permitted provided that the following conditions 17 * are met: 18 * 19 * 1. Redistributions of source code must retain the above copyright 20 * notice, this list of conditions and the following disclaimer. 21 * 2. Redistributions in binary form must reproduce the above copyright 22 * notice, this list of conditions and the following disclaimer in the 23 * documentation and/or other materials provided with the distribution. 24 * 3. The name of the author may not be used to endorse or promote products 25 * derived from this software without specific prior written permission. 26 * 27 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 28 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 29 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 30 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 31 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 32 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 33 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 34 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 35 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 36 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for various Hifn encryption processors.
 */
#include "opt_hifn.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/mbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/clock.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <opencrypto/cryptodev.h>
#include <sys/random.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#ifdef HIFN_RNDTEST
#include <dev/rndtest/rndtest.h>
#endif
#include <dev/hifn/hifn7751reg.h>
#include <dev/hifn/hifn7751var.h>

/*
 * Prototypes and count for the pci_device structure
 */
static int hifn_probe(device_t);
static int hifn_attach(device_t);
static int hifn_detach(device_t);
static int hifn_suspend(device_t);
static int hifn_resume(device_t);
static void hifn_shutdown(device_t);

/* newbus device method dispatch table */
static device_method_t hifn_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		hifn_probe),
	DEVMETHOD(device_attach,	hifn_attach),
	DEVMETHOD(device_detach,	hifn_detach),
	DEVMETHOD(device_suspend,	hifn_suspend),
	DEVMETHOD(device_resume,	hifn_resume),
	DEVMETHOD(device_shutdown,	hifn_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	{ 0, 0 }
};
static driver_t hifn_driver = {
	"hifn",
	hifn_methods,
	sizeof (struct hifn_softc)
};
static devclass_t hifn_devclass;

DRIVER_MODULE(hifn, pci, hifn_driver, hifn_devclass, 0, 0);
MODULE_DEPEND(hifn, crypto, 1, 1, 1);
#ifdef HIFN_RNDTEST
MODULE_DEPEND(hifn, rndtest, 1, 1, 1);
#endif

/* Forward declarations for the driver internals defined below. */
static void hifn_reset_board(struct hifn_softc *, int);
static void hifn_reset_puc(struct hifn_softc *);
static void hifn_puc_wait(struct hifn_softc *);
static int hifn_enable_crypto(struct hifn_softc *);
static void hifn_set_retry(struct hifn_softc *sc);
static void hifn_init_dma(struct hifn_softc *);
static void hifn_init_pci_registers(struct hifn_softc *);
static int hifn_sramsize(struct hifn_softc *);
static int hifn_dramsize(struct hifn_softc *);
static int hifn_ramtype(struct hifn_softc *);
static void hifn_sessions(struct hifn_softc *);
static void hifn_intr(void *);
static u_int hifn_write_command(struct hifn_command *, u_int8_t *);
static u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
static int hifn_newsession(void *, u_int32_t *, struct cryptoini *);
static int hifn_freesession(void *, u_int64_t);
static int hifn_process(void *, struct cryptop *, int);
static void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *);
static int hifn_crypto(struct hifn_softc *, struct hifn_command *, struct cryptop *, int);
static int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
static int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
static int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *);
static int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *);
static int hifn_init_pubrng(struct hifn_softc *);
static void hifn_rng(void *);
static void hifn_tick(void *);
static void hifn_abort(struct hifn_softc *);
static void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *);

static void hifn_write_reg_0(struct hifn_softc *, bus_size_t, u_int32_t);
static void hifn_write_reg_1(struct hifn_softc *, bus_size_t, u_int32_t);

/*
 * Read a BAR0 register.  Any read invalidates sc_bar0_lastreg;
 * presumably hifn_write_reg_0 (defined elsewhere in this file) uses
 * that field to coalesce or sequence register writes -- confirm
 * against its definition before relying on this.
 */
static __inline u_int32_t
READ_REG_0(struct hifn_softc *sc, bus_size_t reg)
{
	u_int32_t v = bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg);
	sc->sc_bar0_lastreg = (bus_size_t) -1;
	return (v);
}
#define	WRITE_REG_0(sc, reg, val)	hifn_write_reg_0(sc, reg, val)

/* Same pattern as READ_REG_0, but for the BAR1 register window. */
static __inline u_int32_t
READ_REG_1(struct hifn_softc *sc, bus_size_t reg)
{
	u_int32_t v = bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg);
	sc->sc_bar1_lastreg = (bus_size_t) -1;
	return (v);
}
#define	WRITE_REG_1(sc, reg, val)	hifn_write_reg_1(sc, reg, val)

SYSCTL_NODE(_hw, OID_AUTO, hifn, CTLFLAG_RD, 0, "Hifn driver parameters");

#ifdef HIFN_DEBUG
static int hifn_debug = 0;
SYSCTL_INT(_hw_hifn, OID_AUTO, debug, CTLFLAG_RW, &hifn_debug,
	    0, "control debugging msgs");
#endif

/* Global (not per-device) operation statistics, exported via sysctl. */
static struct hifn_stats hifnstats;
SYSCTL_STRUCT(_hw_hifn, OID_AUTO, stats, CTLFLAG_RD, &hifnstats,
	    hifn_stats, "driver statistics");
/* Max requests queued to the chip before forcing an interrupt. */
static int hifn_maxbatch = 1;
SYSCTL_INT(_hw_hifn, OID_AUTO, maxbatch, CTLFLAG_RW, &hifn_maxbatch,
	    0, "max ops to batch w/o interrupt");

/*
 * Probe for a supported device. The PCI vendor and device
 * IDs are used to detect devices we know how to handle.
190 */ 191 static int 192 hifn_probe(device_t dev) 193 { 194 if (pci_get_vendor(dev) == PCI_VENDOR_INVERTEX && 195 pci_get_device(dev) == PCI_PRODUCT_INVERTEX_AEON) 196 return (BUS_PROBE_DEFAULT); 197 if (pci_get_vendor(dev) == PCI_VENDOR_HIFN && 198 (pci_get_device(dev) == PCI_PRODUCT_HIFN_7751 || 199 pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 || 200 pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 || 201 pci_get_device(dev) == PCI_PRODUCT_HIFN_7956 || 202 pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)) 203 return (BUS_PROBE_DEFAULT); 204 if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC && 205 pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751) 206 return (BUS_PROBE_DEFAULT); 207 return (ENXIO); 208 } 209 210 static void 211 hifn_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 212 { 213 bus_addr_t *paddr = (bus_addr_t*) arg; 214 *paddr = segs->ds_addr; 215 } 216 217 static const char* 218 hifn_partname(struct hifn_softc *sc) 219 { 220 /* XXX sprintf numbers when not decoded */ 221 switch (pci_get_vendor(sc->sc_dev)) { 222 case PCI_VENDOR_HIFN: 223 switch (pci_get_device(sc->sc_dev)) { 224 case PCI_PRODUCT_HIFN_6500: return "Hifn 6500"; 225 case PCI_PRODUCT_HIFN_7751: return "Hifn 7751"; 226 case PCI_PRODUCT_HIFN_7811: return "Hifn 7811"; 227 case PCI_PRODUCT_HIFN_7951: return "Hifn 7951"; 228 case PCI_PRODUCT_HIFN_7955: return "Hifn 7955"; 229 case PCI_PRODUCT_HIFN_7956: return "Hifn 7956"; 230 } 231 return "Hifn unknown-part"; 232 case PCI_VENDOR_INVERTEX: 233 switch (pci_get_device(sc->sc_dev)) { 234 case PCI_PRODUCT_INVERTEX_AEON: return "Invertex AEON"; 235 } 236 return "Invertex unknown-part"; 237 case PCI_VENDOR_NETSEC: 238 switch (pci_get_device(sc->sc_dev)) { 239 case PCI_PRODUCT_NETSEC_7751: return "NetSec 7751"; 240 } 241 return "NetSec unknown-part"; 242 } 243 return "Unknown-vendor unknown-part"; 244 } 245 246 static void 247 default_harvest(struct rndtest_state *rsp, void *buf, u_int count) 248 { 249 random_harvest(buf, count, count*NBBY, 0, 
RANDOM_PURE); 250 } 251 252 static u_int 253 checkmaxmin(device_t dev, const char *what, u_int v, u_int min, u_int max) 254 { 255 if (v > max) { 256 device_printf(dev, "Warning, %s %u out of range, " 257 "using max %u\n", what, v, max); 258 v = max; 259 } else if (v < min) { 260 device_printf(dev, "Warning, %s %u out of range, " 261 "using min %u\n", what, v, min); 262 v = min; 263 } 264 return v; 265 } 266 267 /* 268 * Select PLL configuration for 795x parts. This is complicated in 269 * that we cannot determine the optimal parameters without user input. 270 * The reference clock is derived from an external clock through a 271 * multiplier. The external clock is either the host bus (i.e. PCI) 272 * or an external clock generator. When using the PCI bus we assume 273 * the clock is either 33 or 66 MHz; for an external source we cannot 274 * tell the speed. 275 * 276 * PLL configuration is done with a string: "pci" for PCI bus, or "ext" 277 * for an external source, followed by the frequency. We calculate 278 * the appropriate multiplier and PLL register contents accordingly. 279 * When no configuration is given we default to "pci66" since that 280 * always will allow the card to work. If a card is using the PCI 281 * bus clock and in a 33MHz slot then it will be operating at half 282 * speed until the correct information is provided. 
 */
static void
hifn_getpllconfig(device_t dev, u_int *pll)
{
	const char *pllspec;
	u_int freq, mul, fl, fh;
	u_int32_t pllconfig;
	char *nxt;

	/* Fetch hint "hw.hifn.N.pllconfig"; default to "pci66". */
	if (resource_string_value("hifn", device_get_unit(dev),
	    "pllconfig", &pllspec))
		pllspec = "pci66";
	fl = 33, fh = 66;
	pllconfig = 0;
	if (strncmp(pllspec, "ext", 3) == 0) {
		pllspec += 3;
		pllconfig |= HIFN_PLL_REF_SEL;
		/* External clocks allow a wider frequency range. */
		switch (pci_get_device(dev)) {
		case PCI_PRODUCT_HIFN_7955:
		case PCI_PRODUCT_HIFN_7956:
			fl = 20, fh = 100;
			break;
#ifdef notyet
		case PCI_PRODUCT_HIFN_7954:
			fl = 20, fh = 66;
			break;
#endif
		}
	} else if (strncmp(pllspec, "pci", 3) == 0)
		pllspec += 3;
	freq = strtoul(pllspec, &nxt, 10);
	if (nxt == pllspec)
		freq = 66;
	else
		freq = checkmaxmin(dev, "frequency", freq, fl, fh);
	/*
	 * Calculate multiplier. We target a Fck of 266 MHz,
	 * allowing only even values, possibly rounded down.
	 * Multipliers > 8 must set the charge pump current.
	 */
	mul = checkmaxmin(dev, "PLL divisor", (266 / freq) &~ 1, 2, 12);
	pllconfig |= (mul / 2 - 1) << HIFN_PLL_ND_SHIFT;
	if (mul > 8)
		pllconfig |= HIFN_PLL_IS;
	*pll = pllconfig;
}

/*
 * Attach an interface that successfully probed.
 *
 * Brings the device up in order: flag detection, PCI command setup,
 * BAR0/BAR1 mappings, DMA descriptor allocation, board reset and
 * crypto unlock, RAM sizing, interrupt hookup, and registration with
 * the opencrypto framework.  Failure paths unwind via the goto chain
 * at the bottom; the labels must stay in reverse acquisition order.
 */
static int
hifn_attach(device_t dev)
{
	struct hifn_softc *sc = device_get_softc(dev);
	u_int32_t cmd;
	caddr_t kva;
	int rseg, rid;
	char rbase;
	u_int16_t ena, rev;

	KASSERT(sc != NULL, ("hifn_attach: null software carrier!"));
	bzero(sc, sizeof (*sc));
	sc->sc_dev = dev;

	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), "hifn driver", MTX_DEF);

	/* XXX handle power management */

	/*
	 * The 7951 and 795x have a random number generator and
	 * public key support; note this.
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
	    (pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7956))
		sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC;
	/*
	 * The 7811 has a random number generator and
	 * we also note it's identity 'cuz of some quirks.
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
	    pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)
		sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG;

	/*
	 * The 795x parts support AES.
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
	    (pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7956)) {
		sc->sc_flags |= HIFN_IS_7956 | HIFN_HAS_AES;
		/*
		 * Select PLL configuration.  This depends on the
		 * bus and board design and must be manually configured
		 * if the default setting is unacceptable.
		 */
		hifn_getpllconfig(dev, &sc->sc_pllconfig);
	}

	/*
	 * Configure support for memory-mapped access to
	 * registers and for DMA operations.
	 */
#define	PCIM_ENA	(PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN)
	cmd = pci_read_config(dev, PCIR_COMMAND, 4);
	cmd |= PCIM_ENA;
	pci_write_config(dev, PCIR_COMMAND, cmd, 4);
	/* Read back to verify the bits actually stuck. */
	cmd = pci_read_config(dev, PCIR_COMMAND, 4);
	if ((cmd & PCIM_ENA) != PCIM_ENA) {
		device_printf(dev, "failed to enable %s\n",
		    (cmd & PCIM_ENA) == 0 ?
		        "memory mapping & bus mastering" :
		        (cmd & PCIM_CMD_MEMEN) == 0 ?
		            "memory mapping" : "bus mastering");
		goto fail_pci;
	}
#undef PCIM_ENA

	/*
	 * Setup PCI resources. Note that we record the bus
	 * tag and handle for each register mapping, this is
	 * used by the READ_REG_0, WRITE_REG_0, READ_REG_1,
	 * and WRITE_REG_1 macros throughout the driver.
	 */
	rid = HIFN_BAR0;
	sc->sc_bar0res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_bar0res == NULL) {
		device_printf(dev, "cannot map bar%d register space\n", 0);
		goto fail_pci;
	}
	sc->sc_st0 = rman_get_bustag(sc->sc_bar0res);
	sc->sc_sh0 = rman_get_bushandle(sc->sc_bar0res);
	sc->sc_bar0_lastreg = (bus_size_t) -1;

	rid = HIFN_BAR1;
	sc->sc_bar1res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_bar1res == NULL) {
		device_printf(dev, "cannot map bar%d register space\n", 1);
		goto fail_io0;
	}
	sc->sc_st1 = rman_get_bustag(sc->sc_bar1res);
	sc->sc_sh1 = rman_get_bushandle(sc->sc_bar1res);
	sc->sc_bar1_lastreg = (bus_size_t) -1;

	hifn_set_retry(sc);

	/*
	 * Setup the area where the Hifn DMA's descriptors
	 * and associated data structures.
	 */
	if (bus_dma_tag_create(NULL,			/* parent */
			       1, 0,			/* alignment,boundary */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       HIFN_MAX_DMALEN,		/* maxsize */
			       MAX_SCATTER,		/* nsegments */
			       HIFN_MAX_SEGLEN,		/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       NULL,			/* lockfunc */
			       NULL,			/* lockarg */
			       &sc->sc_dmat)) {
		device_printf(dev, "cannot allocate DMA tag\n");
		goto fail_io1;
	}
	if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
		device_printf(dev, "cannot create dma map\n");
		bus_dma_tag_destroy(sc->sc_dmat);
		goto fail_io1;
	}
	if (bus_dmamem_alloc(sc->sc_dmat, (void**) &kva, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
		device_printf(dev, "cannot alloc dma buffer\n");
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
		bus_dma_tag_destroy(sc->sc_dmat);
		goto fail_io1;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, kva,
	    sizeof (*sc->sc_dma),
	    hifn_dmamap_cb, &sc->sc_dma_physaddr,
	    BUS_DMA_NOWAIT)) {
		device_printf(dev, "cannot load dma map\n");
		bus_dmamem_free(sc->sc_dmat, kva, sc->sc_dmamap);
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
		bus_dma_tag_destroy(sc->sc_dmat);
		goto fail_io1;
	}
	sc->sc_dma = (struct hifn_dma *)kva;
	bzero(sc->sc_dma, sizeof(*sc->sc_dma));

	KASSERT(sc->sc_st0 != 0, ("hifn_attach: null bar0 tag!"));
	KASSERT(sc->sc_sh0 != 0, ("hifn_attach: null bar0 handle!"));
	KASSERT(sc->sc_st1 != 0, ("hifn_attach: null bar1 tag!"));
	KASSERT(sc->sc_sh1 != 0, ("hifn_attach: null bar1 handle!"));

	/*
	 * Reset the board and do the ``secret handshake''
	 * to enable the crypto support. Then complete the
	 * initialization procedure by setting up the interrupt
	 * and hooking in to the system crypto support so we'll
	 * get used for system services like the crypto device,
	 * IPsec, RNG device, etc.
	 */
	hifn_reset_board(sc, 0);

	if (hifn_enable_crypto(sc) != 0) {
		device_printf(dev, "crypto enabling failed\n");
		goto fail_mem;
	}
	hifn_reset_puc(sc);

	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);

	/* XXX can't dynamically determine ram type for 795x; force dram */
	if (sc->sc_flags & HIFN_IS_7956)
		sc->sc_drammodel = 1;
	else if (hifn_ramtype(sc))
		goto fail_mem;

	if (sc->sc_drammodel == 0)
		hifn_sramsize(sc);
	else
		hifn_dramsize(sc);

	/*
	 * Workaround for NetSec 7751 rev A: half ram size because two
	 * of the address lines were left floating
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
	    pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751 &&
	    pci_get_revid(dev) == 0x61)	/*XXX???*/
		sc->sc_ramsize >>= 1;

	/*
	 * Arrange the interrupt line.
	 */
	rid = 0;
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE|RF_ACTIVE);
	if (sc->sc_irq == NULL) {
		device_printf(dev, "could not map interrupt\n");
		goto fail_mem;
	}
	/*
	 * NB: Network code assumes we are blocked with splimp()
	 *     so make sure the IRQ is marked appropriately.
	 */
	if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    hifn_intr, sc, &sc->sc_intrhand)) {
		device_printf(dev, "could not setup interrupt\n");
		goto fail_intr2;
	}

	hifn_sessions(sc);

	/*
	 * NB: Keep only the low 16 bits; this masks the chip id
	 *     from the 7951.
	 */
	rev = READ_REG_1(sc, HIFN_1_REVID) & 0xffff;

	/* Pretty-print RAM size in KB or MB for the attach banner. */
	rseg = sc->sc_ramsize / 1024;
	rbase = 'K';
	if (sc->sc_ramsize >= (1024 * 1024)) {
		rbase = 'M';
		rseg /= 1024;
	}
	device_printf(sc->sc_dev, "%s, rev %u, %d%cB %cram",
	    hifn_partname(sc), rev,
	    rseg, rbase, sc->sc_drammodel ? 'd' : 's');
	if (sc->sc_flags & HIFN_IS_7956)
		printf(", pll=0x%x<%s clk, %ux mult>",
		    sc->sc_pllconfig,
		    sc->sc_pllconfig & HIFN_PLL_REF_SEL ? "ext" : "pci",
		    2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11));
	printf("\n");

	sc->sc_cid = crypto_get_driverid(0);
	if (sc->sc_cid < 0) {
		device_printf(dev, "could not get crypto driver id\n");
		goto fail_intr;
	}

	WRITE_REG_0(sc, HIFN_0_PUCNFG,
	    READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
	ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	/* Register algorithms according to the enabled crypto level. */
	switch (ena) {
	case HIFN_PUSTAT_ENA_2:
		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		if (sc->sc_flags & HIFN_HAS_AES)
			crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0,
			    hifn_newsession, hifn_freesession,
			    hifn_process, sc);
		/*FALLTHROUGH*/
	case HIFN_PUSTAT_ENA_1:
		crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		break;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
		hifn_init_pubrng(sc);

	callout_init(&sc->sc_tickto, CALLOUT_MPSAFE);
	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);

	return (0);

fail_intr:
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
fail_intr2:
	/* XXX don't store rid */
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
fail_mem:
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
	bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
	bus_dma_tag_destroy(sc->sc_dmat);

	/* Turn off DMA polling */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
fail_io1:
	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res);
fail_io0:
	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res);
fail_pci:
	mtx_destroy(&sc->sc_mtx);
	return (ENXIO);
}

/*
 * Detach an interface that successfully probed.
 */
static int
hifn_detach(device_t dev)
{
	struct hifn_softc *sc = device_get_softc(dev);

	KASSERT(sc != NULL, ("hifn_detach: null software carrier!"));

	/* disable interrupts */
	WRITE_REG_1(sc, HIFN_1_DMA_IER, 0);

	/*XXX other resources */
	callout_stop(&sc->sc_tickto);
	callout_stop(&sc->sc_rngto);
#ifdef HIFN_RNDTEST
	if (sc->sc_rndtest)
		rndtest_detach(sc->sc_rndtest);
#endif

	/* Turn off DMA polling */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	crypto_unregister_all(sc->sc_cid);

	bus_generic_detach(dev);	/*XXX should be no children, right? */

	bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
	/* XXX don't store rid */
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);

	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
	bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
	bus_dma_tag_destroy(sc->sc_dmat);

	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res);
	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res);

	mtx_destroy(&sc->sc_mtx);

	return (0);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
hifn_shutdown(device_t dev)
{
#ifdef notyet
	hifn_stop(device_get_softc(dev));
#endif
}

/*
 * Device suspend routine. Stop the interface and save some PCI
 * settings in case the BIOS doesn't restore them properly on
 * resume.
 */
static int
hifn_suspend(device_t dev)
{
	struct hifn_softc *sc = device_get_softc(dev);
#ifdef notyet
	hifn_stop(sc);
#endif
	sc->sc_suspended = 1;

	return (0);
}

/*
 * Device resume routine. Restore some PCI settings in case the BIOS
 * doesn't, re-enable busmastering, and restart the interface if
 * appropriate.
 */
static int
hifn_resume(device_t dev)
{
	struct hifn_softc *sc = device_get_softc(dev);
#ifdef notyet
	/* reenable busmastering */
	pci_enable_busmaster(dev);
	pci_enable_io(dev, HIFN_RES);

	/* reinitialize interface if necessary */
	if (ifp->if_flags & IFF_UP)
		rl_init(sc);
#endif
	sc->sc_suspended = 0;

	return (0);
}

/*
 * Initialize the public key engine and/or RNG, depending on what
 * hifn_attach recorded in sc_flags.  Returns 0 on success, 1 when
 * the public key engine fails to come out of reset.
 */
static int
hifn_init_pubrng(struct hifn_softc *sc)
{
	u_int32_t r;
	int i;

	/* Prefer rndtest filtering of RNG output when configured. */
#ifdef HIFN_RNDTEST
	sc->sc_rndtest = rndtest_attach(sc->sc_dev);
	if (sc->sc_rndtest)
		sc->sc_harvest = rndtest_harvest;
	else
		sc->sc_harvest = default_harvest;
#else
	sc->sc_harvest = default_harvest;
#endif
	if ((sc->sc_flags & HIFN_IS_7811) == 0) {
		/* Reset 7951 public key/rng engine */
		WRITE_REG_1(sc, HIFN_1_PUB_RESET,
		    READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);

		/* Poll up to ~100ms for the reset bit to self-clear. */
		for (i = 0; i < 100; i++) {
			DELAY(1000);
			if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
			    HIFN_PUBRST_RESET) == 0)
				break;
		}

		if (i == 100) {
			device_printf(sc->sc_dev, "public key init failed\n");
			return (1);
		}
	}

	/* Enable the rng, if available */
	if (sc->sc_flags & HIFN_HAS_RNG) {
		if (sc->sc_flags & HIFN_IS_7811) {
			/* 7811: disable, reconfigure, then re-enable. */
			r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
			if (r & HIFN_7811_RNGENA_ENA) {
				r &= ~HIFN_7811_RNGENA_ENA;
				WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
			}
			WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
			    HIFN_7811_RNGCFG_DEFL);
			r |= HIFN_7811_RNGENA_ENA;
			WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
		} else
			WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
			    READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
			    HIFN_RNGCFG_ENA);

		/* First RNG read is discarded; poll roughly 100 times/sec. */
		sc->sc_rngfirst = 1;
		if (hz >= 100)
			sc->sc_rnghz = hz / 100;
		else
			sc->sc_rnghz = 1;
		callout_init(&sc->sc_rngto, CALLOUT_MPSAFE);
		callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
	}

	/* Enable public key engine, if available */
	if (sc->sc_flags & HIFN_HAS_PUBLIC) {
		WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
		sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	return (0);
}

/*
 * Periodic callout: pull random words from the chip and hand them to
 * the harvest function, then reschedule ourselves at sc_rnghz.
 */
static void
hifn_rng(void *vsc)
{
#define	RANDOM_BITS(n)	(n)*sizeof (u_int32_t), (n)*sizeof (u_int32_t)*NBBY, 0
	struct hifn_softc *sc = vsc;
	u_int32_t sts, num[2];
	int i;

	if (sc->sc_flags & HIFN_IS_7811) {
		/* Drain up to 5 pairs of words from the 7811 FIFO. */
		for (i = 0; i < 5; i++) {
			sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
			if (sts & HIFN_7811_RNGSTS_UFL) {
				/* NB: returning without rescheduling
				 *     permanently disables the RNG. */
				device_printf(sc->sc_dev,
				    "RNG underflow: disabling\n");
				return;
			}
			if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
				break;

			/*
			 * There are at least two words in the RNG FIFO
			 * at this point.
			 */
			num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			/* NB: discard first data read */
			if (sc->sc_rngfirst)
				sc->sc_rngfirst = 0;
			else
				(*sc->sc_harvest)(sc->sc_rndtest,
					num, sizeof (num));
		}
	} else {
		num[0] = READ_REG_1(sc, HIFN_1_RNG_DATA);

		/* NB: discard first data read */
		if (sc->sc_rngfirst)
			sc->sc_rngfirst = 0;
		else
			(*sc->sc_harvest)(sc->sc_rndtest,
				num, sizeof (num[0]));
	}

	callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
#undef RANDOM_BITS
}

/*
 * Busy-wait (up to 5ms) for the processing unit's RESET bit to clear;
 * complain if it never does.
 */
static void
hifn_puc_wait(struct hifn_softc *sc)
{
	int i;

	for (i = 5000; i > 0; i--) {
		DELAY(1);
		if (!(READ_REG_0(sc, HIFN_0_PUCTRL) & HIFN_PUCTRL_RESET))
			break;
	}
	if (!i)
		device_printf(sc->sc_dev, "proc unit did not reset\n");
}

/*
 * Reset the processing unit.
862 */ 863 static void 864 hifn_reset_puc(struct hifn_softc *sc) 865 { 866 /* Reset processing unit */ 867 WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA); 868 hifn_puc_wait(sc); 869 } 870 871 /* 872 * Set the Retry and TRDY registers; note that we set them to 873 * zero because the 7811 locks up when forced to retry (section 874 * 3.6 of "Specification Update SU-0014-04". Not clear if we 875 * should do this for all Hifn parts, but it doesn't seem to hurt. 876 */ 877 static void 878 hifn_set_retry(struct hifn_softc *sc) 879 { 880 /* NB: RETRY only responds to 8-bit reads/writes */ 881 pci_write_config(sc->sc_dev, HIFN_RETRY_TIMEOUT, 0, 1); 882 pci_write_config(sc->sc_dev, HIFN_TRDY_TIMEOUT, 0, 4); 883 } 884 885 /* 886 * Resets the board. Values in the regesters are left as is 887 * from the reset (i.e. initial values are assigned elsewhere). 888 */ 889 static void 890 hifn_reset_board(struct hifn_softc *sc, int full) 891 { 892 u_int32_t reg; 893 894 /* 895 * Set polling in the DMA configuration register to zero. 0x7 avoids 896 * resetting the board and zeros out the other fields. 897 */ 898 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | 899 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); 900 901 /* 902 * Now that polling has been disabled, we have to wait 1 ms 903 * before resetting the board. 
904 */ 905 DELAY(1000); 906 907 /* Reset the DMA unit */ 908 if (full) { 909 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE); 910 DELAY(1000); 911 } else { 912 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, 913 HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET); 914 hifn_reset_puc(sc); 915 } 916 917 KASSERT(sc->sc_dma != NULL, ("hifn_reset_board: null DMA tag!")); 918 bzero(sc->sc_dma, sizeof(*sc->sc_dma)); 919 920 /* Bring dma unit out of reset */ 921 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | 922 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); 923 924 hifn_puc_wait(sc); 925 hifn_set_retry(sc); 926 927 if (sc->sc_flags & HIFN_IS_7811) { 928 for (reg = 0; reg < 1000; reg++) { 929 if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) & 930 HIFN_MIPSRST_CRAMINIT) 931 break; 932 DELAY(1000); 933 } 934 if (reg == 1000) 935 printf(": cram init timeout\n"); 936 } 937 } 938 939 static u_int32_t 940 hifn_next_signature(u_int32_t a, u_int cnt) 941 { 942 int i; 943 u_int32_t v; 944 945 for (i = 0; i < cnt; i++) { 946 947 /* get the parity */ 948 v = a & 0x80080125; 949 v ^= v >> 16; 950 v ^= v >> 8; 951 v ^= v >> 4; 952 v ^= v >> 2; 953 v ^= v >> 1; 954 955 a = (v & 1) ^ (a << 1); 956 } 957 958 return a; 959 } 960 961 struct pci2id { 962 u_short pci_vendor; 963 u_short pci_prod; 964 char card_id[13]; 965 }; 966 static struct pci2id pci2id[] = { 967 { 968 PCI_VENDOR_HIFN, 969 PCI_PRODUCT_HIFN_7951, 970 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 971 0x00, 0x00, 0x00, 0x00, 0x00 } 972 }, { 973 PCI_VENDOR_HIFN, 974 PCI_PRODUCT_HIFN_7955, 975 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 976 0x00, 0x00, 0x00, 0x00, 0x00 } 977 }, { 978 PCI_VENDOR_HIFN, 979 PCI_PRODUCT_HIFN_7956, 980 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 981 0x00, 0x00, 0x00, 0x00, 0x00 } 982 }, { 983 PCI_VENDOR_NETSEC, 984 PCI_PRODUCT_NETSEC_7751, 985 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 986 0x00, 0x00, 0x00, 0x00, 0x00 } 987 }, { 988 PCI_VENDOR_INVERTEX, 989 PCI_PRODUCT_INVERTEX_AEON, 990 { 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7811,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		/*
		 * Other vendors share this PCI ID as well, such as
		 * http://www.powercrypt.com, and obviously they also
		 * use the same key.
		 */
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	},
};

/*
 * Checks to see if crypto is already enabled.  If crypto isn't enable,
 * "hifn_enable_crypto" is called to enable it.  The check is important,
 * as enabling crypto twice will lock the board.
 *
 * Returns 0 in all non-fatal cases (the switch at "report:" only prints
 * status); returns 1 if the card is unknown or at an unrecognized
 * encryption level.
 */
static int
hifn_enable_crypto(struct hifn_softc *sc)
{
	u_int32_t dmacfg, ramcfg, encl, addr, i;
	char *offtbl = NULL;

	/* Find the unlock key for this vendor/device pair. */
	for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) {
		if (pci2id[i].pci_vendor == pci_get_vendor(sc->sc_dev) &&
		    pci2id[i].pci_prod == pci_get_device(sc->sc_dev)) {
			offtbl = pci2id[i].card_id;
			break;
		}
	}
	if (offtbl == NULL) {
		device_printf(sc->sc_dev, "Unknown card!\n");
		return (1);
	}

	/* Save registers we modify; restored at "report:" below. */
	ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
	dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);

	/*
	 * The RAM config register's encrypt level bit needs to be set before
	 * every read performed on the encryption level register.
	 */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);

	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	/*
	 * Make sure we don't re-unlock.  Two unlocks kills chip until the
	 * next reboot.
	 */
	if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev,
			    "Strong crypto already enabled!\n");
#endif
		goto report;
	}

	if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev,
			    "Unknown encryption level 0x%x\n", encl);
#endif
		return 1;
	}

	/* Put the DMA unit into unlock mode and read the seed value. */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
	    HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
	DELAY(1000);
	addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1);
	DELAY(1000);
	WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0);
	DELAY(1000);

	/* Clock all 13 key bytes through the signature generator. */
	for (i = 0; i <= 12; i++) {
		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
		WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr);

		DELAY(1000);
	}

	/* Re-read the encryption level to see whether the unlock took. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
			device_printf(sc->sc_dev, "Engine is permanently "
			    "locked until next system reset!\n");
		else
			device_printf(sc->sc_dev, "Engine enabled "
			    "successfully!\n");
	}
#endif

report:
	/* Restore the registers saved on entry. */
	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);

	switch (encl) {
	case HIFN_PUSTAT_ENA_1:
	case HIFN_PUSTAT_ENA_2:
		break;
	case HIFN_PUSTAT_ENA_0:
	default:
		/*
		 * NOTE(review): no trailing newline -- presumably the
		 * caller's attach output continues this line; confirm
		 * before "fixing".
		 */
		device_printf(sc->sc_dev, "disabled");
		break;
	}

	return 0;
}

/*
 * Give initial values to the registers listed in the "Register Space"
 * section of the HIFN Software Development reference manual.
 */
static void
hifn_init_pci_registers(struct hifn_softc *sc)
{
	/* write fixed values needed by the Initialization registers */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
	WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);

	/* write all 4 ring address registers */
	WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, cmdr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, srcr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, dstr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, resr[0]));

	DELAY(2000);

	/*
	 * Write status register: disable all four ring engines and
	 * clear every pending status bit (these bits are write-to-clear).
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
	    HIFN_DMACSR_S_WAIT |
	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
	    HIFN_DMACSR_C_WAIT |
	    HIFN_DMACSR_ENGINE |
	    ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
		HIFN_DMACSR_PUBDONE : 0) |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));

	/* Engines idle; build the interrupt-enable mask to match. */
	sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
	sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
	/* C_WAIT is only enabled on demand in hifn_crypto(). */
	sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
	WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);


	if (sc->sc_flags & HIFN_IS_7956) {
		u_int32_t pll;

		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);

		/* turn off the clocks and insure bypass is set */
		pll = READ_REG_1(sc, HIFN_1_PLL);
		pll = (pll &~ (HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL))
		    | HIFN_PLL_BP;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
		DELAY(10*1000);		/* 10ms */
		/* change configuration */
		pll = (pll &~ HIFN_PLL_CONFIG) | sc->sc_pllconfig;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
		DELAY(10*1000);		/* 10ms */
		/* disable bypass */
		pll &= ~HIFN_PLL_BP;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
		/* enable clocks with new configuration */
		pll |= HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL;
		WRITE_REG_1(sc, HIFN_1_PLL, pll);
	} else {
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
		    (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
	}

	WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
	    ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
}

/*
 * The maximum number of sessions supported by the card
 * is dependent on the amount of context ram, which
 * encryption algorithms are enabled, and how compression
 * is configured.  This should be configured before this
 * routine is called.
 */
static void
hifn_sessions(struct hifn_softc *sc)
{
	u_int32_t pucnfg;
	int ctxsize;

	pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);

	if (pucnfg & HIFN_PUCNFG_COMPSING) {
		/* Context size depends on the encryption configuration bit. */
		if (pucnfg & HIFN_PUCNFG_ENCCNFG)
			ctxsize = 128;
		else
			ctxsize = 512;
		/*
		 * 7955/7956 has internal context memory of 32K
		 */
		if (sc->sc_flags & HIFN_IS_7956)
			sc->sc_maxses = 32768 / ctxsize;
		else
			sc->sc_maxses = 1 +
			    ((sc->sc_ramsize - 32768) / ctxsize);
	} else
		sc->sc_maxses = sc->sc_ramsize / 16384;

	/* Hard cap regardless of ram size. */
	if (sc->sc_maxses > 2048)
		sc->sc_maxses = 2048;
}

/*
 * Determine ram type (sram or dram).  Board should be just out of a reset
 * state when this is called.
 *
 * Writes a test pattern to ram address 0 and reads it back; a mismatch
 * (on either 0x55 or 0xaa fill) indicates dram (sc_drammodel = 1).
 * Returns -1 only if the ram access commands themselves fail.
 */
static int
hifn_ramtype(struct hifn_softc *sc)
{
	u_int8_t data[8], dataexpect[8];
	int i;

	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = 0x55;
	if (hifn_writeramaddr(sc, 0, data))
		return (-1);
	if (hifn_readramaddr(sc, 0, data))
		return (-1);
	if (bcmp(data, dataexpect, sizeof(data)) != 0) {
		sc->sc_drammodel = 1;
		return (0);
	}

	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = 0xaa;
	if (hifn_writeramaddr(sc, 0, data))
		return (-1);
	if (hifn_readramaddr(sc, 0, data))
		return (-1);
	if (bcmp(data, dataexpect, sizeof(data)) != 0) {
		sc->sc_drammodel = 1;
		return (0);
	}

	return (0);
}

#define	HIFN_SRAM_MAX		(32 << 20)
#define	HIFN_SRAM_STEP_SIZE	16384
#define	HIFN_SRAM_GRANULARITY	(HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)

/*
 * Size the sram by writing a unique stamp at every 16KB step (highest
 * address first, so aliased addresses are overwritten by lower ones),
 * then reading back from the bottom up; sc_ramsize grows while the
 * stamps verify.  Always returns 0 -- the result is sc_ramsize.
 */
static int
hifn_sramsize(struct hifn_softc *sc)
{
	u_int32_t a;
	u_int8_t data[8];
	u_int8_t dataexpect[sizeof(data)];
	int32_t i;

	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = i ^ 0x5a;

	for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
		a = i * HIFN_SRAM_STEP_SIZE;
		/* First 4 bytes carry the step index; rest keep the pattern. */
		bcopy(&i, data, sizeof(i));
		hifn_writeramaddr(sc, a, data);
	}

	for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
		a = i * HIFN_SRAM_STEP_SIZE;
		bcopy(&i, dataexpect, sizeof(i));
		if (hifn_readramaddr(sc, a, data) < 0)
			return (0);
		if (bcmp(data, dataexpect, sizeof(data)) != 0)
			return (0);
		sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
	}

	return (0);
}

/*
 * XXX For dram boards, one should really try all of the
 * HIFN_PUCNFG_DSZ_*'s.  This just assumes that PUCNFG
 * is already set up correctly.
 */
static int
hifn_dramsize(struct hifn_softc *sc)
{
	u_int32_t cnfg;

	if (sc->sc_flags & HIFN_IS_7956) {
		/*
		 * 7955/7956 have a fixed internal ram of only 32K.
		 */
		sc->sc_ramsize = 32768;
	} else {
		cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
		    HIFN_PUCNFG_DRAMMASK;
		sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
	}
	return (0);
}

/*
 * Reserve one slot in each of the four descriptor rings for the ram
 * test commands, wrapping each ring with a JUMP descriptor when it
 * reaches its end.  The chosen indices are returned via the out
 * parameters and the "k" (cleanup) indices are advanced past them.
 */
static void
hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp, int *resp)
{
	struct hifn_dma *dma = sc->sc_dma;

	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*cmdp = dma->cmdi++;
	dma->cmdk = dma->cmdi;

	if (dma->srci == HIFN_D_SRC_RSIZE) {
		dma->srci = 0;
		dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*srcp = dma->srci++;
	dma->srck = dma->srci;

	if (dma->dsti == HIFN_D_DST_RSIZE) {
		dma->dsti = 0;
		dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
1357 HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE, 1358 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1359 } 1360 *dstp = dma->dsti++; 1361 dma->dstk = dma->dsti; 1362 1363 if (dma->resi == HIFN_D_RES_RSIZE) { 1364 dma->resi = 0; 1365 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID | 1366 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 1367 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE, 1368 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1369 } 1370 *resp = dma->resi++; 1371 dma->resk = dma->resi; 1372 } 1373 1374 static int 1375 hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data) 1376 { 1377 struct hifn_dma *dma = sc->sc_dma; 1378 hifn_base_command_t wc; 1379 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ; 1380 int r, cmdi, resi, srci, dsti; 1381 1382 wc.masks = htole16(3 << 13); 1383 wc.session_num = htole16(addr >> 14); 1384 wc.total_source_count = htole16(8); 1385 wc.total_dest_count = htole16(addr & 0x3fff); 1386 1387 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi); 1388 1389 WRITE_REG_1(sc, HIFN_1_DMA_CSR, 1390 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA | 1391 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA); 1392 1393 /* build write command */ 1394 bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND); 1395 *(hifn_base_command_t *)dma->command_bufs[cmdi] = wc; 1396 bcopy(data, &dma->test_src, sizeof(dma->test_src)); 1397 1398 dma->srcr[srci].p = htole32(sc->sc_dma_physaddr 1399 + offsetof(struct hifn_dma, test_src)); 1400 dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr 1401 + offsetof(struct hifn_dma, test_dst)); 1402 1403 dma->cmdr[cmdi].l = htole32(16 | masks); 1404 dma->srcr[srci].l = htole32(8 | masks); 1405 dma->dstr[dsti].l = htole32(4 | masks); 1406 dma->resr[resi].l = htole32(4 | masks); 1407 1408 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1409 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1410 1411 for (r = 10000; r >= 0; r--) { 1412 DELAY(10); 1413 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1414 BUS_DMASYNC_POSTREAD | 
BUS_DMASYNC_POSTWRITE); 1415 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0) 1416 break; 1417 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1418 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1419 } 1420 if (r == 0) { 1421 device_printf(sc->sc_dev, "writeramaddr -- " 1422 "result[%d](addr %d) still valid\n", resi, addr); 1423 r = -1; 1424 return (-1); 1425 } else 1426 r = 0; 1427 1428 WRITE_REG_1(sc, HIFN_1_DMA_CSR, 1429 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS | 1430 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS); 1431 1432 return (r); 1433 } 1434 1435 static int 1436 hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data) 1437 { 1438 struct hifn_dma *dma = sc->sc_dma; 1439 hifn_base_command_t rc; 1440 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ; 1441 int r, cmdi, srci, dsti, resi; 1442 1443 rc.masks = htole16(2 << 13); 1444 rc.session_num = htole16(addr >> 14); 1445 rc.total_source_count = htole16(addr & 0x3fff); 1446 rc.total_dest_count = htole16(8); 1447 1448 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi); 1449 1450 WRITE_REG_1(sc, HIFN_1_DMA_CSR, 1451 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA | 1452 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA); 1453 1454 bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND); 1455 *(hifn_base_command_t *)dma->command_bufs[cmdi] = rc; 1456 1457 dma->srcr[srci].p = htole32(sc->sc_dma_physaddr + 1458 offsetof(struct hifn_dma, test_src)); 1459 dma->test_src = 0; 1460 dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr + 1461 offsetof(struct hifn_dma, test_dst)); 1462 dma->test_dst = 0; 1463 dma->cmdr[cmdi].l = htole32(8 | masks); 1464 dma->srcr[srci].l = htole32(8 | masks); 1465 dma->dstr[dsti].l = htole32(8 | masks); 1466 dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks); 1467 1468 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1469 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1470 1471 for (r = 10000; r >= 0; r--) { 1472 DELAY(10); 1473 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 
1474 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1475 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0) 1476 break; 1477 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1478 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1479 } 1480 if (r == 0) { 1481 device_printf(sc->sc_dev, "readramaddr -- " 1482 "result[%d](addr %d) still valid\n", resi, addr); 1483 r = -1; 1484 } else { 1485 r = 0; 1486 bcopy(&dma->test_dst, data, sizeof(dma->test_dst)); 1487 } 1488 1489 WRITE_REG_1(sc, HIFN_1_DMA_CSR, 1490 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS | 1491 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS); 1492 1493 return (r); 1494 } 1495 1496 /* 1497 * Initialize the descriptor rings. 1498 */ 1499 static void 1500 hifn_init_dma(struct hifn_softc *sc) 1501 { 1502 struct hifn_dma *dma = sc->sc_dma; 1503 int i; 1504 1505 hifn_set_retry(sc); 1506 1507 /* initialize static pointer values */ 1508 for (i = 0; i < HIFN_D_CMD_RSIZE; i++) 1509 dma->cmdr[i].p = htole32(sc->sc_dma_physaddr + 1510 offsetof(struct hifn_dma, command_bufs[i][0])); 1511 for (i = 0; i < HIFN_D_RES_RSIZE; i++) 1512 dma->resr[i].p = htole32(sc->sc_dma_physaddr + 1513 offsetof(struct hifn_dma, result_bufs[i][0])); 1514 1515 dma->cmdr[HIFN_D_CMD_RSIZE].p = 1516 htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0])); 1517 dma->srcr[HIFN_D_SRC_RSIZE].p = 1518 htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0])); 1519 dma->dstr[HIFN_D_DST_RSIZE].p = 1520 htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0])); 1521 dma->resr[HIFN_D_RES_RSIZE].p = 1522 htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0])); 1523 1524 dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0; 1525 dma->cmdi = dma->srci = dma->dsti = dma->resi = 0; 1526 dma->cmdk = dma->srck = dma->dstk = dma->resk = 0; 1527 } 1528 1529 /* 1530 * Writes out the raw command buffer space. Returns the 1531 * command buffer size. 
 */
static u_int
hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
{
	u_int8_t *buf_pos;
	hifn_base_command_t *base_cmd;
	hifn_mac_command_t *mac_cmd;
	hifn_crypt_command_t *cry_cmd;
	int using_mac, using_crypt, len, ivlen;
	u_int32_t dlen, slen;

	buf_pos = buf;
	using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
	using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;

	/* Base command: total lengths, split 16 low bits / high bits. */
	base_cmd = (hifn_base_command_t *)buf_pos;
	base_cmd->masks = htole16(cmd->base_masks);
	slen = cmd->src_mapsize;
	if (cmd->sloplen)
		/* Slop tail is redirected to a 32-bit staging word. */
		dlen = cmd->dst_mapsize - cmd->sloplen + sizeof(u_int32_t);
	else
		dlen = cmd->dst_mapsize;
	base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
	base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
	dlen >>= 16;
	slen >>= 16;
	base_cmd->session_num = htole16(
	    ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
	    ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
	buf_pos += sizeof(hifn_base_command_t);

	/* Optional MAC sub-command. */
	if (using_mac) {
		mac_cmd = (hifn_mac_command_t *)buf_pos;
		dlen = cmd->maccrd->crd_len;
		mac_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		mac_cmd->masks = htole16(cmd->mac_masks |
		    ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
		mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
		mac_cmd->reserved = 0;
		buf_pos += sizeof(hifn_mac_command_t);
	}

	/* Optional crypt sub-command. */
	if (using_crypt) {
		cry_cmd = (hifn_crypt_command_t *)buf_pos;
		dlen = cmd->enccrd->crd_len;
		cry_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		cry_cmd->masks = htole16(cmd->cry_masks |
		    ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
		cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
		cry_cmd->reserved = 0;
		buf_pos += sizeof(hifn_crypt_command_t);
	}

	/* Inline key material, when a new key is being loaded. */
	if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
		bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH);
		buf_pos += HIFN_MAC_KEY_LENGTH;
	}

	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_3DES:
			bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH);
			buf_pos += HIFN_3DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_DES:
			bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH);
			buf_pos += HIFN_DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_RC4:
			/* Repeat the key to fill 256 bytes, then 4 zero bytes. */
			len = 256;
			do {
				int clen;

				clen = MIN(cmd->cklen, len);
				bcopy(cmd->ck, buf_pos, clen);
				len -= clen;
				buf_pos += clen;
			} while (len > 0);
			bzero(buf_pos, 4);
			buf_pos += 4;
			break;
		case HIFN_CRYPT_CMD_ALG_AES:
			/*
			 * AES keys are variable 128, 192 and
			 * 256 bits (16, 24 and 32 bytes).
			 */
			bcopy(cmd->ck, buf_pos, cmd->cklen);
			buf_pos += cmd->cklen;
			break;
		}
	}

	/* Inline IV, when a new IV is being loaded. */
	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_AES:
			ivlen = HIFN_AES_IV_LENGTH;
			break;
		default:
			ivlen = HIFN_IV_LENGTH;
			break;
		}
		bcopy(cmd->iv, buf_pos, ivlen);
		buf_pos += ivlen;
	}

	/* Neither MAC nor crypt: pad the command with 8 zero bytes. */
	if ((cmd->base_masks & (HIFN_BASE_CMD_MAC|HIFN_BASE_CMD_CRYPT)) == 0) {
		bzero(buf_pos, 8);
		buf_pos += 8;
	}

	return (buf_pos - buf);
}

/*
 * Return 1 if every DMA segment is 32-bit aligned (addresses, and all
 * lengths except the final segment's); the hardware rings require this.
 */
static int
hifn_dmamap_aligned(struct hifn_operand *op)
{
	int i;

	for (i = 0; i < op->nsegs; i++) {
		if (op->segs[i].ds_addr & 3)
			return (0);
		if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3))
			return (0);
	}
	return (1);
}

/*
 * Load the destination operand's segments into the dst ring, wrapping
 * with JUMP descriptors as needed.  When the transfer has a "slop" tail
 * (length not a multiple of 4), the final bytes are steered into the
 * per-command slop[] staging word instead of the real buffer.  Returns
 * the next free dst index.
 */
static int
hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_operand *dst = &cmd->dst;
	u_int32_t p, l;
	int idx, used = 0, i;

	idx = dma->dsti;
	for (i = 0; i < dst->nsegs - 1; i++) {
		dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
		dma->dstr[idx].l = htole32(HIFN_D_VALID |
		    HIFN_D_MASKDONEIRQ | dst->segs[i].ds_len);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		used++;

		if (++idx == HIFN_D_DST_RSIZE) {
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			idx = 0;
		}
	}

	if (cmd->sloplen == 0) {
		p = dst->segs[i].ds_addr;
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    dst->segs[i].ds_len;
	} else {
		p = sc->sc_dma_physaddr +
		    offsetof(struct hifn_dma, slop[cmd->slopidx]);
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    sizeof(u_int32_t);

		if ((dst->segs[i].ds_len - cmd->sloplen) != 0) {
			dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_MASKDONEIRQ |
			    (dst->segs[i].ds_len - cmd->sloplen));
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			used++;

			if (++idx == HIFN_D_DST_RSIZE) {
				dma->dstr[idx].l = htole32(HIFN_D_VALID |
				    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
				HIFN_DSTR_SYNC(sc, idx,
				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
				idx = 0;
			}
		}
	}
	dma->dstr[idx].p = htole32(p);
	dma->dstr[idx].l = htole32(l);
	HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	used++;

	if (++idx == HIFN_D_DST_RSIZE) {
		dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
		    HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		idx = 0;
	}

	dma->dsti = idx;
	dma->dstu += used;
	return (idx);
}

/*
 * Load the source operand's segments into the src ring; the final
 * segment carries HIFN_D_LAST.  Returns the next free src index.
 */
static int
hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_operand *src = &cmd->src;
	int idx, i;
	u_int32_t last = 0;

	idx = dma->srci;
	for (i = 0; i < src->nsegs; i++) {
		if (i == src->nsegs - 1)
			last = HIFN_D_LAST;

		dma->srcr[idx].p = htole32(src->segs[i].ds_addr);
		dma->srcr[idx].l = htole32(src->segs[i].ds_len |
		    HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last);
		HIFN_SRCR_SYNC(sc, idx,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		if (++idx == HIFN_D_SRC_RSIZE) {
			dma->srcr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
			    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
			idx = 0;
		}
	}
	dma->srci = idx;
	dma->srcu += src->nsegs;
	return (idx);
}

/*
 * bus_dmamap_load callback: stash the segment list and total mapped
 * size into the hifn_operand passed as "arg".
 */
static void
hifn_op_cb(void* arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error)
{
	struct hifn_operand *op = arg;

	KASSERT(nsegs <= MAX_SCATTER,
		("hifn_op_cb: too many DMA segments (%u > %u) "
		 "returned when mapping operand", nsegs, MAX_SCATTER));
	op->mapsize = mapsize;
	op->nsegs = nsegs;
	bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
}

/*
 * Queue one crypto operation to the hardware: map the source (and, if
 * the source is unaligned, copy it into fresh mbufs for a separate
 * destination), build the command descriptor, and kick all four DMA
 * engines.  Returns 0 on success, ERESTART when the rings are full (so
 * the request is requeued by the crypto layer), or an errno on failure.
 */
static int
hifn_crypto(
	struct hifn_softc *sc,
	struct hifn_command *cmd,
	struct cryptop *crp,
	int hint)
{
	struct hifn_dma *dma = sc->sc_dma;
	u_int32_t cmdlen;
	int cmdi, resi, err = 0;

	/*
	 * need 1 cmd, and 1 res
	 *
	 * NB: check this first since it's easy.
	 */
	HIFN_LOCK(sc);
	if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
	    (dma->resu + 1) > HIFN_D_RES_RSIZE) {
#ifdef HIFN_DEBUG
		if (hifn_debug) {
			device_printf(sc->sc_dev,
			    "cmd/result exhaustion, cmdu %u resu %u\n",
			    dma->cmdu, dma->resu);
		}
#endif
		hifnstats.hst_nomem_cr++;
		HIFN_UNLOCK(sc);
		return (ERESTART);
	}

	if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->src_map)) {
		hifnstats.hst_nomem_map++;
		HIFN_UNLOCK(sc);
		return (ENOMEM);
	}

	/* Map the source operand (mbuf chain or uio). */
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
		    cmd->src_m, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) {
			hifnstats.hst_nomem_load++;
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
		    cmd->src_io, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) {
			hifnstats.hst_nomem_load++;
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else {
		err = EINVAL;
		goto err_srcmap1;
	}

	if (hifn_dmamap_aligned(&cmd->src)) {
		/* Operate in place; remember any non-multiple-of-4 tail. */
		cmd->sloplen = cmd->src_mapsize & 3;
		cmd->dst = cmd->src;
	} else {
		if (crp->crp_flags & CRYPTO_F_IOV) {
			err = EINVAL;
			goto err_srcmap;
		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
			int totlen, len;
			struct mbuf *m, *m0, *mlast;

			KASSERT(cmd->dst_m == cmd->src_m,
				("hifn_crypto: dst_m initialized improperly"));
			hifnstats.hst_unaligned++;
			/*
			 * Source is not aligned on a longword boundary.
			 * Copy the data to insure alignment.  If we fail
			 * to allocate mbufs or clusters while doing this
			 * we return ERESTART so the operation is requeued
			 * at the crypto later, but only if there are
			 * ops already posted to the hardware; otherwise we
			 * have no guarantee that we'll be re-entered.
			 */
			totlen = cmd->src_mapsize;
			if (cmd->src_m->m_flags & M_PKTHDR) {
				len = MHLEN;
				MGETHDR(m0, M_DONTWAIT, MT_DATA);
				if (m0 && !m_dup_pkthdr(m0, cmd->src_m, M_DONTWAIT)) {
					m_free(m0);
					m0 = NULL;
				}
			} else {
				len = MLEN;
				MGET(m0, M_DONTWAIT, MT_DATA);
			}
			if (m0 == NULL) {
				hifnstats.hst_nomem_mbuf++;
				err = dma->cmdu ? ERESTART : ENOMEM;
				goto err_srcmap;
			}
			if (totlen >= MINCLSIZE) {
				MCLGET(m0, M_DONTWAIT);
				if ((m0->m_flags & M_EXT) == 0) {
					hifnstats.hst_nomem_mcl++;
					err = dma->cmdu ? ERESTART : ENOMEM;
					m_freem(m0);
					goto err_srcmap;
				}
				len = MCLBYTES;
			}
			totlen -= len;
			m0->m_pkthdr.len = m0->m_len = len;
			mlast = m0;

			/* Grow the chain until it can hold the whole source. */
			while (totlen > 0) {
				MGET(m, M_DONTWAIT, MT_DATA);
				if (m == NULL) {
					hifnstats.hst_nomem_mbuf++;
					err = dma->cmdu ? ERESTART : ENOMEM;
					m_freem(m0);
					goto err_srcmap;
				}
				len = MLEN;
				if (totlen >= MINCLSIZE) {
					MCLGET(m, M_DONTWAIT);
					if ((m->m_flags & M_EXT) == 0) {
						hifnstats.hst_nomem_mcl++;
						err = dma->cmdu ? ERESTART : ENOMEM;
						mlast->m_next = m;
						m_freem(m0);
						goto err_srcmap;
					}
					len = MCLBYTES;
				}

				m->m_len = len;
				m0->m_pkthdr.len += len;
				totlen -= len;

				mlast->m_next = m;
				mlast = m;
			}
			cmd->dst_m = m0;
		}
	}

	/* Map the destination if it is distinct from the source. */
	if (cmd->dst_map == NULL) {
		if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->dst_map)) {
			hifnstats.hst_nomem_map++;
			err = ENOMEM;
			goto err_srcmap;
		}
		if (crp->crp_flags & CRYPTO_F_IMBUF) {
			if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
			    cmd->dst_m, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) {
				hifnstats.hst_nomem_map++;
				err = ENOMEM;
				goto err_dstmap1;
			}
		} else if (crp->crp_flags & CRYPTO_F_IOV) {
			if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
			    cmd->dst_io, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) {
				hifnstats.hst_nomem_load++;
				err = ENOMEM;
				goto err_dstmap1;
			}
		}
	}

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		device_printf(sc->sc_dev,
		    "Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
		    READ_REG_1(sc, HIFN_1_DMA_CSR),
		    READ_REG_1(sc, HIFN_1_DMA_IER),
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu,
		    cmd->src_nsegs, cmd->dst_nsegs);
	}
#endif

	if (cmd->src_map == cmd->dst_map) {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    BUS_DMASYNC_PREREAD);
	}

	/*
	 * need N src, and N dst
	 */
	if ((dma->srcu + cmd->src_nsegs) > HIFN_D_SRC_RSIZE ||
	    (dma->dstu + cmd->dst_nsegs + 1) > HIFN_D_DST_RSIZE) {
#ifdef HIFN_DEBUG
		if (hifn_debug) {
			device_printf(sc->sc_dev,
			    "src/dst exhaustion, srcu %u+%u dstu %u+%u\n",
			    dma->srcu, cmd->src_nsegs,
			    dma->dstu, cmd->dst_nsegs);
		}
#endif
		hifnstats.hst_nomem_sd++;
		err = ERESTART;
		goto err_dstmap;
	}

	/* Claim a command slot (wrapping the ring if needed) and fill it. */
	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	cmdi = dma->cmdi++;
	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);

	/* .p for command/result already set */
	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
	    HIFN_D_MASKDONEIRQ);
	HIFN_CMDR_SYNC(sc, cmdi,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	dma->cmdu++;
	if (sc->sc_c_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
		sc->sc_c_busy = 1;
	}

	/*
	 * We don't worry about missing an interrupt (which a "command wait"
	 * interrupt salvages us from), unless there is more than one command
	 * in the queue.
	 */
	if (dma->cmdu > 1) {
		sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	hifnstats.hst_ipackets++;
	hifnstats.hst_ibytes += cmd->src_mapsize;

	hifn_dmamap_load_src(sc, cmd);
	if (sc->sc_s_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
		sc->sc_s_busy = 1;
	}

	/*
	 * Unlike other descriptors, we don't mask done interrupt from
	 * result descriptor.
	 */
#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("load res\n");
#endif
	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	resi = dma->resi++;
	KASSERT(dma->hifn_commands[resi] == NULL,
		("hifn_crypto: command slot %u busy", resi));
	dma->hifn_commands[resi] = cmd;
	HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
	if ((hint & CRYPTO_HINT_MORE) && sc->sc_curbatch < hifn_maxbatch) {
		/* Batch: suppress the done interrupt, more work is coming. */
		dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
		    HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
		sc->sc_curbatch++;
		if (sc->sc_curbatch > hifnstats.hst_maxbatch)
			hifnstats.hst_maxbatch = sc->sc_curbatch;
		hifnstats.hst_totbatch++;
	} else {
		dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
		    HIFN_D_VALID | HIFN_D_LAST);
		sc->sc_curbatch = 0;
	}
	HIFN_RESR_SYNC(sc, resi,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	dma->resu++;
	if (sc->sc_r_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
		sc->sc_r_busy = 1;
	}

	if (cmd->sloplen)
		cmd->slopidx = resi;

	hifn_dmamap_load_dst(sc, cmd);

	if (sc->sc_d_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
		sc->sc_d_busy = 1;
	}

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		device_printf(sc->sc_dev, "command: stat %8x ier %8x\n",
		    READ_REG_1(sc, HIFN_1_DMA_CSR),
		    READ_REG_1(sc, HIFN_1_DMA_IER));
	}
#endif

	sc->sc_active = 5;
	HIFN_UNLOCK(sc);
	KASSERT(err == 0, ("hifn_crypto: success with error %u", err));
	return (err);		/* success */

err_dstmap:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
err_dstmap1:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
err_srcmap:
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (cmd->src_m != cmd->dst_m)
			m_freem(cmd->dst_m);
	}
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
err_srcmap1:
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	HIFN_UNLOCK(sc);
	return (err);
}

/*
 * Once-a-second watchdog: when sc_active decays to zero (no commands
 * queued for ~5 ticks), disable any DMA engines whose rings are empty.
 */
static void
hifn_tick(void* vsc)
{
	struct hifn_softc *sc = vsc;

	HIFN_LOCK(sc);
	if (sc->sc_active == 0) {
		struct hifn_dma *dma = sc->sc_dma;
		u_int32_t r = 0;

		if (dma->cmdu == 0 && sc->sc_c_busy) {
			sc->sc_c_busy = 0;
			r |= HIFN_DMACSR_C_CTRL_DIS;
		}
		if (dma->srcu == 0 && sc->sc_s_busy) {
			sc->sc_s_busy = 0;
			r |= HIFN_DMACSR_S_CTRL_DIS;
		}
		if (dma->dstu == 0 && sc->sc_d_busy) {
			sc->sc_d_busy = 0;
			r |= HIFN_DMACSR_D_CTRL_DIS;
		}
		if (dma->resu == 0 && sc->sc_r_busy) {
			sc->sc_r_busy = 0;
			r |= HIFN_DMACSR_R_CTRL_DIS;
		}
		if (r)
			WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
	} else
		sc->sc_active--;
	HIFN_UNLOCK(sc);
	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
}

static void
hifn_intr(void *arg)
{
	struct hifn_softc *sc = arg;
	struct hifn_dma *dma;
	u_int32_t dmacsr, restart;
	int i, u;

	dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);

	/* Nothing in the DMA unit interrupted */
	if ((dmacsr & sc->sc_dmaier) == 0)
		return;

	HIFN_LOCK(sc);

	dma = sc->sc_dma;

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		device_printf(sc->sc_dev,
		    "irq: stat %08x ien %08x damier %08x i %d/%d/%d/%d k %d/%d/%d/%d u %d/%d/%d/%d\n",
		    dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_dmaier,
		    dma->cmdi, dma->srci, dma->dsti, dma->resi,
		    dma->cmdk, dma->srck, dma->dstk, dma->resk,
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu);
	}
#endif

	/* Acknowledge only the enabled status bits. */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
dmacsr & sc->sc_dmaier); 2168 2169 if ((sc->sc_flags & HIFN_HAS_PUBLIC) && 2170 (dmacsr & HIFN_DMACSR_PUBDONE)) 2171 WRITE_REG_1(sc, HIFN_1_PUB_STATUS, 2172 READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE); 2173 2174 restart = dmacsr & (HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_OVER); 2175 if (restart) 2176 device_printf(sc->sc_dev, "overrun %x\n", dmacsr); 2177 2178 if (sc->sc_flags & HIFN_IS_7811) { 2179 if (dmacsr & HIFN_DMACSR_ILLR) 2180 device_printf(sc->sc_dev, "illegal read\n"); 2181 if (dmacsr & HIFN_DMACSR_ILLW) 2182 device_printf(sc->sc_dev, "illegal write\n"); 2183 } 2184 2185 restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT | 2186 HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT); 2187 if (restart) { 2188 device_printf(sc->sc_dev, "abort, resetting.\n"); 2189 hifnstats.hst_abort++; 2190 hifn_abort(sc); 2191 HIFN_UNLOCK(sc); 2192 return; 2193 } 2194 2195 if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) { 2196 /* 2197 * If no slots to process and we receive a "waiting on 2198 * command" interrupt, we disable the "waiting on command" 2199 * (by clearing it). 
2200 */ 2201 sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT; 2202 WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier); 2203 } 2204 2205 /* clear the rings */ 2206 i = dma->resk; u = dma->resu; 2207 while (u != 0) { 2208 HIFN_RESR_SYNC(sc, i, 2209 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2210 if (dma->resr[i].l & htole32(HIFN_D_VALID)) { 2211 HIFN_RESR_SYNC(sc, i, 2212 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2213 break; 2214 } 2215 2216 if (i != HIFN_D_RES_RSIZE) { 2217 struct hifn_command *cmd; 2218 u_int8_t *macbuf = NULL; 2219 2220 HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD); 2221 cmd = dma->hifn_commands[i]; 2222 KASSERT(cmd != NULL, 2223 ("hifn_intr: null command slot %u", i)); 2224 dma->hifn_commands[i] = NULL; 2225 2226 if (cmd->base_masks & HIFN_BASE_CMD_MAC) { 2227 macbuf = dma->result_bufs[i]; 2228 macbuf += 12; 2229 } 2230 2231 hifn_callback(sc, cmd, macbuf); 2232 hifnstats.hst_opackets++; 2233 u--; 2234 } 2235 2236 if (++i == (HIFN_D_RES_RSIZE + 1)) 2237 i = 0; 2238 } 2239 dma->resk = i; dma->resu = u; 2240 2241 i = dma->srck; u = dma->srcu; 2242 while (u != 0) { 2243 if (i == HIFN_D_SRC_RSIZE) 2244 i = 0; 2245 HIFN_SRCR_SYNC(sc, i, 2246 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2247 if (dma->srcr[i].l & htole32(HIFN_D_VALID)) { 2248 HIFN_SRCR_SYNC(sc, i, 2249 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2250 break; 2251 } 2252 i++, u--; 2253 } 2254 dma->srck = i; dma->srcu = u; 2255 2256 i = dma->cmdk; u = dma->cmdu; 2257 while (u != 0) { 2258 HIFN_CMDR_SYNC(sc, i, 2259 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2260 if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) { 2261 HIFN_CMDR_SYNC(sc, i, 2262 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2263 break; 2264 } 2265 if (i != HIFN_D_CMD_RSIZE) { 2266 u--; 2267 HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE); 2268 } 2269 if (++i == (HIFN_D_CMD_RSIZE + 1)) 2270 i = 0; 2271 } 2272 dma->cmdk = i; dma->cmdu = u; 2273 2274 HIFN_UNLOCK(sc); 2275 2276 if (sc->sc_needwakeup) { /* XXX check high watermark */ 2277 int 
wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ); 2278 #ifdef HIFN_DEBUG 2279 if (hifn_debug) 2280 device_printf(sc->sc_dev, 2281 "wakeup crypto (%x) u %d/%d/%d/%d\n", 2282 sc->sc_needwakeup, 2283 dma->cmdu, dma->srcu, dma->dstu, dma->resu); 2284 #endif 2285 sc->sc_needwakeup &= ~wakeup; 2286 crypto_unblock(sc->sc_cid, wakeup); 2287 } 2288 } 2289 2290 /* 2291 * Allocate a new 'session' and return an encoded session id. 'sidp' 2292 * contains our registration id, and should contain an encoded session 2293 * id on successful allocation. 2294 */ 2295 static int 2296 hifn_newsession(void *arg, u_int32_t *sidp, struct cryptoini *cri) 2297 { 2298 struct cryptoini *c; 2299 struct hifn_softc *sc = arg; 2300 int mac = 0, cry = 0, sesn; 2301 struct hifn_session *ses = NULL; 2302 2303 KASSERT(sc != NULL, ("hifn_newsession: null softc")); 2304 if (sidp == NULL || cri == NULL || sc == NULL) 2305 return (EINVAL); 2306 2307 if (sc->sc_sessions == NULL) { 2308 ses = sc->sc_sessions = (struct hifn_session *)malloc( 2309 sizeof(*ses), M_DEVBUF, M_NOWAIT); 2310 if (ses == NULL) 2311 return (ENOMEM); 2312 sesn = 0; 2313 sc->sc_nsessions = 1; 2314 } else { 2315 for (sesn = 0; sesn < sc->sc_nsessions; sesn++) { 2316 if (!sc->sc_sessions[sesn].hs_used) { 2317 ses = &sc->sc_sessions[sesn]; 2318 break; 2319 } 2320 } 2321 2322 if (ses == NULL) { 2323 sesn = sc->sc_nsessions; 2324 ses = (struct hifn_session *)malloc((sesn + 1) * 2325 sizeof(*ses), M_DEVBUF, M_NOWAIT); 2326 if (ses == NULL) 2327 return (ENOMEM); 2328 bcopy(sc->sc_sessions, ses, sesn * sizeof(*ses)); 2329 bzero(sc->sc_sessions, sesn * sizeof(*ses)); 2330 free(sc->sc_sessions, M_DEVBUF); 2331 sc->sc_sessions = ses; 2332 ses = &sc->sc_sessions[sesn]; 2333 sc->sc_nsessions++; 2334 } 2335 } 2336 bzero(ses, sizeof(*ses)); 2337 ses->hs_used = 1; 2338 2339 for (c = cri; c != NULL; c = c->cri_next) { 2340 switch (c->cri_alg) { 2341 case CRYPTO_MD5: 2342 case CRYPTO_SHA1: 2343 case CRYPTO_MD5_HMAC: 2344 case CRYPTO_SHA1_HMAC: 
2345 if (mac) 2346 return (EINVAL); 2347 mac = 1; 2348 break; 2349 case CRYPTO_DES_CBC: 2350 case CRYPTO_3DES_CBC: 2351 case CRYPTO_AES_CBC: 2352 /* XXX this may read fewer, does it matter? */ 2353 read_random(ses->hs_iv, 2354 c->cri_alg == CRYPTO_AES_CBC ? 2355 HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH); 2356 /*FALLTHROUGH*/ 2357 case CRYPTO_ARC4: 2358 if (cry) 2359 return (EINVAL); 2360 cry = 1; 2361 break; 2362 default: 2363 return (EINVAL); 2364 } 2365 } 2366 if (mac == 0 && cry == 0) 2367 return (EINVAL); 2368 2369 *sidp = HIFN_SID(device_get_unit(sc->sc_dev), sesn); 2370 2371 return (0); 2372 } 2373 2374 /* 2375 * Deallocate a session. 2376 * XXX this routine should run a zero'd mac/encrypt key into context ram. 2377 * XXX to blow away any keys already stored there. 2378 */ 2379 static int 2380 hifn_freesession(void *arg, u_int64_t tid) 2381 { 2382 struct hifn_softc *sc = arg; 2383 int session; 2384 u_int32_t sid = CRYPTO_SESID2LID(tid); 2385 2386 KASSERT(sc != NULL, ("hifn_freesession: null softc")); 2387 if (sc == NULL) 2388 return (EINVAL); 2389 2390 session = HIFN_SESSION(sid); 2391 if (session >= sc->sc_nsessions) 2392 return (EINVAL); 2393 2394 bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session])); 2395 return (0); 2396 } 2397 2398 static int 2399 hifn_process(void *arg, struct cryptop *crp, int hint) 2400 { 2401 struct hifn_softc *sc = arg; 2402 struct hifn_command *cmd = NULL; 2403 int session, err, ivlen; 2404 struct cryptodesc *crd1, *crd2, *maccrd, *enccrd; 2405 2406 if (crp == NULL || crp->crp_callback == NULL) { 2407 hifnstats.hst_invalid++; 2408 return (EINVAL); 2409 } 2410 session = HIFN_SESSION(crp->crp_sid); 2411 2412 if (sc == NULL || session >= sc->sc_nsessions) { 2413 err = EINVAL; 2414 goto errout; 2415 } 2416 2417 cmd = malloc(sizeof(struct hifn_command), M_DEVBUF, M_NOWAIT | M_ZERO); 2418 if (cmd == NULL) { 2419 hifnstats.hst_nomem++; 2420 err = ENOMEM; 2421 goto errout; 2422 } 2423 2424 if (crp->crp_flags & CRYPTO_F_IMBUF) { 
		/* mbuf request: in-place unless a new dst chain is needed. */
		cmd->src_m = (struct mbuf *)crp->crp_buf;
		cmd->dst_m = (struct mbuf *)crp->crp_buf;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		cmd->src_io = (struct uio *)crp->crp_buf;
		cmd->dst_io = (struct uio *)crp->crp_buf;
	} else {
		err = EINVAL;
		goto errout;	/* XXX we don't handle contiguous buffers! */
	}

	crd1 = crp->crp_desc;
	if (crd1 == NULL) {
		err = EINVAL;
		goto errout;
	}
	crd2 = crd1->crd_next;

	/*
	 * Classify the descriptor chain: a lone MAC, a lone cipher, or a
	 * MAC+cipher pair in the one ordering the part supports
	 * (MAC-then-decrypt, or encrypt-then-MAC).
	 */
	if (crd2 == NULL) {
		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1 ||
		    crd1->crd_alg == CRYPTO_MD5) {
			maccrd = crd1;
			enccrd = NULL;
		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC ||
		    crd1->crd_alg == CRYPTO_ARC4) {
			if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
				cmd->base_masks |= HIFN_BASE_CMD_DECODE;
			maccrd = NULL;
			enccrd = crd1;
		} else {
			err = EINVAL;
			goto errout;
		}
	} else {
		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
		    crd1->crd_alg == CRYPTO_MD5 ||
		    crd1->crd_alg == CRYPTO_SHA1) &&
		    (crd2->crd_alg == CRYPTO_DES_CBC ||
		    crd2->crd_alg == CRYPTO_3DES_CBC ||
		    crd2->crd_alg == CRYPTO_AES_CBC ||
		    crd2->crd_alg == CRYPTO_ARC4) &&
		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
			cmd->base_masks = HIFN_BASE_CMD_DECODE;
			maccrd = crd1;
			enccrd = crd2;
		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_ARC4 ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC) &&
		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
		    crd2->crd_alg == CRYPTO_SHA1_HMAC ||
		    crd2->crd_alg == CRYPTO_MD5 ||
		    crd2->crd_alg == CRYPTO_SHA1) &&
		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
			enccrd = crd1;
			maccrd = crd2;
		} else {
			/*
			 * We cannot order the 7751 as requested
			 */
			err = EINVAL;
			goto errout;
		}
	}

	if (enccrd) {
		cmd->enccrd = enccrd;
		cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
		switch (enccrd->crd_alg) {
		case CRYPTO_ARC4:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
			break;
		case CRYPTO_DES_CBC:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
			    HIFN_CRYPT_CMD_MODE_CBC |
			    HIFN_CRYPT_CMD_NEW_IV;
			break;
		case CRYPTO_3DES_CBC:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
			    HIFN_CRYPT_CMD_MODE_CBC |
			    HIFN_CRYPT_CMD_NEW_IV;
			break;
		case CRYPTO_AES_CBC:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
			    HIFN_CRYPT_CMD_MODE_CBC |
			    HIFN_CRYPT_CMD_NEW_IV;
			break;
		default:
			err = EINVAL;
			goto errout;
		}
		/* ARC4 is a stream cipher and takes no IV. */
		if (enccrd->crd_alg != CRYPTO_ARC4) {
			ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
			    HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			if (enccrd->crd_flags & CRD_F_ENCRYPT) {
				/* Explicit IV from caller, else session IV. */
				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
					bcopy(enccrd->crd_iv, cmd->iv, ivlen);
				else
					bcopy(sc->sc_sessions[session].hs_iv,
					    cmd->iv, ivlen);

				/*
				 * Unless the caller placed the IV in the
				 * buffer already, write it at crd_inject.
				 */
				if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
				    == 0) {
					if (crp->crp_flags & CRYPTO_F_IMBUF)
						m_copyback(cmd->src_m,
						    enccrd->crd_inject,
						    ivlen, cmd->iv);
					else if (crp->crp_flags & CRYPTO_F_IOV)
						cuio_copyback(cmd->src_io,
						    enccrd->crd_inject,
						    ivlen, cmd->iv);
				}
			} else {
				/* Decrypt: fetch the IV from the buffer. */
				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
					bcopy(enccrd->crd_iv, cmd->iv, ivlen);
				else if (crp->crp_flags & CRYPTO_F_IMBUF)
					m_copydata(cmd->src_m,
					    enccrd->crd_inject, ivlen, cmd->iv);
				else if (crp->crp_flags & CRYPTO_F_IOV)
					cuio_copydata(cmd->src_io,
					    enccrd->crd_inject, ivlen, cmd->iv);
			}
		}

		if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
			cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
		cmd->ck = enccrd->crd_key;
		cmd->cklen = enccrd->crd_klen >> 3;
		/*
		 * NOTE(review): this unconditional NEW_KEY makes the
		 * CRD_F_KEY_EXPLICIT test above redundant — presumably
		 * the key is always reloaded per request; verify intent.
		 */
		cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;

		/*
		 * Need to specify the size for the AES key in the masks.
		 */
		if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
		    HIFN_CRYPT_CMD_ALG_AES) {
			switch (cmd->cklen) {
			case 16:
				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
				break;
			case 24:
				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
				break;
			case 32:
				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
				break;
			default:
				err = EINVAL;
				goto errout;
			}
		}
	}

	if (maccrd) {
		cmd->maccrd = maccrd;
		cmd->base_masks |= HIFN_BASE_CMD_MAC;

		switch (maccrd->crd_alg) {
		case CRYPTO_MD5:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
			    HIFN_MAC_CMD_POS_IPSEC;
			break;
		case CRYPTO_MD5_HMAC:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
			break;
		case CRYPTO_SHA1:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
			    HIFN_MAC_CMD_POS_IPSEC;
			break;
		case CRYPTO_SHA1_HMAC:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
			break;
		}

		/* HMAC: copy key and zero-pad to the part's key length. */
		if (maccrd->crd_alg == CRYPTO_SHA1_HMAC ||
		    maccrd->crd_alg == CRYPTO_MD5_HMAC) {
			cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
			bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3);
			bzero(cmd->mac + (maccrd->crd_klen >> 3),
			    HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
		}
	}

	cmd->crp = crp;
	cmd->session_num = session;
	cmd->softc = sc;

	err = hifn_crypto(sc, cmd, crp, hint);
	if (!err) {
		return 0;
	} else if (err == ERESTART) {
		/*
		 * There weren't enough resources to dispatch the request
		 * to the part.  Notify the caller so they'll requeue this
		 * request and resubmit it again soon.
		 */
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev, "requeue request\n");
#endif
		free(cmd, M_DEVBUF);
		sc->sc_needwakeup |= CRYPTO_SYMQ;
		return (err);
	}

errout:
	if (cmd != NULL)
		free(cmd, M_DEVBUF);
	if (err == EINVAL)
		hifnstats.hst_invalid++;
	else
		hifnstats.hst_nomem++;
	crp->crp_etype = err;
	crypto_done(crp);
	return (err);
}

/*
 * Recover from a DMA abort: complete or fail every outstanding command
 * on the result ring, then reset and reinitialize the board.  Called
 * from hifn_intr with the softc lock held.
 */
static void
hifn_abort(struct hifn_softc *sc)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_command *cmd;
	struct cryptop *crp;
	int i, u;

	i = dma->resk; u = dma->resu;
	while (u != 0) {
		cmd = dma->hifn_commands[i];
		KASSERT(cmd != NULL, ("hifn_abort: null command slot %u", i));
		dma->hifn_commands[i] = NULL;
		crp = cmd->crp;

		if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
			/* Salvage what we can. */
			u_int8_t *macbuf;

			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
				macbuf = dma->result_bufs[i];
				macbuf += 12;
			} else
				macbuf = NULL;
			hifnstats.hst_opackets++;
			hifn_callback(sc, cmd, macbuf);
		} else {
			/* Command never completed: tear it down and fail. */
			if (cmd->src_map == cmd->dst_map) {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			} else {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
				    BUS_DMASYNC_POSTREAD);
			}

			if (cmd->src_m != cmd->dst_m) {
				m_freem(cmd->src_m);
				crp->crp_buf = (caddr_t)cmd->dst_m;
			}

			/* non-shared buffers cannot be restarted */
			if (cmd->src_map != cmd->dst_map) {
				/*
				 * XXX should be EAGAIN, delayed until
				 * after the reset.
				 */
				crp->crp_etype = ENOMEM;
				bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
				bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
			} else
				crp->crp_etype = ENOMEM;

			bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
			bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);

			free(cmd, M_DEVBUF);
			if (crp->crp_etype != EAGAIN)
				crypto_done(crp);
		}

		if (++i == HIFN_D_RES_RSIZE)
			i = 0;
		u--;
	}
	dma->resk = i; dma->resu = u;

	/* Bring the part back to a clean state. */
	hifn_reset_board(sc, 1);
	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);
}

/*
 * Completion path for one command: sync/unload its DMA maps, copy the
 * "slop" tail bytes and MAC result back into the caller's buffer, save
 * the next IV for the session, and hand the request back via
 * crypto_done.  Frees cmd.
 */
static void
hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct cryptop *crp = cmd->crp;
	struct cryptodesc *crd;
	struct mbuf *m;
	int totlen, i, u, ivlen;

	if (cmd->src_map == cmd->dst_map) {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    BUS_DMASYNC_POSTREAD);
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (cmd->src_m != cmd->dst_m) {
			/*
			 * A separate dst chain was allocated: trim it to
			 * the source length, hand it to the caller, and
			 * release the original chain.
			 */
			crp->crp_buf = (caddr_t)cmd->dst_m;
			totlen = cmd->src_mapsize;
			for (m = cmd->dst_m; m != NULL; m = m->m_next) {
				if (totlen < m->m_len) {
					m->m_len = totlen;
					totlen = 0;
				} else
					totlen -= m->m_len;
			}
			cmd->dst_m->m_pkthdr.len = cmd->src_m->m_pkthdr.len;
			m_freem(cmd->src_m);
		}
	}

	/* Copy back the tail bytes that went through the slop buffer. */
	if (cmd->sloplen != 0) {
		if (crp->crp_flags & CRYPTO_F_IMBUF)
			m_copyback((struct mbuf *)crp->crp_buf,
			    cmd->src_mapsize - cmd->sloplen,
			    cmd->sloplen, (caddr_t)&dma->slop[cmd->slopidx]);
		else if (crp->crp_flags & CRYPTO_F_IOV)
			cuio_copyback((struct uio *)crp->crp_buf,
			    cmd->src_mapsize - cmd->sloplen,
			    cmd->sloplen, (caddr_t)&dma->slop[cmd->slopidx]);
	}

	/* Reap completed destination descriptors. */
	i = dma->dstk; u = dma->dstu;
	while (u != 0) {
		if (i == HIFN_D_DST_RSIZE)
			i = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		i++, u--;
	}
	dma->dstk = i; dma->dstu = u;

	hifnstats.hst_obytes += cmd->dst_mapsize;

	/*
	 * For encrypt operations, save the last cipher block as the next
	 * IV for this session (CBC chaining across requests).
	 */
	if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
	    HIFN_BASE_CMD_CRYPT) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (crd->crd_alg != CRYPTO_DES_CBC &&
			    crd->crd_alg != CRYPTO_3DES_CBC &&
			    crd->crd_alg != CRYPTO_AES_CBC)
				continue;
			ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
			    HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copydata((struct mbuf *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - ivlen, ivlen,
				    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			else if (crp->crp_flags & CRYPTO_F_IOV) {
				cuio_copydata((struct uio *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - ivlen, ivlen,
				    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			}
			break;
		}
	}

	/* Copy the MAC result to where the descriptor asks for it. */
	if (macbuf != NULL) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			int len;

			if (crd->crd_alg == CRYPTO_MD5)
				len = 16;
			else if (crd->crd_alg == CRYPTO_SHA1)
				len = 20;
			else if (crd->crd_alg == CRYPTO_MD5_HMAC ||
			    crd->crd_alg == CRYPTO_SHA1_HMAC)
				len = 12;	/* truncated HMAC */
			else
				continue;

			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copyback((struct mbuf *)crp->crp_buf,
				    crd->crd_inject, len, macbuf);
			else if ((crp->crp_flags & CRYPTO_F_IOV) && crp->crp_mac)
				bcopy((caddr_t)macbuf, crp->crp_mac, len);
			break;
		}
	}

	if (cmd->src_map != cmd->dst_map) {
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
	}
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	free(cmd, M_DEVBUF);
	crypto_done(crp);
}

/*
 * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
 * and Group 1 registers; avoid conditions that could create
 * burst writes by doing a read in between the writes.
 *
 * NB: The read we interpose is always to the same register;
 * we do this because reading from an arbitrary (e.g. last)
 * register may not always work.
 */
static void
hifn_write_reg_0(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
{
	if (sc->sc_flags & HIFN_IS_7811) {
		/* Consecutive-register writes would burst; break it up. */
		if (sc->sc_bar0_lastreg == reg - 4)
			bus_space_read_4(sc->sc_st0, sc->sc_sh0, HIFN_0_PUCNFG);
		sc->sc_bar0_lastreg = reg;
	}
	bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val);
}

/* BAR1 flavor of the 7811 burst-write workaround above. */
static void
hifn_write_reg_1(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
{
	if (sc->sc_flags & HIFN_IS_7811) {
		if (sc->sc_bar1_lastreg == reg - 4)
			bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID);
		sc->sc_bar1_lastreg = reg;
	}
	bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val);
}