1 /* $OpenBSD: hifn7751.c,v 1.120 2002/05/17 00:33:34 deraadt Exp $ */ 2 3 /* 4 * Invertex AEON / Hifn 7751 driver 5 * Copyright (c) 1999 Invertex Inc. All rights reserved. 6 * Copyright (c) 1999 Theo de Raadt 7 * Copyright (c) 2000-2001 Network Security Technologies, Inc. 8 * http://www.netsec.net 9 * Copyright (c) 2003 Hifn Inc. 10 * 11 * This driver is based on a previous driver by Invertex, for which they 12 * requested: Please send any comments, feedback, bug-fixes, or feature 13 * requests to software@invertex.com. 14 * 15 * Redistribution and use in source and binary forms, with or without 16 * modification, are permitted provided that the following conditions 17 * are met: 18 * 19 * 1. Redistributions of source code must retain the above copyright 20 * notice, this list of conditions and the following disclaimer. 21 * 2. Redistributions in binary form must reproduce the above copyright 22 * notice, this list of conditions and the following disclaimer in the 23 * documentation and/or other materials provided with the distribution. 24 * 3. The name of the author may not be used to endorse or promote products 25 * derived from this software without specific prior written permission. 26 * 27 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 28 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 29 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 30 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 31 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 32 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 33 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 34 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 35 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 36 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Driver for various Hifn encryption processors.
 */
#include "opt_hifn.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/clock.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <opencrypto/cryptodev.h>
#include <sys/random.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#ifdef HIFN_RNDTEST
#include <dev/rndtest/rndtest.h>
#endif
#include <dev/hifn/hifn7751reg.h>
#include <dev/hifn/hifn7751var.h>

/*
 * Prototypes and count for the pci_device structure
 */
static int hifn_probe(device_t);
static int hifn_attach(device_t);
static int hifn_detach(device_t);
static int hifn_suspend(device_t);
static int hifn_resume(device_t);
static void hifn_shutdown(device_t);

/* newbus method table: device interface plus generic bus pass-throughs */
static device_method_t hifn_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		hifn_probe),
	DEVMETHOD(device_attach,	hifn_attach),
	DEVMETHOD(device_detach,	hifn_detach),
	DEVMETHOD(device_suspend,	hifn_suspend),
	DEVMETHOD(device_resume,	hifn_resume),
	DEVMETHOD(device_shutdown,	hifn_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	{ 0, 0 }
};

static driver_t hifn_driver = {
	"hifn",
	hifn_methods,
	sizeof (struct hifn_softc)
};

static devclass_t hifn_devclass;

DRIVER_MODULE(hifn, pci, hifn_driver, hifn_devclass, 0, 0);
MODULE_DEPEND(hifn, crypto, 1, 1, 1);
#ifdef HIFN_RNDTEST
MODULE_DEPEND(hifn, rndtest, 1, 1, 1);
#endif

/* Forward declarations for the driver internals. */
static void hifn_reset_board(struct hifn_softc *, int);
static void hifn_reset_puc(struct hifn_softc *);
static void hifn_puc_wait(struct hifn_softc *);
static int hifn_enable_crypto(struct hifn_softc *);
static void hifn_set_retry(struct hifn_softc *sc);
static void hifn_init_dma(struct hifn_softc *);
static void hifn_init_pci_registers(struct hifn_softc *);
static int hifn_sramsize(struct hifn_softc *);
static int hifn_dramsize(struct hifn_softc *);
static int hifn_ramtype(struct hifn_softc *);
static void hifn_sessions(struct hifn_softc *);
static void hifn_intr(void *);
static u_int hifn_write_command(struct hifn_command *, u_int8_t *);
static u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
static int hifn_newsession(void *, u_int32_t *, struct cryptoini *);
static int hifn_freesession(void *, u_int64_t);
static int hifn_process(void *, struct cryptop *, int);
static void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *);
static int hifn_crypto(struct hifn_softc *, struct hifn_command *, struct cryptop *, int);
static int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
static int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
static int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *);
static int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *);
static int hifn_init_pubrng(struct hifn_softc *);
static void hifn_rng(void *);
static void hifn_tick(void *);
static void hifn_abort(struct hifn_softc *);
static void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *);

static void hifn_write_reg_0(struct hifn_softc *, bus_size_t, u_int32_t);
static void hifn_write_reg_1(struct hifn_softc *, bus_size_t, u_int32_t);

/*
 * Read a 32-bit register in BAR0.  Any read invalidates the
 * "last register written" cache (sc_bar0_lastreg) consulted by
 * hifn_write_reg_0 -- NOTE(review): presumably a workaround for a
 * chip posted-write quirk; confirm against hifn_write_reg_0's body.
 */
static __inline__ u_int32_t
READ_REG_0(struct hifn_softc *sc, bus_size_t reg)
{
	u_int32_t v = bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg);
	sc->sc_bar0_lastreg = (bus_size_t) -1;	/* invalidate write cache */
	return (v);
}
#define WRITE_REG_0(sc, reg, val) hifn_write_reg_0(sc, reg, val)

/* Same as READ_REG_0, but for BAR1 (DMA engine registers). */
static __inline__ u_int32_t
READ_REG_1(struct hifn_softc *sc, bus_size_t reg)
{
	u_int32_t v = bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg);
	sc->sc_bar1_lastreg = (bus_size_t) -1;	/* invalidate write cache */
	return (v);
}
#define WRITE_REG_1(sc, reg, val) hifn_write_reg_1(sc, reg, val)

/* sysctl tree: hw.hifn.* tunables and statistics */
SYSCTL_NODE(_hw, OID_AUTO, hifn, CTLFLAG_RD, 0, "Hifn driver parameters");

#ifdef HIFN_DEBUG
static int hifn_debug = 0;
SYSCTL_INT(_hw_hifn, OID_AUTO, debug, CTLFLAG_RW, &hifn_debug,
	    0, "control debugging msgs");
#endif

static struct hifn_stats hifnstats;
SYSCTL_STRUCT(_hw_hifn, OID_AUTO, stats, CTLFLAG_RD, &hifnstats,
	    hifn_stats, "driver statistics");
static int hifn_maxbatch = 1;
SYSCTL_INT(_hw_hifn, OID_AUTO, maxbatch, CTLFLAG_RW, &hifn_maxbatch,
	    0, "max ops to batch w/o interrupt");

/*
 * Probe for a supported device.  The PCI vendor and device
 * IDs are used to detect devices we know how to handle.
 */
static int
hifn_probe(device_t dev)
{
	/* Invertex-branded AEON (original part this driver supported) */
	if (pci_get_vendor(dev) == PCI_VENDOR_INVERTEX &&
	    pci_get_device(dev) == PCI_PRODUCT_INVERTEX_AEON)
		return (0);
	/* Hifn-branded parts: 7751, 7951, 7955, 7956, 7811 */
	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
	    (pci_get_device(dev) == PCI_PRODUCT_HIFN_7751 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7956 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7811))
		return (0);
	/* NetSec-branded 7751 */
	if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
	    pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751)
		return (0);
	return (ENXIO);
}

/*
 * bus_dmamap_load callback: record the bus address of the first
 * DMA segment in *arg.  Only the first segment is recorded; the
 * descriptor-ring allocation is expected to be one contiguous
 * segment (error/nseg are not inspected here).
 */
static void
hifn_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr = (bus_addr_t*) arg;
	*paddr = segs->ds_addr;
}

/*
 * Map the device's PCI vendor/product id to a printable part name.
 */
static const char*
hifn_partname(struct hifn_softc *sc)
{
	/* XXX sprintf numbers when not decoded */
	switch (pci_get_vendor(sc->sc_dev)) {
	case PCI_VENDOR_HIFN:
		switch (pci_get_device(sc->sc_dev)) {
		case PCI_PRODUCT_HIFN_6500:	return "Hifn 6500";
		case PCI_PRODUCT_HIFN_7751:	return "Hifn 7751";
		case PCI_PRODUCT_HIFN_7811:	return "Hifn 7811";
		case PCI_PRODUCT_HIFN_7951:	return "Hifn 7951";
		case PCI_PRODUCT_HIFN_7955:	return "Hifn 7955";
		case PCI_PRODUCT_HIFN_7956:	return "Hifn 7956";
		}
		return "Hifn unknown-part";
	case PCI_VENDOR_INVERTEX:
		switch (pci_get_device(sc->sc_dev)) {
		case PCI_PRODUCT_INVERTEX_AEON:	return "Invertex AEON";
		}
		return "Invertex unknown-part";
	case PCI_VENDOR_NETSEC:
		switch (pci_get_device(sc->sc_dev)) {
		case PCI_PRODUCT_NETSEC_7751:	return "NetSec 7751";
		}
		return "NetSec unknown-part";
	}
	return "Unknown-vendor unknown-part";
}

/*
 * Default entropy sink, used when the rndtest module is not
 * attached: feed RNG output straight to random_harvest().
 */
static void
default_harvest(struct rndtest_state *rsp, void *buf, u_int count)
{
	random_harvest(buf, count, count*NBBY, 0, RANDOM_PURE);
}

/*
 * Attach an
interface that successfully probed.
 */
static int
hifn_attach(device_t dev)
{
	struct hifn_softc *sc = device_get_softc(dev);
	u_int32_t cmd;
	caddr_t kva;		/* KVA of the shared DMA descriptor area */
	int rseg, rid;
	char rbase;		/* 'K' or 'M' for the ram-size banner */
	u_int16_t ena, rev;

	KASSERT(sc != NULL, ("hifn_attach: null software carrier!"));
	bzero(sc, sizeof (*sc));
	sc->sc_dev = dev;

	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), "hifn driver", MTX_DEF);

	/* XXX handle power management */

	/*
	 * The 7951 and 795x have a random number generator and
	 * public key support; note this.
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
	    (pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7956))
		sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC;
	/*
	 * The 7811 has a random number generator and
	 * we also note its identity because of some quirks
	 * (see e.g. hifn_set_retry and the ILLW/ILLR interrupt bits).
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
	    pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)
		sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG;

	/*
	 * The 795x parts support AES.
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
	    (pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7956))
		sc->sc_flags |= HIFN_IS_7956 | HIFN_HAS_AES;

	/*
	 * Configure support for memory-mapped access to
	 * registers and for DMA operations.  Read back the
	 * command register to verify the enables stuck.
	 */
#define PCIM_ENA (PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN)
	cmd = pci_read_config(dev, PCIR_COMMAND, 4);
	cmd |= PCIM_ENA;
	pci_write_config(dev, PCIR_COMMAND, cmd, 4);
	cmd = pci_read_config(dev, PCIR_COMMAND, 4);
	if ((cmd & PCIM_ENA) != PCIM_ENA) {
		device_printf(dev, "failed to enable %s\n",
		    (cmd & PCIM_ENA) == 0 ?
		    "memory mapping & bus mastering" :
		    (cmd & PCIM_CMD_MEMEN) == 0 ?
		    "memory mapping" : "bus mastering");
		goto fail_pci;
	}
#undef PCIM_ENA

	/*
	 * Setup PCI resources.  Note that we record the bus
	 * tag and handle for each register mapping, this is
	 * used by the READ_REG_0, WRITE_REG_0, READ_REG_1,
	 * and WRITE_REG_1 macros throughout the driver.
	 */
	rid = HIFN_BAR0;
	sc->sc_bar0res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_bar0res == NULL) {
		device_printf(dev, "cannot map bar%d register space\n", 0);
		goto fail_pci;
	}
	sc->sc_st0 = rman_get_bustag(sc->sc_bar0res);
	sc->sc_sh0 = rman_get_bushandle(sc->sc_bar0res);
	sc->sc_bar0_lastreg = (bus_size_t) -1;

	rid = HIFN_BAR1;
	sc->sc_bar1res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_bar1res == NULL) {
		device_printf(dev, "cannot map bar%d register space\n", 1);
		goto fail_io0;
	}
	sc->sc_st1 = rman_get_bustag(sc->sc_bar1res);
	sc->sc_sh1 = rman_get_bushandle(sc->sc_bar1res);
	sc->sc_bar1_lastreg = (bus_size_t) -1;

	hifn_set_retry(sc);

	/*
	 * Setup the area where the Hifn DMA's descriptors
	 * and associated data structures.  One tag/map/buffer
	 * holds the whole struct hifn_dma; its bus address is
	 * captured in sc_dma_physaddr by hifn_dmamap_cb.
	 */
	if (bus_dma_tag_create(NULL,			/* parent */
			       1, 0,			/* alignment,boundary */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       HIFN_MAX_DMALEN,		/* maxsize */
			       MAX_SCATTER,		/* nsegments */
			       HIFN_MAX_SEGLEN,		/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       NULL,			/* lockfunc */
			       NULL,			/* lockarg */
			       &sc->sc_dmat)) {
		device_printf(dev, "cannot allocate DMA tag\n");
		goto fail_io1;
	}
	if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
		device_printf(dev, "cannot create dma map\n");
		bus_dma_tag_destroy(sc->sc_dmat);
		goto fail_io1;
	}
	if (bus_dmamem_alloc(sc->sc_dmat, (void**) &kva, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
		device_printf(dev, "cannot alloc dma buffer\n");
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
		bus_dma_tag_destroy(sc->sc_dmat);
		goto fail_io1;
	}
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, kva,
	    sizeof (*sc->sc_dma),
	    hifn_dmamap_cb, &sc->sc_dma_physaddr,
	    BUS_DMA_NOWAIT)) {
		device_printf(dev, "cannot load dma map\n");
		bus_dmamem_free(sc->sc_dmat, kva, sc->sc_dmamap);
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
		bus_dma_tag_destroy(sc->sc_dmat);
		goto fail_io1;
	}
	sc->sc_dma = (struct hifn_dma *)kva;
	bzero(sc->sc_dma, sizeof(*sc->sc_dma));

	KASSERT(sc->sc_st0 != 0, ("hifn_attach: null bar0 tag!"));
	KASSERT(sc->sc_sh0 != 0, ("hifn_attach: null bar0 handle!"));
	KASSERT(sc->sc_st1 != 0, ("hifn_attach: null bar1 tag!"));
	KASSERT(sc->sc_sh1 != 0, ("hifn_attach: null bar1 handle!"));

	/*
	 * Reset the board and do the ``secret handshake''
	 * to enable the crypto support.  Then complete the
	 * initialization procedure by setting up the interrupt
	 * and hooking in to the system crypto support so we'll
	 * get used for system services like the crypto device,
	 * IPsec, RNG device, etc.
	 */
	hifn_reset_board(sc, 0);

	if (hifn_enable_crypto(sc) != 0) {
		device_printf(dev, "crypto enabling failed\n");
		goto fail_mem;
	}
	hifn_reset_puc(sc);

	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);

	/* XXX can't dynamically determine ram type for 795x; force dram */
	if (sc->sc_flags & HIFN_IS_7956)
		sc->sc_drammodel = 1;
	else if (hifn_ramtype(sc))
		goto fail_mem;

	if (sc->sc_drammodel == 0)
		hifn_sramsize(sc);
	else
		hifn_dramsize(sc);

	/*
	 * Workaround for NetSec 7751 rev A: half ram size because two
	 * of the address lines were left floating
	 */
	if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
	    pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751 &&
	    pci_get_revid(dev) == 0x61)	/*XXX???*/
		sc->sc_ramsize >>= 1;

	/*
	 * Arrange the interrupt line.
	 */
	rid = 0;
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE|RF_ACTIVE);
	if (sc->sc_irq == NULL) {
		device_printf(dev, "could not map interrupt\n");
		goto fail_mem;
	}
	/*
	 * NB: Network code assumes we are blocked with splimp()
	 *     so make sure the IRQ is marked appropriately.
	 */
	if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    hifn_intr, sc, &sc->sc_intrhand)) {
		device_printf(dev, "could not setup interrupt\n");
		goto fail_intr2;
	}

	hifn_sessions(sc);

	/*
	 * NB: Keep only the low 16 bits; this masks the chip id
	 *     from the 7951.
	 */
	rev = READ_REG_1(sc, HIFN_1_REVID) & 0xffff;

	/* Pretty-print the ram size in KB or MB for the banner. */
	rseg = sc->sc_ramsize / 1024;
	rbase = 'K';
	if (sc->sc_ramsize >= (1024 * 1024)) {
		rbase = 'M';
		rseg /= 1024;
	}
	device_printf(sc->sc_dev, "%s, rev %u, %d%cB %cram, %u sessions\n",
	    hifn_partname(sc), rev,
	    rseg, rbase, sc->sc_drammodel ? 'd' : 's',
	    sc->sc_maxses);

	sc->sc_cid = crypto_get_driverid(0);
	if (sc->sc_cid < 0) {
		device_printf(dev, "could not get crypto driver id\n");
		goto fail_intr;
	}

	/*
	 * Register algorithms according to the crypto enable level
	 * read back from the chip (PUSTAT); level 2 adds the strong
	 * ciphers on top of the level-1 set.
	 */
	WRITE_REG_0(sc, HIFN_0_PUCNFG,
	    READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
	ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;

	switch (ena) {
	case HIFN_PUSTAT_ENA_2:
		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		if (sc->sc_flags & HIFN_HAS_AES)
			crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0,
			    hifn_newsession, hifn_freesession,
			    hifn_process, sc);
		/*FALLTHROUGH*/
	case HIFN_PUSTAT_ENA_1:
		crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0,
		    hifn_newsession, hifn_freesession, hifn_process, sc);
		break;
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
		hifn_init_pubrng(sc);

	callout_init(&sc->sc_tickto, CALLOUT_MPSAFE);
	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);

	return (0);

	/* Unwind in reverse order of acquisition. */
fail_intr:
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
fail_intr2:
	/* XXX don't store rid */
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
fail_mem:
	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
	bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
	bus_dma_tag_destroy(sc->sc_dmat);

	/* Turn off DMA polling */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
fail_io1:
	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res);
fail_io0:
	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res);
fail_pci:
	mtx_destroy(&sc->sc_mtx);
	return (ENXIO);
}

/*
 * Detach an interface that successfully probed.
 */
static int
hifn_detach(device_t dev)
{
	struct hifn_softc *sc = device_get_softc(dev);

	KASSERT(sc != NULL, ("hifn_detach: null software carrier!"));

	/* disable interrupts */
	WRITE_REG_1(sc, HIFN_1_DMA_IER, 0);

	/*XXX other resources */
	callout_stop(&sc->sc_tickto);
	callout_stop(&sc->sc_rngto);
#ifdef HIFN_RNDTEST
	if (sc->sc_rndtest)
		rndtest_detach(sc->sc_rndtest);
#endif

	/* Turn off DMA polling */
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);

	crypto_unregister_all(sc->sc_cid);

	bus_generic_detach(dev);	/*XXX should be no children, right? */

	bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
	/* XXX don't store rid */
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);

	bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
	bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
	bus_dma_tag_destroy(sc->sc_dmat);

	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res);
	bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res);

	mtx_destroy(&sc->sc_mtx);

	return (0);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
hifn_shutdown(device_t dev)
{
#ifdef notyet
	hifn_stop(device_get_softc(dev));
#endif
}

/*
 * Device suspend routine.  Stop the interface and save some PCI
 * settings in case the BIOS doesn't restore them properly on
 * resume.
 */
static int
hifn_suspend(device_t dev)
{
	struct hifn_softc *sc = device_get_softc(dev);
#ifdef notyet
	int i;

	hifn_stop(sc);
	for (i = 0; i < 5; i++)
		sc->saved_maps[i] = pci_read_config(dev, PCIR_BAR(i), 4);
	sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
	sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
	sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
#endif
	/* NB: only the flag is maintained today; the save/restore
	 * logic above is stubbed out under "notyet". */
	sc->sc_suspended = 1;

	return (0);
}

/*
 * Device resume routine.  Restore some PCI settings in case the BIOS
 * doesn't, re-enable busmastering, and restart the interface if
 * appropriate.
 */
static int
hifn_resume(device_t dev)
{
	struct hifn_softc *sc = device_get_softc(dev);
#ifdef notyet
	int i;

	/* better way to do this? */
	for (i = 0; i < 5; i++)
		pci_write_config(dev, PCIR_BAR(i), sc->saved_maps[i], 4);
	pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4);
	pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1);
	pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1);
	pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1);

	/* reenable busmastering */
	pci_enable_busmaster(dev);
	pci_enable_io(dev, HIFN_RES);

	/* reinitialize interface if necessary */
	if (ifp->if_flags & IFF_UP)
		rl_init(sc);
#endif
	sc->sc_suspended = 0;

	return (0);
}

/*
 * Initialize the public-key and RNG engines (where present) and
 * start the periodic RNG harvest callout.  Returns 0 on success,
 * 1 if the public key unit fails to come out of reset.
 */
static int
hifn_init_pubrng(struct hifn_softc *sc)
{
	u_int32_t r;
	int i;

#ifdef HIFN_RNDTEST
	sc->sc_rndtest = rndtest_attach(sc->sc_dev);
	if (sc->sc_rndtest)
		sc->sc_harvest = rndtest_harvest;
	else
		sc->sc_harvest = default_harvest;
#else
	sc->sc_harvest = default_harvest;
#endif
	if ((sc->sc_flags & HIFN_IS_7811) == 0) {
		/* Reset 7951 public key/rng engine */
		WRITE_REG_1(sc, HIFN_1_PUB_RESET,
		    READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);

		/* wait up to ~100ms for the reset bit to self-clear */
		for (i = 0; i < 100; i++) {
			DELAY(1000);
			if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
			    HIFN_PUBRST_RESET) == 0)
				break;
		}

		if (i == 100) {
			device_printf(sc->sc_dev, "public key init failed\n");
			return (1);
		}
	}

	/* Enable the rng, if available */
	if (sc->sc_flags & HIFN_HAS_RNG) {
		if (sc->sc_flags & HIFN_IS_7811) {
			/* 7811: disable, reconfigure, then re-enable */
			r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
			if (r & HIFN_7811_RNGENA_ENA) {
				r &= ~HIFN_7811_RNGENA_ENA;
				WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
			}
			WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
			    HIFN_7811_RNGCFG_DEFL);
			r |= HIFN_7811_RNGENA_ENA;
			WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
		} else
			WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
			    READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
			    HIFN_RNGCFG_ENA);

		/* harvest at ~100Hz (or every tick if hz < 100) */
		sc->sc_rngfirst = 1;
		if (hz >= 100)
			sc->sc_rnghz = hz / 100;
		else
			sc->sc_rnghz = 1;
		callout_init(&sc->sc_rngto, CALLOUT_MPSAFE);
		callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
	}

	/* Enable public key engine, if available */
	if (sc->sc_flags & HIFN_HAS_PUBLIC) {
		WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
		sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	return (0);
}

/*
 * Periodic RNG harvest callout.  Pulls words out of the RNG data
 * register(s), discards the very first read, feeds the rest to the
 * configured harvester, and reschedules itself.  On a 7811 RNG
 * underflow the RNG is abandoned (callout not rescheduled).
 */
static void
hifn_rng(void *vsc)
{
/* NB: RANDOM_BITS is currently unused in this function. */
#define	RANDOM_BITS(n)	(n)*sizeof (u_int32_t), (n)*sizeof (u_int32_t)*NBBY, 0
	struct hifn_softc *sc = vsc;
	u_int32_t sts, num[2];
	int i;

	if (sc->sc_flags & HIFN_IS_7811) {
		/* drain up to 5 pairs per invocation */
		for (i = 0; i < 5; i++) {
			sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
			if (sts & HIFN_7811_RNGSTS_UFL) {
				device_printf(sc->sc_dev,
				    "RNG underflow: disabling\n");
				return;
			}
			if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
				break;

			/*
			 * There are at least two words in the RNG FIFO
			 * at this point.
			 */
			num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			/* NB: discard first data read */
			if (sc->sc_rngfirst)
				sc->sc_rngfirst = 0;
			else
				(*sc->sc_harvest)(sc->sc_rndtest,
				    num, sizeof (num));
		}
	} else {
		/* non-7811: a single data word per invocation */
		num[0] = READ_REG_1(sc, HIFN_1_RNG_DATA);

		/* NB: discard first data read */
		if (sc->sc_rngfirst)
			sc->sc_rngfirst = 0;
		else
			(*sc->sc_harvest)(sc->sc_rndtest,
			    num, sizeof (num[0]));
	}

	callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
#undef RANDOM_BITS
}

/*
 * Spin (up to 5ms) until the processing unit's RESET bit clears.
 */
static void
hifn_puc_wait(struct hifn_softc *sc)
{
	int i;

	for (i = 5000; i > 0; i--) {
		DELAY(1);
		if (!(READ_REG_0(sc, HIFN_0_PUCTRL) & HIFN_PUCTRL_RESET))
			break;
	}
	if (!i)
		device_printf(sc->sc_dev, "proc unit did not reset\n");
}

/*
 * Reset the processing unit.
789 */ 790 static void 791 hifn_reset_puc(struct hifn_softc *sc) 792 { 793 /* Reset processing unit */ 794 WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA); 795 hifn_puc_wait(sc); 796 } 797 798 /* 799 * Set the Retry and TRDY registers; note that we set them to 800 * zero because the 7811 locks up when forced to retry (section 801 * 3.6 of "Specification Update SU-0014-04". Not clear if we 802 * should do this for all Hifn parts, but it doesn't seem to hurt. 803 */ 804 static void 805 hifn_set_retry(struct hifn_softc *sc) 806 { 807 /* NB: RETRY only responds to 8-bit reads/writes */ 808 pci_write_config(sc->sc_dev, HIFN_RETRY_TIMEOUT, 0, 1); 809 pci_write_config(sc->sc_dev, HIFN_TRDY_TIMEOUT, 0, 4); 810 } 811 812 /* 813 * Resets the board. Values in the regesters are left as is 814 * from the reset (i.e. initial values are assigned elsewhere). 815 */ 816 static void 817 hifn_reset_board(struct hifn_softc *sc, int full) 818 { 819 u_int32_t reg; 820 821 /* 822 * Set polling in the DMA configuration register to zero. 0x7 avoids 823 * resetting the board and zeros out the other fields. 824 */ 825 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | 826 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); 827 828 /* 829 * Now that polling has been disabled, we have to wait 1 ms 830 * before resetting the board. 
831 */ 832 DELAY(1000); 833 834 /* Reset the DMA unit */ 835 if (full) { 836 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE); 837 DELAY(1000); 838 } else { 839 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, 840 HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET); 841 hifn_reset_puc(sc); 842 } 843 844 KASSERT(sc->sc_dma != NULL, ("hifn_reset_board: null DMA tag!")); 845 bzero(sc->sc_dma, sizeof(*sc->sc_dma)); 846 847 /* Bring dma unit out of reset */ 848 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET | 849 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); 850 851 hifn_puc_wait(sc); 852 hifn_set_retry(sc); 853 854 if (sc->sc_flags & HIFN_IS_7811) { 855 for (reg = 0; reg < 1000; reg++) { 856 if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) & 857 HIFN_MIPSRST_CRAMINIT) 858 break; 859 DELAY(1000); 860 } 861 if (reg == 1000) 862 printf(": cram init timeout\n"); 863 } 864 } 865 866 static u_int32_t 867 hifn_next_signature(u_int32_t a, u_int cnt) 868 { 869 int i; 870 u_int32_t v; 871 872 for (i = 0; i < cnt; i++) { 873 874 /* get the parity */ 875 v = a & 0x80080125; 876 v ^= v >> 16; 877 v ^= v >> 8; 878 v ^= v >> 4; 879 v ^= v >> 2; 880 v ^= v >> 1; 881 882 a = (v & 1) ^ (a << 1); 883 } 884 885 return a; 886 } 887 888 struct pci2id { 889 u_short pci_vendor; 890 u_short pci_prod; 891 char card_id[13]; 892 }; 893 static struct pci2id pci2id[] = { 894 { 895 PCI_VENDOR_HIFN, 896 PCI_PRODUCT_HIFN_7951, 897 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 898 0x00, 0x00, 0x00, 0x00, 0x00 } 899 }, { 900 PCI_VENDOR_HIFN, 901 PCI_PRODUCT_HIFN_7955, 902 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 903 0x00, 0x00, 0x00, 0x00, 0x00 } 904 }, { 905 PCI_VENDOR_HIFN, 906 PCI_PRODUCT_HIFN_7956, 907 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 908 0x00, 0x00, 0x00, 0x00, 0x00 } 909 }, { 910 PCI_VENDOR_NETSEC, 911 PCI_PRODUCT_NETSEC_7751, 912 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 913 0x00, 0x00, 0x00, 0x00, 0x00 } 914 }, { 915 PCI_VENDOR_INVERTEX, 916 PCI_PRODUCT_INVERTEX_AEON, 917 { 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 918 0x00, 0x00, 0x00, 0x00, 0x00 } 919 }, { 920 PCI_VENDOR_HIFN, 921 PCI_PRODUCT_HIFN_7811, 922 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 923 0x00, 0x00, 0x00, 0x00, 0x00 } 924 }, { 925 /* 926 * Other vendors share this PCI ID as well, such as 927 * http://www.powercrypt.com, and obviously they also 928 * use the same key. 929 */ 930 PCI_VENDOR_HIFN, 931 PCI_PRODUCT_HIFN_7751, 932 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 933 0x00, 0x00, 0x00, 0x00, 0x00 } 934 }, 935 }; 936 937 /* 938 * Checks to see if crypto is already enabled. If crypto isn't enable, 939 * "hifn_enable_crypto" is called to enable it. The check is important, 940 * as enabling crypto twice will lock the board. 941 */ 942 static int 943 hifn_enable_crypto(struct hifn_softc *sc) 944 { 945 u_int32_t dmacfg, ramcfg, encl, addr, i; 946 char *offtbl = NULL; 947 948 for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) { 949 if (pci2id[i].pci_vendor == pci_get_vendor(sc->sc_dev) && 950 pci2id[i].pci_prod == pci_get_device(sc->sc_dev)) { 951 offtbl = pci2id[i].card_id; 952 break; 953 } 954 } 955 if (offtbl == NULL) { 956 device_printf(sc->sc_dev, "Unknown card!\n"); 957 return (1); 958 } 959 960 ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG); 961 dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG); 962 963 /* 964 * The RAM config register's encrypt level bit needs to be set before 965 * every read performed on the encryption level register. 966 */ 967 WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID); 968 969 encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA; 970 971 /* 972 * Make sure we don't re-unlock. Two unlocks kills chip until the 973 * next reboot. 
974 */ 975 if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) { 976 #ifdef HIFN_DEBUG 977 if (hifn_debug) 978 device_printf(sc->sc_dev, 979 "Strong crypto already enabled!\n"); 980 #endif 981 goto report; 982 } 983 984 if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) { 985 #ifdef HIFN_DEBUG 986 if (hifn_debug) 987 device_printf(sc->sc_dev, 988 "Unknown encryption level 0x%x\n", encl); 989 #endif 990 return 1; 991 } 992 993 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK | 994 HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE); 995 DELAY(1000); 996 addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1); 997 DELAY(1000); 998 WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0); 999 DELAY(1000); 1000 1001 for (i = 0; i <= 12; i++) { 1002 addr = hifn_next_signature(addr, offtbl[i] + 0x101); 1003 WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr); 1004 1005 DELAY(1000); 1006 } 1007 1008 WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID); 1009 encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA; 1010 1011 #ifdef HIFN_DEBUG 1012 if (hifn_debug) { 1013 if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2) 1014 device_printf(sc->sc_dev, "Engine is permanently " 1015 "locked until next system reset!\n"); 1016 else 1017 device_printf(sc->sc_dev, "Engine enabled " 1018 "successfully!\n"); 1019 } 1020 #endif 1021 1022 report: 1023 WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg); 1024 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg); 1025 1026 switch (encl) { 1027 case HIFN_PUSTAT_ENA_1: 1028 case HIFN_PUSTAT_ENA_2: 1029 break; 1030 case HIFN_PUSTAT_ENA_0: 1031 default: 1032 device_printf(sc->sc_dev, "disabled"); 1033 break; 1034 } 1035 1036 return 0; 1037 } 1038 1039 /* 1040 * Give initial values to the registers listed in the "Register Space" 1041 * section of the HIFN Software Development reference manual. 
 */
static void
hifn_init_pci_registers(struct hifn_softc *sc)
{
	/* write fixed values needed by the Initialization registers */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
	WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);

	/* write all 4 ring address registers */
	WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, cmdr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, srcr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, dstr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, resr[0]));

	DELAY(2000);

	/*
	 * Write status register: disable all four DMA channels and
	 * acknowledge every pending abort/done/last/wait/over condition.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
	    HIFN_DMACSR_S_WAIT |
	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
	    HIFN_DMACSR_C_WAIT |
	    HIFN_DMACSR_ENGINE |
	    ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
		HIFN_DMACSR_PUBDONE : 0) |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));

	sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
	/* Interrupt mask; C_WAIT is only enabled on demand (see hifn_crypto). */
	sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
	sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
	WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);


	if (sc->sc_flags & HIFN_IS_7956) {
		/* 7955/7956: internal RAM only, and a PLL to program. */
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);
		WRITE_REG_1(sc, HIFN_1_PLL, HIFN_PLL_7956);
	} else {
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
		    (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
	}

	WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
	    ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
}

/*
 * The maximum number of sessions supported by the card
 * is dependent on the amount of context ram, which
 * encryption algorithms are enabled, and how compression
 * is configured.  This should be configured before this
 * routine is called.
 */
static void
hifn_sessions(struct hifn_softc *sc)
{
	u_int32_t pucnfg;
	int ctxsize;

	pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);

	if (pucnfg & HIFN_PUCNFG_COMPSING) {
		if (pucnfg & HIFN_PUCNFG_ENCCNFG)
			ctxsize = 128;
		else
			ctxsize = 512;
		/*
		 * 7955/7956 has internal context memory of 32K
		 */
		if (sc->sc_flags & HIFN_IS_7956)
			sc->sc_maxses = 32768 / ctxsize;
		else
			sc->sc_maxses = 1 +
			    ((sc->sc_ramsize - 32768) / ctxsize);
	} else
		sc->sc_maxses = sc->sc_ramsize / 16384;

	/* cap the session table size */
	if (sc->sc_maxses > 2048)
		sc->sc_maxses = 2048;
}

/*
 * Determine ram type (sram or dram).
Board should be just out of a reset
 * state when this is called.
 */
static int
hifn_ramtype(struct hifn_softc *sc)
{
	u_int8_t data[8], dataexpect[8];
	int i;

	/*
	 * Write a known 8-byte pattern and read it back; a mismatch
	 * indicates DRAM (sc_drammodel = 1), otherwise SRAM is assumed.
	 * Two patterns (0x55, 0xaa) are tried to exercise both bit values.
	 */
	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = 0x55;
	if (hifn_writeramaddr(sc, 0, data))
		return (-1);
	if (hifn_readramaddr(sc, 0, data))
		return (-1);
	if (bcmp(data, dataexpect, sizeof(data)) != 0) {
		sc->sc_drammodel = 1;
		return (0);
	}

	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = 0xaa;
	if (hifn_writeramaddr(sc, 0, data))
		return (-1);
	if (hifn_readramaddr(sc, 0, data))
		return (-1);
	if (bcmp(data, dataexpect, sizeof(data)) != 0) {
		sc->sc_drammodel = 1;
		return (0);
	}

	return (0);
}

#define	HIFN_SRAM_MAX		(32 << 20)
#define	HIFN_SRAM_STEP_SIZE	16384
#define	HIFN_SRAM_GRANULARITY	(HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)

/*
 * Probe the SRAM size: tag every 16KB step from the top down with its
 * step index, then read back ascending; sc_ramsize ends up just past the
 * highest address whose tag survived (aliasing wraps smaller parts).
 * Always returns 0; a read failure simply stops the scan.
 */
static int
hifn_sramsize(struct hifn_softc *sc)
{
	u_int32_t a;
	u_int8_t data[8];
	u_int8_t dataexpect[sizeof(data)];
	int32_t i;

	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = i ^ 0x5a;

	/* Descending tag writes; first 4 bytes carry the step index. */
	for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
		a = i * HIFN_SRAM_STEP_SIZE;
		bcopy(&i, data, sizeof(i));
		hifn_writeramaddr(sc, a, data);
	}

	/* Ascending verification pass. */
	for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
		a = i * HIFN_SRAM_STEP_SIZE;
		bcopy(&i, dataexpect, sizeof(i));
		if (hifn_readramaddr(sc, a, data) < 0)
			return (0);
		if (bcmp(data, dataexpect, sizeof(data)) != 0)
			return (0);
		sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
	}

	return (0);
}

/*
 * XXX For dram boards, one should really try all of the
 * HIFN_PUCNFG_DSZ_*'s.  This just assumes that PUCNFG
 * is already set up correctly.
1218 */ 1219 static int 1220 hifn_dramsize(struct hifn_softc *sc) 1221 { 1222 u_int32_t cnfg; 1223 1224 if (sc->sc_flags & HIFN_IS_7956) { 1225 /* 1226 * 7955/7956 have a fixed internal ram of only 32K. 1227 */ 1228 sc->sc_ramsize = 32768; 1229 } else { 1230 cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) & 1231 HIFN_PUCNFG_DRAMMASK; 1232 sc->sc_ramsize = 1 << ((cnfg >> 13) + 18); 1233 } 1234 return (0); 1235 } 1236 1237 static void 1238 hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp, int *resp) 1239 { 1240 struct hifn_dma *dma = sc->sc_dma; 1241 1242 if (dma->cmdi == HIFN_D_CMD_RSIZE) { 1243 dma->cmdi = 0; 1244 dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID | 1245 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 1246 HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE, 1247 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1248 } 1249 *cmdp = dma->cmdi++; 1250 dma->cmdk = dma->cmdi; 1251 1252 if (dma->srci == HIFN_D_SRC_RSIZE) { 1253 dma->srci = 0; 1254 dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID | 1255 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 1256 HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE, 1257 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1258 } 1259 *srcp = dma->srci++; 1260 dma->srck = dma->srci; 1261 1262 if (dma->dsti == HIFN_D_DST_RSIZE) { 1263 dma->dsti = 0; 1264 dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID | 1265 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 1266 HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE, 1267 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1268 } 1269 *dstp = dma->dsti++; 1270 dma->dstk = dma->dsti; 1271 1272 if (dma->resi == HIFN_D_RES_RSIZE) { 1273 dma->resi = 0; 1274 dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID | 1275 HIFN_D_JUMP | HIFN_D_MASKDONEIRQ); 1276 HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE, 1277 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); 1278 } 1279 *resp = dma->resi++; 1280 dma->resk = dma->resi; 1281 } 1282 1283 static int 1284 hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data) 1285 { 1286 struct hifn_dma *dma = sc->sc_dma; 1287 
hifn_base_command_t wc; 1288 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ; 1289 int r, cmdi, resi, srci, dsti; 1290 1291 wc.masks = htole16(3 << 13); 1292 wc.session_num = htole16(addr >> 14); 1293 wc.total_source_count = htole16(8); 1294 wc.total_dest_count = htole16(addr & 0x3fff); 1295 1296 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi); 1297 1298 WRITE_REG_1(sc, HIFN_1_DMA_CSR, 1299 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA | 1300 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA); 1301 1302 /* build write command */ 1303 bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND); 1304 *(hifn_base_command_t *)dma->command_bufs[cmdi] = wc; 1305 bcopy(data, &dma->test_src, sizeof(dma->test_src)); 1306 1307 dma->srcr[srci].p = htole32(sc->sc_dma_physaddr 1308 + offsetof(struct hifn_dma, test_src)); 1309 dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr 1310 + offsetof(struct hifn_dma, test_dst)); 1311 1312 dma->cmdr[cmdi].l = htole32(16 | masks); 1313 dma->srcr[srci].l = htole32(8 | masks); 1314 dma->dstr[dsti].l = htole32(4 | masks); 1315 dma->resr[resi].l = htole32(4 | masks); 1316 1317 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1318 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1319 1320 for (r = 10000; r >= 0; r--) { 1321 DELAY(10); 1322 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1323 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1324 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0) 1325 break; 1326 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1327 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1328 } 1329 if (r == 0) { 1330 device_printf(sc->sc_dev, "writeramaddr -- " 1331 "result[%d](addr %d) still valid\n", resi, addr); 1332 r = -1; 1333 return (-1); 1334 } else 1335 r = 0; 1336 1337 WRITE_REG_1(sc, HIFN_1_DMA_CSR, 1338 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS | 1339 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS); 1340 1341 return (r); 1342 } 1343 1344 static int 1345 hifn_readramaddr(struct hifn_softc *sc, int addr, 
u_int8_t *data) 1346 { 1347 struct hifn_dma *dma = sc->sc_dma; 1348 hifn_base_command_t rc; 1349 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ; 1350 int r, cmdi, srci, dsti, resi; 1351 1352 rc.masks = htole16(2 << 13); 1353 rc.session_num = htole16(addr >> 14); 1354 rc.total_source_count = htole16(addr & 0x3fff); 1355 rc.total_dest_count = htole16(8); 1356 1357 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi); 1358 1359 WRITE_REG_1(sc, HIFN_1_DMA_CSR, 1360 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA | 1361 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA); 1362 1363 bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND); 1364 *(hifn_base_command_t *)dma->command_bufs[cmdi] = rc; 1365 1366 dma->srcr[srci].p = htole32(sc->sc_dma_physaddr + 1367 offsetof(struct hifn_dma, test_src)); 1368 dma->test_src = 0; 1369 dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr + 1370 offsetof(struct hifn_dma, test_dst)); 1371 dma->test_dst = 0; 1372 dma->cmdr[cmdi].l = htole32(8 | masks); 1373 dma->srcr[srci].l = htole32(8 | masks); 1374 dma->dstr[dsti].l = htole32(8 | masks); 1375 dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks); 1376 1377 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1378 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1379 1380 for (r = 10000; r >= 0; r--) { 1381 DELAY(10); 1382 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1383 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1384 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0) 1385 break; 1386 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 1387 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1388 } 1389 if (r == 0) { 1390 device_printf(sc->sc_dev, "readramaddr -- " 1391 "result[%d](addr %d) still valid\n", resi, addr); 1392 r = -1; 1393 } else { 1394 r = 0; 1395 bcopy(&dma->test_dst, data, sizeof(dma->test_dst)); 1396 } 1397 1398 WRITE_REG_1(sc, HIFN_1_DMA_CSR, 1399 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS | 1400 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS); 1401 1402 return (r); 1403 } 

/*
 * Initialize the descriptor rings.
 */
static void
hifn_init_dma(struct hifn_softc *sc)
{
	struct hifn_dma *dma = sc->sc_dma;
	int i;

	hifn_set_retry(sc);

	/* initialize static pointer values */
	for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
		dma->cmdr[i].p = htole32(sc->sc_dma_physaddr +
		    offsetof(struct hifn_dma, command_bufs[i][0]));
	for (i = 0; i < HIFN_D_RES_RSIZE; i++)
		dma->resr[i].p = htole32(sc->sc_dma_physaddr +
		    offsetof(struct hifn_dma, result_bufs[i][0]));

	/* the extra descriptor at the end of each ring jumps back to slot 0 */
	dma->cmdr[HIFN_D_CMD_RSIZE].p =
	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0]));
	dma->srcr[HIFN_D_SRC_RSIZE].p =
	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0]));
	dma->dstr[HIFN_D_DST_RSIZE].p =
	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0]));
	dma->resr[HIFN_D_RES_RSIZE].p =
	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0]));

	/* reset in-use counts (u), producer indices (i) and reclaim indices (k) */
	dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
	dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
	dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
}

/*
 * Writes out the raw command buffer space.  Returns the
 * command buffer size.
 */
static u_int
hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
{
	u_int8_t *buf_pos;
	hifn_base_command_t *base_cmd;
	hifn_mac_command_t *mac_cmd;
	hifn_crypt_command_t *cry_cmd;
	int using_mac, using_crypt, len, ivlen;
	u_int32_t dlen, slen;

	buf_pos = buf;
	using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
	using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;

	/*
	 * Base command: source/dest lengths are split into a 16-bit low
	 * part and high bits packed into the session_num field.
	 */
	base_cmd = (hifn_base_command_t *)buf_pos;
	base_cmd->masks = htole16(cmd->base_masks);
	slen = cmd->src_mapsize;
	if (cmd->sloplen)
		dlen = cmd->dst_mapsize - cmd->sloplen + sizeof(u_int32_t);
	else
		dlen = cmd->dst_mapsize;
	base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
	base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
	dlen >>= 16;
	slen >>= 16;
	base_cmd->session_num = htole16(cmd->session_num |
	    ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
	    ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
	buf_pos += sizeof(hifn_base_command_t);

	/* Optional MAC sub-command. */
	if (using_mac) {
		mac_cmd = (hifn_mac_command_t *)buf_pos;
		dlen = cmd->maccrd->crd_len;
		mac_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		mac_cmd->masks = htole16(cmd->mac_masks |
		    ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
		mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
		mac_cmd->reserved = 0;
		buf_pos += sizeof(hifn_mac_command_t);
	}

	/* Optional crypt sub-command. */
	if (using_crypt) {
		cry_cmd = (hifn_crypt_command_t *)buf_pos;
		dlen = cmd->enccrd->crd_len;
		cry_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		cry_cmd->masks = htole16(cmd->cry_masks |
		    ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
		cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
		cry_cmd->reserved = 0;
		buf_pos += sizeof(hifn_crypt_command_t);
	}

	/* Inline MAC key, if a new one is being loaded. */
	if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
		bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH);
		buf_pos += HIFN_MAC_KEY_LENGTH;
	}

	/* Inline cipher key; layout depends on the selected algorithm. */
	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_3DES:
			bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH);
			buf_pos += HIFN_3DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_DES:
			bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH);
			buf_pos += HIFN_DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_RC4:
			/* RC4 wants 256 key bytes: repeat the key to fill. */
			len = 256;
			do {
				int clen;

				clen = MIN(cmd->cklen, len);
				bcopy(cmd->ck, buf_pos, clen);
				len -= clen;
				buf_pos += clen;
			} while (len > 0);
			bzero(buf_pos, 4);
			buf_pos += 4;
			break;
		case HIFN_CRYPT_CMD_ALG_AES:
			/*
			 * AES keys are variable 128, 192 and
			 * 256 bits (16, 24 and 32 bytes).
			 */
			bcopy(cmd->ck, buf_pos, cmd->cklen);
			buf_pos += cmd->cklen;
			break;
		}
	}

	/* Inline IV; AES uses a longer IV than DES/3DES. */
	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_AES:
			ivlen = HIFN_AES_IV_LENGTH;
			break;
		default:
			ivlen = HIFN_IV_LENGTH;
			break;
		}
		bcopy(cmd->iv, buf_pos, ivlen);
		buf_pos += ivlen;
	}

	/* Neither MAC nor crypt: pad with 8 zero bytes. */
	if ((cmd->base_masks & (HIFN_BASE_CMD_MAC|HIFN_BASE_CMD_CRYPT)) == 0) {
		bzero(buf_pos, 8);
		buf_pos += 8;
	}

	return (buf_pos - buf);
}

/*
 * Check that every DMA segment is 32-bit aligned; all segments but the
 * last must also have a length that is a multiple of 4.  Returns 1 if
 * the operand can be DMA'd directly, 0 if it must be copied first.
 */
static int
hifn_dmamap_aligned(struct hifn_operand *op)
{
	int i;

	for (i = 0; i < op->nsegs; i++) {
		if (op->segs[i].ds_addr & 3)
			return (0);
		if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3))
			return (0);
	}
	return (1);
}

/*
 * Fill the destination ring from cmd->dst.  When cmd->sloplen is set the
 * final (unaligned) bytes are directed into the per-command slop buffer
 * instead of the caller's final segment.  Returns the new producer index.
 */
static int
hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_operand *dst = &cmd->dst;
	u_int32_t p, l;
	int idx, used = 0, i;

	idx = dma->dsti;
	/* All segments except the last go straight into the ring. */
	for (i = 0; i < dst->nsegs - 1; i++) {
		dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
		dma->dstr[idx].l = htole32(HIFN_D_VALID |
		    HIFN_D_MASKDONEIRQ | dst->segs[i].ds_len);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		used++;

		if (++idx == HIFN_D_DST_RSIZE) {
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			idx = 0;
		}
	}

	if (cmd->sloplen == 0) {
		p = dst->segs[i].ds_addr;
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    dst->segs[i].ds_len;
	} else {
		/* Final descriptor points at the 4-byte slop word. */
		p = sc->sc_dma_physaddr +
		    offsetof(struct hifn_dma, slop[cmd->slopidx]);
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    sizeof(u_int32_t);

		if ((dst->segs[i].ds_len - cmd->sloplen) != 0) {
			/* Aligned prefix of the last segment. */
			dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_MASKDONEIRQ |
			    (dst->segs[i].ds_len - cmd->sloplen));
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			used++;

			if (++idx == HIFN_D_DST_RSIZE) {
				dma->dstr[idx].l = htole32(HIFN_D_VALID |
				    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
				HIFN_DSTR_SYNC(sc, idx,
				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
				idx = 0;
			}
		}
	}
	dma->dstr[idx].p = htole32(p);
	dma->dstr[idx].l = htole32(l);
	HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	used++;

	if (++idx == HIFN_D_DST_RSIZE) {
		dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
		    HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		idx = 0;
	}

	dma->dsti = idx;
	dma->dstu += used;
	return (idx);
}

/*
 * Fill the source ring from cmd->src; the final segment carries
 * HIFN_D_LAST.  Returns the new producer index.
 */
static int
hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_operand *src = &cmd->src;
	int idx, i;
	u_int32_t last = 0;

	idx = dma->srci;
	for (i = 0; i < src->nsegs; i++) {
		if (i == src->nsegs - 1)
			last = HIFN_D_LAST;

		dma->srcr[idx].p = htole32(src->segs[i].ds_addr);
		dma->srcr[idx].l = htole32(src->segs[i].ds_len |
		    HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last);
		HIFN_SRCR_SYNC(sc, idx,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		if (++idx == HIFN_D_SRC_RSIZE) {
			dma->srcr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
			    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
			idx = 0;
		}
	}
	dma->srci = idx;
	dma->srcu += src->nsegs;
	return (idx);
}

/*
 * bus_dmamap_load callback: record the segment list and total mapped
 * size into the hifn_operand passed as "arg".
 */
static void
hifn_op_cb(void* arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error)
{
	struct hifn_operand *op = arg;

	KASSERT(nsegs <= MAX_SCATTER,
		("hifn_op_cb: too many DMA segments (%u > %u) "
		 "returned when mapping operand", nsegs, MAX_SCATTER));
	op->mapsize = mapsize;
	op->nsegs = nsegs;
	bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
}

static int
hifn_crypto(
	struct hifn_softc *sc,
	struct hifn_command *cmd,
	struct cryptop *crp,
	int hint)
{
	struct hifn_dma *dma = sc->sc_dma;
	u_int32_t cmdlen;
	int cmdi, resi, err = 0;

	/*
	 * need 1 cmd, and 1 res
	 *
	 * NB: check this first since it's easy.
	 */
	HIFN_LOCK(sc);
	if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
	    (dma->resu + 1) > HIFN_D_RES_RSIZE) {
#ifdef HIFN_DEBUG
		if (hifn_debug) {
			device_printf(sc->sc_dev,
				"cmd/result exhaustion, cmdu %u resu %u\n",
				dma->cmdu, dma->resu);
		}
#endif
		hifnstats.hst_nomem_cr++;
		HIFN_UNLOCK(sc);
		/* ERESTART: opencrypto requeues the op when space frees up */
		return (ERESTART);
	}

	if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->src_map)) {
		hifnstats.hst_nomem_map++;
		HIFN_UNLOCK(sc);
		return (ENOMEM);
	}

	/* Map the source operand (mbuf chain or uio). */
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
		    cmd->src_m, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) {
			hifnstats.hst_nomem_load++;
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
		    cmd->src_io, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) {
			hifnstats.hst_nomem_load++;
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else {
		err = EINVAL;
		goto err_srcmap1;
	}

	if (hifn_dmamap_aligned(&cmd->src)) {
		/* In-place operation: destination shares the source map. */
		cmd->sloplen = cmd->src_mapsize & 3;
		cmd->dst = cmd->src;
	} else {
		if (crp->crp_flags & CRYPTO_F_IOV) {
			err = EINVAL;
			goto err_srcmap;
		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
			int totlen, len;
			struct mbuf *m, *m0, *mlast;

			KASSERT(cmd->dst_m == cmd->src_m,
				("hifn_crypto: dst_m initialized improperly"));
			hifnstats.hst_unaligned++;
			/*
			 * Source is not aligned on a longword boundary.
			 * Copy the data to insure alignment.  If we fail
			 * to allocate mbufs or clusters while doing this
			 * we return ERESTART so the operation is requeued
			 * at the crypto later, but only if there are
			 * ops already posted to the hardware; otherwise we
			 * have no guarantee that we'll be re-entered.
			 */
			totlen = cmd->src_mapsize;
			if (cmd->src_m->m_flags & M_PKTHDR) {
				len = MHLEN;
				MGETHDR(m0, M_DONTWAIT, MT_DATA);
				if (m0 && !m_dup_pkthdr(m0, cmd->src_m, M_DONTWAIT)) {
					m_free(m0);
					m0 = NULL;
				}
			} else {
				len = MLEN;
				MGET(m0, M_DONTWAIT, MT_DATA);
			}
			if (m0 == NULL) {
				hifnstats.hst_nomem_mbuf++;
				err = dma->cmdu ? ERESTART : ENOMEM;
				goto err_srcmap;
			}
			if (totlen >= MINCLSIZE) {
				MCLGET(m0, M_DONTWAIT);
				if ((m0->m_flags & M_EXT) == 0) {
					hifnstats.hst_nomem_mcl++;
					err = dma->cmdu ? ERESTART : ENOMEM;
					m_freem(m0);
					goto err_srcmap;
				}
				len = MCLBYTES;
			}
			totlen -= len;
			m0->m_pkthdr.len = m0->m_len = len;
			mlast = m0;

			/* Chain additional mbufs/clusters to cover totlen. */
			while (totlen > 0) {
				MGET(m, M_DONTWAIT, MT_DATA);
				if (m == NULL) {
					hifnstats.hst_nomem_mbuf++;
					err = dma->cmdu ? ERESTART : ENOMEM;
					m_freem(m0);
					goto err_srcmap;
				}
				len = MLEN;
				if (totlen >= MINCLSIZE) {
					MCLGET(m, M_DONTWAIT);
					if ((m->m_flags & M_EXT) == 0) {
						hifnstats.hst_nomem_mcl++;
						err = dma->cmdu ? ERESTART : ENOMEM;
						mlast->m_next = m;
						m_freem(m0);
						goto err_srcmap;
					}
					len = MCLBYTES;
				}

				m->m_len = len;
				m0->m_pkthdr.len += len;
				totlen -= len;

				mlast->m_next = m;
				mlast = m;
			}
			cmd->dst_m = m0;
		}
	}

	/* Create/load a separate destination map if dst != src. */
	if (cmd->dst_map == NULL) {
		if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->dst_map)) {
			hifnstats.hst_nomem_map++;
			err = ENOMEM;
			goto err_srcmap;
		}
		if (crp->crp_flags & CRYPTO_F_IMBUF) {
			if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
			    cmd->dst_m, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) {
				hifnstats.hst_nomem_map++;
				err = ENOMEM;
				goto err_dstmap1;
			}
		} else if (crp->crp_flags & CRYPTO_F_IOV) {
			if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
			    cmd->dst_io, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) {
				hifnstats.hst_nomem_load++;
				err = ENOMEM;
				goto err_dstmap1;
			}
		}
	}

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		device_printf(sc->sc_dev,
		    "Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
		    READ_REG_1(sc, HIFN_1_DMA_CSR),
		    READ_REG_1(sc, HIFN_1_DMA_IER),
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu,
		    cmd->src_nsegs, cmd->dst_nsegs);
	}
#endif

	if (cmd->src_map == cmd->dst_map) {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    BUS_DMASYNC_PREREAD);
	}

	/*
	 * need N src, and N dst
	 */
	if ((dma->srcu + cmd->src_nsegs) > HIFN_D_SRC_RSIZE ||
	    (dma->dstu + cmd->dst_nsegs + 1) > HIFN_D_DST_RSIZE) {
#ifdef HIFN_DEBUG
		if (hifn_debug) {
			device_printf(sc->sc_dev,
				"src/dst exhaustion, srcu %u+%u dstu %u+%u\n",
				dma->srcu, cmd->src_nsegs,
				dma->dstu, cmd->dst_nsegs);
		}
#endif
		hifnstats.hst_nomem_sd++;
		err = ERESTART;
		goto err_dstmap;
	}

	/* Take a command slot, wrapping the ring if needed. */
	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	cmdi = dma->cmdi++;
	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);

	/* .p for command/result already set */
	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
	    HIFN_D_MASKDONEIRQ);
	HIFN_CMDR_SYNC(sc, cmdi,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	dma->cmdu++;
	if (sc->sc_c_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
		sc->sc_c_busy = 1;
	}

	/*
	 * We don't worry about missing an interrupt (which a "command wait"
	 * interrupt salvages us from), unless there is more than one command
	 * in the queue.
	 */
	if (dma->cmdu > 1) {
		sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	hifnstats.hst_ipackets++;
	hifnstats.hst_ibytes += cmd->src_mapsize;

	hifn_dmamap_load_src(sc, cmd);
	if (sc->sc_s_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
		sc->sc_s_busy = 1;
	}

	/*
	 * Unlike other descriptors, we don't mask done interrupt from
	 * result descriptor.
	 */
#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("load res\n");
#endif
	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	resi = dma->resi++;
	KASSERT(dma->hifn_commands[resi] == NULL,
		("hifn_crypto: command slot %u busy", resi));
	dma->hifn_commands[resi] = cmd;
	HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
	/* Batch completions (mask the done IRQ) when more work is coming. */
	if ((hint & CRYPTO_HINT_MORE) && sc->sc_curbatch < hifn_maxbatch) {
		dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
		    HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
		sc->sc_curbatch++;
		if (sc->sc_curbatch > hifnstats.hst_maxbatch)
			hifnstats.hst_maxbatch = sc->sc_curbatch;
		hifnstats.hst_totbatch++;
	} else {
		dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
		    HIFN_D_VALID | HIFN_D_LAST);
		sc->sc_curbatch = 0;
	}
	HIFN_RESR_SYNC(sc, resi,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	dma->resu++;
	if (sc->sc_r_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
		sc->sc_r_busy = 1;
	}

	if (cmd->sloplen)
		cmd->slopidx = resi;

	hifn_dmamap_load_dst(sc, cmd);

	if (sc->sc_d_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
		sc->sc_d_busy = 1;
	}

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		device_printf(sc->sc_dev, "command: stat %8x ier %8x\n",
		    READ_REG_1(sc, HIFN_1_DMA_CSR),
		    READ_REG_1(sc, HIFN_1_DMA_IER));
	}
#endif

	sc->sc_active = 5;
	HIFN_UNLOCK(sc);
	KASSERT(err == 0, ("hifn_crypto: success with error %u", err));
	return (err);		/* success */

err_dstmap:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
err_dstmap1:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
err_srcmap:
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (cmd->src_m != cmd->dst_m)
			m_freem(cmd->dst_m);
	}
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
err_srcmap1:
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	HIFN_UNLOCK(sc);
	return (err);
}

/*
 * Periodic callout: after ~5 ticks (sc_active counts down) with no new
 * work, disable any DMA channels whose rings have drained.
 */
static void
hifn_tick(void* vsc)
{
	struct hifn_softc *sc = vsc;

	HIFN_LOCK(sc);
	if (sc->sc_active == 0) {
		struct hifn_dma *dma = sc->sc_dma;
		u_int32_t r = 0;

		if (dma->cmdu == 0 && sc->sc_c_busy) {
			sc->sc_c_busy = 0;
			r |= HIFN_DMACSR_C_CTRL_DIS;
		}
		if (dma->srcu == 0 && sc->sc_s_busy) {
			sc->sc_s_busy = 0;
			r |= HIFN_DMACSR_S_CTRL_DIS;
		}
		if (dma->dstu == 0 && sc->sc_d_busy) {
			sc->sc_d_busy = 0;
			r |= HIFN_DMACSR_D_CTRL_DIS;
		}
		if (dma->resu == 0 && sc->sc_r_busy) {
			sc->sc_r_busy = 0;
			r |= HIFN_DMACSR_R_CTRL_DIS;
		}
		if (r)
			WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
	} else
		sc->sc_active--;
	HIFN_UNLOCK(sc);
	callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
}

static void
hifn_intr(void *arg)
{
	struct hifn_softc *sc = arg;
	struct hifn_dma *dma;
	u_int32_t dmacsr, restart;
	int i, u;

	dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);

	/* Nothing in the DMA unit interrupted */
	if ((dmacsr & sc->sc_dmaier) == 0)
		return;

	HIFN_LOCK(sc);

	dma = sc->sc_dma;

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		device_printf(sc->sc_dev,
		    "irq: stat %08x ien %08x damier %08x i %d/%d/%d/%d k %d/%d/%d/%d u %d/%d/%d/%d\n",
		    dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_dmaier,
		    dma->cmdi, dma->srci, dma->dsti, dma->resi,
		    dma->cmdk, dma->srck, dma->dstk, dma->resk,
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu);
	}
#endif

	/* ack the interrupt bits we were configured for */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);

	if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
	    (dmacsr & HIFN_DMACSR_PUBDONE))
		WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
		    READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);

	restart = dmacsr & (HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_OVER);
	if (restart)
		device_printf(sc->sc_dev, "overrun %x\n", dmacsr);

	if (sc->sc_flags & HIFN_IS_7811) {
		if (dmacsr & HIFN_DMACSR_ILLR)
			device_printf(sc->sc_dev, "illegal read\n");
		if (dmacsr & HIFN_DMACSR_ILLW)
			device_printf(sc->sc_dev, "illegal write\n");
	}

	/* Any DMA abort means the rings are toast; reset everything. */
	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
	if (restart) {
		device_printf(sc->sc_dev, "abort, resetting.\n");
		hifnstats.hst_abort++;
		hifn_abort(sc);
		HIFN_UNLOCK(sc);
		return;
	}

	if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) {
		/*
		 * If no slots to process and we receive a "waiting on
		 * command" interrupt, we disable the "waiting on command"
		 * (by clearing it).
		 */
		sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	/* clear the rings */
	i = dma->resk; u = dma->resu;
	while (u != 0) {
		HIFN_RESR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
			/* still owned by the hardware; stop reaping */
			HIFN_RESR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}

		/* slot HIFN_D_RES_RSIZE is the jump descriptor, skip it */
		if (i != HIFN_D_RES_RSIZE) {
			struct hifn_command *cmd;
			u_int8_t *macbuf = NULL;

			HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
			cmd = dma->hifn_commands[i];
			KASSERT(cmd != NULL,
				("hifn_intr: null command slot %u", i));
			dma->hifn_commands[i] = NULL;

			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
				macbuf = dma->result_bufs[i];
				macbuf += 12;
			}

			hifn_callback(sc, cmd, macbuf);
			hifnstats.hst_opackets++;
			u--;
		}

		if (++i == (HIFN_D_RES_RSIZE + 1))
			i = 0;
	}
	dma->resk = i; dma->resu = u;

	i = dma->srck; u = dma->srcu;
	while (u != 0) {
		if (i == HIFN_D_SRC_RSIZE)
			i = 0;
		HIFN_SRCR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_SRCR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		i++, u--;
	}
	dma->srck = i; dma->srcu = u;

	i = dma->cmdk; u = dma->cmdu;
	while (u != 0) {
		HIFN_CMDR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_CMDR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (i != HIFN_D_CMD_RSIZE) {
			u--;
			HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
		}
		if (++i == (HIFN_D_CMD_RSIZE + 1))
			i = 0;
	}
	dma->cmdk = i; dma->cmdu = u;

	HIFN_UNLOCK(sc);

	if (sc->sc_needwakeup) {		/* XXX check high watermark */
		int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev,
			    "wakeup crypto (%x) u %d/%d/%d/%d\n",
			    sc->sc_needwakeup,
			    dma->cmdu, dma->srcu, dma->dstu, dma->resu);
#endif
		sc->sc_needwakeup &= ~wakeup;
		crypto_unblock(sc->sc_cid, wakeup);
	}
}

/*
 * Allocate a new 'session' and return an encoded session id.  'sidp'
 * contains our registration id, and should contain an encoded session
 * id on successful allocation.
 */
static int
hifn_newsession(void *arg, u_int32_t *sidp, struct cryptoini *cri)
{
	struct cryptoini *c;
	struct hifn_softc *sc = arg;
	int i, mac = 0, cry = 0;

	KASSERT(sc != NULL, ("hifn_newsession: null softc"));
	if (sidp == NULL || cri == NULL || sc == NULL)
		return (EINVAL);

	/* Find a free session slot. */
	for (i = 0; i < sc->sc_maxses; i++)
		if (sc->sc_sessions[i].hs_state == HS_STATE_FREE)
			break;
	if (i == sc->sc_maxses)
		return (ENOMEM);

	/* At most one MAC and one cipher algorithm per session. */
	for (c = cri; c != NULL; c = c->cri_next) {
		switch (c->cri_alg) {
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
			if (mac)
				return (EINVAL);
			mac = 1;
			break;
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_AES_CBC:
			/* XXX this may read fewer, does it matter? */
			read_random(sc->sc_sessions[i].hs_iv,
			    c->cri_alg == CRYPTO_AES_CBC ?
				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			/*FALLTHROUGH*/
		case CRYPTO_ARC4:
			if (cry)
				return (EINVAL);
			cry = 1;
			break;
		default:
			return (EINVAL);
		}
	}
	if (mac == 0 && cry == 0)
		return (EINVAL);

	*sidp = HIFN_SID(device_get_unit(sc->sc_dev), i);
	sc->sc_sessions[i].hs_state = HS_STATE_USED;

	return (0);
}

/*
 * Deallocate a session.
 * XXX this routine should run a zero'd mac/encrypt key into context ram.
 * XXX to blow away any keys already stored there.
 */
static int
hifn_freesession(void *arg, u_int64_t tid)
{
	struct hifn_softc *sc = arg;
	int session;
	u_int32_t sid = CRYPTO_SESID2LID(tid);

	KASSERT(sc != NULL, ("hifn_freesession: null softc"));
	if (sc == NULL)
		return (EINVAL);

	session = HIFN_SESSION(sid);
	if (session >= sc->sc_maxses)
		return (EINVAL);

	/* Clearing the slot also returns it to HS_STATE_FREE. */
	bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session]));
	return (0);
}

/*
 * Dispatch an opencrypto request to the device.  Validates the
 * descriptor chain (at most one MAC and one cipher op, in an order the
 * part can execute), builds a hifn_command describing the base, crypt
 * and mac command words plus IV and key material, and hands it to
 * hifn_crypto().  Returns 0 on success, ERESTART when the rings are
 * full (the caller requeues and we set sc_needwakeup so hifn_intr can
 * unblock the queue later), or an error after completing the request
 * with crp_etype set.
 */
static int
hifn_process(void *arg, struct cryptop *crp, int hint)
{
	struct hifn_softc *sc = arg;
	struct hifn_command *cmd = NULL;
	int session, err, ivlen;
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;

	if (crp == NULL || crp->crp_callback == NULL) {
		hifnstats.hst_invalid++;
		return (EINVAL);
	}
	session = HIFN_SESSION(crp->crp_sid);

	if (sc == NULL || session >= sc->sc_maxses) {
		err = EINVAL;
		goto errout;
	}

	cmd = malloc(sizeof(struct hifn_command), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cmd == NULL) {
		hifnstats.hst_nomem++;
		err = ENOMEM;
		goto errout;
	}

	/* In-place operation by default: src and dst share the buffer. */
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		cmd->src_m = (struct mbuf *)crp->crp_buf;
		cmd->dst_m = (struct mbuf *)crp->crp_buf;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		cmd->src_io = (struct uio *)crp->crp_buf;
		cmd->dst_io = (struct uio *)crp->crp_buf;
	} else {
		err = EINVAL;
		goto errout;	/* XXX we don't handle contiguous buffers! */
	}

	crd1 = crp->crp_desc;
	if (crd1 == NULL) {
		err = EINVAL;
		goto errout;
	}
	crd2 = crd1->crd_next;

	if (crd2 == NULL) {
		/* Single operation: either hash or cipher. */
		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1 ||
		    crd1->crd_alg == CRYPTO_MD5) {
			maccrd = crd1;
			enccrd = NULL;
		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC ||
		    crd1->crd_alg == CRYPTO_ARC4) {
			if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
				cmd->base_masks |= HIFN_BASE_CMD_DECODE;
			maccrd = NULL;
			enccrd = crd1;
		} else {
			err = EINVAL;
			goto errout;
		}
	} else {
		/*
		 * Two operations: the part only supports MAC-then-decrypt
		 * ordering as hash+decipher, and encrypt-then-MAC as
		 * cipher+hash.
		 */
		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
		    crd1->crd_alg == CRYPTO_MD5 ||
		    crd1->crd_alg == CRYPTO_SHA1) &&
		    (crd2->crd_alg == CRYPTO_DES_CBC ||
		    crd2->crd_alg == CRYPTO_3DES_CBC ||
		    crd2->crd_alg == CRYPTO_AES_CBC ||
		    crd2->crd_alg == CRYPTO_ARC4) &&
		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
			cmd->base_masks = HIFN_BASE_CMD_DECODE;
			maccrd = crd1;
			enccrd = crd2;
		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_ARC4 ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC) &&
		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
		    crd2->crd_alg == CRYPTO_SHA1_HMAC ||
		    crd2->crd_alg == CRYPTO_MD5 ||
		    crd2->crd_alg == CRYPTO_SHA1) &&
		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
			enccrd = crd1;
			maccrd = crd2;
		} else {
			/*
			 * We cannot order the 7751 as requested
			 */
			err = EINVAL;
			goto errout;
		}
	}

	if (enccrd) {
		cmd->enccrd = enccrd;
		cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
		switch (enccrd->crd_alg) {
		case CRYPTO_ARC4:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
			/*
			 * RC4 is stateful: if the direction flipped since
			 * the previous op, force the key context to be
			 * re-uploaded by dropping back to HS_STATE_USED.
			 */
			if ((enccrd->crd_flags & CRD_F_ENCRYPT)
			    != sc->sc_sessions[session].hs_prev_op)
				sc->sc_sessions[session].hs_state =
				    HS_STATE_USED;
			break;
		case CRYPTO_DES_CBC:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
			    HIFN_CRYPT_CMD_MODE_CBC |
			    HIFN_CRYPT_CMD_NEW_IV;
			break;
		case CRYPTO_3DES_CBC:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
			    HIFN_CRYPT_CMD_MODE_CBC |
			    HIFN_CRYPT_CMD_NEW_IV;
			break;
		case CRYPTO_AES_CBC:
			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
			    HIFN_CRYPT_CMD_MODE_CBC |
			    HIFN_CRYPT_CMD_NEW_IV;
			break;
		default:
			err = EINVAL;
			goto errout;
		}
		if (enccrd->crd_alg != CRYPTO_ARC4) {
			ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
			    HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			if (enccrd->crd_flags & CRD_F_ENCRYPT) {
				/* Pick the IV: explicit or session-cached. */
				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
					bcopy(enccrd->crd_iv, cmd->iv, ivlen);
				else
					bcopy(sc->sc_sessions[session].hs_iv,
					    cmd->iv, ivlen);

				/* Write the IV into the packet unless the
				 * caller says it is already present. */
				if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
				    == 0) {
					if (crp->crp_flags & CRYPTO_F_IMBUF)
						m_copyback(cmd->src_m,
						    enccrd->crd_inject,
						    ivlen, cmd->iv);
					else if (crp->crp_flags & CRYPTO_F_IOV)
						cuio_copyback(cmd->src_io,
						    enccrd->crd_inject,
						    ivlen, cmd->iv);
				}
			} else {
				/* Decrypt: IV comes from the caller or the
				 * packet itself at crd_inject. */
				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
					bcopy(enccrd->crd_iv, cmd->iv, ivlen);
				else if (crp->crp_flags & CRYPTO_F_IMBUF)
					m_copydata(cmd->src_m,
					    enccrd->crd_inject, ivlen, cmd->iv);
				else if (crp->crp_flags & CRYPTO_F_IOV)
					cuio_copydata(cmd->src_io,
					    enccrd->crd_inject, ivlen, cmd->iv);
			}
		}

		if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
			cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
		cmd->ck = enccrd->crd_key;
		cmd->cklen = enccrd->crd_klen >> 3;

		/*
		 * Need to specify the size for the AES key in the masks.
		 */
		if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
		    HIFN_CRYPT_CMD_ALG_AES) {
			switch (cmd->cklen) {
			case 16:
				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
				break;
			case 24:
				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
				break;
			case 32:
				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
				break;
			default:
				err = EINVAL;
				goto errout;
			}
		}

		/* First use of this session's key: upload it. */
		if (sc->sc_sessions[session].hs_state == HS_STATE_USED)
			cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
	}

	if (maccrd) {
		cmd->maccrd = maccrd;
		cmd->base_masks |= HIFN_BASE_CMD_MAC;

		switch (maccrd->crd_alg) {
		case CRYPTO_MD5:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
			    HIFN_MAC_CMD_POS_IPSEC;
			break;
		case CRYPTO_MD5_HMAC:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
			break;
		case CRYPTO_SHA1:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
			    HIFN_MAC_CMD_POS_IPSEC;
			break;
		case CRYPTO_SHA1_HMAC:
			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
			break;
		}

		/* New HMAC session: upload the key, zero-padded to the
		 * device's fixed key length. */
		if ((maccrd->crd_alg == CRYPTO_SHA1_HMAC ||
		     maccrd->crd_alg == CRYPTO_MD5_HMAC) &&
		    sc->sc_sessions[session].hs_state == HS_STATE_USED) {
			cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
			bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3);
			bzero(cmd->mac + (maccrd->crd_klen >> 3),
			    HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
		}
	}

	cmd->crp = crp;
	cmd->session_num = session;
	cmd->softc = sc;

	err = hifn_crypto(sc, cmd, crp, hint);
	if (!err) {
		if (enccrd)
			sc->sc_sessions[session].hs_prev_op =
				enccrd->crd_flags & CRD_F_ENCRYPT;
		/* Key is now resident in the part's context RAM. */
		if (sc->sc_sessions[session].hs_state == HS_STATE_USED)
			sc->sc_sessions[session].hs_state = HS_STATE_KEY;
		return 0;
	} else if (err == ERESTART) {
		/*
		 * There weren't enough resources to dispatch the request
		 * to the part.  Notify the caller so they'll requeue this
		 * request and resubmit it again soon.
		 */
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev, "requeue request\n");
#endif
		free(cmd, M_DEVBUF);
		sc->sc_needwakeup |= CRYPTO_SYMQ;
		return (err);
	}

errout:
	if (cmd != NULL)
		free(cmd, M_DEVBUF);
	if (err == EINVAL)
		hifnstats.hst_invalid++;
	else
		hifnstats.hst_nomem++;
	crp->crp_etype = err;
	crypto_done(crp);
	return (err);
}

/*
 * Recover from a DMA abort.  Walk the result ring: complete any
 * requests the device already finished ("salvage"), and fail the rest
 * with ENOMEM after releasing their DMA resources.  Sessions whose
 * keys were resident in context RAM (HS_STATE_KEY) are knocked back to
 * HS_STATE_USED so the keys get re-uploaded, then the board is reset
 * and reinitialized.
 */
static void
hifn_abort(struct hifn_softc *sc)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_command *cmd;
	struct cryptop *crp;
	int i, u;

	i = dma->resk; u = dma->resu;
	while (u != 0) {
		cmd = dma->hifn_commands[i];
		KASSERT(cmd != NULL, ("hifn_abort: null command slot %u", i));
		dma->hifn_commands[i] = NULL;
		crp = cmd->crp;

		if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
			/* Salvage what we can. */
			u_int8_t *macbuf;

			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
				/* NOTE(review): MAC result appears to live at
				 * offset 12 of the result buffer — confirm
				 * against the result descriptor layout. */
				macbuf = dma->result_bufs[i];
				macbuf += 12;
			} else
				macbuf = NULL;
			hifnstats.hst_opackets++;
			hifn_callback(sc, cmd, macbuf);
		} else {
			/* Request was still in flight: tear it down. */
			if (cmd->src_map == cmd->dst_map) {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			} else {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
				    BUS_DMASYNC_POSTREAD);
			}

			if (cmd->src_m != cmd->dst_m) {
				m_freem(cmd->src_m);
				crp->crp_buf = (caddr_t)cmd->dst_m;
			}

			/* non-shared buffers cannot be restarted */
			if (cmd->src_map != cmd->dst_map) {
				/*
				 * XXX should be EAGAIN, delayed until
				 * after the reset.
				 */
				crp->crp_etype = ENOMEM;
				bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
				bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
			} else
				crp->crp_etype = ENOMEM;

			bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
			bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);

			free(cmd, M_DEVBUF);
			if (crp->crp_etype != EAGAIN)
				crypto_done(crp);
		}

		if (++i == HIFN_D_RES_RSIZE)
			i = 0;
		u--;
	}
	dma->resk = i; dma->resu = u;

	/* Force upload of key next time */
	for (i = 0; i < sc->sc_maxses; i++)
		if (sc->sc_sessions[i].hs_state == HS_STATE_KEY)
			sc->sc_sessions[i].hs_state = HS_STATE_USED;

	hifn_reset_board(sc, 1);
	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);
}

/*
 * Per-request completion.  Syncs the DMA maps, swaps a separately
 * allocated destination mbuf chain into the request (trimming it to
 * the source length), copies back any "slop" bytes staged in the
 * shared DMA area, retires destination-ring descriptors, saves the
 * trailing cipher block as the session's next IV (encrypt-only case),
 * copies out the MAC result, then frees the command's DMA resources
 * and completes the request via crypto_done().
 */
static void
hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct cryptop *crp = cmd->crp;
	struct cryptodesc *crd;
	struct mbuf *m;
	int totlen, i, u, ivlen;

	if (cmd->src_map == cmd->dst_map) {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    BUS_DMASYNC_POSTREAD);
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (cmd->src_m != cmd->dst_m) {
			/* Hand the freshly written dst chain back to the
			 * caller, clipped to the original data length. */
			crp->crp_buf = (caddr_t)cmd->dst_m;
			totlen = cmd->src_mapsize;
			for (m = cmd->dst_m; m != NULL; m = m->m_next) {
				if (totlen < m->m_len) {
					m->m_len = totlen;
					totlen = 0;
				} else
					totlen -= m->m_len;
			}
			cmd->dst_m->m_pkthdr.len = cmd->src_m->m_pkthdr.len;
			m_freem(cmd->src_m);
		}
	}

	if (cmd->sloplen != 0) {
		/* Tail bytes were bounced through dma->slop; copy back. */
		if (crp->crp_flags & CRYPTO_F_IMBUF)
			m_copyback((struct mbuf *)crp->crp_buf,
			    cmd->src_mapsize - cmd->sloplen,
			    cmd->sloplen, (caddr_t)&dma->slop[cmd->slopidx]);
		else if (crp->crp_flags & CRYPTO_F_IOV)
			cuio_copyback((struct uio *)crp->crp_buf,
			    cmd->src_mapsize - cmd->sloplen,
			    cmd->sloplen, (caddr_t)&dma->slop[cmd->slopidx]);
	}

	/* Retire consumed destination descriptors. */
	i = dma->dstk; u = dma->dstu;
	while (u != 0) {
		if (i == HIFN_D_DST_RSIZE)
			i = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		i++, u--;
	}
	dma->dstk = i; dma->dstu = u;

	hifnstats.hst_obytes += cmd->dst_mapsize;

	/* Encrypt (CRYPT set, DECODE clear): cache the last ciphertext
	 * block as the session's next IV for CBC chaining. */
	if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
	    HIFN_BASE_CMD_CRYPT) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (crd->crd_alg != CRYPTO_DES_CBC &&
			    crd->crd_alg != CRYPTO_3DES_CBC &&
			    crd->crd_alg != CRYPTO_AES_CBC)
				continue;
			ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
			    HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copydata((struct mbuf *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - ivlen, ivlen,
				    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			else if (crp->crp_flags & CRYPTO_F_IOV) {
				cuio_copydata((struct uio *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - ivlen, ivlen,
				    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			}
			break;
		}
	}

	if (macbuf != NULL) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			int len;

			/* Full digest for plain hashes, 96-bit truncation
			 * for HMAC (matches HIFN_MAC_CMD_TRUNC above). */
			if (crd->crd_alg == CRYPTO_MD5)
				len = 16;
			else if (crd->crd_alg == CRYPTO_SHA1)
				len = 20;
			else if (crd->crd_alg == CRYPTO_MD5_HMAC ||
			    crd->crd_alg == CRYPTO_SHA1_HMAC)
				len = 12;
			else
				continue;

			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copyback((struct mbuf *)crp->crp_buf,
				    crd->crd_inject, len, macbuf);
			else if ((crp->crp_flags & CRYPTO_F_IOV) && crp->crp_mac)
				bcopy((caddr_t)macbuf, crp->crp_mac, len);
			break;
		}
	}

	if (cmd->src_map != cmd->dst_map) {
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
	}
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	free(cmd, M_DEVBUF);
	crypto_done(crp);
}

/*
 * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
 * and Group 1 registers; avoid conditions that could create
 * burst writes by doing a read in between the writes.
 *
 * NB: The read we interpose is always to the same register;
 * we do this because reading from an arbitrary (e.g. last)
 * register may not always work.
2750 */ 2751 static void 2752 hifn_write_reg_0(struct hifn_softc *sc, bus_size_t reg, u_int32_t val) 2753 { 2754 if (sc->sc_flags & HIFN_IS_7811) { 2755 if (sc->sc_bar0_lastreg == reg - 4) 2756 bus_space_read_4(sc->sc_st0, sc->sc_sh0, HIFN_0_PUCNFG); 2757 sc->sc_bar0_lastreg = reg; 2758 } 2759 bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val); 2760 } 2761 2762 static void 2763 hifn_write_reg_1(struct hifn_softc *sc, bus_size_t reg, u_int32_t val) 2764 { 2765 if (sc->sc_flags & HIFN_IS_7811) { 2766 if (sc->sc_bar1_lastreg == reg - 4) 2767 bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID); 2768 sc->sc_bar1_lastreg = reg; 2769 } 2770 bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val); 2771 } 2772