1 /* $NetBSD: if_stge.c,v 1.32 2005/12/11 12:22:49 christos Exp $ */ 2 3 /*- 4 * SPDX-License-Identifier: BSD-2-Clause-NetBSD 5 * 6 * Copyright (c) 2001 The NetBSD Foundation, Inc. 7 * All rights reserved. 8 * 9 * This code is derived from software contributed to The NetBSD Foundation 10 * by Jason R. Thorpe. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 23 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 31 * POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34 /* 35 * Device driver for the Sundance Tech. TC9021 10/100/1000 36 * Ethernet controller. 37 */ 38 39 #include <sys/cdefs.h> 40 __FBSDID("$FreeBSD$"); 41 42 #ifdef HAVE_KERNEL_OPTION_HEADERS 43 #include "opt_device_polling.h" 44 #endif 45 46 #include <sys/param.h> 47 #include <sys/systm.h> 48 #include <sys/endian.h> 49 #include <sys/mbuf.h> 50 #include <sys/malloc.h> 51 #include <sys/kernel.h> 52 #include <sys/module.h> 53 #include <sys/socket.h> 54 #include <sys/sockio.h> 55 #include <sys/sysctl.h> 56 #include <sys/taskqueue.h> 57 58 #include <net/bpf.h> 59 #include <net/ethernet.h> 60 #include <net/if.h> 61 #include <net/if_var.h> 62 #include <net/if_dl.h> 63 #include <net/if_media.h> 64 #include <net/if_types.h> 65 #include <net/if_vlan_var.h> 66 67 #include <machine/bus.h> 68 #include <machine/resource.h> 69 #include <sys/bus.h> 70 #include <sys/rman.h> 71 72 #include <dev/mii/mii.h> 73 #include <dev/mii/mii_bitbang.h> 74 #include <dev/mii/miivar.h> 75 76 #include <dev/pci/pcireg.h> 77 #include <dev/pci/pcivar.h> 78 79 #include <dev/stge/if_stgereg.h> 80 81 #define STGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 82 83 MODULE_DEPEND(stge, pci, 1, 1, 1); 84 MODULE_DEPEND(stge, ether, 1, 1, 1); 85 MODULE_DEPEND(stge, miibus, 1, 1, 1); 86 87 /* "device miibus" required. See GENERIC if you get errors here. */ 88 #include "miibus_if.h" 89 90 /* 91 * Devices supported by this driver. 
92 */ 93 static const struct stge_product { 94 uint16_t stge_vendorid; 95 uint16_t stge_deviceid; 96 const char *stge_name; 97 } stge_products[] = { 98 { VENDOR_SUNDANCETI, DEVICEID_SUNDANCETI_ST1023, 99 "Sundance ST-1023 Gigabit Ethernet" }, 100 101 { VENDOR_SUNDANCETI, DEVICEID_SUNDANCETI_ST2021, 102 "Sundance ST-2021 Gigabit Ethernet" }, 103 104 { VENDOR_TAMARACK, DEVICEID_TAMARACK_TC9021, 105 "Tamarack TC9021 Gigabit Ethernet" }, 106 107 { VENDOR_TAMARACK, DEVICEID_TAMARACK_TC9021_ALT, 108 "Tamarack TC9021 Gigabit Ethernet" }, 109 110 /* 111 * The Sundance sample boards use the Sundance vendor ID, 112 * but the Tamarack product ID. 113 */ 114 { VENDOR_SUNDANCETI, DEVICEID_TAMARACK_TC9021, 115 "Sundance TC9021 Gigabit Ethernet" }, 116 117 { VENDOR_SUNDANCETI, DEVICEID_TAMARACK_TC9021_ALT, 118 "Sundance TC9021 Gigabit Ethernet" }, 119 120 { VENDOR_DLINK, DEVICEID_DLINK_DL4000, 121 "D-Link DL-4000 Gigabit Ethernet" }, 122 123 { VENDOR_ANTARES, DEVICEID_ANTARES_TC9021, 124 "Antares Gigabit Ethernet" } 125 }; 126 127 static int stge_probe(device_t); 128 static int stge_attach(device_t); 129 static int stge_detach(device_t); 130 static int stge_shutdown(device_t); 131 static int stge_suspend(device_t); 132 static int stge_resume(device_t); 133 134 static int stge_encap(struct stge_softc *, struct mbuf **); 135 static void stge_start(struct ifnet *); 136 static void stge_start_locked(struct ifnet *); 137 static void stge_watchdog(struct stge_softc *); 138 static int stge_ioctl(struct ifnet *, u_long, caddr_t); 139 static void stge_init(void *); 140 static void stge_init_locked(struct stge_softc *); 141 static void stge_vlan_setup(struct stge_softc *); 142 static void stge_stop(struct stge_softc *); 143 static void stge_start_tx(struct stge_softc *); 144 static void stge_start_rx(struct stge_softc *); 145 static void stge_stop_tx(struct stge_softc *); 146 static void stge_stop_rx(struct stge_softc *); 147 148 static void stge_reset(struct stge_softc *, uint32_t); 149 static int stge_eeprom_wait(struct stge_softc *); 150 static void stge_read_eeprom(struct stge_softc *, int, uint16_t *); 151 static void stge_tick(void *); 152 static void stge_stats_update(struct stge_softc *); 153 static void stge_set_filter(struct stge_softc *); 154 static void stge_set_multi(struct stge_softc *); 155 156 static void stge_link_task(void *, int); 157 static void stge_intr(void *); 158 static __inline int stge_tx_error(struct stge_softc *); 159 static void stge_txeof(struct stge_softc *); 160 static int stge_rxeof(struct stge_softc *); 161 static __inline void stge_discard_rxbuf(struct stge_softc *, int); 162 static int stge_newbuf(struct stge_softc *, int); 163 #ifndef __NO_STRICT_ALIGNMENT 164 static __inline struct mbuf *stge_fixup_rx(struct stge_softc *, struct mbuf *); 165 #endif 166 167 static int stge_miibus_readreg(device_t, int, int); 168 static int stge_miibus_writereg(device_t, int, int, int); 169 static void stge_miibus_statchg(device_t); 170 static int stge_mediachange(struct ifnet *); 171 static void stge_mediastatus(struct ifnet *, struct ifmediareq *); 172 173 static void stge_dmamap_cb(void *, bus_dma_segment_t *, int, int); 174 static int stge_dma_alloc(struct stge_softc *); 175 static void stge_dma_free(struct stge_softc *); 176 static void stge_dma_wait(struct stge_softc *); 177 static void stge_init_tx_ring(struct stge_softc *); 178 static int stge_init_rx_ring(struct stge_softc *); 179 #ifdef DEVICE_POLLING 180 static int stge_poll(struct ifnet *, enum poll_cmd, int); 181 #endif 182 183 
static void stge_setwol(struct stge_softc *);
184 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
185 static int sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS);
186 static int sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS);
187
188 /*
189 * MII bit-bang glue
190 */
191 static uint32_t stge_mii_bitbang_read(device_t);
192 static void stge_mii_bitbang_write(device_t, uint32_t);
193
194 static const struct mii_bitbang_ops stge_mii_bitbang_ops = {
195 stge_mii_bitbang_read,
196 stge_mii_bitbang_write,
197 {
198 PC_MgmtData, /* MII_BIT_MDO */
199 PC_MgmtData, /* MII_BIT_MDI */
200 PC_MgmtClk, /* MII_BIT_MDC */
201 PC_MgmtDir, /* MII_BIT_DIR_HOST_PHY */
202 0, /* MII_BIT_DIR_PHY_HOST */
203 }
204 };
205
206 static device_method_t stge_methods[] = {
207 /* Device interface */
208 DEVMETHOD(device_probe, stge_probe),
209 DEVMETHOD(device_attach, stge_attach),
210 DEVMETHOD(device_detach, stge_detach),
211 DEVMETHOD(device_shutdown, stge_shutdown),
212 DEVMETHOD(device_suspend, stge_suspend),
213 DEVMETHOD(device_resume, stge_resume),
214
215 /* MII interface */
216 DEVMETHOD(miibus_readreg, stge_miibus_readreg),
217 DEVMETHOD(miibus_writereg, stge_miibus_writereg),
218 DEVMETHOD(miibus_statchg, stge_miibus_statchg),
219
220 DEVMETHOD_END
221 };
222
223 static driver_t stge_driver = {
224 "stge",
225 stge_methods,
226 sizeof(struct stge_softc)
227 };
228
229 static devclass_t stge_devclass;
230
231 DRIVER_MODULE(stge, pci, stge_driver, stge_devclass, 0, 0);
232 DRIVER_MODULE(miibus, stge, miibus_driver, miibus_devclass, 0, 0);
233
234 static struct resource_spec stge_res_spec_io[] = {
235 { SYS_RES_IOPORT, PCIR_BAR(0), RF_ACTIVE },
236 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
237 { -1, 0, 0 }
238 };
239
240 static struct resource_spec stge_res_spec_mem[] = {
241 { SYS_RES_MEMORY, PCIR_BAR(1), RF_ACTIVE },
242 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
243 { -1, 0, 0 }
244 };
245
246 /*
247 * stge_mii_bitbang_read: [mii bit-bang interface function]
248 *
249 * Read the MII serial port for the MII bit-bang module.
250 */
251 static uint32_t
252 stge_mii_bitbang_read(device_t dev)
253 {
254 struct stge_softc *sc;
255 uint32_t val;
256
257 sc = device_get_softc(dev);
258
259 val = CSR_READ_1(sc, STGE_PhyCtrl);
260 CSR_BARRIER(sc, STGE_PhyCtrl, 1,
261 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
262 return (val);
263 }
264
265 /*
266 * stge_mii_bitbang_write: [mii bit-bang interface function]
267 *
268 * Write the MII serial port for the MII bit-bang module.
269 */
270 static void
271 stge_mii_bitbang_write(device_t dev, uint32_t val)
272 {
273 struct stge_softc *sc;
274
275 sc = device_get_softc(dev);
276
277 CSR_WRITE_1(sc, STGE_PhyCtrl, val);
278 CSR_BARRIER(sc, STGE_PhyCtrl, 1,
279 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
280 }
281
282 /*
283 * stge_miibus_readreg: [mii interface function]
284 *
285 * Read a PHY register on the MII of the TC9021.
286 */
287 static int
288 stge_miibus_readreg(device_t dev, int phy, int reg)
289 {
290 struct stge_softc *sc;
291 int error, val;
292
293 sc = device_get_softc(dev);
294
295 if (reg == STGE_PhyCtrl) {
296 /* XXX Allow ip1000phy(4) to read the STGE_PhyCtrl register.
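* The ip1000phy(4) PHY driver passes the STGE_PhyCtrl register
* offset here instead of a real MII register number, so it is
* answered with a direct CSR read below rather than a bit-bang
* MII cycle.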
*/ 297 STGE_MII_LOCK(sc); 298 error = CSR_READ_1(sc, STGE_PhyCtrl); 299 STGE_MII_UNLOCK(sc); 300 return (error); 301 } 302 303 STGE_MII_LOCK(sc); 304 val = mii_bitbang_readreg(dev, &stge_mii_bitbang_ops, phy, reg); 305 STGE_MII_UNLOCK(sc); 306 return (val); 307 } 308 309 /* 310 * stge_miibus_writereg: [mii interface function] 311 * 312 * Write a PHY register on the MII of the TC9021. 313 */ 314 static int 315 stge_miibus_writereg(device_t dev, int phy, int reg, int val) 316 { 317 struct stge_softc *sc; 318 319 sc = device_get_softc(dev); 320 321 STGE_MII_LOCK(sc); 322 mii_bitbang_writereg(dev, &stge_mii_bitbang_ops, phy, reg, val); 323 STGE_MII_UNLOCK(sc); 324 return (0); 325 } 326 327 /* 328 * stge_miibus_statchg: [mii interface function] 329 * 330 * Callback from MII layer when media changes. 331 */ 332 static void 333 stge_miibus_statchg(device_t dev) 334 { 335 struct stge_softc *sc; 336 337 sc = device_get_softc(dev); 338 taskqueue_enqueue(taskqueue_swi, &sc->sc_link_task); 339 } 340 341 /* 342 * stge_mediastatus: [ifmedia interface function] 343 * 344 * Get the current interface media status. 345 */ 346 static void 347 stge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 348 { 349 struct stge_softc *sc; 350 struct mii_data *mii; 351 352 sc = ifp->if_softc; 353 mii = device_get_softc(sc->sc_miibus); 354 355 mii_pollstat(mii); 356 ifmr->ifm_status = mii->mii_media_status; 357 ifmr->ifm_active = mii->mii_media_active; 358 } 359 360 /* 361 * stge_mediachange: [ifmedia interface function] 362 * 363 * Set hardware to newly-selected media. 364 */ 365 static int 366 stge_mediachange(struct ifnet *ifp) 367 { 368 struct stge_softc *sc; 369 struct mii_data *mii; 370 371 sc = ifp->if_softc; 372 mii = device_get_softc(sc->sc_miibus); 373 mii_mediachg(mii); 374 375 return (0); 376 } 377 378 static int 379 stge_eeprom_wait(struct stge_softc *sc) 380 { 381 int i; 382 383 for (i = 0; i < STGE_TIMEOUT; i++) { 384 DELAY(1000); 385 if ((CSR_READ_2(sc, STGE_EepromCtrl) & EC_EepromBusy) == 0) 386 return (0); 387 } 388 return (1); 389 } 390 391 /* 392 * stge_read_eeprom: 393 * 394 * Read data from the serial EEPROM. 
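*
* A minimal usage sketch (this mirrors the LED-mode read done in
* stge_attach() below):
*
*     uint16_t led;
*
*     stge_read_eeprom(sc, STGE_EEPROM_LEDMode, &led);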
395 */ 396 static void 397 stge_read_eeprom(struct stge_softc *sc, int offset, uint16_t *data) 398 { 399 400 if (stge_eeprom_wait(sc)) 401 device_printf(sc->sc_dev, "EEPROM failed to come ready\n"); 402 403 CSR_WRITE_2(sc, STGE_EepromCtrl, 404 EC_EepromAddress(offset) | EC_EepromOpcode(EC_OP_RR)); 405 if (stge_eeprom_wait(sc)) 406 device_printf(sc->sc_dev, "EEPROM read timed out\n"); 407 *data = CSR_READ_2(sc, STGE_EepromData); 408 } 409 410 411 static int 412 stge_probe(device_t dev) 413 { 414 const struct stge_product *sp; 415 int i; 416 uint16_t vendor, devid; 417 418 vendor = pci_get_vendor(dev); 419 devid = pci_get_device(dev); 420 sp = stge_products; 421 for (i = 0; i < nitems(stge_products); i++, sp++) { 422 if (vendor == sp->stge_vendorid && 423 devid == sp->stge_deviceid) { 424 device_set_desc(dev, sp->stge_name); 425 return (BUS_PROBE_DEFAULT); 426 } 427 } 428 429 return (ENXIO); 430 } 431 432 static int 433 stge_attach(device_t dev) 434 { 435 struct stge_softc *sc; 436 struct ifnet *ifp; 437 uint8_t enaddr[ETHER_ADDR_LEN]; 438 int error, flags, i; 439 uint16_t cmd; 440 uint32_t val; 441 442 error = 0; 443 sc = device_get_softc(dev); 444 sc->sc_dev = dev; 445 446 mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 447 MTX_DEF); 448 mtx_init(&sc->sc_mii_mtx, "stge_mii_mutex", NULL, MTX_DEF); 449 callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0); 450 TASK_INIT(&sc->sc_link_task, 0, stge_link_task, sc); 451 452 /* 453 * Map the device. 454 */ 455 pci_enable_busmaster(dev); 456 cmd = pci_read_config(dev, PCIR_COMMAND, 2); 457 val = pci_read_config(dev, PCIR_BAR(1), 4); 458 if (PCI_BAR_IO(val)) 459 sc->sc_spec = stge_res_spec_mem; 460 else { 461 val = pci_read_config(dev, PCIR_BAR(0), 4); 462 if (!PCI_BAR_IO(val)) { 463 device_printf(sc->sc_dev, "couldn't locate IO BAR\n"); 464 error = ENXIO; 465 goto fail; 466 } 467 sc->sc_spec = stge_res_spec_io; 468 } 469 error = bus_alloc_resources(dev, sc->sc_spec, sc->sc_res); 470 if (error != 0) { 471 device_printf(dev, "couldn't allocate %s resources\n", 472 sc->sc_spec == stge_res_spec_mem ? "memory" : "I/O"); 473 goto fail; 474 } 475 sc->sc_rev = pci_get_revid(dev); 476 477 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 478 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, 479 "rxint_nframe", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 480 &sc->sc_rxint_nframe, 0, sysctl_hw_stge_rxint_nframe, "I", 481 "stge rx interrupt nframe"); 482 483 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 484 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, 485 "rxint_dmawait", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 486 &sc->sc_rxint_dmawait, 0, sysctl_hw_stge_rxint_dmawait, "I", 487 "stge rx interrupt dmawait"); 488 489 /* Pull in device tunables. 
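* These are standard device hints read via resource_int_value(9);
* e.g. a /boot/device.hints entry such as
*
*     hint.stge.0.rxint_nframe="8"
*
* (an illustrative value) overrides the default and is clamped to
* the STGE_RXINT_NFRAME_{MIN,MAX} range below.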
*/ 490 sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT; 491 error = resource_int_value(device_get_name(dev), device_get_unit(dev), 492 "rxint_nframe", &sc->sc_rxint_nframe); 493 if (error == 0) { 494 if (sc->sc_rxint_nframe < STGE_RXINT_NFRAME_MIN || 495 sc->sc_rxint_nframe > STGE_RXINT_NFRAME_MAX) { 496 device_printf(dev, "rxint_nframe value out of range; " 497 "using default: %d\n", STGE_RXINT_NFRAME_DEFAULT); 498 sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT; 499 } 500 } 501 502 sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT; 503 error = resource_int_value(device_get_name(dev), device_get_unit(dev), 504 "rxint_dmawait", &sc->sc_rxint_dmawait); 505 if (error == 0) { 506 if (sc->sc_rxint_dmawait < STGE_RXINT_DMAWAIT_MIN || 507 sc->sc_rxint_dmawait > STGE_RXINT_DMAWAIT_MAX) { 508 device_printf(dev, "rxint_dmawait value out of range; " 509 "using default: %d\n", STGE_RXINT_DMAWAIT_DEFAULT); 510 sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT; 511 } 512 } 513 514 if ((error = stge_dma_alloc(sc)) != 0) 515 goto fail; 516 517 /* 518 * Determine if we're copper or fiber. It affects how we 519 * reset the card. 520 */ 521 if (CSR_READ_4(sc, STGE_AsicCtrl) & AC_PhyMedia) 522 sc->sc_usefiber = 1; 523 else 524 sc->sc_usefiber = 0; 525 526 /* Load LED configuration from EEPROM. */ 527 stge_read_eeprom(sc, STGE_EEPROM_LEDMode, &sc->sc_led); 528 529 /* 530 * Reset the chip to a known state. 531 */ 532 STGE_LOCK(sc); 533 stge_reset(sc, STGE_RESET_FULL); 534 STGE_UNLOCK(sc); 535 536 /* 537 * Reading the station address from the EEPROM doesn't seem 538 * to work, at least on my sample boards. Instead, since 539 * the reset sequence does AutoInit, read it from the station 540 * address registers. For Sundance 1023 you can only read it 541 * from EEPROM. 542 */ 543 if (pci_get_device(dev) != DEVICEID_SUNDANCETI_ST1023) { 544 uint16_t v; 545 546 v = CSR_READ_2(sc, STGE_StationAddress0); 547 enaddr[0] = v & 0xff; 548 enaddr[1] = v >> 8; 549 v = CSR_READ_2(sc, STGE_StationAddress1); 550 enaddr[2] = v & 0xff; 551 enaddr[3] = v >> 8; 552 v = CSR_READ_2(sc, STGE_StationAddress2); 553 enaddr[4] = v & 0xff; 554 enaddr[5] = v >> 8; 555 sc->sc_stge1023 = 0; 556 } else { 557 uint16_t myaddr[ETHER_ADDR_LEN / 2]; 558 for (i = 0; i <ETHER_ADDR_LEN / 2; i++) { 559 stge_read_eeprom(sc, STGE_EEPROM_StationAddress0 + i, 560 &myaddr[i]); 561 myaddr[i] = le16toh(myaddr[i]); 562 } 563 bcopy(myaddr, enaddr, sizeof(enaddr)); 564 sc->sc_stge1023 = 1; 565 } 566 567 ifp = sc->sc_ifp = if_alloc(IFT_ETHER); 568 if (ifp == NULL) { 569 device_printf(sc->sc_dev, "failed to if_alloc()\n"); 570 error = ENXIO; 571 goto fail; 572 } 573 574 ifp->if_softc = sc; 575 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 576 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 577 ifp->if_ioctl = stge_ioctl; 578 ifp->if_start = stge_start; 579 ifp->if_init = stge_init; 580 ifp->if_snd.ifq_drv_maxlen = STGE_TX_RING_CNT - 1; 581 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); 582 IFQ_SET_READY(&ifp->if_snd); 583 /* Revision B3 and earlier chips have checksum bug. */ 584 if (sc->sc_rev >= 0x0c) { 585 ifp->if_hwassist = STGE_CSUM_FEATURES; 586 ifp->if_capabilities = IFCAP_HWCSUM; 587 } else { 588 ifp->if_hwassist = 0; 589 ifp->if_capabilities = 0; 590 } 591 ifp->if_capabilities |= IFCAP_WOL_MAGIC; 592 ifp->if_capenable = ifp->if_capabilities; 593 594 /* 595 * Read some important bits from the PhyCtrl register. 
596 */ 597 sc->sc_PhyCtrl = CSR_READ_1(sc, STGE_PhyCtrl) & 598 (PC_PhyDuplexPolarity | PC_PhyLnkPolarity); 599 600 /* Set up MII bus. */ 601 flags = MIIF_DOPAUSE; 602 if (sc->sc_rev >= 0x40 && sc->sc_rev <= 0x4e) 603 flags |= MIIF_MACPRIV0; 604 error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, stge_mediachange, 605 stge_mediastatus, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 606 flags); 607 if (error != 0) { 608 device_printf(sc->sc_dev, "attaching PHYs failed\n"); 609 goto fail; 610 } 611 612 ether_ifattach(ifp, enaddr); 613 614 /* VLAN capability setup */ 615 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING; 616 if (sc->sc_rev >= 0x0c) 617 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM; 618 ifp->if_capenable = ifp->if_capabilities; 619 #ifdef DEVICE_POLLING 620 ifp->if_capabilities |= IFCAP_POLLING; 621 #endif 622 /* 623 * Tell the upper layer(s) we support long frames. 624 * Must appear after the call to ether_ifattach() because 625 * ether_ifattach() sets ifi_hdrlen to the default value. 626 */ 627 ifp->if_hdrlen = sizeof(struct ether_vlan_header); 628 629 /* 630 * The manual recommends disabling early transmit, so we 631 * do. It's disabled anyway, if using IP checksumming, 632 * since the entire packet must be in the FIFO in order 633 * for the chip to perform the checksum. 634 */ 635 sc->sc_txthresh = 0x0fff; 636 637 /* 638 * Disable MWI if the PCI layer tells us to. 639 */ 640 sc->sc_DMACtrl = 0; 641 if ((cmd & PCIM_CMD_MWRICEN) == 0) 642 sc->sc_DMACtrl |= DMAC_MWIDisable; 643 644 /* 645 * Hookup IRQ 646 */ 647 error = bus_setup_intr(dev, sc->sc_res[1], INTR_TYPE_NET | INTR_MPSAFE, 648 NULL, stge_intr, sc, &sc->sc_ih); 649 if (error != 0) { 650 ether_ifdetach(ifp); 651 device_printf(sc->sc_dev, "couldn't set up IRQ\n"); 652 sc->sc_ifp = NULL; 653 goto fail; 654 } 655 656 fail: 657 if (error != 0) 658 stge_detach(dev); 659 660 return (error); 661 } 662 663 static int 664 stge_detach(device_t dev) 665 { 666 struct stge_softc *sc; 667 struct ifnet *ifp; 668 669 sc = device_get_softc(dev); 670 671 ifp = sc->sc_ifp; 672 #ifdef DEVICE_POLLING 673 if (ifp && ifp->if_capenable & IFCAP_POLLING) 674 ether_poll_deregister(ifp); 675 #endif 676 if (device_is_attached(dev)) { 677 STGE_LOCK(sc); 678 /* XXX */ 679 sc->sc_detach = 1; 680 stge_stop(sc); 681 STGE_UNLOCK(sc); 682 callout_drain(&sc->sc_tick_ch); 683 taskqueue_drain(taskqueue_swi, &sc->sc_link_task); 684 ether_ifdetach(ifp); 685 } 686 687 if (sc->sc_miibus != NULL) { 688 device_delete_child(dev, sc->sc_miibus); 689 sc->sc_miibus = NULL; 690 } 691 bus_generic_detach(dev); 692 stge_dma_free(sc); 693 694 if (ifp != NULL) { 695 if_free(ifp); 696 sc->sc_ifp = NULL; 697 } 698 699 if (sc->sc_ih) { 700 bus_teardown_intr(dev, sc->sc_res[1], sc->sc_ih); 701 sc->sc_ih = NULL; 702 } 703 bus_release_resources(dev, sc->sc_spec, sc->sc_res); 704 705 mtx_destroy(&sc->sc_mii_mtx); 706 mtx_destroy(&sc->sc_mtx); 707 708 return (0); 709 } 710 711 struct stge_dmamap_arg { 712 bus_addr_t stge_busaddr; 713 }; 714 715 static void 716 stge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 717 { 718 struct stge_dmamap_arg *ctx; 719 720 if (error != 0) 721 return; 722 723 ctx = (struct stge_dmamap_arg *)arg; 724 ctx->stge_busaddr = segs[0].ds_addr; 725 } 726 727 static int 728 stge_dma_alloc(struct stge_softc *sc) 729 { 730 struct stge_dmamap_arg ctx; 731 struct stge_txdesc *txd; 732 struct stge_rxdesc *rxd; 733 int error, i; 734 735 /* create parent tag. 
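* All of the ring and buffer tags below are created with this tag
* as their parent, so bus_dma_tag_create(9) folds the
* STGE_DMA_MAXADDR restriction into each of them.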
*/ 736 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),/* parent */ 737 1, 0, /* algnmnt, boundary */ 738 STGE_DMA_MAXADDR, /* lowaddr */ 739 BUS_SPACE_MAXADDR, /* highaddr */ 740 NULL, NULL, /* filter, filterarg */ 741 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 742 0, /* nsegments */ 743 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 744 0, /* flags */ 745 NULL, NULL, /* lockfunc, lockarg */ 746 &sc->sc_cdata.stge_parent_tag); 747 if (error != 0) { 748 device_printf(sc->sc_dev, "failed to create parent DMA tag\n"); 749 goto fail; 750 } 751 /* create tag for Tx ring. */ 752 error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */ 753 STGE_RING_ALIGN, 0, /* algnmnt, boundary */ 754 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 755 BUS_SPACE_MAXADDR, /* highaddr */ 756 NULL, NULL, /* filter, filterarg */ 757 STGE_TX_RING_SZ, /* maxsize */ 758 1, /* nsegments */ 759 STGE_TX_RING_SZ, /* maxsegsize */ 760 0, /* flags */ 761 NULL, NULL, /* lockfunc, lockarg */ 762 &sc->sc_cdata.stge_tx_ring_tag); 763 if (error != 0) { 764 device_printf(sc->sc_dev, 765 "failed to allocate Tx ring DMA tag\n"); 766 goto fail; 767 } 768 769 /* create tag for Rx ring. */ 770 error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */ 771 STGE_RING_ALIGN, 0, /* algnmnt, boundary */ 772 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 773 BUS_SPACE_MAXADDR, /* highaddr */ 774 NULL, NULL, /* filter, filterarg */ 775 STGE_RX_RING_SZ, /* maxsize */ 776 1, /* nsegments */ 777 STGE_RX_RING_SZ, /* maxsegsize */ 778 0, /* flags */ 779 NULL, NULL, /* lockfunc, lockarg */ 780 &sc->sc_cdata.stge_rx_ring_tag); 781 if (error != 0) { 782 device_printf(sc->sc_dev, 783 "failed to allocate Rx ring DMA tag\n"); 784 goto fail; 785 } 786 787 /* create tag for Tx buffers. */ 788 error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */ 789 1, 0, /* algnmnt, boundary */ 790 BUS_SPACE_MAXADDR, /* lowaddr */ 791 BUS_SPACE_MAXADDR, /* highaddr */ 792 NULL, NULL, /* filter, filterarg */ 793 MCLBYTES * STGE_MAXTXSEGS, /* maxsize */ 794 STGE_MAXTXSEGS, /* nsegments */ 795 MCLBYTES, /* maxsegsize */ 796 0, /* flags */ 797 NULL, NULL, /* lockfunc, lockarg */ 798 &sc->sc_cdata.stge_tx_tag); 799 if (error != 0) { 800 device_printf(sc->sc_dev, "failed to allocate Tx DMA tag\n"); 801 goto fail; 802 } 803 804 /* create tag for Rx buffers. */ 805 error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */ 806 1, 0, /* algnmnt, boundary */ 807 BUS_SPACE_MAXADDR, /* lowaddr */ 808 BUS_SPACE_MAXADDR, /* highaddr */ 809 NULL, NULL, /* filter, filterarg */ 810 MCLBYTES, /* maxsize */ 811 1, /* nsegments */ 812 MCLBYTES, /* maxsegsize */ 813 0, /* flags */ 814 NULL, NULL, /* lockfunc, lockarg */ 815 &sc->sc_cdata.stge_rx_tag); 816 if (error != 0) { 817 device_printf(sc->sc_dev, "failed to allocate Rx DMA tag\n"); 818 goto fail; 819 } 820 821 /* allocate DMA'able memory and load the DMA map for Tx ring. 
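* Because the load below is requested with BUS_DMA_NOWAIT it never
* defers: stge_dmamap_cb() runs before bus_dmamap_load(9) returns,
* and it bails out early on error, which is why the
* "error != 0 || ctx.stge_busaddr == 0" test afterwards is
* sufficient.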
*/ 822 error = bus_dmamem_alloc(sc->sc_cdata.stge_tx_ring_tag, 823 (void **)&sc->sc_rdata.stge_tx_ring, BUS_DMA_NOWAIT | 824 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sc_cdata.stge_tx_ring_map); 825 if (error != 0) { 826 device_printf(sc->sc_dev, 827 "failed to allocate DMA'able memory for Tx ring\n"); 828 goto fail; 829 } 830 831 ctx.stge_busaddr = 0; 832 error = bus_dmamap_load(sc->sc_cdata.stge_tx_ring_tag, 833 sc->sc_cdata.stge_tx_ring_map, sc->sc_rdata.stge_tx_ring, 834 STGE_TX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 835 if (error != 0 || ctx.stge_busaddr == 0) { 836 device_printf(sc->sc_dev, 837 "failed to load DMA'able memory for Tx ring\n"); 838 goto fail; 839 } 840 sc->sc_rdata.stge_tx_ring_paddr = ctx.stge_busaddr; 841 842 /* allocate DMA'able memory and load the DMA map for Rx ring. */ 843 error = bus_dmamem_alloc(sc->sc_cdata.stge_rx_ring_tag, 844 (void **)&sc->sc_rdata.stge_rx_ring, BUS_DMA_NOWAIT | 845 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sc_cdata.stge_rx_ring_map); 846 if (error != 0) { 847 device_printf(sc->sc_dev, 848 "failed to allocate DMA'able memory for Rx ring\n"); 849 goto fail; 850 } 851 852 ctx.stge_busaddr = 0; 853 error = bus_dmamap_load(sc->sc_cdata.stge_rx_ring_tag, 854 sc->sc_cdata.stge_rx_ring_map, sc->sc_rdata.stge_rx_ring, 855 STGE_RX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 856 if (error != 0 || ctx.stge_busaddr == 0) { 857 device_printf(sc->sc_dev, 858 "failed to load DMA'able memory for Rx ring\n"); 859 goto fail; 860 } 861 sc->sc_rdata.stge_rx_ring_paddr = ctx.stge_busaddr; 862 863 /* create DMA maps for Tx buffers. */ 864 for (i = 0; i < STGE_TX_RING_CNT; i++) { 865 txd = &sc->sc_cdata.stge_txdesc[i]; 866 txd->tx_m = NULL; 867 txd->tx_dmamap = 0; 868 error = bus_dmamap_create(sc->sc_cdata.stge_tx_tag, 0, 869 &txd->tx_dmamap); 870 if (error != 0) { 871 device_printf(sc->sc_dev, 872 "failed to create Tx dmamap\n"); 873 goto fail; 874 } 875 } 876 /* create DMA maps for Rx buffers. 
*/ 877 if ((error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0, 878 &sc->sc_cdata.stge_rx_sparemap)) != 0) { 879 device_printf(sc->sc_dev, "failed to create spare Rx dmamap\n"); 880 goto fail; 881 } 882 for (i = 0; i < STGE_RX_RING_CNT; i++) { 883 rxd = &sc->sc_cdata.stge_rxdesc[i]; 884 rxd->rx_m = NULL; 885 rxd->rx_dmamap = 0; 886 error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0, 887 &rxd->rx_dmamap); 888 if (error != 0) { 889 device_printf(sc->sc_dev, 890 "failed to create Rx dmamap\n"); 891 goto fail; 892 } 893 } 894 895 fail: 896 return (error); 897 } 898 899 static void 900 stge_dma_free(struct stge_softc *sc) 901 { 902 struct stge_txdesc *txd; 903 struct stge_rxdesc *rxd; 904 int i; 905 906 /* Tx ring */ 907 if (sc->sc_cdata.stge_tx_ring_tag) { 908 if (sc->sc_rdata.stge_tx_ring_paddr) 909 bus_dmamap_unload(sc->sc_cdata.stge_tx_ring_tag, 910 sc->sc_cdata.stge_tx_ring_map); 911 if (sc->sc_rdata.stge_tx_ring) 912 bus_dmamem_free(sc->sc_cdata.stge_tx_ring_tag, 913 sc->sc_rdata.stge_tx_ring, 914 sc->sc_cdata.stge_tx_ring_map); 915 sc->sc_rdata.stge_tx_ring = NULL; 916 sc->sc_rdata.stge_tx_ring_paddr = 0; 917 bus_dma_tag_destroy(sc->sc_cdata.stge_tx_ring_tag); 918 sc->sc_cdata.stge_tx_ring_tag = NULL; 919 } 920 /* Rx ring */ 921 if (sc->sc_cdata.stge_rx_ring_tag) { 922 if (sc->sc_rdata.stge_rx_ring_paddr) 923 bus_dmamap_unload(sc->sc_cdata.stge_rx_ring_tag, 924 sc->sc_cdata.stge_rx_ring_map); 925 if (sc->sc_rdata.stge_rx_ring) 926 bus_dmamem_free(sc->sc_cdata.stge_rx_ring_tag, 927 sc->sc_rdata.stge_rx_ring, 928 sc->sc_cdata.stge_rx_ring_map); 929 sc->sc_rdata.stge_rx_ring = NULL; 930 sc->sc_rdata.stge_rx_ring_paddr = 0; 931 bus_dma_tag_destroy(sc->sc_cdata.stge_rx_ring_tag); 932 sc->sc_cdata.stge_rx_ring_tag = NULL; 933 } 934 /* Tx buffers */ 935 if (sc->sc_cdata.stge_tx_tag) { 936 for (i = 0; i < STGE_TX_RING_CNT; i++) { 937 txd = &sc->sc_cdata.stge_txdesc[i]; 938 if (txd->tx_dmamap) { 939 bus_dmamap_destroy(sc->sc_cdata.stge_tx_tag, 940 txd->tx_dmamap); 941 txd->tx_dmamap = 0; 942 } 943 } 944 bus_dma_tag_destroy(sc->sc_cdata.stge_tx_tag); 945 sc->sc_cdata.stge_tx_tag = NULL; 946 } 947 /* Rx buffers */ 948 if (sc->sc_cdata.stge_rx_tag) { 949 for (i = 0; i < STGE_RX_RING_CNT; i++) { 950 rxd = &sc->sc_cdata.stge_rxdesc[i]; 951 if (rxd->rx_dmamap) { 952 bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag, 953 rxd->rx_dmamap); 954 rxd->rx_dmamap = 0; 955 } 956 } 957 if (sc->sc_cdata.stge_rx_sparemap) { 958 bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag, 959 sc->sc_cdata.stge_rx_sparemap); 960 sc->sc_cdata.stge_rx_sparemap = 0; 961 } 962 bus_dma_tag_destroy(sc->sc_cdata.stge_rx_tag); 963 sc->sc_cdata.stge_rx_tag = NULL; 964 } 965 966 if (sc->sc_cdata.stge_parent_tag) { 967 bus_dma_tag_destroy(sc->sc_cdata.stge_parent_tag); 968 sc->sc_cdata.stge_parent_tag = NULL; 969 } 970 } 971 972 /* 973 * stge_shutdown: 974 * 975 * Make sure the interface is stopped at reboot time. 976 */ 977 static int 978 stge_shutdown(device_t dev) 979 { 980 981 return (stge_suspend(dev)); 982 } 983 984 static void 985 stge_setwol(struct stge_softc *sc) 986 { 987 struct ifnet *ifp; 988 uint8_t v; 989 990 STGE_LOCK_ASSERT(sc); 991 992 ifp = sc->sc_ifp; 993 v = CSR_READ_1(sc, STGE_WakeEvent); 994 /* Disable all WOL bits. */ 995 v &= ~(WE_WakePktEnable | WE_MagicPktEnable | WE_LinkEventEnable | 996 WE_WakeOnLanEnable); 997 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) 998 v |= WE_MagicPktEnable | WE_WakeOnLanEnable; 999 CSR_WRITE_1(sc, STGE_WakeEvent, v); 1000 /* Reset Tx and prevent transmission. 
*/
1001 CSR_WRITE_4(sc, STGE_AsicCtrl,
1002 CSR_READ_4(sc, STGE_AsicCtrl) | AC_TxReset);
1003 /*
1004 * The TC9021 automatically resets its link speed to 100Mbps when it
1005 * is put into sleep, so there is no need to try to reset the link speed.
1006 */
1007 }
1008
1009 static int
1010 stge_suspend(device_t dev)
1011 {
1012 struct stge_softc *sc;
1013
1014 sc = device_get_softc(dev);
1015
1016 STGE_LOCK(sc);
1017 stge_stop(sc);
1018 sc->sc_suspended = 1;
1019 stge_setwol(sc);
1020 STGE_UNLOCK(sc);
1021
1022 return (0);
1023 }
1024
1025 static int
1026 stge_resume(device_t dev)
1027 {
1028 struct stge_softc *sc;
1029 struct ifnet *ifp;
1030 uint8_t v;
1031
1032 sc = device_get_softc(dev);
1033
1034 STGE_LOCK(sc);
1035 /*
1036 * Clear the WOL bits, so special frames no longer interfere with
1037 * normal Rx operation.
1038 */
1039 v = CSR_READ_1(sc, STGE_WakeEvent);
1040 v &= ~(WE_WakePktEnable | WE_MagicPktEnable | WE_LinkEventEnable |
1041 WE_WakeOnLanEnable);
1042 CSR_WRITE_1(sc, STGE_WakeEvent, v);
1043 ifp = sc->sc_ifp;
1044 if (ifp->if_flags & IFF_UP)
1045 stge_init_locked(sc);
1046
1047 sc->sc_suspended = 0;
1048 STGE_UNLOCK(sc);
1049
1050 return (0);
1051 }
1052
1053 static void
1054 stge_dma_wait(struct stge_softc *sc)
1055 {
1056 int i;
1057
1058 for (i = 0; i < STGE_TIMEOUT; i++) {
1059 DELAY(2);
1060 if ((CSR_READ_4(sc, STGE_DMACtrl) & DMAC_TxDMAInProg) == 0)
1061 break;
1062 }
1063
1064 if (i == STGE_TIMEOUT)
1065 device_printf(sc->sc_dev, "DMA wait timed out\n");
1066 }
1067
1068 static int
1069 stge_encap(struct stge_softc *sc, struct mbuf **m_head)
1070 {
1071 struct stge_txdesc *txd;
1072 struct stge_tfd *tfd;
1073 struct mbuf *m;
1074 bus_dma_segment_t txsegs[STGE_MAXTXSEGS];
1075 int error, i, nsegs, si;
1076 uint64_t csum_flags, tfc;
1077
1078 STGE_LOCK_ASSERT(sc);
1079
1080 if ((txd = STAILQ_FIRST(&sc->sc_cdata.stge_txfreeq)) == NULL)
1081 return (ENOBUFS);
1082
1083 error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_tx_tag,
1084 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
1085 if (error == EFBIG) {
1086 m = m_collapse(*m_head, M_NOWAIT, STGE_MAXTXSEGS);
1087 if (m == NULL) {
1088 m_freem(*m_head);
1089 *m_head = NULL;
1090 return (ENOMEM);
1091 }
1092 *m_head = m;
1093 error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_tx_tag,
1094 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
1095 if (error != 0) {
1096 m_freem(*m_head);
1097 *m_head = NULL;
1098 return (error);
1099 }
1100 } else if (error != 0)
1101 return (error);
1102 if (nsegs == 0) {
1103 m_freem(*m_head);
1104 *m_head = NULL;
1105 return (EIO);
1106 }
1107
1108 m = *m_head;
1109 csum_flags = 0;
1110 if ((m->m_pkthdr.csum_flags & STGE_CSUM_FEATURES) != 0) {
1111 if (m->m_pkthdr.csum_flags & CSUM_IP)
1112 csum_flags |= TFD_IPChecksumEnable;
1113 if (m->m_pkthdr.csum_flags & CSUM_TCP)
1114 csum_flags |= TFD_TCPChecksumEnable;
1115 else if (m->m_pkthdr.csum_flags & CSUM_UDP)
1116 csum_flags |= TFD_UDPChecksumEnable;
1117 }
1118
1119 si = sc->sc_cdata.stge_tx_prod;
1120 tfd = &sc->sc_rdata.stge_tx_ring[si];
1121 for (i = 0; i < nsegs; i++)
1122 tfd->tfd_frags[i].frag_word0 =
1123 htole64(FRAG_ADDR(txsegs[i].ds_addr) |
1124 FRAG_LEN(txsegs[i].ds_len));
1125 sc->sc_cdata.stge_tx_cnt++;
1126
1127 tfc = TFD_FrameId(si) | TFD_WordAlign(TFD_WordAlign_disable) |
1128 TFD_FragCount(nsegs) | csum_flags;
1129 if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT)
1130 tfc |= TFD_TxDMAIndicate;
1131
1132 /* Update producer index. */
1133 sc->sc_cdata.stge_tx_prod = (si + 1) % STGE_TX_RING_CNT;
1134
1135 /* Check if we have a VLAN tag to insert.
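* When the stack has tagged the mbuf (M_VLANTAG set, tag in
* ether_vtag), the 802.1Q tag is inserted through the TFD rather
* than the global STGE_VLANTag register; see the comment in
* stge_vlan_setup() for why the TFD source always wins.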
*/ 1136 if (m->m_flags & M_VLANTAG) 1137 tfc |= (TFD_VLANTagInsert | TFD_VID(m->m_pkthdr.ether_vtag)); 1138 tfd->tfd_control = htole64(tfc); 1139 1140 /* Update Tx Queue. */ 1141 STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txfreeq, tx_q); 1142 STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txbusyq, txd, tx_q); 1143 txd->tx_m = m; 1144 1145 /* Sync descriptors. */ 1146 bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap, 1147 BUS_DMASYNC_PREWRITE); 1148 bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag, 1149 sc->sc_cdata.stge_tx_ring_map, 1150 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1151 1152 return (0); 1153 } 1154 1155 /* 1156 * stge_start: [ifnet interface function] 1157 * 1158 * Start packet transmission on the interface. 1159 */ 1160 static void 1161 stge_start(struct ifnet *ifp) 1162 { 1163 struct stge_softc *sc; 1164 1165 sc = ifp->if_softc; 1166 STGE_LOCK(sc); 1167 stge_start_locked(ifp); 1168 STGE_UNLOCK(sc); 1169 } 1170 1171 static void 1172 stge_start_locked(struct ifnet *ifp) 1173 { 1174 struct stge_softc *sc; 1175 struct mbuf *m_head; 1176 int enq; 1177 1178 sc = ifp->if_softc; 1179 1180 STGE_LOCK_ASSERT(sc); 1181 1182 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) != 1183 IFF_DRV_RUNNING || sc->sc_link == 0) 1184 return; 1185 1186 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) { 1187 if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT) { 1188 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1189 break; 1190 } 1191 1192 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 1193 if (m_head == NULL) 1194 break; 1195 /* 1196 * Pack the data into the transmit ring. If we 1197 * don't have room, set the OACTIVE flag and wait 1198 * for the NIC to drain the ring. 1199 */ 1200 if (stge_encap(sc, &m_head)) { 1201 if (m_head == NULL) 1202 break; 1203 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 1204 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1205 break; 1206 } 1207 1208 enq++; 1209 /* 1210 * If there's a BPF listener, bounce a copy of this frame 1211 * to him. 1212 */ 1213 ETHER_BPF_MTAP(ifp, m_head); 1214 } 1215 1216 if (enq > 0) { 1217 /* Transmit */ 1218 CSR_WRITE_4(sc, STGE_DMACtrl, DMAC_TxDMAPollNow); 1219 1220 /* Set a timeout in case the chip goes out to lunch. */ 1221 sc->sc_watchdog_timer = 5; 1222 } 1223 } 1224 1225 /* 1226 * stge_watchdog: 1227 * 1228 * Watchdog timer handler. 1229 */ 1230 static void 1231 stge_watchdog(struct stge_softc *sc) 1232 { 1233 struct ifnet *ifp; 1234 1235 STGE_LOCK_ASSERT(sc); 1236 1237 if (sc->sc_watchdog_timer == 0 || --sc->sc_watchdog_timer) 1238 return; 1239 1240 ifp = sc->sc_ifp; 1241 if_printf(sc->sc_ifp, "device timeout\n"); 1242 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1243 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1244 stge_init_locked(sc); 1245 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1246 stge_start_locked(ifp); 1247 } 1248 1249 /* 1250 * stge_ioctl: [ifnet interface function] 1251 * 1252 * Handle control requests from the operator. 
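*
* For example, "ifconfig stge0 mtu 9000" lands in the SIOCSIFMTU
* case below (assuming 9000 does not exceed STGE_JUMBO_MTU) and
* forces a re-initialization of a running interface so the new
* frame size is programmed into the chip.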
1253 */ 1254 static int 1255 stge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1256 { 1257 struct stge_softc *sc; 1258 struct ifreq *ifr; 1259 struct mii_data *mii; 1260 int error, mask; 1261 1262 sc = ifp->if_softc; 1263 ifr = (struct ifreq *)data; 1264 error = 0; 1265 switch (cmd) { 1266 case SIOCSIFMTU: 1267 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > STGE_JUMBO_MTU) 1268 error = EINVAL; 1269 else if (ifp->if_mtu != ifr->ifr_mtu) { 1270 ifp->if_mtu = ifr->ifr_mtu; 1271 STGE_LOCK(sc); 1272 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1273 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1274 stge_init_locked(sc); 1275 } 1276 STGE_UNLOCK(sc); 1277 } 1278 break; 1279 case SIOCSIFFLAGS: 1280 STGE_LOCK(sc); 1281 if ((ifp->if_flags & IFF_UP) != 0) { 1282 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1283 if (((ifp->if_flags ^ sc->sc_if_flags) 1284 & IFF_PROMISC) != 0) 1285 stge_set_filter(sc); 1286 } else { 1287 if (sc->sc_detach == 0) 1288 stge_init_locked(sc); 1289 } 1290 } else { 1291 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1292 stge_stop(sc); 1293 } 1294 sc->sc_if_flags = ifp->if_flags; 1295 STGE_UNLOCK(sc); 1296 break; 1297 case SIOCADDMULTI: 1298 case SIOCDELMULTI: 1299 STGE_LOCK(sc); 1300 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1301 stge_set_multi(sc); 1302 STGE_UNLOCK(sc); 1303 break; 1304 case SIOCSIFMEDIA: 1305 case SIOCGIFMEDIA: 1306 mii = device_get_softc(sc->sc_miibus); 1307 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 1308 break; 1309 case SIOCSIFCAP: 1310 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1311 #ifdef DEVICE_POLLING 1312 if ((mask & IFCAP_POLLING) != 0) { 1313 if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) { 1314 error = ether_poll_register(stge_poll, ifp); 1315 if (error != 0) 1316 break; 1317 STGE_LOCK(sc); 1318 CSR_WRITE_2(sc, STGE_IntEnable, 0); 1319 ifp->if_capenable |= IFCAP_POLLING; 1320 STGE_UNLOCK(sc); 1321 } else { 1322 error = ether_poll_deregister(ifp); 1323 if (error != 0) 1324 break; 1325 STGE_LOCK(sc); 1326 CSR_WRITE_2(sc, STGE_IntEnable, 1327 sc->sc_IntEnable); 1328 ifp->if_capenable &= ~IFCAP_POLLING; 1329 STGE_UNLOCK(sc); 1330 } 1331 } 1332 #endif 1333 if ((mask & IFCAP_HWCSUM) != 0) { 1334 ifp->if_capenable ^= IFCAP_HWCSUM; 1335 if ((IFCAP_HWCSUM & ifp->if_capenable) != 0 && 1336 (IFCAP_HWCSUM & ifp->if_capabilities) != 0) 1337 ifp->if_hwassist = STGE_CSUM_FEATURES; 1338 else 1339 ifp->if_hwassist = 0; 1340 } 1341 if ((mask & IFCAP_WOL) != 0 && 1342 (ifp->if_capabilities & IFCAP_WOL) != 0) { 1343 if ((mask & IFCAP_WOL_MAGIC) != 0) 1344 ifp->if_capenable ^= IFCAP_WOL_MAGIC; 1345 } 1346 if ((mask & IFCAP_VLAN_HWTAGGING) != 0) { 1347 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 1348 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1349 STGE_LOCK(sc); 1350 stge_vlan_setup(sc); 1351 STGE_UNLOCK(sc); 1352 } 1353 } 1354 VLAN_CAPABILITIES(ifp); 1355 break; 1356 default: 1357 error = ether_ioctl(ifp, cmd, data); 1358 break; 1359 } 1360 1361 return (error); 1362 } 1363 1364 static void 1365 stge_link_task(void *arg, int pending) 1366 { 1367 struct stge_softc *sc; 1368 struct mii_data *mii; 1369 uint32_t v, ac; 1370 int i; 1371 1372 sc = (struct stge_softc *)arg; 1373 STGE_LOCK(sc); 1374 1375 mii = device_get_softc(sc->sc_miibus); 1376 if (mii->mii_media_status & IFM_ACTIVE) { 1377 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) 1378 sc->sc_link = 1; 1379 } else 1380 sc->sc_link = 0; 1381 1382 sc->sc_MACCtrl = 0; 1383 if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0) 1384 sc->sc_MACCtrl |= MC_DuplexSelect; 1385 if 
(((mii->mii_media_active & IFM_GMASK) & IFM_ETH_RXPAUSE) != 0)
1386 sc->sc_MACCtrl |= MC_RxFlowControlEnable;
1387 if (((mii->mii_media_active & IFM_GMASK) & IFM_ETH_TXPAUSE) != 0)
1388 sc->sc_MACCtrl |= MC_TxFlowControlEnable;
1389 /*
1390 * Update STGE_MACCtrl register depending on link status.
1391 * (duplex, flow control, etc.)
1392 */
1393 v = ac = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
1394 v &= ~(MC_DuplexSelect|MC_RxFlowControlEnable|MC_TxFlowControlEnable);
1395 v |= sc->sc_MACCtrl;
1396 CSR_WRITE_4(sc, STGE_MACCtrl, v);
1397 if (((ac ^ sc->sc_MACCtrl) & MC_DuplexSelect) != 0) {
1398 /* Duplex setting changed, reset Tx/Rx functions. */
1399 ac = CSR_READ_4(sc, STGE_AsicCtrl);
1400 ac |= AC_TxReset | AC_RxReset;
1401 CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
1402 for (i = 0; i < STGE_TIMEOUT; i++) {
1403 DELAY(100);
1404 if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
1405 break;
1406 }
1407 if (i == STGE_TIMEOUT)
1408 device_printf(sc->sc_dev, "reset failed to complete\n");
1409 }
1410 STGE_UNLOCK(sc);
1411 }
1412
1413 static __inline int
1414 stge_tx_error(struct stge_softc *sc)
1415 {
1416 uint32_t txstat;
1417 int error;
1418
1419 for (error = 0;;) {
1420 txstat = CSR_READ_4(sc, STGE_TxStatus);
1421 if ((txstat & TS_TxComplete) == 0)
1422 break;
1423 /* Tx underrun */
1424 if ((txstat & TS_TxUnderrun) != 0) {
1425 /*
1426 * XXX
1427 * There should be a better way to recover
1428 * from a Tx underrun than a full reset.
1429 */
1430 if (sc->sc_nerr++ < STGE_MAXERR)
1431 device_printf(sc->sc_dev, "Tx underrun, "
1432 "resetting...\n");
1433 if (sc->sc_nerr == STGE_MAXERR)
1434 device_printf(sc->sc_dev, "too many errors; "
1435 "not reporting any more\n");
1436 error = -1;
1437 break;
1438 }
1439 /* Maximum/Late collisions, Re-enable Tx MAC. */
1440 if ((txstat & (TS_MaxCollisions|TS_LateCollision)) != 0)
1441 CSR_WRITE_4(sc, STGE_MACCtrl,
1442 (CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK) |
1443 MC_TxEnable);
1444 }
1445
1446 return (error);
1447 }
1448
1449 /*
1450 * stge_intr:
1451 *
1452 * Interrupt service routine.
1453 */
1454 static void
1455 stge_intr(void *arg)
1456 {
1457 struct stge_softc *sc;
1458 struct ifnet *ifp;
1459 int reinit;
1460 uint16_t status;
1461
1462 sc = (struct stge_softc *)arg;
1463 ifp = sc->sc_ifp;
1464
1465 STGE_LOCK(sc);
1466
1467 #ifdef DEVICE_POLLING
1468 if ((ifp->if_capenable & IFCAP_POLLING) != 0)
1469 goto done_locked;
1470 #endif
1471 status = CSR_READ_2(sc, STGE_IntStatus);
1472 if (sc->sc_suspended || (status & IS_InterruptStatus) == 0)
1473 goto done_locked;
1474
1475 /* Disable interrupts. */
1476 for (reinit = 0;;) {
1477 status = CSR_READ_2(sc, STGE_IntStatusAck);
1478 status &= sc->sc_IntEnable;
1479 if (status == 0)
1480 break;
1481 /* Host interface errors. */
1482 if ((status & IS_HostError) != 0) {
1483 device_printf(sc->sc_dev,
1484 "Host interface error, resetting...\n");
1485 reinit = 1;
1486 goto force_init;
1487 }
1488
1489 /* Receive interrupts. */
1490 if ((status & IS_RxDMAComplete) != 0) {
1491 stge_rxeof(sc);
1492 if ((status & IS_RFDListEnd) != 0)
1493 CSR_WRITE_4(sc, STGE_DMACtrl,
1494 DMAC_RxDMAPollNow);
1495 }
1496
1497 /* Transmit interrupts.
*/
1498 if ((status & (IS_TxDMAComplete | IS_TxComplete)) != 0)
1499 stge_txeof(sc);
1500
1501 /* Transmission errors. */
1502 if ((status & IS_TxComplete) != 0) {
1503 if ((reinit = stge_tx_error(sc)) != 0)
1504 break;
1505 }
1506 }
1507
1508 force_init:
1509 if (reinit != 0) {
1510 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1511 stge_init_locked(sc);
1512 }
1513
1514 /* Re-enable interrupts. */
1515 CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);
1516
1517 /* Try to get more packets going. */
1518 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1519 stge_start_locked(ifp);
1520
1521 done_locked:
1522 STGE_UNLOCK(sc);
1523 }
1524
1525 /*
1526 * stge_txeof:
1527 *
1528 * Helper; handle transmit interrupts.
1529 */
1530 static void
1531 stge_txeof(struct stge_softc *sc)
1532 {
1533 struct ifnet *ifp;
1534 struct stge_txdesc *txd;
1535 uint64_t control;
1536 int cons;
1537
1538 STGE_LOCK_ASSERT(sc);
1539
1540 ifp = sc->sc_ifp;
1541
1542 txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
1543 if (txd == NULL)
1544 return;
1545 bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
1546 sc->sc_cdata.stge_tx_ring_map, BUS_DMASYNC_POSTREAD);
1547
1548 /*
1549 * Go through our Tx list and free mbufs for those
1550 * frames which have been transmitted.
1551 */
1552 for (cons = sc->sc_cdata.stge_tx_cons;;
1553 cons = (cons + 1) % STGE_TX_RING_CNT) {
1554 if (sc->sc_cdata.stge_tx_cnt <= 0)
1555 break;
1556 control = le64toh(sc->sc_rdata.stge_tx_ring[cons].tfd_control);
1557 if ((control & TFD_TFDDone) == 0)
1558 break;
1559 sc->sc_cdata.stge_tx_cnt--;
1560 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1561
1562 bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap,
1563 BUS_DMASYNC_POSTWRITE);
1564 bus_dmamap_unload(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap);
1565
1566 /* The output counter is updated via the statistics registers. */
1567 m_freem(txd->tx_m);
1568 txd->tx_m = NULL;
1569 STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txbusyq, tx_q);
1570 STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
1571 txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
1572 }
1573 sc->sc_cdata.stge_tx_cons = cons;
1574 if (sc->sc_cdata.stge_tx_cnt == 0)
1575 sc->sc_watchdog_timer = 0;
1576
1577 bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
1578 sc->sc_cdata.stge_tx_ring_map,
1579 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1580 }
1581
1582 static __inline void
1583 stge_discard_rxbuf(struct stge_softc *sc, int idx)
1584 {
1585 struct stge_rfd *rfd;
1586
1587 rfd = &sc->sc_rdata.stge_rx_ring[idx];
1588 rfd->rfd_status = 0;
1589 }
1590
1591 #ifndef __NO_STRICT_ALIGNMENT
1592 /*
1593 * It seems that the TC9021's DMA engine has alignment restrictions in
1594 * DMA scatter operations. The first DMA segment has no address
1595 * alignment restrictions but the rest should be aligned on a 4(?)-byte
1596 * boundary. Otherwise it would corrupt random memory. Since we don't
1597 * know which one is used for the first segment in advance we simply
1598 * don't align at all.
1599 * To avoid copying over an entire frame to align, we allocate a new
1600 * mbuf and copy the Ethernet header to the new mbuf. The new mbuf is
1601 * prepended to the existing mbuf chain.
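* The cheap path below shifts the whole frame up by ETHER_HDR_LEN
* with an overlapping bcopy() when the cluster still has room; the
* expensive path splits the 14-byte header into a freshly allocated
* mbuf header instead.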
1602 */ 1603 static __inline struct mbuf * 1604 stge_fixup_rx(struct stge_softc *sc, struct mbuf *m) 1605 { 1606 struct mbuf *n; 1607 1608 n = NULL; 1609 if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) { 1610 bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len); 1611 m->m_data += ETHER_HDR_LEN; 1612 n = m; 1613 } else { 1614 MGETHDR(n, M_NOWAIT, MT_DATA); 1615 if (n != NULL) { 1616 bcopy(m->m_data, n->m_data, ETHER_HDR_LEN); 1617 m->m_data += ETHER_HDR_LEN; 1618 m->m_len -= ETHER_HDR_LEN; 1619 n->m_len = ETHER_HDR_LEN; 1620 M_MOVE_PKTHDR(n, m); 1621 n->m_next = m; 1622 } else 1623 m_freem(m); 1624 } 1625 1626 return (n); 1627 } 1628 #endif 1629 1630 /* 1631 * stge_rxeof: 1632 * 1633 * Helper; handle receive interrupts. 1634 */ 1635 static int 1636 stge_rxeof(struct stge_softc *sc) 1637 { 1638 struct ifnet *ifp; 1639 struct stge_rxdesc *rxd; 1640 struct mbuf *mp, *m; 1641 uint64_t status64; 1642 uint32_t status; 1643 int cons, prog, rx_npkts; 1644 1645 STGE_LOCK_ASSERT(sc); 1646 1647 rx_npkts = 0; 1648 ifp = sc->sc_ifp; 1649 1650 bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag, 1651 sc->sc_cdata.stge_rx_ring_map, BUS_DMASYNC_POSTREAD); 1652 1653 prog = 0; 1654 for (cons = sc->sc_cdata.stge_rx_cons; prog < STGE_RX_RING_CNT; 1655 prog++, cons = (cons + 1) % STGE_RX_RING_CNT) { 1656 status64 = le64toh(sc->sc_rdata.stge_rx_ring[cons].rfd_status); 1657 status = RFD_RxStatus(status64); 1658 if ((status & RFD_RFDDone) == 0) 1659 break; 1660 #ifdef DEVICE_POLLING 1661 if (ifp->if_capenable & IFCAP_POLLING) { 1662 if (sc->sc_cdata.stge_rxcycles <= 0) 1663 break; 1664 sc->sc_cdata.stge_rxcycles--; 1665 } 1666 #endif 1667 prog++; 1668 rxd = &sc->sc_cdata.stge_rxdesc[cons]; 1669 mp = rxd->rx_m; 1670 1671 /* 1672 * If the packet had an error, drop it. Note we count 1673 * the error later in the periodic stats update. 1674 */ 1675 if ((status & RFD_FrameEnd) != 0 && (status & 1676 (RFD_RxFIFOOverrun | RFD_RxRuntFrame | 1677 RFD_RxAlignmentError | RFD_RxFCSError | 1678 RFD_RxLengthError)) != 0) { 1679 stge_discard_rxbuf(sc, cons); 1680 if (sc->sc_cdata.stge_rxhead != NULL) { 1681 m_freem(sc->sc_cdata.stge_rxhead); 1682 STGE_RXCHAIN_RESET(sc); 1683 } 1684 continue; 1685 } 1686 /* 1687 * Add a new receive buffer to the ring. 1688 */ 1689 if (stge_newbuf(sc, cons) != 0) { 1690 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 1691 stge_discard_rxbuf(sc, cons); 1692 if (sc->sc_cdata.stge_rxhead != NULL) { 1693 m_freem(sc->sc_cdata.stge_rxhead); 1694 STGE_RXCHAIN_RESET(sc); 1695 } 1696 continue; 1697 } 1698 1699 if ((status & RFD_FrameEnd) != 0) 1700 mp->m_len = RFD_RxDMAFrameLen(status) - 1701 sc->sc_cdata.stge_rxlen; 1702 sc->sc_cdata.stge_rxlen += mp->m_len; 1703 1704 /* Chain mbufs. */ 1705 if (sc->sc_cdata.stge_rxhead == NULL) { 1706 sc->sc_cdata.stge_rxhead = mp; 1707 sc->sc_cdata.stge_rxtail = mp; 1708 } else { 1709 mp->m_flags &= ~M_PKTHDR; 1710 sc->sc_cdata.stge_rxtail->m_next = mp; 1711 sc->sc_cdata.stge_rxtail = mp; 1712 } 1713 1714 if ((status & RFD_FrameEnd) != 0) { 1715 m = sc->sc_cdata.stge_rxhead; 1716 m->m_pkthdr.rcvif = ifp; 1717 m->m_pkthdr.len = sc->sc_cdata.stge_rxlen; 1718 1719 if (m->m_pkthdr.len > sc->sc_if_framesize) { 1720 m_freem(m); 1721 STGE_RXCHAIN_RESET(sc); 1722 continue; 1723 } 1724 /* 1725 * Set the incoming checksum information for 1726 * the packet. 
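* Setting CSUM_DATA_VALID | CSUM_PSEUDO_HDR with csum_data = 0xffff
* is the stack's shorthand for "the hardware verified the full
* TCP/UDP checksum, pseudo-header included", so the protocol input
* paths skip the software check entirely.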
*/
1728 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
1729 if ((status & RFD_IPDetected) != 0) {
1730 m->m_pkthdr.csum_flags |=
1731 CSUM_IP_CHECKED;
1732 if ((status & RFD_IPError) == 0)
1733 m->m_pkthdr.csum_flags |=
1734 CSUM_IP_VALID;
1735 }
1736 if (((status & RFD_TCPDetected) != 0 &&
1737 (status & RFD_TCPError) == 0) ||
1738 ((status & RFD_UDPDetected) != 0 &&
1739 (status & RFD_UDPError) == 0)) {
1740 m->m_pkthdr.csum_flags |=
1741 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
1742 m->m_pkthdr.csum_data = 0xffff;
1743 }
1744 }
1745
1746 #ifndef __NO_STRICT_ALIGNMENT
1747 if (sc->sc_if_framesize > (MCLBYTES - ETHER_ALIGN)) {
1748 if ((m = stge_fixup_rx(sc, m)) == NULL) {
1749 STGE_RXCHAIN_RESET(sc);
1750 continue;
1751 }
1752 }
1753 #endif
1754 /* Check for VLAN-tagged packets. */
1755 if ((status & RFD_VLANDetected) != 0 &&
1756 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
1757 m->m_pkthdr.ether_vtag = RFD_TCI(status64);
1758 m->m_flags |= M_VLANTAG;
1759 }
1760
1761 STGE_UNLOCK(sc);
1762 /* Pass it on. */
1763 (*ifp->if_input)(ifp, m);
1764 STGE_LOCK(sc);
1765 rx_npkts++;
1766
1767 STGE_RXCHAIN_RESET(sc);
1768 }
1769 }
1770
1771 if (prog > 0) {
1772 /* Update the consumer index. */
1773 sc->sc_cdata.stge_rx_cons = cons;
1774 bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
1775 sc->sc_cdata.stge_rx_ring_map,
1776 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1777 }
1778 return (rx_npkts);
1779 }
1780
1781 #ifdef DEVICE_POLLING
1782 static int
1783 stge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1784 {
1785 struct stge_softc *sc;
1786 uint16_t status;
1787 int rx_npkts;
1788
1789 rx_npkts = 0;
1790 sc = ifp->if_softc;
1791 STGE_LOCK(sc);
1792 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
1793 STGE_UNLOCK(sc);
1794 return (rx_npkts);
1795 }
1796
1797 sc->sc_cdata.stge_rxcycles = count;
1798 rx_npkts = stge_rxeof(sc);
1799 stge_txeof(sc);
1800
1801 if (cmd == POLL_AND_CHECK_STATUS) {
1802 status = CSR_READ_2(sc, STGE_IntStatus);
1803 status &= sc->sc_IntEnable;
1804 if (status != 0) {
1805 if ((status & IS_HostError) != 0) {
1806 device_printf(sc->sc_dev,
1807 "Host interface error, resetting...\n");
1808 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1809 stge_init_locked(sc);
1810 }
1811 if ((status & IS_TxComplete) != 0) {
1812 if (stge_tx_error(sc) != 0) {
1813 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
1814 stge_init_locked(sc);
1815 }
1816 }
1817 }
1818
1819 }
1820
1821 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1822 stge_start_locked(ifp);
1823
1824 STGE_UNLOCK(sc);
1825 return (rx_npkts);
1826 }
1827 #endif /* DEVICE_POLLING */
1828
1829 /*
1830 * stge_tick:
1831 *
1832 * One second timer, used to tick the MII.
1833 */
1834 static void
1835 stge_tick(void *arg)
1836 {
1837 struct stge_softc *sc;
1838 struct mii_data *mii;
1839
1840 sc = (struct stge_softc *)arg;
1841
1842 STGE_LOCK_ASSERT(sc);
1843
1844 mii = device_get_softc(sc->sc_miibus);
1845 mii_tick(mii);
1846
1847 /* Update statistics counters. */
1848 stge_stats_update(sc);
1849
1850 /*
1851 * Reclaim any pending Tx descriptors to release mbufs in a
1852 * timely manner as we don't generate Tx completion interrupts
1853 * for every frame. This limits the delay to a maximum of one
1854 * second.
1855 */
1856 if (sc->sc_cdata.stge_tx_cnt != 0)
1857 stge_txeof(sc);
1858
1859 stge_watchdog(sc);
1860
1861 callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);
1862 }
1863
1864 /*
1865 * stge_stats_update:
1866 *
1867 * Read the TC9021 statistics counters.
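* The value-discarding reads of STGE_OctetRcvOk and STGE_OctetXmtdOk
* below are presumably there to drain those clear-on-read counters
* so they do not saturate; that rationale is inferred from the code,
* not from a datasheet.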
1868 */ 1869 static void 1870 stge_stats_update(struct stge_softc *sc) 1871 { 1872 struct ifnet *ifp; 1873 1874 STGE_LOCK_ASSERT(sc); 1875 1876 ifp = sc->sc_ifp; 1877 1878 CSR_READ_4(sc,STGE_OctetRcvOk); 1879 1880 if_inc_counter(ifp, IFCOUNTER_IPACKETS, CSR_READ_4(sc, STGE_FramesRcvdOk)); 1881 1882 if_inc_counter(ifp, IFCOUNTER_IERRORS, CSR_READ_2(sc, STGE_FramesLostRxErrors)); 1883 1884 CSR_READ_4(sc, STGE_OctetXmtdOk); 1885 1886 if_inc_counter(ifp, IFCOUNTER_OPACKETS, CSR_READ_4(sc, STGE_FramesXmtdOk)); 1887 1888 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1889 CSR_READ_4(sc, STGE_LateCollisions) + 1890 CSR_READ_4(sc, STGE_MultiColFrames) + 1891 CSR_READ_4(sc, STGE_SingleColFrames)); 1892 1893 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1894 CSR_READ_2(sc, STGE_FramesAbortXSColls) + 1895 CSR_READ_2(sc, STGE_FramesWEXDeferal)); 1896 } 1897 1898 /* 1899 * stge_reset: 1900 * 1901 * Perform a soft reset on the TC9021. 1902 */ 1903 static void 1904 stge_reset(struct stge_softc *sc, uint32_t how) 1905 { 1906 uint32_t ac; 1907 uint8_t v; 1908 int i, dv; 1909 1910 STGE_LOCK_ASSERT(sc); 1911 1912 dv = 5000; 1913 ac = CSR_READ_4(sc, STGE_AsicCtrl); 1914 switch (how) { 1915 case STGE_RESET_TX: 1916 ac |= AC_TxReset | AC_FIFO; 1917 dv = 100; 1918 break; 1919 case STGE_RESET_RX: 1920 ac |= AC_RxReset | AC_FIFO; 1921 dv = 100; 1922 break; 1923 case STGE_RESET_FULL: 1924 default: 1925 /* 1926 * Only assert RstOut if we're fiber. We need GMII clocks 1927 * to be present in order for the reset to complete on fiber 1928 * cards. 1929 */ 1930 ac |= AC_GlobalReset | AC_RxReset | AC_TxReset | 1931 AC_DMA | AC_FIFO | AC_Network | AC_Host | AC_AutoInit | 1932 (sc->sc_usefiber ? AC_RstOut : 0); 1933 break; 1934 } 1935 1936 CSR_WRITE_4(sc, STGE_AsicCtrl, ac); 1937 1938 /* Account for reset problem at 10Mbps. */ 1939 DELAY(dv); 1940 1941 for (i = 0; i < STGE_TIMEOUT; i++) { 1942 if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0) 1943 break; 1944 DELAY(dv); 1945 } 1946 1947 if (i == STGE_TIMEOUT) 1948 device_printf(sc->sc_dev, "reset failed to complete\n"); 1949 1950 /* Set LED, from Linux IPG driver. */ 1951 ac = CSR_READ_4(sc, STGE_AsicCtrl); 1952 ac &= ~(AC_LEDMode | AC_LEDSpeed | AC_LEDModeBit1); 1953 if ((sc->sc_led & 0x01) != 0) 1954 ac |= AC_LEDMode; 1955 if ((sc->sc_led & 0x03) != 0) 1956 ac |= AC_LEDModeBit1; 1957 if ((sc->sc_led & 0x08) != 0) 1958 ac |= AC_LEDSpeed; 1959 CSR_WRITE_4(sc, STGE_AsicCtrl, ac); 1960 1961 /* Set PHY, from Linux IPG driver */ 1962 v = CSR_READ_1(sc, STGE_PhySet); 1963 v &= ~(PS_MemLenb9b | PS_MemLen | PS_NonCompdet); 1964 v |= ((sc->sc_led & 0x70) >> 4); 1965 CSR_WRITE_1(sc, STGE_PhySet, v); 1966 } 1967 1968 /* 1969 * stge_init: [ ifnet interface function ] 1970 * 1971 * Initialize the interface. 1972 */ 1973 static void 1974 stge_init(void *xsc) 1975 { 1976 struct stge_softc *sc; 1977 1978 sc = (struct stge_softc *)xsc; 1979 STGE_LOCK(sc); 1980 stge_init_locked(sc); 1981 STGE_UNLOCK(sc); 1982 } 1983 1984 static void 1985 stge_init_locked(struct stge_softc *sc) 1986 { 1987 struct ifnet *ifp; 1988 struct mii_data *mii; 1989 uint16_t eaddr[3]; 1990 uint32_t v; 1991 int error; 1992 1993 STGE_LOCK_ASSERT(sc); 1994 1995 ifp = sc->sc_ifp; 1996 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1997 return; 1998 mii = device_get_softc(sc->sc_miibus); 1999 2000 /* 2001 * Cancel any pending I/O. 2002 */ 2003 stge_stop(sc); 2004 2005 /* 2006 * Reset the chip to a known state. 2007 */ 2008 stge_reset(sc, STGE_RESET_FULL); 2009 2010 /* Init descriptors. 
*/
2011 error = stge_init_rx_ring(sc);
2012 if (error != 0) {
2013 device_printf(sc->sc_dev,
2014 "initialization failed: no memory for rx buffers\n");
2015 stge_stop(sc);
2016 goto out;
2017 }
2018 stge_init_tx_ring(sc);
2019
2020 /* Set the station address. */
2021 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2022 CSR_WRITE_2(sc, STGE_StationAddress0, htole16(eaddr[0]));
2023 CSR_WRITE_2(sc, STGE_StationAddress1, htole16(eaddr[1]));
2024 CSR_WRITE_2(sc, STGE_StationAddress2, htole16(eaddr[2]));
2025
2026 /*
2027 * Set the statistics masks. Disable all the RMON stats,
2028 * and disable selected stats in the non-RMON stats registers.
2029 */
2030 CSR_WRITE_4(sc, STGE_RMONStatisticsMask, 0xffffffff);
2031 CSR_WRITE_4(sc, STGE_StatisticsMask,
2032 (1U << 1) | (1U << 2) | (1U << 3) | (1U << 4) | (1U << 5) |
2033 (1U << 6) | (1U << 7) | (1U << 8) | (1U << 9) | (1U << 10) |
2034 (1U << 13) | (1U << 14) | (1U << 15) | (1U << 19) | (1U << 20) |
2035 (1U << 21));
2036
2037 /* Set up the receive filter. */
2038 stge_set_filter(sc);
2039 /* Program the multicast filter. */
2040 stge_set_multi(sc);
2041
2042 /*
2043 * Give the transmit and receive rings to the chip.
2044 */
2045 CSR_WRITE_4(sc, STGE_TFDListPtrHi,
2046 STGE_ADDR_HI(STGE_TX_RING_ADDR(sc, 0)));
2047 CSR_WRITE_4(sc, STGE_TFDListPtrLo,
2048 STGE_ADDR_LO(STGE_TX_RING_ADDR(sc, 0)));
2049
2050 CSR_WRITE_4(sc, STGE_RFDListPtrHi,
2051 STGE_ADDR_HI(STGE_RX_RING_ADDR(sc, 0)));
2052 CSR_WRITE_4(sc, STGE_RFDListPtrLo,
2053 STGE_ADDR_LO(STGE_RX_RING_ADDR(sc, 0)));
2054
2055 /*
2056 * Initialize the Tx auto-poll period. It's OK to make this number
2057 * large (255 is the max, but we use 127) -- we explicitly kick the
2058 * transmit engine when there's actually a packet.
2059 */
2060 CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);
2061
2062 /* ...and the Rx auto-poll period. */
2063 CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);
2064
2065 /* Initialize the Tx start threshold. */
2066 CSR_WRITE_2(sc, STGE_TxStartThresh, sc->sc_txthresh);
2067
2068 /* Rx DMA thresholds, from Linux */
2069 CSR_WRITE_1(sc, STGE_RxDMABurstThresh, 0x30);
2070 CSR_WRITE_1(sc, STGE_RxDMAUrgentThresh, 0x30);
2071
2072 /* Rx early threshold, from Linux */
2073 CSR_WRITE_2(sc, STGE_RxEarlyThresh, 0x7ff);
2074
2075 /* Tx DMA thresholds, from Linux */
2076 CSR_WRITE_1(sc, STGE_TxDMABurstThresh, 0x30);
2077 CSR_WRITE_1(sc, STGE_TxDMAUrgentThresh, 0x04);
2078
2079 /*
2080 * Initialize the Rx DMA interrupt control register. We
2081 * request an interrupt after every incoming packet, but
2082 * defer it for sc_rxint_dmawait us. When the number of
2083 * interrupts pending reaches STGE_RXINT_NFRAME, we stop
2084 * deferring the interrupt, and signal it immediately.
2085 */
2086 CSR_WRITE_4(sc, STGE_RxDMAIntCtrl,
2087 RDIC_RxFrameCount(sc->sc_rxint_nframe) |
2088 RDIC_RxDMAWaitTime(STGE_RXINT_USECS2TICK(sc->sc_rxint_dmawait)));
2089
2090 /*
2091 * Initialize the interrupt mask.
2092 */
2093 sc->sc_IntEnable = IS_HostError | IS_TxComplete |
2094 IS_TxDMAComplete | IS_RxDMAComplete | IS_RFDListEnd;
2095 #ifdef DEVICE_POLLING
2096 /* Disable interrupts if we are polling. */
2097 if ((ifp->if_capenable & IFCAP_POLLING) != 0)
2098 CSR_WRITE_2(sc, STGE_IntEnable, 0);
2099 else
2100 #endif
2101 CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);
2102
2103 /*
2104 * Configure the DMA engine.
2105 * XXX Should auto-tune TxBurstLimit.
	CSR_WRITE_4(sc, STGE_RxDMAIntCtrl,
	    RDIC_RxFrameCount(sc->sc_rxint_nframe) |
	    RDIC_RxDMAWaitTime(STGE_RXINT_USECS2TICK(sc->sc_rxint_dmawait)));

	/*
	 * Initialize the interrupt mask.
	 */
	sc->sc_IntEnable = IS_HostError | IS_TxComplete |
	    IS_TxDMAComplete | IS_RxDMAComplete | IS_RFDListEnd;
#ifdef DEVICE_POLLING
	/* Disable interrupts if we are polling. */
	if ((ifp->if_capenable & IFCAP_POLLING) != 0)
		CSR_WRITE_2(sc, STGE_IntEnable, 0);
	else
#endif
	CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);

	/*
	 * Configure the DMA engine.
	 * XXX Should auto-tune TxBurstLimit.
	 */
	CSR_WRITE_4(sc, STGE_DMACtrl, sc->sc_DMACtrl | DMAC_TxBurstLimit(3));

	/*
	 * Send a PAUSE frame when we reach 29,696 bytes in the Rx
	 * FIFO, and send an un-PAUSE frame when we reach 3056 bytes
	 * in the Rx FIFO.  The thresholds appear to be programmed in
	 * units of 16 bytes, hence the divisions.
	 */
	CSR_WRITE_2(sc, STGE_FlowOnTresh, 29696 / 16);
	CSR_WRITE_2(sc, STGE_FlowOffThresh, 3056 / 16);

	/*
	 * Set the maximum frame size.
	 */
	sc->sc_if_framesize = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
	CSR_WRITE_2(sc, STGE_MaxFrameSize, sc->sc_if_framesize);

	/*
	 * Initialize MacCtrl -- do it before setting the media,
	 * as setting the media will actually program the register.
	 *
	 * Note: We have to poke the IFS value before poking
	 * anything else.
	 */
	/* Tx/Rx MAC should be disabled before programming IFS. */
	CSR_WRITE_4(sc, STGE_MACCtrl, MC_IFSSelect(MC_IFS96bit));

	stge_vlan_setup(sc);

	if (sc->sc_rev >= 6) {		/* >= B.2 */
		/* Multi-frag frame bug work-around. */
		CSR_WRITE_2(sc, STGE_DebugCtrl,
		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0200);

		/* Tx Poll Now bug work-around. */
		CSR_WRITE_2(sc, STGE_DebugCtrl,
		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0010);
		/* Tx Poll Now bug work-around. */
		CSR_WRITE_2(sc, STGE_DebugCtrl,
		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0020);
	}

	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	v |= MC_StatisticsEnable | MC_TxEnable | MC_RxEnable;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	/*
	 * It seems that transmitting frames without checking the state of
	 * the Rx/Tx MAC wedges the hardware.
	 */
	stge_start_tx(sc);
	stge_start_rx(sc);

	sc->sc_link = 0;
	/*
	 * Set the current media.
	 */
	mii_mediachg(mii);

	/*
	 * Start the one second MII clock.
	 */
	callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);

	/*
	 * ...all done!
	 */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

out:
	if (error != 0)
		device_printf(sc->sc_dev, "interface not running\n");
}

static void
stge_vlan_setup(struct stge_softc *sc)
{
	struct ifnet *ifp;
	uint32_t v;

	ifp = sc->sc_ifp;
	/*
	 * The NIC always copies a received VLAN tag regardless of the
	 * STGE_MACCtrl MC_AutoVLANuntagging bit.
	 * The MC_AutoVLANtagging bit selects which VLAN source to use
	 * between STGE_VLANTag and the TFC.  However, the TFC
	 * TFD_VLANTagInsert bit has priority over the MC_AutoVLANtagging
	 * bit, so we always use the TFC instead of the STGE_VLANTag
	 * register.
	 */
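	/*
	 * A sketch of the per-frame insertion this implies for the
	 * transmit path (TFD_VID() as the tag-field macro is an
	 * assumption here; see stge_encap()):
	 *
	 *	if ((m->m_flags & M_VLANTAG) != 0)
	 *		tfc |= TFD_VLANTagInsert |
	 *		    TFD_VID(m->m_pkthdr.ether_vtag);
	 */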
	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
		v |= MC_AutoVLANuntagging;
	else
		v &= ~MC_AutoVLANuntagging;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
}

/*
 * Stop transmission and reception on the interface.
 */
static void
stge_stop(struct stge_softc *sc)
{
	struct ifnet *ifp;
	struct stge_txdesc *txd;
	struct stge_rxdesc *rxd;
	uint32_t v;
	int i;

	STGE_LOCK_ASSERT(sc);
	/*
	 * Stop the one second clock.
	 */
	callout_stop(&sc->sc_tick_ch);
	sc->sc_watchdog_timer = 0;

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE_2(sc, STGE_IntEnable, 0);

	/*
	 * Stop receiver, transmitter, and stats update.
	 */
	stge_stop_rx(sc);
	stge_stop_tx(sc);
	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	v |= MC_StatisticsDisable;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);

	/*
	 * Stop the transmit and receive DMA.
	 */
	stge_dma_wait(sc);
	CSR_WRITE_4(sc, STGE_TFDListPtrHi, 0);
	CSR_WRITE_4(sc, STGE_TFDListPtrLo, 0);
	CSR_WRITE_4(sc, STGE_RFDListPtrHi, 0);
	CSR_WRITE_4(sc, STGE_RFDListPtrLo, 0);

	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (i = 0; i < STGE_RX_RING_CNT; i++) {
		rxd = &sc->sc_cdata.stge_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->sc_cdata.stge_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_cdata.stge_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < STGE_TX_RING_CNT; i++) {
		txd = &sc->sc_cdata.stge_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->sc_cdata.stge_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_cdata.stge_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp = sc->sc_ifp;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->sc_link = 0;
}

static void
stge_start_tx(struct stge_softc *sc)
{
	uint32_t v;
	int i;

	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	if ((v & MC_TxEnabled) != 0)
		return;
	v |= MC_TxEnable;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);
	/* Wait up to STGE_TIMEOUT * 10us for the Tx MAC to come up. */
	for (i = STGE_TIMEOUT; i > 0; i--) {
		DELAY(10);
		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
		if ((v & MC_TxEnabled) != 0)
			break;
	}
	if (i == 0)
		device_printf(sc->sc_dev, "Starting Tx MAC timed out\n");
}

static void
stge_start_rx(struct stge_softc *sc)
{
	uint32_t v;
	int i;

	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	if ((v & MC_RxEnabled) != 0)
		return;
	v |= MC_RxEnable;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);
	for (i = STGE_TIMEOUT; i > 0; i--) {
		DELAY(10);
		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
		if ((v & MC_RxEnabled) != 0)
			break;
	}
	if (i == 0)
		device_printf(sc->sc_dev, "Starting Rx MAC timed out\n");
}

static void
stge_stop_tx(struct stge_softc *sc)
{
	uint32_t v;
	int i;

	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	if ((v & MC_TxEnabled) == 0)
		return;
	v |= MC_TxDisable;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	for (i = STGE_TIMEOUT; i > 0; i--) {
		DELAY(10);
		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
		if ((v & MC_TxEnabled) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->sc_dev, "Stopping Tx MAC timed out\n");
}

static void
stge_stop_rx(struct stge_softc *sc)
{
	uint32_t v;
	int i;

	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	if ((v & MC_RxEnabled) == 0)
		return;
	v |= MC_RxDisable;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	for (i = STGE_TIMEOUT; i > 0; i--) {
		DELAY(10);
		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
		if ((v & MC_RxEnabled) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->sc_dev, "Stopping Rx MAC timed out\n");
}
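
/*
 * The descriptor rings are built as circular singly-linked lists: each
 * TFD/RFD carries the bus address of its successor, and the last entry
 * points back to entry 0.  Every TFD below is initialized with
 * TFD_TFDDone set, so the whole Tx ring starts out owned by the host.
 */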
static void
stge_init_tx_ring(struct stge_softc *sc)
{
	struct stge_ring_data *rd;
	struct stge_txdesc *txd;
	bus_addr_t addr;
	int i;

	STAILQ_INIT(&sc->sc_cdata.stge_txfreeq);
	STAILQ_INIT(&sc->sc_cdata.stge_txbusyq);

	sc->sc_cdata.stge_tx_prod = 0;
	sc->sc_cdata.stge_tx_cons = 0;
	sc->sc_cdata.stge_tx_cnt = 0;

	rd = &sc->sc_rdata;
	bzero(rd->stge_tx_ring, STGE_TX_RING_SZ);
	for (i = 0; i < STGE_TX_RING_CNT; i++) {
		if (i == (STGE_TX_RING_CNT - 1))
			addr = STGE_TX_RING_ADDR(sc, 0);
		else
			addr = STGE_TX_RING_ADDR(sc, i + 1);
		rd->stge_tx_ring[i].tfd_next = htole64(addr);
		rd->stge_tx_ring[i].tfd_control = htole64(TFD_TFDDone);
		txd = &sc->sc_cdata.stge_txdesc[i];
		STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
	}

	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static int
stge_init_rx_ring(struct stge_softc *sc)
{
	struct stge_ring_data *rd;
	bus_addr_t addr;
	int i;

	sc->sc_cdata.stge_rx_cons = 0;
	STGE_RXCHAIN_RESET(sc);

	rd = &sc->sc_rdata;
	bzero(rd->stge_rx_ring, STGE_RX_RING_SZ);
	for (i = 0; i < STGE_RX_RING_CNT; i++) {
		if (stge_newbuf(sc, i) != 0)
			return (ENOBUFS);
		if (i == (STGE_RX_RING_CNT - 1))
			addr = STGE_RX_RING_ADDR(sc, 0);
		else
			addr = STGE_RX_RING_ADDR(sc, i + 1);
		rd->stge_rx_ring[i].rfd_next = htole64(addr);
		rd->stge_rx_ring[i].rfd_status = 0;
	}

	bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
	    sc->sc_cdata.stge_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * stge_newbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
static int
stge_newbuf(struct stge_softc *sc, int idx)
{
	struct stge_rxdesc *rxd;
	struct stge_rfd *rfd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	/*
	 * The hardware requires 4-byte aligned DMA addresses when a
	 * jumbo frame is used, so only offset the payload by
	 * ETHER_ALIGN for standard-sized frames.
	 */
	if (sc->sc_if_framesize <= (MCLBYTES - ETHER_ALIGN))
		m_adj(m, ETHER_ALIGN);

	/*
	 * Load the new mbuf into the spare map first; if the load fails,
	 * the descriptor's current map and buffer remain intact.
	 */
	if (bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_rx_tag,
	    sc->sc_cdata.stge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc->sc_cdata.stge_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap);
	}
	/* Swap the just-loaded spare map with the descriptor's map. */
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->sc_cdata.stge_rx_sparemap;
	sc->sc_cdata.stge_rx_sparemap = map;
	bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;

	rfd = &sc->sc_rdata.stge_rx_ring[idx];
	rfd->rfd_frag.frag_word0 =
	    htole64(FRAG_ADDR(segs[0].ds_addr) | FRAG_LEN(segs[0].ds_len));
	rfd->rfd_status = 0;

	return (0);
}

/*
 * stge_set_filter:
 *
 *	Set up the receive filter.
 */
static void
stge_set_filter(struct stge_softc *sc)
{
	struct ifnet *ifp;
	uint16_t mode;

	STGE_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;

	mode = CSR_READ_2(sc, STGE_ReceiveMode);
	mode |= RM_ReceiveUnicast;
	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		mode |= RM_ReceiveBroadcast;
	else
		mode &= ~RM_ReceiveBroadcast;
	if ((ifp->if_flags & IFF_PROMISC) != 0)
		mode |= RM_ReceiveAllFrames;
	else
		mode &= ~RM_ReceiveAllFrames;

	CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
}

static u_int
stge_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t crc, *mchash = arg;

	crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN);
	/* Just want the 6 least significant bits. */
	crc &= 0x3f;
	/* Set the corresponding bit in the hash table. */
	mchash[crc >> 5] |= 1 << (crc & 0x1f);

	return (1);
}
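
/*
 * A worked example of the indexing above, for an illustrative hash
 * value h: if the low six bits of the CRC are 0x2a (h = 42), then
 * word h >> 5 = 1 (STGE_HashTable1) is selected and bit
 * h & 0x1f = 10 is set within it.
 */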

static void
stge_set_multi(struct stge_softc *sc)
{
	struct ifnet *ifp;
	uint32_t mchash[2];
	uint16_t mode;
	int count;

	STGE_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;

	mode = CSR_READ_2(sc, STGE_ReceiveMode);
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			mode |= RM_ReceiveAllFrames;
		else if ((ifp->if_flags & IFF_ALLMULTI) != 0)
			mode |= RM_ReceiveMulticast;
		CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
		return;
	}

	/* Clear existing filters. */
	CSR_WRITE_4(sc, STGE_HashTable0, 0);
	CSR_WRITE_4(sc, STGE_HashTable1, 0);

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator, and then using the low-order
	 * 6 bits as an index into the 64 bit multicast hash table.  The
	 * high order bits select the register, while the rest of the bits
	 * select the bit within the register.
	 */
	bzero(mchash, sizeof(mchash));
	count = if_foreach_llmaddr(ifp, stge_hash_maddr, mchash);

	mode &= ~(RM_ReceiveMulticast | RM_ReceiveAllFrames);
	if (count > 0)
		mode |= RM_ReceiveMulticastHash;
	else
		mode &= ~RM_ReceiveMulticastHash;

	CSR_WRITE_4(sc, STGE_HashTable0, mchash[0]);
	CSR_WRITE_4(sc, STGE_HashTable1, mchash[1]);
	CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
}

/*
 * Clamp an integer sysctl to the [low, high] range: reject an update
 * with EINVAL if the new value is out of range, otherwise store it.
 */
static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (arg1 == NULL)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;

	return (0);
}

static int
sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    STGE_RXINT_NFRAME_MIN, STGE_RXINT_NFRAME_MAX));
}

static int
sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    STGE_RXINT_DMAWAIT_MIN, STGE_RXINT_DMAWAIT_MAX));
}
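
/*
 * A sketch of how these handlers are intended to be wired up from
 * stge_attach() (illustrative only; the node name and flags here are
 * assumptions, not the driver's actual registration):
 *
 *	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->sc_dev),
 *	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sc_dev)),
 *	    OID_AUTO, "rxint_nframe", CTLTYPE_INT | CTLFLAG_RW,
 *	    &sc->sc_rxint_nframe, 0, sysctl_hw_stge_rxint_nframe,
 *	    "I", "stge rx interrupt nframe");
 */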