/*	$NetBSD: if_stge.c,v 1.32 2005/12/11 12:22:49 christos Exp $	*/

/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Sundance Tech. TC9021 10/100/1000
 * Ethernet controller.
 */

#include <sys/cdefs.h>
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/stge/if_stgereg.h>

#define	STGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

MODULE_DEPEND(stge, pci, 1, 1, 1);
MODULE_DEPEND(stge, ether, 1, 1, 1);
MODULE_DEPEND(stge, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"
/*
 * Devices supported by this driver.
 */
static const struct stge_product {
	uint16_t	stge_vendorid;
	uint16_t	stge_deviceid;
	const char	*stge_name;
} stge_products[] = {
	{ VENDOR_SUNDANCETI, DEVICEID_SUNDANCETI_ST1023,
	  "Sundance ST-1023 Gigabit Ethernet" },

	{ VENDOR_SUNDANCETI, DEVICEID_SUNDANCETI_ST2021,
	  "Sundance ST-2021 Gigabit Ethernet" },

	{ VENDOR_TAMARACK, DEVICEID_TAMARACK_TC9021,
	  "Tamarack TC9021 Gigabit Ethernet" },

	{ VENDOR_TAMARACK, DEVICEID_TAMARACK_TC9021_ALT,
	  "Tamarack TC9021 Gigabit Ethernet" },

	/*
	 * The Sundance sample boards use the Sundance vendor ID,
	 * but the Tamarack product ID.
	 */
	{ VENDOR_SUNDANCETI, DEVICEID_TAMARACK_TC9021,
	  "Sundance TC9021 Gigabit Ethernet" },

	{ VENDOR_SUNDANCETI, DEVICEID_TAMARACK_TC9021_ALT,
	  "Sundance TC9021 Gigabit Ethernet" },

	{ VENDOR_DLINK, DEVICEID_DLINK_DL4000,
	  "D-Link DL-4000 Gigabit Ethernet" },

	{ VENDOR_ANTARES, DEVICEID_ANTARES_TC9021,
	  "Antares Gigabit Ethernet" }
};
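/*
 * Note that the ST-1023 is handled specially in stge_attach():
 * it is the only device in this list whose station address must
 * be read from the EEPROM rather than from the station address
 * registers.
 */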
static int	stge_probe(device_t);
static int	stge_attach(device_t);
static int	stge_detach(device_t);
static int	stge_shutdown(device_t);
static int	stge_suspend(device_t);
static int	stge_resume(device_t);

static int	stge_encap(struct stge_softc *, struct mbuf **);
static void	stge_start(if_t);
static void	stge_start_locked(if_t);
static void	stge_watchdog(struct stge_softc *);
static int	stge_ioctl(if_t, u_long, caddr_t);
static void	stge_init(void *);
static void	stge_init_locked(struct stge_softc *);
static void	stge_vlan_setup(struct stge_softc *);
static void	stge_stop(struct stge_softc *);
static void	stge_start_tx(struct stge_softc *);
static void	stge_start_rx(struct stge_softc *);
static void	stge_stop_tx(struct stge_softc *);
static void	stge_stop_rx(struct stge_softc *);

static void	stge_reset(struct stge_softc *, uint32_t);
static int	stge_eeprom_wait(struct stge_softc *);
static void	stge_read_eeprom(struct stge_softc *, int, uint16_t *);
static void	stge_tick(void *);
static void	stge_stats_update(struct stge_softc *);
static void	stge_set_filter(struct stge_softc *);
static void	stge_set_multi(struct stge_softc *);

static void	stge_link_task(void *, int);
static void	stge_intr(void *);
static __inline int stge_tx_error(struct stge_softc *);
static void	stge_txeof(struct stge_softc *);
static int	stge_rxeof(struct stge_softc *);
static __inline void stge_discard_rxbuf(struct stge_softc *, int);
static int	stge_newbuf(struct stge_softc *, int);
#ifndef __NO_STRICT_ALIGNMENT
static __inline struct mbuf *stge_fixup_rx(struct stge_softc *, struct mbuf *);
#endif

static int	stge_miibus_readreg(device_t, int, int);
static int	stge_miibus_writereg(device_t, int, int, int);
static void	stge_miibus_statchg(device_t);
static int	stge_mediachange(if_t);
static void	stge_mediastatus(if_t, struct ifmediareq *);

static void	stge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int	stge_dma_alloc(struct stge_softc *);
static void	stge_dma_free(struct stge_softc *);
static void	stge_dma_wait(struct stge_softc *);
static void	stge_init_tx_ring(struct stge_softc *);
static int	stge_init_rx_ring(struct stge_softc *);
#ifdef DEVICE_POLLING
static int	stge_poll(if_t, enum poll_cmd, int);
#endif

static void	stge_setwol(struct stge_softc *);
static int	sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int	sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS);
static int	sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS);

/*
 * MII bit-bang glue
 */
static uint32_t stge_mii_bitbang_read(device_t);
static void	stge_mii_bitbang_write(device_t, uint32_t);

static const struct mii_bitbang_ops stge_mii_bitbang_ops = {
	stge_mii_bitbang_read,
	stge_mii_bitbang_write,
	{
		PC_MgmtData,		/* MII_BIT_MDO */
		PC_MgmtData,		/* MII_BIT_MDI */
		PC_MgmtClk,		/* MII_BIT_MDC */
		PC_MgmtDir,		/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};
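/*
 * MII management on the TC9021 is bit-banged through the PhyCtrl
 * register: the table above maps the abstract MDO/MDI/MDC/direction
 * lines onto the PC_MgmtData, PC_MgmtClk and PC_MgmtDir bits.  Since
 * every PHY access toggles these bits one at a time, PHY access is
 * serialized with its own mutex (sc_mii_mtx) in the methods below.
 */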
static device_method_t stge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		stge_probe),
	DEVMETHOD(device_attach,	stge_attach),
	DEVMETHOD(device_detach,	stge_detach),
	DEVMETHOD(device_shutdown,	stge_shutdown),
	DEVMETHOD(device_suspend,	stge_suspend),
	DEVMETHOD(device_resume,	stge_resume),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	stge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	stge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	stge_miibus_statchg),

	DEVMETHOD_END
};

static driver_t stge_driver = {
	"stge",
	stge_methods,
	sizeof(struct stge_softc)
};

DRIVER_MODULE(stge, pci, stge_driver, 0, 0);
DRIVER_MODULE(miibus, stge, miibus_driver, 0, 0);

static struct resource_spec stge_res_spec_io[] = {
	{ SYS_RES_IOPORT, PCIR_BAR(0), RF_ACTIVE },
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0, 0 }
};

static struct resource_spec stge_res_spec_mem[] = {
	{ SYS_RES_MEMORY, PCIR_BAR(1), RF_ACTIVE },
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0, 0 }
};

/*
 * stge_mii_bitbang_read: [mii bit-bang interface function]
 *
 *	Read the MII serial port for the MII bit-bang module.
 */
static uint32_t
stge_mii_bitbang_read(device_t dev)
{
	struct stge_softc *sc;
	uint32_t val;

	sc = device_get_softc(dev);

	val = CSR_READ_1(sc, STGE_PhyCtrl);
	CSR_BARRIER(sc, STGE_PhyCtrl, 1,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	return (val);
}

/*
 * stge_mii_bitbang_write: [mii bit-bang interface function]
 *
 *	Write the MII serial port for the MII bit-bang module.
 */
static void
stge_mii_bitbang_write(device_t dev, uint32_t val)
{
	struct stge_softc *sc;

	sc = device_get_softc(dev);

	CSR_WRITE_1(sc, STGE_PhyCtrl, val);
	CSR_BARRIER(sc, STGE_PhyCtrl, 1,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}

/*
 * stge_miibus_readreg: [mii interface function]
 *
 *	Read a PHY register on the MII of the TC9021.
 */
static int
stge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct stge_softc *sc;
	int error, val;

	sc = device_get_softc(dev);

	if (reg == STGE_PhyCtrl) {
		/* XXX allow ip1000phy read STGE_PhyCtrl register. */
		STGE_MII_LOCK(sc);
		error = CSR_READ_1(sc, STGE_PhyCtrl);
		STGE_MII_UNLOCK(sc);
		return (error);
	}

	STGE_MII_LOCK(sc);
	val = mii_bitbang_readreg(dev, &stge_mii_bitbang_ops, phy, reg);
	STGE_MII_UNLOCK(sc);
	return (val);
}

/*
 * stge_miibus_writereg: [mii interface function]
 *
 *	Write a PHY register on the MII of the TC9021.
 */
static int
stge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct stge_softc *sc;

	sc = device_get_softc(dev);

	STGE_MII_LOCK(sc);
	mii_bitbang_writereg(dev, &stge_mii_bitbang_ops, phy, reg, val);
	STGE_MII_UNLOCK(sc);
	return (0);
}

/*
 * stge_miibus_statchg: [mii interface function]
 *
 *	Callback from MII layer when media changes.
 */
static void
stge_miibus_statchg(device_t dev)
{
	struct stge_softc *sc;

	sc = device_get_softc(dev);
	taskqueue_enqueue(taskqueue_swi, &sc->sc_link_task);
}
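/*
 * Note that the actual link-state handling is deferred to a taskqueue
 * (stge_link_task): reprogramming MACCtrl for a duplex change can
 * require a Tx/Rx reset and a busy-wait, which is better kept out of
 * the MII callback path.
 */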
/*
 * stge_mediastatus: [ifmedia interface function]
 *
 *	Get the current interface media status.
 */
static void
stge_mediastatus(if_t ifp, struct ifmediareq *ifmr)
{
	struct stge_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->sc_miibus);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

/*
 * stge_mediachange: [ifmedia interface function]
 *
 *	Set hardware to newly-selected media.
 */
static int
stge_mediachange(if_t ifp)
{
	struct stge_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->sc_miibus);
	mii_mediachg(mii);

	return (0);
}

static int
stge_eeprom_wait(struct stge_softc *sc)
{
	int i;

	for (i = 0; i < STGE_TIMEOUT; i++) {
		DELAY(1000);
		if ((CSR_READ_2(sc, STGE_EepromCtrl) & EC_EepromBusy) == 0)
			return (0);
	}
	return (1);
}

/*
 * stge_read_eeprom:
 *
 *	Read data from the serial EEPROM.
 */
static void
stge_read_eeprom(struct stge_softc *sc, int offset, uint16_t *data)
{

	if (stge_eeprom_wait(sc))
		device_printf(sc->sc_dev, "EEPROM failed to come ready\n");

	CSR_WRITE_2(sc, STGE_EepromCtrl,
	    EC_EepromAddress(offset) | EC_EepromOpcode(EC_OP_RR));
	if (stge_eeprom_wait(sc))
		device_printf(sc->sc_dev, "EEPROM read timed out\n");
	*data = CSR_READ_2(sc, STGE_EepromData);
}

static int
stge_probe(device_t dev)
{
	const struct stge_product *sp;
	int i;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	sp = stge_products;
	for (i = 0; i < nitems(stge_products); i++, sp++) {
		if (vendor == sp->stge_vendorid &&
		    devid == sp->stge_deviceid) {
			device_set_desc(dev, sp->stge_name);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
stge_attach(device_t dev)
{
	struct stge_softc *sc;
	if_t ifp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	int error, flags, i;
	uint16_t cmd;
	uint32_t val;

	error = 0;
	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	mtx_init(&sc->sc_mii_mtx, "stge_mii_mutex", NULL, MTX_DEF);
	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
	TASK_INIT(&sc->sc_link_task, 0, stge_link_task, sc);

	/*
	 * Map the device.
	 */
	pci_enable_busmaster(dev);
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	val = pci_read_config(dev, PCIR_BAR(1), 4);
	if (PCI_BAR_IO(val))
		sc->sc_spec = stge_res_spec_mem;
	else {
		val = pci_read_config(dev, PCIR_BAR(0), 4);
		if (!PCI_BAR_IO(val)) {
			device_printf(sc->sc_dev, "couldn't locate IO BAR\n");
			error = ENXIO;
			goto fail;
		}
		sc->sc_spec = stge_res_spec_io;
	}
	error = bus_alloc_resources(dev, sc->sc_spec, sc->sc_res);
	if (error != 0) {
		device_printf(dev, "couldn't allocate %s resources\n",
		    sc->sc_spec == stge_res_spec_mem ? "memory" : "I/O");
		goto fail;
	}
	sc->sc_rev = pci_get_revid(dev);

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "rxint_nframe", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    &sc->sc_rxint_nframe, 0, sysctl_hw_stge_rxint_nframe, "I",
	    "stge rx interrupt nframe");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "rxint_dmawait", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    &sc->sc_rxint_dmawait, 0, sysctl_hw_stge_rxint_dmawait, "I",
	    "stge rx interrupt dmawait");

	/* Pull in device tunables. */
	sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT;
	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "rxint_nframe", &sc->sc_rxint_nframe);
	if (error == 0) {
		if (sc->sc_rxint_nframe < STGE_RXINT_NFRAME_MIN ||
		    sc->sc_rxint_nframe > STGE_RXINT_NFRAME_MAX) {
			device_printf(dev, "rxint_nframe value out of range; "
			    "using default: %d\n", STGE_RXINT_NFRAME_DEFAULT);
			sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT;
		}
	}

	sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT;
	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "rxint_dmawait", &sc->sc_rxint_dmawait);
	if (error == 0) {
		if (sc->sc_rxint_dmawait < STGE_RXINT_DMAWAIT_MIN ||
		    sc->sc_rxint_dmawait > STGE_RXINT_DMAWAIT_MAX) {
			device_printf(dev, "rxint_dmawait value out of range; "
			    "using default: %d\n", STGE_RXINT_DMAWAIT_DEFAULT);
			sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT;
		}
	}
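	/*
	 * Both knobs can also be set per device through kernel
	 * environment hints, e.g. (values purely illustrative):
	 *
	 *	hint.stge.0.rxint_nframe="16"
	 *	hint.stge.0.rxint_dmawait="60"
	 *
	 * Out-of-range values are replaced with the defaults above.
	 */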
	if ((error = stge_dma_alloc(sc)) != 0)
		goto fail;

	/*
	 * Determine if we're copper or fiber.  It affects how we
	 * reset the card.
	 */
	if (CSR_READ_4(sc, STGE_AsicCtrl) & AC_PhyMedia)
		sc->sc_usefiber = 1;
	else
		sc->sc_usefiber = 0;

	/* Load LED configuration from EEPROM. */
	stge_read_eeprom(sc, STGE_EEPROM_LEDMode, &sc->sc_led);

	/*
	 * Reset the chip to a known state.
	 */
	STGE_LOCK(sc);
	stge_reset(sc, STGE_RESET_FULL);
	STGE_UNLOCK(sc);

	/*
	 * Reading the station address from the EEPROM doesn't seem
	 * to work, at least on my sample boards.  Instead, since
	 * the reset sequence does AutoInit, read it from the station
	 * address registers.  For the Sundance ST-1023 it can only
	 * be read from the EEPROM.
	 */
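	/*
	 * The three registers hold the address as consecutive
	 * little-endian 16-bit words; an illustrative address of
	 * 00:40:05:aa:bb:cc would read back as 0x4000, 0xaa05
	 * and 0xccbb.
	 */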
	if (pci_get_device(dev) != DEVICEID_SUNDANCETI_ST1023) {
		uint16_t v;

		v = CSR_READ_2(sc, STGE_StationAddress0);
		enaddr[0] = v & 0xff;
		enaddr[1] = v >> 8;
		v = CSR_READ_2(sc, STGE_StationAddress1);
		enaddr[2] = v & 0xff;
		enaddr[3] = v >> 8;
		v = CSR_READ_2(sc, STGE_StationAddress2);
		enaddr[4] = v & 0xff;
		enaddr[5] = v >> 8;
		sc->sc_stge1023 = 0;
	} else {
		uint16_t myaddr[ETHER_ADDR_LEN / 2];
		for (i = 0; i < ETHER_ADDR_LEN / 2; i++) {
			stge_read_eeprom(sc, STGE_EEPROM_StationAddress0 + i,
			    &myaddr[i]);
			myaddr[i] = le16toh(myaddr[i]);
		}
		bcopy(myaddr, enaddr, sizeof(enaddr));
		sc->sc_stge1023 = 1;
	}

	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(ifp, stge_ioctl);
	if_setstartfn(ifp, stge_start);
	if_setinitfn(ifp, stge_init);
	if_setsendqlen(ifp, STGE_TX_RING_CNT - 1);
	if_setsendqready(ifp);
	/* Revision B3 and earlier chips have a checksum bug. */
	if (sc->sc_rev >= 0x0c) {
		if_sethwassist(ifp, STGE_CSUM_FEATURES);
		if_setcapabilities(ifp, IFCAP_HWCSUM);
	} else {
		if_sethwassist(ifp, 0);
		if_setcapabilities(ifp, 0);
	}
	if_setcapabilitiesbit(ifp, IFCAP_WOL_MAGIC, 0);
	if_setcapenable(ifp, if_getcapabilities(ifp));

	/*
	 * Read some important bits from the PhyCtrl register.
	 */
	sc->sc_PhyCtrl = CSR_READ_1(sc, STGE_PhyCtrl) &
	    (PC_PhyDuplexPolarity | PC_PhyLnkPolarity);

	/* Set up MII bus. */
	flags = MIIF_DOPAUSE;
	if (sc->sc_rev >= 0x40 && sc->sc_rev <= 0x4e)
		flags |= MIIF_MACPRIV0;
	error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, stge_mediachange,
	    stge_mediastatus, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
	    flags);
	if (error != 0) {
		device_printf(sc->sc_dev, "attaching PHYs failed\n");
		goto fail;
	}

	ether_ifattach(ifp, enaddr);

	/* VLAN capability setup */
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING, 0);
	if (sc->sc_rev >= 0x0c)
		if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWCSUM, 0);
	if_setcapenable(ifp, if_getcapabilities(ifp));
#ifdef DEVICE_POLLING
	if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
#endif
	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

	/*
	 * The manual recommends disabling early transmit, so we
	 * do.  It's disabled anyway, if using IP checksumming,
	 * since the entire packet must be in the FIFO in order
	 * for the chip to perform the checksum.
	 */
	sc->sc_txthresh = 0x0fff;

	/*
	 * Disable MWI if the PCI layer tells us to.
	 */
	sc->sc_DMACtrl = 0;
	if ((cmd & PCIM_CMD_MWRICEN) == 0)
		sc->sc_DMACtrl |= DMAC_MWIDisable;

	/*
	 * Hook up the IRQ.
	 */
	error = bus_setup_intr(dev, sc->sc_res[1], INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, stge_intr, sc, &sc->sc_ih);
	if (error != 0) {
		ether_ifdetach(ifp);
		device_printf(sc->sc_dev, "couldn't set up IRQ\n");
		sc->sc_ifp = NULL;
		goto fail;
	}

fail:
	if (error != 0)
		stge_detach(dev);

	return (error);
}
static int
stge_detach(device_t dev)
{
	struct stge_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);

	ifp = sc->sc_ifp;
#ifdef DEVICE_POLLING
	if (ifp && if_getcapenable(ifp) & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif
	if (device_is_attached(dev)) {
		STGE_LOCK(sc);
		/* XXX */
		sc->sc_detach = 1;
		stge_stop(sc);
		STGE_UNLOCK(sc);
		callout_drain(&sc->sc_tick_ch);
		taskqueue_drain(taskqueue_swi, &sc->sc_link_task);
		ether_ifdetach(ifp);
	}

	if (sc->sc_miibus != NULL) {
		device_delete_child(dev, sc->sc_miibus);
		sc->sc_miibus = NULL;
	}
	bus_generic_detach(dev);
	stge_dma_free(sc);

	if (ifp != NULL) {
		if_free(ifp);
		sc->sc_ifp = NULL;
	}

	if (sc->sc_ih) {
		bus_teardown_intr(dev, sc->sc_res[1], sc->sc_ih);
		sc->sc_ih = NULL;
	}

	if (sc->sc_spec)
		bus_release_resources(dev, sc->sc_spec, sc->sc_res);

	mtx_destroy(&sc->sc_mii_mtx);
	mtx_destroy(&sc->sc_mtx);

	return (0);
}

struct stge_dmamap_arg {
	bus_addr_t	stge_busaddr;
};

static void
stge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct stge_dmamap_arg *ctx;

	if (error != 0)
		return;

	ctx = (struct stge_dmamap_arg *)arg;
	ctx->stge_busaddr = segs[0].ds_addr;
}
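/*
 * The callback records only segs[0] because every map loaded through
 * it (the Tx and Rx descriptor rings) comes from a tag created with
 * nsegments == 1, so a successful load always yields exactly one
 * segment.
 */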
static int
stge_dma_alloc(struct stge_softc *sc)
{
	struct stge_dmamap_arg ctx;
	struct stge_txdesc *txd;
	struct stge_rxdesc *rxd;
	int error, i;

	/* create parent tag. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),/* parent */
	    1, 0,			/* algnmnt, boundary */
	    STGE_DMA_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sc_cdata.stge_parent_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to create parent DMA tag\n");
		goto fail;
	}
	/* create tag for Tx ring. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
	    STGE_RING_ALIGN, 0,		/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    STGE_TX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    STGE_TX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sc_cdata.stge_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate Tx ring DMA tag\n");
		goto fail;
	}

	/* create tag for Rx ring. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
	    STGE_RING_ALIGN, 0,		/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    STGE_RX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    STGE_RX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sc_cdata.stge_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate Rx ring DMA tag\n");
		goto fail;
	}

	/* create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * STGE_MAXTXSEGS,	/* maxsize */
	    STGE_MAXTXSEGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sc_cdata.stge_tx_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to allocate Tx DMA tag\n");
		goto fail;
	}

	/* create tag for Rx buffers. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sc_cdata.stge_rx_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to allocate Rx DMA tag\n");
		goto fail;
	}

	/* allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->sc_cdata.stge_tx_ring_tag,
	    (void **)&sc->sc_rdata.stge_tx_ring, BUS_DMA_NOWAIT |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sc_cdata.stge_tx_ring_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}

	ctx.stge_busaddr = 0;
	error = bus_dmamap_load(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map, sc->sc_rdata.stge_tx_ring,
	    STGE_TX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.stge_busaddr == 0) {
		device_printf(sc->sc_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	sc->sc_rdata.stge_tx_ring_paddr = ctx.stge_busaddr;

	/* allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->sc_cdata.stge_rx_ring_tag,
	    (void **)&sc->sc_rdata.stge_rx_ring, BUS_DMA_NOWAIT |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sc_cdata.stge_rx_ring_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}

	ctx.stge_busaddr = 0;
	error = bus_dmamap_load(sc->sc_cdata.stge_rx_ring_tag,
	    sc->sc_cdata.stge_rx_ring_map, sc->sc_rdata.stge_rx_ring,
	    STGE_RX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.stge_busaddr == 0) {
		device_printf(sc->sc_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc->sc_rdata.stge_rx_ring_paddr = ctx.stge_busaddr;

	/* create DMA maps for Tx buffers. */
	for (i = 0; i < STGE_TX_RING_CNT; i++) {
		txd = &sc->sc_cdata.stge_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = 0;
		error = bus_dmamap_create(sc->sc_cdata.stge_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "failed to create Tx dmamap\n");
			goto fail;
		}
	}
	/* create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0,
	    &sc->sc_cdata.stge_rx_sparemap)) != 0) {
		device_printf(sc->sc_dev, "failed to create spare Rx dmamap\n");
		goto fail;
	}
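	/*
	 * The spare map supports the usual refill trick: stge_newbuf()
	 * can load a new mbuf into the spare map first and swap maps
	 * only on success, so an allocation failure never leaves a
	 * descriptor without a mapped buffer.
	 */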
	for (i = 0; i < STGE_RX_RING_CNT; i++) {
		rxd = &sc->sc_cdata.stge_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = 0;
		error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "failed to create Rx dmamap\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
stge_dma_free(struct stge_softc *sc)
{
	struct stge_txdesc *txd;
	struct stge_rxdesc *rxd;
	int i;

	/* Tx ring */
	if (sc->sc_cdata.stge_tx_ring_tag) {
		if (sc->sc_rdata.stge_tx_ring_paddr)
			bus_dmamap_unload(sc->sc_cdata.stge_tx_ring_tag,
			    sc->sc_cdata.stge_tx_ring_map);
		if (sc->sc_rdata.stge_tx_ring)
			bus_dmamem_free(sc->sc_cdata.stge_tx_ring_tag,
			    sc->sc_rdata.stge_tx_ring,
			    sc->sc_cdata.stge_tx_ring_map);
		sc->sc_rdata.stge_tx_ring = NULL;
		sc->sc_rdata.stge_tx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->sc_cdata.stge_tx_ring_tag);
		sc->sc_cdata.stge_tx_ring_tag = NULL;
	}
	/* Rx ring */
	if (sc->sc_cdata.stge_rx_ring_tag) {
		if (sc->sc_rdata.stge_rx_ring_paddr)
			bus_dmamap_unload(sc->sc_cdata.stge_rx_ring_tag,
			    sc->sc_cdata.stge_rx_ring_map);
		if (sc->sc_rdata.stge_rx_ring)
			bus_dmamem_free(sc->sc_cdata.stge_rx_ring_tag,
			    sc->sc_rdata.stge_rx_ring,
			    sc->sc_cdata.stge_rx_ring_map);
		sc->sc_rdata.stge_rx_ring = NULL;
		sc->sc_rdata.stge_rx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->sc_cdata.stge_rx_ring_tag);
		sc->sc_cdata.stge_rx_ring_tag = NULL;
	}
	/* Tx buffers */
	if (sc->sc_cdata.stge_tx_tag) {
		for (i = 0; i < STGE_TX_RING_CNT; i++) {
			txd = &sc->sc_cdata.stge_txdesc[i];
			if (txd->tx_dmamap) {
				bus_dmamap_destroy(sc->sc_cdata.stge_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = 0;
			}
		}
		bus_dma_tag_destroy(sc->sc_cdata.stge_tx_tag);
		sc->sc_cdata.stge_tx_tag = NULL;
	}
	/* Rx buffers */
	if (sc->sc_cdata.stge_rx_tag) {
		for (i = 0; i < STGE_RX_RING_CNT; i++) {
			rxd = &sc->sc_cdata.stge_rxdesc[i];
			if (rxd->rx_dmamap) {
				bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = 0;
			}
		}
		if (sc->sc_cdata.stge_rx_sparemap) {
			bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
			    sc->sc_cdata.stge_rx_sparemap);
			sc->sc_cdata.stge_rx_sparemap = 0;
		}
		bus_dma_tag_destroy(sc->sc_cdata.stge_rx_tag);
		sc->sc_cdata.stge_rx_tag = NULL;
	}

	if (sc->sc_cdata.stge_parent_tag) {
		bus_dma_tag_destroy(sc->sc_cdata.stge_parent_tag);
		sc->sc_cdata.stge_parent_tag = NULL;
	}
}

/*
 * stge_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static int
stge_shutdown(device_t dev)
{

	return (stge_suspend(dev));
}

static void
stge_setwol(struct stge_softc *sc)
{
	if_t ifp;
	uint8_t v;

	STGE_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;
	v = CSR_READ_1(sc, STGE_WakeEvent);
	/* Disable all WOL bits. */
	v &= ~(WE_WakePktEnable | WE_MagicPktEnable | WE_LinkEventEnable |
	    WE_WakeOnLanEnable);
	if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0)
		v |= WE_MagicPktEnable | WE_WakeOnLanEnable;
	CSR_WRITE_1(sc, STGE_WakeEvent, v);
	/* Reset Tx and prevent transmission. */
	CSR_WRITE_4(sc, STGE_AsicCtrl,
	    CSR_READ_4(sc, STGE_AsicCtrl) | AC_TxReset);
	/*
	 * The TC9021 automatically resets the link speed to 100Mbps
	 * when it is put to sleep, so there is no need to reset it here.
	 */
}

static int
stge_suspend(device_t dev)
{
	struct stge_softc *sc;

	sc = device_get_softc(dev);

	STGE_LOCK(sc);
	stge_stop(sc);
	sc->sc_suspended = 1;
	stge_setwol(sc);
	STGE_UNLOCK(sc);

	return (0);
}

static int
stge_resume(device_t dev)
{
	struct stge_softc *sc;
	if_t ifp;
	uint8_t v;

	sc = device_get_softc(dev);

	STGE_LOCK(sc);
	/*
	 * Clear the WOL bits so that wakeup frames no longer
	 * interfere with normal Rx operation.
	 */
	v = CSR_READ_1(sc, STGE_WakeEvent);
	v &= ~(WE_WakePktEnable | WE_MagicPktEnable | WE_LinkEventEnable |
	    WE_WakeOnLanEnable);
	CSR_WRITE_1(sc, STGE_WakeEvent, v);
	ifp = sc->sc_ifp;
	if (if_getflags(ifp) & IFF_UP)
		stge_init_locked(sc);

	sc->sc_suspended = 0;
	STGE_UNLOCK(sc);

	return (0);
}

static void
stge_dma_wait(struct stge_softc *sc)
{
	int i;

	for (i = 0; i < STGE_TIMEOUT; i++) {
		DELAY(2);
		if ((CSR_READ_4(sc, STGE_DMACtrl) & DMAC_TxDMAInProg) == 0)
			break;
	}

	if (i == STGE_TIMEOUT)
		device_printf(sc->sc_dev, "DMA wait timed out\n");
}
static int
stge_encap(struct stge_softc *sc, struct mbuf **m_head)
{
	struct stge_txdesc *txd;
	struct stge_tfd *tfd;
	struct mbuf *m;
	bus_dma_segment_t txsegs[STGE_MAXTXSEGS];
	int error, i, nsegs, si;
	uint64_t csum_flags, tfc;

	STGE_LOCK_ASSERT(sc);

	if ((txd = STAILQ_FIRST(&sc->sc_cdata.stge_txfreeq)) == NULL)
		return (ENOBUFS);

	error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_tx_tag,
	    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_NOWAIT, STGE_MAXTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_tx_tag,
		    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	m = *m_head;
	csum_flags = 0;
	if ((m->m_pkthdr.csum_flags & STGE_CSUM_FEATURES) != 0) {
		if (m->m_pkthdr.csum_flags & CSUM_IP)
			csum_flags |= TFD_IPChecksumEnable;
		if (m->m_pkthdr.csum_flags & CSUM_TCP)
			csum_flags |= TFD_TCPChecksumEnable;
		else if (m->m_pkthdr.csum_flags & CSUM_UDP)
			csum_flags |= TFD_UDPChecksumEnable;
	}

	si = sc->sc_cdata.stge_tx_prod;
	tfd = &sc->sc_rdata.stge_tx_ring[si];
	for (i = 0; i < nsegs; i++)
		tfd->tfd_frags[i].frag_word0 =
		    htole64(FRAG_ADDR(txsegs[i].ds_addr) |
		    FRAG_LEN(txsegs[i].ds_len));
	sc->sc_cdata.stge_tx_cnt++;

	tfc = TFD_FrameId(si) | TFD_WordAlign(TFD_WordAlign_disable) |
	    TFD_FragCount(nsegs) | csum_flags;
	if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT)
		tfc |= TFD_TxDMAIndicate;

	/* Update producer index. */
	sc->sc_cdata.stge_tx_prod = (si + 1) % STGE_TX_RING_CNT;

	/* Check if we have a VLAN tag to insert. */
	if (m->m_flags & M_VLANTAG)
		tfc |= (TFD_VLANTagInsert | TFD_VID(m->m_pkthdr.ether_vtag));
	tfd->tfd_control = htole64(tfc);

	/* Update Tx Queue. */
	STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txfreeq, tx_q);
	STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txbusyq, txd, tx_q);
	txd->tx_m = m;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
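/*
 * A note on the descriptor format: each fragment word combines a
 * buffer bus address and length via FRAG_ADDR()/FRAG_LEN(), and
 * TFD_FragCount() tells the chip how many fragments are valid.
 * The chip sets TFD_TFDDone in tfd_control once it is done with
 * the TFD, which is what stge_txeof() checks for.
 */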
/*
 * stge_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
stge_start(if_t ifp)
{
	struct stge_softc *sc;

	sc = if_getsoftc(ifp);
	STGE_LOCK(sc);
	stge_start_locked(ifp);
	STGE_UNLOCK(sc);
}

static void
stge_start_locked(if_t ifp)
{
	struct stge_softc *sc;
	struct mbuf *m_head;
	int enq;

	sc = if_getsoftc(ifp);

	STGE_LOCK_ASSERT(sc);

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || sc->sc_link == 0)
		return;

	for (enq = 0; !if_sendq_empty(ifp); ) {
		if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT) {
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}

		m_head = if_dequeue(ifp);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring.  If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (stge_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			if_sendq_prepend(ifp, m_head);
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/* Transmit */
		CSR_WRITE_4(sc, STGE_DMACtrl, DMAC_TxDMAPollNow);

		/* Set a timeout in case the chip goes out to lunch. */
		sc->sc_watchdog_timer = 5;
	}
}

/*
 * stge_watchdog:
 *
 *	Watchdog timer handler.
 */
static void
stge_watchdog(struct stge_softc *sc)
{
	if_t ifp;

	STGE_LOCK_ASSERT(sc);

	if (sc->sc_watchdog_timer == 0 || --sc->sc_watchdog_timer)
		return;

	ifp = sc->sc_ifp;
	if_printf(sc->sc_ifp, "device timeout\n");
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	stge_init_locked(sc);
	if (!if_sendq_empty(ifp))
		stge_start_locked(ifp);
}
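/*
 * The watchdog is armed (5 seconds) in stge_start_locked() whenever
 * frames are queued and cleared by stge_txeof() once the Tx ring
 * drains; stge_tick() calls it once a second, so a wedged transmitter
 * is detected and the chip reinitialized within roughly five seconds.
 */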
/*
 * stge_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
static int
stge_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct stge_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	sc = if_getsoftc(ifp);
	ifr = (struct ifreq *)data;
	error = 0;
	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > STGE_JUMBO_MTU)
			error = EINVAL;
		else if (if_getmtu(ifp) != ifr->ifr_mtu) {
			if_setmtu(ifp, ifr->ifr_mtu);
			STGE_LOCK(sc);
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
				if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
				stge_init_locked(sc);
			}
			STGE_UNLOCK(sc);
		}
		break;
	case SIOCSIFFLAGS:
		STGE_LOCK(sc);
		if ((if_getflags(ifp) & IFF_UP) != 0) {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
				if (((if_getflags(ifp) ^ sc->sc_if_flags)
				    & IFF_PROMISC) != 0)
					stge_set_filter(sc);
			} else {
				if (sc->sc_detach == 0)
					stge_init_locked(sc);
			}
		} else {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
				stge_stop(sc);
		}
		sc->sc_if_flags = if_getflags(ifp);
		STGE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		STGE_LOCK(sc);
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
			stge_set_multi(sc);
		STGE_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->sc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
#ifdef DEVICE_POLLING
		if ((mask & IFCAP_POLLING) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
				error = ether_poll_register(stge_poll, ifp);
				if (error != 0)
					break;
				STGE_LOCK(sc);
				CSR_WRITE_2(sc, STGE_IntEnable, 0);
				if_setcapenablebit(ifp, IFCAP_POLLING, 0);
				STGE_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				if (error != 0)
					break;
				STGE_LOCK(sc);
				CSR_WRITE_2(sc, STGE_IntEnable,
				    sc->sc_IntEnable);
				if_setcapenablebit(ifp, 0, IFCAP_POLLING);
				STGE_UNLOCK(sc);
			}
		}
#endif
		if ((mask & IFCAP_HWCSUM) != 0) {
			if_togglecapenable(ifp, IFCAP_HWCSUM);
			if ((IFCAP_HWCSUM & if_getcapenable(ifp)) != 0 &&
			    (IFCAP_HWCSUM & if_getcapabilities(ifp)) != 0)
				if_sethwassist(ifp, STGE_CSUM_FEATURES);
			else
				if_sethwassist(ifp, 0);
		}
		if ((mask & IFCAP_WOL) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_WOL) != 0) {
			if ((mask & IFCAP_WOL_MAGIC) != 0)
				if_togglecapenable(ifp, IFCAP_WOL_MAGIC);
		}
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
			if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
				STGE_LOCK(sc);
				stge_vlan_setup(sc);
				STGE_UNLOCK(sc);
			}
		}
		VLAN_CAPABILITIES(ifp);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

static void
stge_link_task(void *arg, int pending)
{
	struct stge_softc *sc;
	struct mii_data *mii;
	uint32_t v, ac;
	int i;

	sc = (struct stge_softc *)arg;
	STGE_LOCK(sc);

	mii = device_get_softc(sc->sc_miibus);
	if (mii->mii_media_status & IFM_ACTIVE) {
		if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
			sc->sc_link = 1;
	} else
		sc->sc_link = 0;

	sc->sc_MACCtrl = 0;
	if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
		sc->sc_MACCtrl |= MC_DuplexSelect;
	if (((mii->mii_media_active & IFM_GMASK) & IFM_ETH_RXPAUSE) != 0)
		sc->sc_MACCtrl |= MC_RxFlowControlEnable;
	if (((mii->mii_media_active & IFM_GMASK) & IFM_ETH_TXPAUSE) != 0)
		sc->sc_MACCtrl |= MC_TxFlowControlEnable;
	/*
	 * Update STGE_MACCtrl register depending on link status.
	 * (duplex, flow control etc)
	 */
	v = ac = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	v &= ~(MC_DuplexSelect|MC_RxFlowControlEnable|MC_TxFlowControlEnable);
	v |= sc->sc_MACCtrl;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	if (((ac ^ sc->sc_MACCtrl) & MC_DuplexSelect) != 0) {
		/* Duplex setting changed, reset Tx/Rx functions. */
		ac = CSR_READ_4(sc, STGE_AsicCtrl);
		ac |= AC_TxReset | AC_RxReset;
		CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
		for (i = 0; i < STGE_TIMEOUT; i++) {
			DELAY(100);
			if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
				break;
		}
		if (i == STGE_TIMEOUT)
			device_printf(sc->sc_dev, "reset failed to complete\n");
	}
	STGE_UNLOCK(sc);
}

static __inline int
stge_tx_error(struct stge_softc *sc)
{
	uint32_t txstat;
	int error;

	for (error = 0;;) {
		txstat = CSR_READ_4(sc, STGE_TxStatus);
		if ((txstat & TS_TxComplete) == 0)
			break;
		/* Tx underrun */
		if ((txstat & TS_TxUnderrun) != 0) {
			/*
			 * XXX
			 * There should be a better way to recover
			 * from a Tx underrun than a full reset.
			 */
			if (sc->sc_nerr++ < STGE_MAXERR)
				device_printf(sc->sc_dev, "Tx underrun, "
				    "resetting...\n");
			if (sc->sc_nerr == STGE_MAXERR)
				device_printf(sc->sc_dev, "too many errors; "
				    "not reporting any more\n");
			error = -1;
			break;
		}
		/* Maximum/Late collisions, re-enable Tx MAC. */
		if ((txstat & (TS_MaxCollisions|TS_LateCollision)) != 0)
			CSR_WRITE_4(sc, STGE_MACCtrl,
			    (CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK) |
			    MC_TxEnable);
	}

	return (error);
}
/*
 * stge_intr:
 *
 *	Interrupt service routine.
 */
static void
stge_intr(void *arg)
{
	struct stge_softc *sc;
	if_t ifp;
	int reinit;
	uint16_t status;

	sc = (struct stge_softc *)arg;
	ifp = sc->sc_ifp;

	STGE_LOCK(sc);

#ifdef DEVICE_POLLING
	if ((if_getcapenable(ifp) & IFCAP_POLLING) != 0)
		goto done_locked;
#endif
	status = CSR_READ_2(sc, STGE_IntStatus);
	if (sc->sc_suspended || (status & IS_InterruptStatus) == 0)
		goto done_locked;

	/* Disable interrupts. */
	for (reinit = 0;;) {
		status = CSR_READ_2(sc, STGE_IntStatusAck);
		status &= sc->sc_IntEnable;
		if (status == 0)
			break;
		/* Host interface errors. */
		if ((status & IS_HostError) != 0) {
			device_printf(sc->sc_dev,
			    "Host interface error, resetting...\n");
			reinit = 1;
			goto force_init;
		}

		/* Receive interrupts. */
		if ((status & IS_RxDMAComplete) != 0) {
			stge_rxeof(sc);
			if ((status & IS_RFDListEnd) != 0)
				CSR_WRITE_4(sc, STGE_DMACtrl,
				    DMAC_RxDMAPollNow);
		}

		/* Transmit interrupts. */
		if ((status & (IS_TxDMAComplete | IS_TxComplete)) != 0)
			stge_txeof(sc);

		/* Transmission errors. */
		if ((status & IS_TxComplete) != 0) {
			if ((reinit = stge_tx_error(sc)) != 0)
				break;
		}
	}

force_init:
	if (reinit != 0) {
		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
		stge_init_locked(sc);
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);

	/* Try to get more packets going. */
	if (!if_sendq_empty(ifp))
		stge_start_locked(ifp);

done_locked:
	STGE_UNLOCK(sc);
}

/*
 * stge_txeof:
 *
 *	Helper; handle transmit interrupts.
 */
static void
stge_txeof(struct stge_softc *sc)
{
	if_t ifp;
	struct stge_txdesc *txd;
	uint64_t control;
	int cons;

	STGE_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;

	txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
	if (txd == NULL)
		return;
	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map, BUS_DMASYNC_POSTREAD);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (cons = sc->sc_cdata.stge_tx_cons;;
	    cons = (cons + 1) % STGE_TX_RING_CNT) {
		if (sc->sc_cdata.stge_tx_cnt <= 0)
			break;
		control = le64toh(sc->sc_rdata.stge_tx_ring[cons].tfd_control);
		if ((control & TFD_TFDDone) == 0)
			break;
		sc->sc_cdata.stge_tx_cnt--;
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

		bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap);

		/* Output counter is updated with statistics register */
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txbusyq, tx_q);
		STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
		txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
	}
	sc->sc_cdata.stge_tx_cons = cons;
	if (sc->sc_cdata.stge_tx_cnt == 0)
		sc->sc_watchdog_timer = 0;

	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static __inline void
stge_discard_rxbuf(struct stge_softc *sc, int idx)
{
	struct stge_rfd *rfd;

	rfd = &sc->sc_rdata.stge_rx_ring[idx];
	rfd->rfd_status = 0;
}

#ifndef __NO_STRICT_ALIGNMENT
/*
 * It seems that the TC9021's DMA engine has alignment restrictions in
 * DMA scatter operations.  The first DMA segment has no address
 * alignment restrictions, but the rest should be aligned on a 4(?)
 * byte boundary; otherwise it will corrupt random memory.  Since we
 * don't know in advance which one will be used for the first segment,
 * we simply don't align at all.
 * To avoid copying over an entire frame to align, we allocate a new
 * mbuf and copy the Ethernet header into it.  The new mbuf is then
 * prepended to the existing mbuf chain.
 */
static __inline struct mbuf *
stge_fixup_rx(struct stge_softc *sc, struct mbuf *m)
{
	struct mbuf *n;

	n = NULL;
	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
		m->m_data += ETHER_HDR_LEN;
		n = m;
	} else {
		MGETHDR(n, M_NOWAIT, MT_DATA);
		if (n != NULL) {
			bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
			m->m_data += ETHER_HDR_LEN;
			m->m_len -= ETHER_HDR_LEN;
			n->m_len = ETHER_HDR_LEN;
			M_MOVE_PKTHDR(n, m);
			n->m_next = m;
		} else
			m_freem(m);
	}

	return (n);
}
#endif
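/*
 * In the single-buffer case above, moving the frame forward by
 * ETHER_HDR_LEN (14) bytes changes its alignment by 2 modulo 4,
 * which leaves the IP header that follows the Ethernet header
 * 32-bit aligned.  In the chained case only the Ethernet header is
 * peeled off into its own mbuf; the protocol layers then pull the
 * payload headers into aligned storage as needed.
 */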
/*
 * stge_rxeof:
 *
 *	Helper; handle receive interrupts.
 */
static int
stge_rxeof(struct stge_softc *sc)
{
	if_t ifp;
	struct stge_rxdesc *rxd;
	struct mbuf *mp, *m;
	uint64_t status64;
	uint32_t status;
	int cons, prog, rx_npkts;

	STGE_LOCK_ASSERT(sc);

	rx_npkts = 0;
	ifp = sc->sc_ifp;

	bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
	    sc->sc_cdata.stge_rx_ring_map, BUS_DMASYNC_POSTREAD);

	prog = 0;
	for (cons = sc->sc_cdata.stge_rx_cons; prog < STGE_RX_RING_CNT;
	    prog++, cons = (cons + 1) % STGE_RX_RING_CNT) {
		status64 = le64toh(sc->sc_rdata.stge_rx_ring[cons].rfd_status);
		status = RFD_RxStatus(status64);
		if ((status & RFD_RFDDone) == 0)
			break;
#ifdef DEVICE_POLLING
		if (if_getcapenable(ifp) & IFCAP_POLLING) {
			if (sc->sc_cdata.stge_rxcycles <= 0)
				break;
			sc->sc_cdata.stge_rxcycles--;
		}
#endif
		prog++;
		rxd = &sc->sc_cdata.stge_rxdesc[cons];
		mp = rxd->rx_m;

		/*
		 * If the packet had an error, drop it.  Note we count
		 * the error later in the periodic stats update.
		 */
		if ((status & RFD_FrameEnd) != 0 && (status &
		    (RFD_RxFIFOOverrun | RFD_RxRuntFrame |
		    RFD_RxAlignmentError | RFD_RxFCSError |
		    RFD_RxLengthError)) != 0) {
			stge_discard_rxbuf(sc, cons);
			if (sc->sc_cdata.stge_rxhead != NULL) {
				m_freem(sc->sc_cdata.stge_rxhead);
				STGE_RXCHAIN_RESET(sc);
			}
			continue;
		}
		/*
		 * Add a new receive buffer to the ring.
		 */
		if (stge_newbuf(sc, cons) != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			stge_discard_rxbuf(sc, cons);
			if (sc->sc_cdata.stge_rxhead != NULL) {
				m_freem(sc->sc_cdata.stge_rxhead);
				STGE_RXCHAIN_RESET(sc);
			}
			continue;
		}

		if ((status & RFD_FrameEnd) != 0)
			mp->m_len = RFD_RxDMAFrameLen(status) -
			    sc->sc_cdata.stge_rxlen;
		sc->sc_cdata.stge_rxlen += mp->m_len;

		/* Chain mbufs. */
		if (sc->sc_cdata.stge_rxhead == NULL) {
			sc->sc_cdata.stge_rxhead = mp;
			sc->sc_cdata.stge_rxtail = mp;
		} else {
			mp->m_flags &= ~M_PKTHDR;
			sc->sc_cdata.stge_rxtail->m_next = mp;
			sc->sc_cdata.stge_rxtail = mp;
		}

		if ((status & RFD_FrameEnd) != 0) {
			m = sc->sc_cdata.stge_rxhead;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = sc->sc_cdata.stge_rxlen;

			if (m->m_pkthdr.len > sc->sc_if_framesize) {
				m_freem(m);
				STGE_RXCHAIN_RESET(sc);
				continue;
			}
			/*
			 * Set the incoming checksum information for
			 * the packet.
			 */
			if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) {
				if ((status & RFD_IPDetected) != 0) {
					m->m_pkthdr.csum_flags |=
					    CSUM_IP_CHECKED;
					if ((status & RFD_IPError) == 0)
						m->m_pkthdr.csum_flags |=
						    CSUM_IP_VALID;
				}
				if (((status & RFD_TCPDetected) != 0 &&
				    (status & RFD_TCPError) == 0) ||
				    ((status & RFD_UDPDetected) != 0 &&
				    (status & RFD_UDPError) == 0)) {
					m->m_pkthdr.csum_flags |=
					    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

#ifndef __NO_STRICT_ALIGNMENT
			if (sc->sc_if_framesize > (MCLBYTES - ETHER_ALIGN)) {
				if ((m = stge_fixup_rx(sc, m)) == NULL) {
					STGE_RXCHAIN_RESET(sc);
					continue;
				}
			}
#endif
			/* Check for VLAN tagged packets. */
			if ((status & RFD_VLANDetected) != 0 &&
			    (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) {
				m->m_pkthdr.ether_vtag = RFD_TCI(status64);
				m->m_flags |= M_VLANTAG;
			}

			STGE_UNLOCK(sc);
			/* Pass it on. */
			if_input(ifp, m);
			STGE_LOCK(sc);
			rx_npkts++;

			STGE_RXCHAIN_RESET(sc);
		}
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->sc_cdata.stge_rx_cons = cons;
		bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
		    sc->sc_cdata.stge_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	return (rx_npkts);
}

#ifdef DEVICE_POLLING
static int
stge_poll(if_t ifp, enum poll_cmd cmd, int count)
{
	struct stge_softc *sc;
	uint16_t status;
	int rx_npkts;

	rx_npkts = 0;
	sc = if_getsoftc(ifp);
	STGE_LOCK(sc);
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
		STGE_UNLOCK(sc);
		return (rx_npkts);
	}

	sc->sc_cdata.stge_rxcycles = count;
	rx_npkts = stge_rxeof(sc);
	stge_txeof(sc);

	if (cmd == POLL_AND_CHECK_STATUS) {
		status = CSR_READ_2(sc, STGE_IntStatus);
		status &= sc->sc_IntEnable;
		if (status != 0) {
			if ((status & IS_HostError) != 0) {
				device_printf(sc->sc_dev,
				    "Host interface error, resetting...\n");
				if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
				stge_init_locked(sc);
			}
			if ((status & IS_TxComplete) != 0) {
				if (stge_tx_error(sc) != 0) {
					if_setdrvflagbits(ifp, 0,
					    IFF_DRV_RUNNING);
					stge_init_locked(sc);
				}
			}
		}
	}

	if (!if_sendq_empty(ifp))
		stge_start_locked(ifp);

	STGE_UNLOCK(sc);
	return (rx_npkts);
}
#endif	/* DEVICE_POLLING */
/*
 * stge_tick:
 *
 *	One second timer, used to tick the MII.
 */
static void
stge_tick(void *arg)
{
	struct stge_softc *sc;
	struct mii_data *mii;

	sc = (struct stge_softc *)arg;

	STGE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->sc_miibus);
	mii_tick(mii);

	/* Update statistics counters. */
	stge_stats_update(sc);

	/*
	 * Reclaim any pending Tx descriptors to release mbufs in a
	 * timely manner as we don't generate Tx completion interrupts
	 * for every frame.  This limits the delay to a maximum of one
	 * second.
	 */
	if (sc->sc_cdata.stge_tx_cnt != 0)
		stge_txeof(sc);

	stge_watchdog(sc);

	callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);
}

/*
 * stge_stats_update:
 *
 *	Read the TC9021 statistics counters.
 */
static void
stge_stats_update(struct stge_softc *sc)
{
	if_t ifp;

	STGE_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;

	CSR_READ_4(sc, STGE_OctetRcvOk);

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, CSR_READ_4(sc, STGE_FramesRcvdOk));

	if_inc_counter(ifp, IFCOUNTER_IERRORS, CSR_READ_2(sc, STGE_FramesLostRxErrors));

	CSR_READ_4(sc, STGE_OctetXmtdOk);

	if_inc_counter(ifp, IFCOUNTER_OPACKETS, CSR_READ_4(sc, STGE_FramesXmtdOk));

	if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
	    CSR_READ_4(sc, STGE_LateCollisions) +
	    CSR_READ_4(sc, STGE_MultiColFrames) +
	    CSR_READ_4(sc, STGE_SingleColFrames));

	if_inc_counter(ifp, IFCOUNTER_OERRORS,
	    CSR_READ_2(sc, STGE_FramesAbortXSColls) +
	    CSR_READ_2(sc, STGE_FramesWEXDeferal));
}
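/*
 * The statistics registers appear to clear on read; the two reads
 * whose results are discarded (OctetRcvOk, OctetXmtdOk) are issued
 * only to clear the octet counters.
 */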
/*
 * stge_reset:
 *
 *	Perform a soft reset on the TC9021.
 */
static void
stge_reset(struct stge_softc *sc, uint32_t how)
{
	uint32_t ac;
	uint8_t v;
	int i, dv;

	STGE_LOCK_ASSERT(sc);

	dv = 5000;
	ac = CSR_READ_4(sc, STGE_AsicCtrl);
	switch (how) {
	case STGE_RESET_TX:
		ac |= AC_TxReset | AC_FIFO;
		dv = 100;
		break;
	case STGE_RESET_RX:
		ac |= AC_RxReset | AC_FIFO;
		dv = 100;
		break;
	case STGE_RESET_FULL:
	default:
		/*
		 * Only assert RstOut if we're fiber.  We need GMII clocks
		 * to be present in order for the reset to complete on fiber
		 * cards.
		 */
		ac |= AC_GlobalReset | AC_RxReset | AC_TxReset |
		    AC_DMA | AC_FIFO | AC_Network | AC_Host | AC_AutoInit |
		    (sc->sc_usefiber ? AC_RstOut : 0);
		break;
	}

	CSR_WRITE_4(sc, STGE_AsicCtrl, ac);

	/* Account for reset problem at 10Mbps. */
	DELAY(dv);

	for (i = 0; i < STGE_TIMEOUT; i++) {
		if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
			break;
		DELAY(dv);
	}

	if (i == STGE_TIMEOUT)
		device_printf(sc->sc_dev, "reset failed to complete\n");

	/* Set LED, from Linux IPG driver. */
	ac = CSR_READ_4(sc, STGE_AsicCtrl);
	ac &= ~(AC_LEDMode | AC_LEDSpeed | AC_LEDModeBit1);
	if ((sc->sc_led & 0x01) != 0)
		ac |= AC_LEDMode;
	if ((sc->sc_led & 0x03) != 0)
		ac |= AC_LEDModeBit1;
	if ((sc->sc_led & 0x08) != 0)
		ac |= AC_LEDSpeed;
	CSR_WRITE_4(sc, STGE_AsicCtrl, ac);

	/* Set PHY, from Linux IPG driver */
	v = CSR_READ_1(sc, STGE_PhySet);
	v &= ~(PS_MemLenb9b | PS_MemLen | PS_NonCompdet);
	v |= ((sc->sc_led & 0x70) >> 4);
	CSR_WRITE_1(sc, STGE_PhySet, v);
}

/*
 * stge_init:		[ifnet interface function]
 *
 *	Initialize the interface.
 */
static void
stge_init(void *xsc)
{
	struct stge_softc *sc;

	sc = (struct stge_softc *)xsc;
	STGE_LOCK(sc);
	stge_init_locked(sc);
	STGE_UNLOCK(sc);
}

static void
stge_init_locked(struct stge_softc *sc)
{
	if_t ifp;
	struct mii_data *mii;
	uint16_t eaddr[3];
	uint32_t v;
	int error;

	STGE_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		return;
	mii = device_get_softc(sc->sc_miibus);

	/*
	 * Cancel any pending I/O.
	 */
	stge_stop(sc);

	/*
	 * Reset the chip to a known state.
	 */
	stge_reset(sc, STGE_RESET_FULL);

	/* Init descriptors. */
	error = stge_init_rx_ring(sc);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "initialization failed: no memory for rx buffers\n");
		stge_stop(sc);
		goto out;
	}
	stge_init_tx_ring(sc);

	/* Set the station address. */
	bcopy(if_getlladdr(ifp), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_2(sc, STGE_StationAddress0, htole16(eaddr[0]));
	CSR_WRITE_2(sc, STGE_StationAddress1, htole16(eaddr[1]));
	CSR_WRITE_2(sc, STGE_StationAddress2, htole16(eaddr[2]));

	/*
	 * Set the statistics masks.  Disable all the RMON stats,
	 * and disable selected stats in the non-RMON stats registers.
	 */
	CSR_WRITE_4(sc, STGE_RMONStatisticsMask, 0xffffffff);
	CSR_WRITE_4(sc, STGE_StatisticsMask,
	    (1U << 1) | (1U << 2) | (1U << 3) | (1U << 4) | (1U << 5) |
	    (1U << 6) | (1U << 7) | (1U << 8) | (1U << 9) | (1U << 10) |
	    (1U << 13) | (1U << 14) | (1U << 15) | (1U << 19) | (1U << 20) |
	    (1U << 21));

	/* Set up the receive filter. */
	stge_set_filter(sc);
	/* Program multicast filter. */
	stge_set_multi(sc);

	/*
	 * Give the transmit and receive ring to the chip.
	 */
	CSR_WRITE_4(sc, STGE_TFDListPtrHi,
	    STGE_ADDR_HI(STGE_TX_RING_ADDR(sc, 0)));
	CSR_WRITE_4(sc, STGE_TFDListPtrLo,
	    STGE_ADDR_LO(STGE_TX_RING_ADDR(sc, 0)));

	CSR_WRITE_4(sc, STGE_RFDListPtrHi,
	    STGE_ADDR_HI(STGE_RX_RING_ADDR(sc, 0)));
	CSR_WRITE_4(sc, STGE_RFDListPtrLo,
	    STGE_ADDR_LO(STGE_RX_RING_ADDR(sc, 0)));

	/*
	 * Initialize the Tx auto-poll period.  It's OK to make this number
	 * large (255 is the max, but we use 127) -- we explicitly kick the
	 * transmit engine when there's actually a packet.
	 */
	CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);

	/* ..and the Rx auto-poll period. */
	CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);

	/* Initialize the Tx start threshold. */
	CSR_WRITE_2(sc, STGE_TxStartThresh, sc->sc_txthresh);

	/* Rx DMA thresholds, from Linux */
	CSR_WRITE_1(sc, STGE_RxDMABurstThresh, 0x30);
	CSR_WRITE_1(sc, STGE_RxDMAUrgentThresh, 0x30);

	/* Rx early threshold, from Linux */
	CSR_WRITE_2(sc, STGE_RxEarlyThresh, 0x7ff);

	/* Tx DMA thresholds, from Linux */
	CSR_WRITE_1(sc, STGE_TxDMABurstThresh, 0x30);
	CSR_WRITE_1(sc, STGE_TxDMAUrgentThresh, 0x04);

	/*
	 * Initialize the Rx DMA interrupt control register.  We
	 * request an interrupt after every incoming packet, but
	 * defer it for sc_rxint_dmawait us.  When the number of
	 * interrupts pending reaches STGE_RXINT_NFRAME, we stop
	 * deferring the interrupt, and signal it immediately.
	 */
	CSR_WRITE_4(sc, STGE_RxDMAIntCtrl,
	    RDIC_RxFrameCount(sc->sc_rxint_nframe) |
	    RDIC_RxDMAWaitTime(STGE_RXINT_USECS2TICK(sc->sc_rxint_dmawait)));
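	/*
	 * The wait time is programmed in chip ticks, hence the
	 * STGE_RXINT_USECS2TICK() conversion from the microsecond-based
	 * sc_rxint_dmawait tunable.
	 */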
	/*
	 * Initialize the interrupt mask.
	 */
	sc->sc_IntEnable = IS_HostError | IS_TxComplete |
	    IS_TxDMAComplete | IS_RxDMAComplete | IS_RFDListEnd;
#ifdef DEVICE_POLLING
	/* Disable interrupts if we are polling. */
	if ((if_getcapenable(ifp) & IFCAP_POLLING) != 0)
		CSR_WRITE_2(sc, STGE_IntEnable, 0);
	else
#endif
	CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);

	/*
	 * Configure the DMA engine.
	 * XXX Should auto-tune TxBurstLimit.
	 */
	CSR_WRITE_4(sc, STGE_DMACtrl, sc->sc_DMACtrl | DMAC_TxBurstLimit(3));

	/*
	 * Send a PAUSE frame when we reach 29,696 bytes in the Rx
	 * FIFO, and send an un-PAUSE frame when we reach 3056 bytes
	 * in the Rx FIFO.
	 */
	CSR_WRITE_2(sc, STGE_FlowOnTresh, 29696 / 16);
	CSR_WRITE_2(sc, STGE_FlowOffThresh, 3056 / 16);

	/*
	 * Set the maximum frame size.
	 */
	sc->sc_if_framesize = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN;
	CSR_WRITE_2(sc, STGE_MaxFrameSize, sc->sc_if_framesize);

	/*
	 * Initialize MacCtrl -- do it before setting the media,
	 * as setting the media will actually program the register.
	 *
	 * Note: We have to poke the IFS value before poking
	 * anything else.
	 */
	/* Tx/Rx MAC should be disabled before programming IFS. */
	CSR_WRITE_4(sc, STGE_MACCtrl, MC_IFSSelect(MC_IFS96bit));

	stge_vlan_setup(sc);

	if (sc->sc_rev >= 6) {		/* >= B.2 */
		/* Multi-frag frame bug work-around. */
		CSR_WRITE_2(sc, STGE_DebugCtrl,
		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0200);

		/* Tx Poll Now bug work-around. */
		CSR_WRITE_2(sc, STGE_DebugCtrl,
		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0010);
		/* Tx Poll Now bug work-around. */
		CSR_WRITE_2(sc, STGE_DebugCtrl,
		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0020);
	}

	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	v |= MC_StatisticsEnable | MC_TxEnable | MC_RxEnable;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	/*
	 * It seems that transmitting frames without checking the state
	 * of the Rx/Tx MAC wedges the hardware.
	 */
	stge_start_tx(sc);
	stge_start_rx(sc);

	sc->sc_link = 0;
	/*
	 * Set the current media.
	 */
	mii_mediachg(mii);

	/*
	 * Start the one second MII clock.
	 */
	callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);

	/*
	 * ...all done!
	 */
	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

out:
	if (error != 0)
		device_printf(sc->sc_dev, "interface not running\n");
}

static void
stge_vlan_setup(struct stge_softc *sc)
{
	if_t ifp;
	uint32_t v;

	ifp = sc->sc_ifp;
	/*
	 * The NIC always copies a received VLAN tag regardless of the
	 * STGE_MACCtrl MC_AutoVLANuntagging bit.  The MC_AutoVLANtagging
	 * bit selects which VLAN source to use between STGE_VLANTag and
	 * the TFC.  However, the TFC TFD_VLANTagInsert bit has priority
	 * over the MC_AutoVLANtagging bit, so we always use the TFC
	 * instead of the STGE_VLANTag register.
	 */
	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
		v |= MC_AutoVLANuntagging;
	else
		v &= ~MC_AutoVLANuntagging;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
}
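
/*
 * Illustrative sketch, not part of the original driver: because the
 * TFC TFD_VLANTagInsert bit overrides MC_AutoVLANtagging (see the
 * comment in stge_vlan_setup() above), outbound tagging is requested
 * per frame in the TFD control word.  Assuming a TFD_VID() field
 * macro in if_stgereg.h (an assumption on our part), that would look
 * roughly like:
 */
static uint64_t __unused
stge_vlan_tfc(struct mbuf *m, uint64_t tfc)
{
	/* Tag only frames the stack marked with a VLAN id. */
	if ((m->m_flags & M_VLANTAG) != 0)
		tfc |= TFD_VLANTagInsert | TFD_VID(m->m_pkthdr.ether_vtag);
	return (tfc);
}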

/*
 * Stop transmission and reception on the interface.
 */
static void
stge_stop(struct stge_softc *sc)
{
	if_t ifp;
	struct stge_txdesc *txd;
	struct stge_rxdesc *rxd;
	uint32_t v;
	int i;

	STGE_LOCK_ASSERT(sc);
	/*
	 * Stop the one second clock.
	 */
	callout_stop(&sc->sc_tick_ch);
	sc->sc_watchdog_timer = 0;

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE_2(sc, STGE_IntEnable, 0);

	/*
	 * Stop receiver, transmitter, and stats update.
	 */
	stge_stop_rx(sc);
	stge_stop_tx(sc);
	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	v |= MC_StatisticsDisable;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);

	/*
	 * Stop the transmit and receive DMA.
	 */
	stge_dma_wait(sc);
	CSR_WRITE_4(sc, STGE_TFDListPtrHi, 0);
	CSR_WRITE_4(sc, STGE_TFDListPtrLo, 0);
	CSR_WRITE_4(sc, STGE_RFDListPtrHi, 0);
	CSR_WRITE_4(sc, STGE_RFDListPtrLo, 0);

	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (i = 0; i < STGE_RX_RING_CNT; i++) {
		rxd = &sc->sc_cdata.stge_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->sc_cdata.stge_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_cdata.stge_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < STGE_TX_RING_CNT; i++) {
		txd = &sc->sc_cdata.stge_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->sc_cdata.stge_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_cdata.stge_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp = sc->sc_ifp;
	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
	sc->sc_link = 0;
}

static void
stge_start_tx(struct stge_softc *sc)
{
	uint32_t v;
	int i;

	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	if ((v & MC_TxEnabled) != 0)
		return;
	v |= MC_TxEnable;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);
	for (i = STGE_TIMEOUT; i > 0; i--) {
		DELAY(10);
		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
		if ((v & MC_TxEnabled) != 0)
			break;
	}
	if (i == 0)
		device_printf(sc->sc_dev, "Starting Tx MAC timed out\n");
}

static void
stge_start_rx(struct stge_softc *sc)
{
	uint32_t v;
	int i;

	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	if ((v & MC_RxEnabled) != 0)
		return;
	v |= MC_RxEnable;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);
	for (i = STGE_TIMEOUT; i > 0; i--) {
		DELAY(10);
		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
		if ((v & MC_RxEnabled) != 0)
			break;
	}
	if (i == 0)
		device_printf(sc->sc_dev, "Starting Rx MAC timed out\n");
}

static void
stge_stop_tx(struct stge_softc *sc)
{
	uint32_t v;
	int i;

	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	if ((v & MC_TxEnabled) == 0)
		return;
	v |= MC_TxDisable;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	for (i = STGE_TIMEOUT; i > 0; i--) {
		DELAY(10);
		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
		if ((v & MC_TxEnabled) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->sc_dev, "Stopping Tx MAC timed out\n");
}

static void
stge_stop_rx(struct stge_softc *sc)
{
	uint32_t v;
	int i;

	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	if ((v & MC_RxEnabled) == 0)
		return;
	v |= MC_RxDisable;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	for (i = STGE_TIMEOUT; i > 0; i--) {
		DELAY(10);
		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
		if ((v & MC_RxEnabled) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->sc_dev, "Stopping Rx MAC timed out\n");
}
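
/*
 * Illustrative sketch, not part of the original driver: the four
 * helpers above share one shape -- merge an enable/disable command
 * into MACCtrl, then poll the matching status bit for up to
 * STGE_TIMEOUT * 10 microseconds.  A generic version could be
 * written as below (stge_mac_op() is a hypothetical name):
 */
static int __unused
stge_mac_op(struct stge_softc *sc, uint32_t cmd, uint32_t status, int set)
{
	uint32_t v;
	int i;

	v = (CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK) | cmd;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	for (i = STGE_TIMEOUT; i > 0; i--) {
		DELAY(10);
		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
		/* Done once the status bit matches the requested state. */
		if (((v & status) != 0) == set)
			return (0);
	}
	return (ETIMEDOUT);
}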

static void
stge_init_tx_ring(struct stge_softc *sc)
{
	struct stge_ring_data *rd;
	struct stge_txdesc *txd;
	bus_addr_t addr;
	int i;

	STAILQ_INIT(&sc->sc_cdata.stge_txfreeq);
	STAILQ_INIT(&sc->sc_cdata.stge_txbusyq);

	sc->sc_cdata.stge_tx_prod = 0;
	sc->sc_cdata.stge_tx_cons = 0;
	sc->sc_cdata.stge_tx_cnt = 0;

	rd = &sc->sc_rdata;
	bzero(rd->stge_tx_ring, STGE_TX_RING_SZ);
	for (i = 0; i < STGE_TX_RING_CNT; i++) {
		if (i == (STGE_TX_RING_CNT - 1))
			addr = STGE_TX_RING_ADDR(sc, 0);
		else
			addr = STGE_TX_RING_ADDR(sc, i + 1);
		rd->stge_tx_ring[i].tfd_next = htole64(addr);
		rd->stge_tx_ring[i].tfd_control = htole64(TFD_TFDDone);
		txd = &sc->sc_cdata.stge_txdesc[i];
		STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
	}

	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static int
stge_init_rx_ring(struct stge_softc *sc)
{
	struct stge_ring_data *rd;
	bus_addr_t addr;
	int i;

	sc->sc_cdata.stge_rx_cons = 0;
	STGE_RXCHAIN_RESET(sc);

	rd = &sc->sc_rdata;
	bzero(rd->stge_rx_ring, STGE_RX_RING_SZ);
	for (i = 0; i < STGE_RX_RING_CNT; i++) {
		if (stge_newbuf(sc, i) != 0)
			return (ENOBUFS);
		if (i == (STGE_RX_RING_CNT - 1))
			addr = STGE_RX_RING_ADDR(sc, 0);
		else
			addr = STGE_RX_RING_ADDR(sc, i + 1);
		rd->stge_rx_ring[i].rfd_next = htole64(addr);
		rd->stge_rx_ring[i].rfd_status = 0;
	}

	bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
	    sc->sc_cdata.stge_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * stge_newbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
static int
stge_newbuf(struct stge_softc *sc, int idx)
{
	struct stge_rxdesc *rxd;
	struct stge_rfd *rfd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	/*
	 * The hardware requires a 4-byte aligned DMA address when a
	 * jumbo frame is used, so only apply the 2-byte ETHER_ALIGN
	 * payload adjustment for standard sized frames.
	 */
	if (sc->sc_if_framesize <= (MCLBYTES - ETHER_ALIGN))
		m_adj(m, ETHER_ALIGN);

	/* Load the new mbuf into the spare DMA map first. */
	if (bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_rx_tag,
	    sc->sc_cdata.stge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc->sc_cdata.stge_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap);
	}
	/* Swap in the just-loaded spare map; the old map becomes the spare. */
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->sc_cdata.stge_rx_sparemap;
	sc->sc_cdata.stge_rx_sparemap = map;
	bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;

	rfd = &sc->sc_rdata.stge_rx_ring[idx];
	rfd->rfd_frag.frag_word0 =
	    htole64(FRAG_ADDR(segs[0].ds_addr) | FRAG_LEN(segs[0].ds_len));
	rfd->rfd_status = 0;

	return (0);
}
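
/*
 * Illustrative sketch, not part of the original driver: both ring
 * initializers above chain descriptor i to descriptor i + 1 and
 * point the last descriptor back at descriptor 0, so the chip walks
 * a circular list.  The wrap-around can be expressed compactly as
 * below (stge_rx_next_addr() is a hypothetical name):
 */
static bus_addr_t __unused
stge_rx_next_addr(struct stge_softc *sc, int i)
{
	/* The successor of the last ring entry is entry 0. */
	return (STGE_RX_RING_ADDR(sc, (i + 1) % STGE_RX_RING_CNT));
}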

/*
 * stge_set_filter:
 *
 *	Set up the receive filter.
 */
static void
stge_set_filter(struct stge_softc *sc)
{
	if_t ifp;
	uint16_t mode;

	STGE_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;

	mode = CSR_READ_2(sc, STGE_ReceiveMode);
	mode |= RM_ReceiveUnicast;
	if ((if_getflags(ifp) & IFF_BROADCAST) != 0)
		mode |= RM_ReceiveBroadcast;
	else
		mode &= ~RM_ReceiveBroadcast;
	if ((if_getflags(ifp) & IFF_PROMISC) != 0)
		mode |= RM_ReceiveAllFrames;
	else
		mode &= ~RM_ReceiveAllFrames;

	CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
}

static u_int
stge_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t crc, *mchash = arg;

	crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN);
	/* Just want the 6 least significant bits. */
	crc &= 0x3f;
	/* Set the corresponding bit in the hash table. */
	mchash[crc >> 5] |= 1 << (crc & 0x1f);

	return (1);
}

static void
stge_set_multi(struct stge_softc *sc)
{
	if_t ifp;
	uint32_t mchash[2];
	uint16_t mode;
	int count;

	STGE_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;

	mode = CSR_READ_2(sc, STGE_ReceiveMode);
	if ((if_getflags(ifp) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((if_getflags(ifp) & IFF_PROMISC) != 0)
			mode |= RM_ReceiveAllFrames;
		else if ((if_getflags(ifp) & IFF_ALLMULTI) != 0)
			mode |= RM_ReceiveMulticast;
		CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
		return;
	}

	/* Clear existing filters. */
	CSR_WRITE_4(sc, STGE_HashTable0, 0);
	CSR_WRITE_4(sc, STGE_HashTable1, 0);

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator, and then using the low-order
	 * 6 bits as an index into the 64 bit multicast hash table.  The
	 * high-order bit selects the 32-bit hash table register, while
	 * the low-order 5 bits select the bit within that register.
	 */
	bzero(mchash, sizeof(mchash));
	count = if_foreach_llmaddr(ifp, stge_hash_maddr, mchash);

	mode &= ~(RM_ReceiveMulticast | RM_ReceiveAllFrames);
	if (count > 0)
		mode |= RM_ReceiveMulticastHash;
	else
		mode &= ~RM_ReceiveMulticastHash;

	CSR_WRITE_4(sc, STGE_HashTable0, mchash[0]);
	CSR_WRITE_4(sc, STGE_HashTable1, mchash[1]);
	CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
}

static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (arg1 == NULL)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;

	return (0);
}

static int
sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    STGE_RXINT_NFRAME_MIN, STGE_RXINT_NFRAME_MAX));
}

static int
sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    STGE_RXINT_DMAWAIT_MIN, STGE_RXINT_DMAWAIT_MAX));
}
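
/*
 * Illustrative sketch, not part of the original driver: handlers of
 * this form are intended to be hooked up with SYSCTL_ADD_PROC(),
 * which passes the variable to bounds-check via arg1.  The node
 * name, flags, and description below are our assumptions, not
 * necessarily what the driver registers at attach time.
 */
static void __unused
stge_sysctl_sketch(struct stge_softc *sc)
{
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->sc_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sc_dev)), OID_AUTO,
	    "rxint_nframe", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    &sc->sc_rxint_nframe, 0, sysctl_hw_stge_rxint_nframe, "I",
	    "stge rx interrupt nframe");
}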