/*	$NetBSD: if_stge.c,v 1.32 2005/12/11 12:22:49 christos Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Sundance Tech. TC9021 10/100/1000
 * Ethernet controller.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/stge/if_stgereg.h>

#define	STGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

MODULE_DEPEND(stge, pci, 1, 1, 1);
MODULE_DEPEND(stge, ether, 1, 1, 1);
MODULE_DEPEND(stge, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/*
 * Devices supported by this driver.
 */
static const struct stge_product {
	uint16_t	stge_vendorid;
	uint16_t	stge_deviceid;
	const char	*stge_name;
} stge_products[] = {
	{ VENDOR_SUNDANCETI,	DEVICEID_SUNDANCETI_ST1023,
	  "Sundance ST-1023 Gigabit Ethernet" },

	{ VENDOR_SUNDANCETI,	DEVICEID_SUNDANCETI_ST2021,
	  "Sundance ST-2021 Gigabit Ethernet" },

	{ VENDOR_TAMARACK,	DEVICEID_TAMARACK_TC9021,
	  "Tamarack TC9021 Gigabit Ethernet" },

	{ VENDOR_TAMARACK,	DEVICEID_TAMARACK_TC9021_ALT,
	  "Tamarack TC9021 Gigabit Ethernet" },

	/*
	 * The Sundance sample boards use the Sundance vendor ID,
	 * but the Tamarack product ID.
	 */
	{ VENDOR_SUNDANCETI,	DEVICEID_TAMARACK_TC9021,
	  "Sundance TC9021 Gigabit Ethernet" },

	{ VENDOR_SUNDANCETI,	DEVICEID_TAMARACK_TC9021_ALT,
	  "Sundance TC9021 Gigabit Ethernet" },

	{ VENDOR_DLINK,		DEVICEID_DLINK_DL4000,
	  "D-Link DL-4000 Gigabit Ethernet" },

	{ VENDOR_ANTARES,	DEVICEID_ANTARES_TC9021,
	  "Antares Gigabit Ethernet" }
};

static int	stge_probe(device_t);
static int	stge_attach(device_t);
static int	stge_detach(device_t);
static int	stge_shutdown(device_t);
static int	stge_suspend(device_t);
static int	stge_resume(device_t);

static int	stge_encap(struct stge_softc *, struct mbuf **);
static void	stge_start(struct ifnet *);
static void	stge_start_locked(struct ifnet *);
static void	stge_watchdog(struct stge_softc *);
static int	stge_ioctl(struct ifnet *, u_long, caddr_t);
static void	stge_init(void *);
static void	stge_init_locked(struct stge_softc *);
static void	stge_vlan_setup(struct stge_softc *);
static void	stge_stop(struct stge_softc *);
static void	stge_start_tx(struct stge_softc *);
static void	stge_start_rx(struct stge_softc *);
static void	stge_stop_tx(struct stge_softc *);
static void	stge_stop_rx(struct stge_softc *);

static void	stge_reset(struct stge_softc *, uint32_t);
static int	stge_eeprom_wait(struct stge_softc *);
static void	stge_read_eeprom(struct stge_softc *, int, uint16_t *);
static void	stge_tick(void *);
static void	stge_stats_update(struct stge_softc *);
static void	stge_set_filter(struct stge_softc *);
static void	stge_set_multi(struct stge_softc *);

static void	stge_link_task(void *, int);
static void	stge_intr(void *);
static __inline int	stge_tx_error(struct stge_softc *);
static void	stge_txeof(struct stge_softc *);
static int	stge_rxeof(struct stge_softc *);
static __inline void	stge_discard_rxbuf(struct stge_softc *, int);
static int	stge_newbuf(struct stge_softc *, int);
#ifndef __NO_STRICT_ALIGNMENT
static __inline struct mbuf *stge_fixup_rx(struct stge_softc *, struct mbuf *);
#endif

static int	stge_miibus_readreg(device_t, int, int);
static int	stge_miibus_writereg(device_t, int, int, int);
static void	stge_miibus_statchg(device_t);
static int	stge_mediachange(struct ifnet *);
static void	stge_mediastatus(struct ifnet *, struct ifmediareq *);

static void	stge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int	stge_dma_alloc(struct stge_softc *);
static void	stge_dma_free(struct stge_softc *);
static void	stge_dma_wait(struct stge_softc *);
static void	stge_init_tx_ring(struct stge_softc *);
static int	stge_init_rx_ring(struct stge_softc *);
#ifdef DEVICE_POLLING
static int	stge_poll(struct ifnet *, enum poll_cmd, int);
#endif

static void	stge_setwol(struct stge_softc *);
static int	sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int	sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS);
static int	sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS);

/*
 * MII bit-bang glue
 */
static uint32_t stge_mii_bitbang_read(device_t);
static void	stge_mii_bitbang_write(device_t, uint32_t);

static const struct mii_bitbang_ops stge_mii_bitbang_ops = {
	stge_mii_bitbang_read,
	stge_mii_bitbang_write,
	{
		PC_MgmtData,		/* MII_BIT_MDO */
		PC_MgmtData,		/* MII_BIT_MDI */
		PC_MgmtClk,		/* MII_BIT_MDC */
		PC_MgmtDir,		/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};

static device_method_t stge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		stge_probe),
	DEVMETHOD(device_attach,	stge_attach),
	DEVMETHOD(device_detach,	stge_detach),
	DEVMETHOD(device_shutdown,	stge_shutdown),
	DEVMETHOD(device_suspend,	stge_suspend),
	DEVMETHOD(device_resume,	stge_resume),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	stge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	stge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	stge_miibus_statchg),

	KOBJMETHOD_END
};

static driver_t stge_driver = {
	"stge",
	stge_methods,
	sizeof(struct stge_softc)
};

static devclass_t stge_devclass;

DRIVER_MODULE(stge, pci, stge_driver, stge_devclass, 0, 0);
DRIVER_MODULE(miibus, stge, miibus_driver, miibus_devclass, 0, 0);

static struct resource_spec stge_res_spec_io[] = {
	{ SYS_RES_IOPORT,	PCIR_BAR(0),	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,		0 }
};

static struct resource_spec stge_res_spec_mem[] = {
	{ SYS_RES_MEMORY,	PCIR_BAR(1),	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,		0 }
};

/*
 * stge_mii_bitbang_read: [mii bit-bang interface function]
 *
 *	Read the MII serial port for the MII bit-bang module.
 */
static uint32_t
stge_mii_bitbang_read(device_t dev)
{
	struct stge_softc *sc;
	uint32_t val;

	sc = device_get_softc(dev);

	val = CSR_READ_1(sc, STGE_PhyCtrl);
	CSR_BARRIER(sc, STGE_PhyCtrl, 1,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
	return (val);
}

/*
 * stge_mii_bitbang_write: [mii bit-bang interface function]
 *
 *	Write the MII serial port for the MII bit-bang module.
 */
static void
stge_mii_bitbang_write(device_t dev, uint32_t val)
{
	struct stge_softc *sc;

	sc = device_get_softc(dev);

	CSR_WRITE_1(sc, STGE_PhyCtrl, val);
	CSR_BARRIER(sc, STGE_PhyCtrl, 1,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}

/*
 * stge_miibus_readreg: [mii interface function]
 *
 *	Read a PHY register on the MII of the TC9021.
 */
static int
stge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct stge_softc *sc;
	int error, val;

	sc = device_get_softc(dev);

	if (reg == STGE_PhyCtrl) {
		/* XXX allow the ip1000phy driver to read STGE_PhyCtrl. */
		STGE_MII_LOCK(sc);
		error = CSR_READ_1(sc, STGE_PhyCtrl);
		STGE_MII_UNLOCK(sc);
		return (error);
	}

	STGE_MII_LOCK(sc);
	val = mii_bitbang_readreg(dev, &stge_mii_bitbang_ops, phy, reg);
	STGE_MII_UNLOCK(sc);
	return (val);
}

/*
 * stge_miibus_writereg: [mii interface function]
 *
 *	Write a PHY register on the MII of the TC9021.
 */
static int
stge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct stge_softc *sc;

	sc = device_get_softc(dev);

	STGE_MII_LOCK(sc);
	mii_bitbang_writereg(dev, &stge_mii_bitbang_ops, phy, reg, val);
	STGE_MII_UNLOCK(sc);
	return (0);
}

/*
 * stge_miibus_statchg: [mii interface function]
 *
 *	Callback from MII layer when media changes.
 */
static void
stge_miibus_statchg(device_t dev)
{
	struct stge_softc *sc;

	sc = device_get_softc(dev);
	taskqueue_enqueue(taskqueue_swi, &sc->sc_link_task);
}

/*
 * stge_mediastatus: [ifmedia interface function]
 *
 *	Get the current interface media status.
 */
static void
stge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct stge_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->sc_miibus);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

/*
 * stge_mediachange: [ifmedia interface function]
 *
 *	Set hardware to newly-selected media.
 */
static int
stge_mediachange(struct ifnet *ifp)
{
	struct stge_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->sc_miibus);
	mii_mediachg(mii);

	return (0);
}

static int
stge_eeprom_wait(struct stge_softc *sc)
{
	int i;

	for (i = 0; i < STGE_TIMEOUT; i++) {
		DELAY(1000);
		if ((CSR_READ_2(sc, STGE_EepromCtrl) & EC_EepromBusy) == 0)
			return (0);
	}
	return (1);
}

/*
 * stge_read_eeprom:
 *
 *	Read data from the serial EEPROM.
 */
static void
stge_read_eeprom(struct stge_softc *sc, int offset, uint16_t *data)
{

	if (stge_eeprom_wait(sc))
		device_printf(sc->sc_dev, "EEPROM failed to come ready\n");

	CSR_WRITE_2(sc, STGE_EepromCtrl,
	    EC_EepromAddress(offset) | EC_EepromOpcode(EC_OP_RR));
	if (stge_eeprom_wait(sc))
		device_printf(sc->sc_dev, "EEPROM read timed out\n");
	*data = CSR_READ_2(sc, STGE_EepromData);
}

static int
stge_probe(device_t dev)
{
	const struct stge_product *sp;
	int i;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	sp = stge_products;
	for (i = 0; i < sizeof(stge_products)/sizeof(stge_products[0]);
	    i++, sp++) {
		if (vendor == sp->stge_vendorid &&
		    devid == sp->stge_deviceid) {
			device_set_desc(dev, sp->stge_name);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static int
stge_attach(device_t dev)
{
	struct stge_softc *sc;
	struct ifnet *ifp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	int error, flags, i;
	uint16_t cmd;
	uint32_t val;

	error = 0;
	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	mtx_init(&sc->sc_mii_mtx, "stge_mii_mutex", NULL, MTX_DEF);
	callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0);
	TASK_INIT(&sc->sc_link_task, 0, stge_link_task, sc);

	/*
	 * Map the device.
	 */
	pci_enable_busmaster(dev);
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	val = pci_read_config(dev, PCIR_BAR(1), 4);
	if ((val & 0x01) != 0)
		sc->sc_spec = stge_res_spec_mem;
	else {
		val = pci_read_config(dev, PCIR_BAR(0), 4);
		if ((val & 0x01) == 0) {
			device_printf(sc->sc_dev, "couldn't locate IO BAR\n");
			error = ENXIO;
			goto fail;
		}
		sc->sc_spec = stge_res_spec_io;
	}
	error = bus_alloc_resources(dev, sc->sc_spec, sc->sc_res);
	if (error != 0) {
		device_printf(dev, "couldn't allocate %s resources\n",
		    sc->sc_spec == stge_res_spec_mem ? "memory" : "I/O");
		goto fail;
	}
	sc->sc_rev = pci_get_revid(dev);

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "rxint_nframe", CTLTYPE_INT|CTLFLAG_RW, &sc->sc_rxint_nframe, 0,
	    sysctl_hw_stge_rxint_nframe, "I", "stge rx interrupt nframe");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "rxint_dmawait", CTLTYPE_INT|CTLFLAG_RW, &sc->sc_rxint_dmawait, 0,
	    sysctl_hw_stge_rxint_dmawait, "I", "stge rx interrupt dmawait");
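	/*
	 * Note: the same knobs appear as sysctls under dev.stge.<unit>
	 * and, via resource_int_value() below, as device hints, e.g.
	 * in /boot/device.hints (values here purely illustrative):
	 *
	 *	hint.stge.0.rxint_nframe="8"
	 *	hint.stge.0.rxint_dmawait="30"
	 */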
	/* Pull in device tunables. */
	sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT;
	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "rxint_nframe", &sc->sc_rxint_nframe);
	if (error == 0) {
		if (sc->sc_rxint_nframe < STGE_RXINT_NFRAME_MIN ||
		    sc->sc_rxint_nframe > STGE_RXINT_NFRAME_MAX) {
			device_printf(dev, "rxint_nframe value out of range; "
			    "using default: %d\n", STGE_RXINT_NFRAME_DEFAULT);
			sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT;
		}
	}

	sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT;
	error = resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "rxint_dmawait", &sc->sc_rxint_dmawait);
	if (error == 0) {
		if (sc->sc_rxint_dmawait < STGE_RXINT_DMAWAIT_MIN ||
		    sc->sc_rxint_dmawait > STGE_RXINT_DMAWAIT_MAX) {
			device_printf(dev, "rxint_dmawait value out of range; "
			    "using default: %d\n", STGE_RXINT_DMAWAIT_DEFAULT);
			sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT;
		}
	}

	if ((error = stge_dma_alloc(sc)) != 0)
		goto fail;

	/*
	 * Determine if we're copper or fiber.  It affects how we
	 * reset the card.
	 */
	if (CSR_READ_4(sc, STGE_AsicCtrl) & AC_PhyMedia)
		sc->sc_usefiber = 1;
	else
		sc->sc_usefiber = 0;

	/* Load LED configuration from EEPROM. */
	stge_read_eeprom(sc, STGE_EEPROM_LEDMode, &sc->sc_led);

	/*
	 * Reset the chip to a known state.
	 */
	STGE_LOCK(sc);
	stge_reset(sc, STGE_RESET_FULL);
	STGE_UNLOCK(sc);

	/*
	 * Reading the station address from the EEPROM doesn't seem
	 * to work, at least on my sample boards.  Instead, since
	 * the reset sequence does AutoInit, read it from the station
	 * address registers.  For the Sundance ST-1023 it can only
	 * be read from the EEPROM.
	 */
	if (pci_get_device(dev) != DEVICEID_SUNDANCETI_ST1023) {
		uint16_t v;

		v = CSR_READ_2(sc, STGE_StationAddress0);
		enaddr[0] = v & 0xff;
		enaddr[1] = v >> 8;
		v = CSR_READ_2(sc, STGE_StationAddress1);
		enaddr[2] = v & 0xff;
		enaddr[3] = v >> 8;
		v = CSR_READ_2(sc, STGE_StationAddress2);
		enaddr[4] = v & 0xff;
		enaddr[5] = v >> 8;
		sc->sc_stge1023 = 0;
	} else {
		uint16_t myaddr[ETHER_ADDR_LEN / 2];
		for (i = 0; i < ETHER_ADDR_LEN / 2; i++) {
			stge_read_eeprom(sc, STGE_EEPROM_StationAddress0 + i,
			    &myaddr[i]);
			myaddr[i] = le16toh(myaddr[i]);
		}
		bcopy(myaddr, enaddr, sizeof(enaddr));
		sc->sc_stge1023 = 1;
	}

	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(sc->sc_dev, "if_alloc() failed\n");
		error = ENXIO;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = stge_ioctl;
	ifp->if_start = stge_start;
	ifp->if_init = stge_init;
	ifp->if_mtu = ETHERMTU;
	ifp->if_snd.ifq_drv_maxlen = STGE_TX_RING_CNT - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);
	/* Revision B3 and earlier chips have a checksum bug. */
	if (sc->sc_rev >= 0x0c) {
		ifp->if_hwassist = STGE_CSUM_FEATURES;
		ifp->if_capabilities = IFCAP_HWCSUM;
	} else {
		ifp->if_hwassist = 0;
		ifp->if_capabilities = 0;
	}
	ifp->if_capabilities |= IFCAP_WOL_MAGIC;
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Read some important bits from the PhyCtrl register.
	 */
	sc->sc_PhyCtrl = CSR_READ_1(sc, STGE_PhyCtrl) &
	    (PC_PhyDuplexPolarity | PC_PhyLnkPolarity);

	/* Set up MII bus. */
	flags = MIIF_DOPAUSE;
	if (sc->sc_rev >= 0x40 && sc->sc_rev <= 0x4e)
		flags |= MIIF_MACPRIV0;
	error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, stge_mediachange,
	    stge_mediastatus, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
	    flags);
	if (error != 0) {
		device_printf(sc->sc_dev, "attaching PHYs failed\n");
		goto fail;
	}

	ether_ifattach(ifp, enaddr);

	/* VLAN capability setup */
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
	if (sc->sc_rev >= 0x0c)
		ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif
	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/*
	 * The manual recommends disabling early transmit, so we
	 * do.  It's disabled anyway, if using IP checksumming,
	 * since the entire packet must be in the FIFO in order
	 * for the chip to perform the checksum.
	 */
	sc->sc_txthresh = 0x0fff;

	/*
	 * Disable MWI if the PCI layer tells us to.
	 */
	sc->sc_DMACtrl = 0;
	if ((cmd & PCIM_CMD_MWRICEN) == 0)
		sc->sc_DMACtrl |= DMAC_MWIDisable;

	/*
	 * Hook up the interrupt.
	 */
	error = bus_setup_intr(dev, sc->sc_res[1], INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, stge_intr, sc, &sc->sc_ih);
	if (error != 0) {
		ether_ifdetach(ifp);
		device_printf(sc->sc_dev, "couldn't set up IRQ\n");
		sc->sc_ifp = NULL;
		goto fail;
	}

fail:
	if (error != 0)
		stge_detach(dev);

	return (error);
}

static int
stge_detach(device_t dev)
{
	struct stge_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	ifp = sc->sc_ifp;
#ifdef DEVICE_POLLING
	if (ifp && ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif
	if (device_is_attached(dev)) {
		STGE_LOCK(sc);
		/* XXX */
		sc->sc_detach = 1;
		stge_stop(sc);
		STGE_UNLOCK(sc);
		callout_drain(&sc->sc_tick_ch);
		taskqueue_drain(taskqueue_swi, &sc->sc_link_task);
		ether_ifdetach(ifp);
	}

	if (sc->sc_miibus != NULL) {
		device_delete_child(dev, sc->sc_miibus);
		sc->sc_miibus = NULL;
	}
	bus_generic_detach(dev);
	stge_dma_free(sc);

	if (ifp != NULL) {
		if_free(ifp);
		sc->sc_ifp = NULL;
	}

	if (sc->sc_ih) {
		bus_teardown_intr(dev, sc->sc_res[1], sc->sc_ih);
		sc->sc_ih = NULL;
	}
	bus_release_resources(dev, sc->sc_spec, sc->sc_res);

	mtx_destroy(&sc->sc_mii_mtx);
	mtx_destroy(&sc->sc_mtx);

	return (0);
}

struct stge_dmamap_arg {
	bus_addr_t	stge_busaddr;
};

static void
stge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct stge_dmamap_arg *ctx;

	if (error != 0)
		return;

	ctx = (struct stge_dmamap_arg *)arg;
	ctx->stge_busaddr = segs[0].ds_addr;
}

static int
stge_dma_alloc(struct stge_softc *sc)
{
	struct stge_dmamap_arg ctx;
	struct stge_txdesc *txd;
	struct stge_rxdesc *rxd;
	int error, i;

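	/*
	 * The DMA resources form a two-level hierarchy: a single parent
	 * tag constrains every mapping to the chip's addressable range
	 * (STGE_DMA_MAXADDR), and child tags derived from it describe
	 * the Tx/Rx descriptor rings and the Tx/Rx mbuf buffers, each
	 * with its own alignment and segment limits.
	 */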
	/* create parent tag. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),/* parent */
	    1, 0,			/* algnmnt, boundary */
	    STGE_DMA_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sc_cdata.stge_parent_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to create parent DMA tag\n");
		goto fail;
	}
	/* create tag for Tx ring. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
	    STGE_RING_ALIGN, 0,		/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    STGE_TX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    STGE_TX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sc_cdata.stge_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate Tx ring DMA tag\n");
		goto fail;
	}

	/* create tag for Rx ring. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
	    STGE_RING_ALIGN, 0,		/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    STGE_RX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    STGE_RX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sc_cdata.stge_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate Rx ring DMA tag\n");
		goto fail;
	}

	/* create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * STGE_MAXTXSEGS,	/* maxsize */
	    STGE_MAXTXSEGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sc_cdata.stge_tx_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to allocate Tx DMA tag\n");
		goto fail;
	}

	/* create tag for Rx buffers. */
	error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */
	    1, 0,			/* algnmnt, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sc_cdata.stge_rx_tag);
	if (error != 0) {
		device_printf(sc->sc_dev, "failed to allocate Rx DMA tag\n");
		goto fail;
	}

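	/*
	 * Ring memory is obtained with bus_dmamem_alloc() and then
	 * loaded with bus_dmamap_load(); the resulting bus address is
	 * returned through the stge_dmamap_cb() callback because, in
	 * general, map loading may complete asynchronously.
	 */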
	/* allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->sc_cdata.stge_tx_ring_tag,
	    (void **)&sc->sc_rdata.stge_tx_ring, BUS_DMA_NOWAIT |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sc_cdata.stge_tx_ring_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}

	ctx.stge_busaddr = 0;
	error = bus_dmamap_load(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map, sc->sc_rdata.stge_tx_ring,
	    STGE_TX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.stge_busaddr == 0) {
		device_printf(sc->sc_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	sc->sc_rdata.stge_tx_ring_paddr = ctx.stge_busaddr;

	/* allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->sc_cdata.stge_rx_ring_tag,
	    (void **)&sc->sc_rdata.stge_rx_ring, BUS_DMA_NOWAIT |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sc_cdata.stge_rx_ring_map);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}

	ctx.stge_busaddr = 0;
	error = bus_dmamap_load(sc->sc_cdata.stge_rx_ring_tag,
	    sc->sc_cdata.stge_rx_ring_map, sc->sc_rdata.stge_rx_ring,
	    STGE_RX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
	if (error != 0 || ctx.stge_busaddr == 0) {
		device_printf(sc->sc_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc->sc_rdata.stge_rx_ring_paddr = ctx.stge_busaddr;

	/* create DMA maps for Tx buffers. */
	for (i = 0; i < STGE_TX_RING_CNT; i++) {
		txd = &sc->sc_cdata.stge_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = 0;
		error = bus_dmamap_create(sc->sc_cdata.stge_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "failed to create Tx dmamap\n");
			goto fail;
		}
	}
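	/*
	 * A spare Rx map is created in addition to the per-descriptor
	 * maps.  The usual idiom, which stge_newbuf() is assumed to
	 * follow, is to load a fresh mbuf into the spare map first and
	 * swap maps only on success, so a load failure never costs us
	 * the buffer already in the ring.
	 */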
	/* create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0,
	    &sc->sc_cdata.stge_rx_sparemap)) != 0) {
		device_printf(sc->sc_dev, "failed to create spare Rx dmamap\n");
		goto fail;
	}
	for (i = 0; i < STGE_RX_RING_CNT; i++) {
		rxd = &sc->sc_cdata.stge_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = 0;
		error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "failed to create Rx dmamap\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
stge_dma_free(struct stge_softc *sc)
{
	struct stge_txdesc *txd;
	struct stge_rxdesc *rxd;
	int i;

	/* Tx ring */
	if (sc->sc_cdata.stge_tx_ring_tag) {
		if (sc->sc_cdata.stge_tx_ring_map)
			bus_dmamap_unload(sc->sc_cdata.stge_tx_ring_tag,
			    sc->sc_cdata.stge_tx_ring_map);
		if (sc->sc_cdata.stge_tx_ring_map &&
		    sc->sc_rdata.stge_tx_ring)
			bus_dmamem_free(sc->sc_cdata.stge_tx_ring_tag,
			    sc->sc_rdata.stge_tx_ring,
			    sc->sc_cdata.stge_tx_ring_map);
		sc->sc_rdata.stge_tx_ring = NULL;
		sc->sc_cdata.stge_tx_ring_map = 0;
		bus_dma_tag_destroy(sc->sc_cdata.stge_tx_ring_tag);
		sc->sc_cdata.stge_tx_ring_tag = NULL;
	}
	/* Rx ring */
	if (sc->sc_cdata.stge_rx_ring_tag) {
		if (sc->sc_cdata.stge_rx_ring_map)
			bus_dmamap_unload(sc->sc_cdata.stge_rx_ring_tag,
			    sc->sc_cdata.stge_rx_ring_map);
		if (sc->sc_cdata.stge_rx_ring_map &&
		    sc->sc_rdata.stge_rx_ring)
			bus_dmamem_free(sc->sc_cdata.stge_rx_ring_tag,
			    sc->sc_rdata.stge_rx_ring,
			    sc->sc_cdata.stge_rx_ring_map);
		sc->sc_rdata.stge_rx_ring = NULL;
		sc->sc_cdata.stge_rx_ring_map = 0;
		bus_dma_tag_destroy(sc->sc_cdata.stge_rx_ring_tag);
		sc->sc_cdata.stge_rx_ring_tag = NULL;
	}
	/* Tx buffers */
	if (sc->sc_cdata.stge_tx_tag) {
		for (i = 0; i < STGE_TX_RING_CNT; i++) {
			txd = &sc->sc_cdata.stge_txdesc[i];
			if (txd->tx_dmamap) {
				bus_dmamap_destroy(sc->sc_cdata.stge_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = 0;
			}
		}
		bus_dma_tag_destroy(sc->sc_cdata.stge_tx_tag);
		sc->sc_cdata.stge_tx_tag = NULL;
	}
	/* Rx buffers */
	if (sc->sc_cdata.stge_rx_tag) {
		for (i = 0; i < STGE_RX_RING_CNT; i++) {
			rxd = &sc->sc_cdata.stge_rxdesc[i];
			if (rxd->rx_dmamap) {
				bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = 0;
			}
		}
		if (sc->sc_cdata.stge_rx_sparemap) {
			bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag,
			    sc->sc_cdata.stge_rx_sparemap);
			sc->sc_cdata.stge_rx_sparemap = 0;
		}
		bus_dma_tag_destroy(sc->sc_cdata.stge_rx_tag);
		sc->sc_cdata.stge_rx_tag = NULL;
	}

	if (sc->sc_cdata.stge_parent_tag) {
		bus_dma_tag_destroy(sc->sc_cdata.stge_parent_tag);
		sc->sc_cdata.stge_parent_tag = NULL;
	}
}

/*
 * stge_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static int
stge_shutdown(device_t dev)
{

	return (stge_suspend(dev));
}

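/*
 * stge_setwol:
 *
 *	Program the WakeEvent register for wake-on-LAN.  Only magic
 *	packet wakeup is advertised (IFCAP_WOL_MAGIC above), so that is
 *	the only wake event armed here.
 */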
static void
stge_setwol(struct stge_softc *sc)
{
	struct ifnet *ifp;
	uint8_t v;

	STGE_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;
	v = CSR_READ_1(sc, STGE_WakeEvent);
	/* Disable all WOL bits. */
	v &= ~(WE_WakePktEnable | WE_MagicPktEnable | WE_LinkEventEnable |
	    WE_WakeOnLanEnable);
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
		v |= WE_MagicPktEnable | WE_WakeOnLanEnable;
	CSR_WRITE_1(sc, STGE_WakeEvent, v);
	/* Reset Tx and prevent transmission. */
	CSR_WRITE_4(sc, STGE_AsicCtrl,
	    CSR_READ_4(sc, STGE_AsicCtrl) | AC_TxReset);
	/*
	 * The TC9021 automatically resets its link speed to 100Mbps
	 * when it is put to sleep, so there is no need to reset the
	 * link speed here.
	 */
}

static int
stge_suspend(device_t dev)
{
	struct stge_softc *sc;

	sc = device_get_softc(dev);

	STGE_LOCK(sc);
	stge_stop(sc);
	sc->sc_suspended = 1;
	stge_setwol(sc);
	STGE_UNLOCK(sc);

	return (0);
}

static int
stge_resume(device_t dev)
{
	struct stge_softc *sc;
	struct ifnet *ifp;
	uint8_t v;

	sc = device_get_softc(dev);

	STGE_LOCK(sc);
	/*
	 * Clear the WOL bits, so that special frames no longer
	 * interfere with normal Rx operation.
	 */
	v = CSR_READ_1(sc, STGE_WakeEvent);
	v &= ~(WE_WakePktEnable | WE_MagicPktEnable | WE_LinkEventEnable |
	    WE_WakeOnLanEnable);
	CSR_WRITE_1(sc, STGE_WakeEvent, v);
	ifp = sc->sc_ifp;
	if (ifp->if_flags & IFF_UP)
		stge_init_locked(sc);

	sc->sc_suspended = 0;
	STGE_UNLOCK(sc);

	return (0);
}

static void
stge_dma_wait(struct stge_softc *sc)
{
	int i;

	for (i = 0; i < STGE_TIMEOUT; i++) {
		DELAY(2);
		if ((CSR_READ_4(sc, STGE_DMACtrl) & DMAC_TxDMAInProg) == 0)
			break;
	}

	if (i == STGE_TIMEOUT)
		device_printf(sc->sc_dev, "DMA wait timed out\n");
}

static int
stge_encap(struct stge_softc *sc, struct mbuf **m_head)
{
	struct stge_txdesc *txd;
	struct stge_tfd *tfd;
	struct mbuf *m;
	bus_dma_segment_t txsegs[STGE_MAXTXSEGS];
	int error, i, nsegs, si;
	uint64_t csum_flags, tfc;

	STGE_LOCK_ASSERT(sc);

	if ((txd = STAILQ_FIRST(&sc->sc_cdata.stge_txfreeq)) == NULL)
		return (ENOBUFS);

	error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_tx_tag,
	    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_DONTWAIT, STGE_MAXTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_tx_tag,
		    txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	m = *m_head;
	csum_flags = 0;
	if ((m->m_pkthdr.csum_flags & STGE_CSUM_FEATURES) != 0) {
		if (m->m_pkthdr.csum_flags & CSUM_IP)
			csum_flags |= TFD_IPChecksumEnable;
		if (m->m_pkthdr.csum_flags & CSUM_TCP)
			csum_flags |= TFD_TCPChecksumEnable;
		else if (m->m_pkthdr.csum_flags & CSUM_UDP)
			csum_flags |= TFD_UDPChecksumEnable;
	}

	si = sc->sc_cdata.stge_tx_prod;
	tfd = &sc->sc_rdata.stge_tx_ring[si];
	for (i = 0; i < nsegs; i++)
		tfd->tfd_frags[i].frag_word0 =
		    htole64(FRAG_ADDR(txsegs[i].ds_addr) |
		    FRAG_LEN(txsegs[i].ds_len));
	sc->sc_cdata.stge_tx_cnt++;

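	/*
	 * Build the 64-bit TFD control word: the frame ID, word
	 * alignment mode, fragment count, and the per-frame checksum
	 * and VLAN-insertion flags are all packed into this one field.
	 */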
	tfc = TFD_FrameId(si) | TFD_WordAlign(TFD_WordAlign_disable) |
	    TFD_FragCount(nsegs) | csum_flags;
	if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT)
		tfc |= TFD_TxDMAIndicate;

	/* Update producer index. */
	sc->sc_cdata.stge_tx_prod = (si + 1) % STGE_TX_RING_CNT;

	/* Check if we have a VLAN tag to insert. */
	if (m->m_flags & M_VLANTAG)
		tfc |= (TFD_VLANTagInsert | TFD_VID(m->m_pkthdr.ether_vtag));
	tfd->tfd_control = htole64(tfc);

	/* Update Tx Queue. */
	STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txfreeq, tx_q);
	STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txbusyq, txd, tx_q);
	txd->tx_m = m;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * stge_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
stge_start(struct ifnet *ifp)
{
	struct stge_softc *sc;

	sc = ifp->if_softc;
	STGE_LOCK(sc);
	stge_start_locked(ifp);
	STGE_UNLOCK(sc);
}

static void
stge_start_locked(struct ifnet *ifp)
{
	struct stge_softc *sc;
	struct mbuf *m_head;
	int enq;

	sc = ifp->if_softc;

	STGE_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || sc->sc_link == 0)
		return;

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
		if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring.  If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (stge_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to it.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/* Transmit */
		CSR_WRITE_4(sc, STGE_DMACtrl, DMAC_TxDMAPollNow);

		/* Set a timeout in case the chip goes out to lunch. */
		sc->sc_watchdog_timer = 5;
	}
}

/*
 * stge_watchdog:
 *
 *	Watchdog timer handler.
 */
static void
stge_watchdog(struct stge_softc *sc)
{
	struct ifnet *ifp;

	STGE_LOCK_ASSERT(sc);

	if (sc->sc_watchdog_timer == 0 || --sc->sc_watchdog_timer)
		return;

	ifp = sc->sc_ifp;
	if_printf(sc->sc_ifp, "device timeout\n");
	ifp->if_oerrors++;
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	stge_init_locked(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		stge_start_locked(ifp);
}

/*
 * stge_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
static int
stge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct stge_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;
	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > STGE_JUMBO_MTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
			STGE_LOCK(sc);
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				stge_init_locked(sc);
			}
			STGE_UNLOCK(sc);
		}
		break;
	case SIOCSIFFLAGS:
		STGE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				if (((ifp->if_flags ^ sc->sc_if_flags)
				    & IFF_PROMISC) != 0)
					stge_set_filter(sc);
			} else {
				if (sc->sc_detach == 0)
					stge_init_locked(sc);
			}
		} else {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
				stge_stop(sc);
		}
		sc->sc_if_flags = ifp->if_flags;
		STGE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		STGE_LOCK(sc);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			stge_set_multi(sc);
		STGE_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->sc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if ((mask & IFCAP_POLLING) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
				error = ether_poll_register(stge_poll, ifp);
				if (error != 0)
					break;
				STGE_LOCK(sc);
				CSR_WRITE_2(sc, STGE_IntEnable, 0);
				ifp->if_capenable |= IFCAP_POLLING;
				STGE_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				if (error != 0)
					break;
				STGE_LOCK(sc);
				CSR_WRITE_2(sc, STGE_IntEnable,
				    sc->sc_IntEnable);
				ifp->if_capenable &= ~IFCAP_POLLING;
				STGE_UNLOCK(sc);
			}
		}
#endif
		if ((mask & IFCAP_HWCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			if ((IFCAP_HWCSUM & ifp->if_capenable) != 0 &&
			    (IFCAP_HWCSUM & ifp->if_capabilities) != 0)
				ifp->if_hwassist = STGE_CSUM_FEATURES;
			else
				ifp->if_hwassist = 0;
		}
		if ((mask & IFCAP_WOL) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL) != 0) {
			if ((mask & IFCAP_WOL_MAGIC) != 0)
				ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		}
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				STGE_LOCK(sc);
				stge_vlan_setup(sc);
				STGE_UNLOCK(sc);
			}
		}
		VLAN_CAPABILITIES(ifp);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

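/*
 * stge_link_task:
 *
 *	Deferred link-state handler.  stge_miibus_statchg() schedules
 *	this task on taskqueue_swi so that the MAC duplex and flow
 *	control settings can be reprogrammed outside the MII callback.
 */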
static void
stge_link_task(void *arg, int pending)
{
	struct stge_softc *sc;
	struct mii_data *mii;
	uint32_t v, ac;
	int i;

	sc = (struct stge_softc *)arg;
	STGE_LOCK(sc);

	mii = device_get_softc(sc->sc_miibus);
	if (mii->mii_media_status & IFM_ACTIVE) {
		if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
			sc->sc_link = 1;
	} else
		sc->sc_link = 0;

	sc->sc_MACCtrl = 0;
	if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0)
		sc->sc_MACCtrl |= MC_DuplexSelect;
	if (((mii->mii_media_active & IFM_GMASK) & IFM_ETH_RXPAUSE) != 0)
		sc->sc_MACCtrl |= MC_RxFlowControlEnable;
	if (((mii->mii_media_active & IFM_GMASK) & IFM_ETH_TXPAUSE) != 0)
		sc->sc_MACCtrl |= MC_TxFlowControlEnable;
	/*
	 * Update STGE_MACCtrl register depending on link status.
	 * (duplex, flow control etc)
	 */
	v = ac = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	v &= ~(MC_DuplexSelect|MC_RxFlowControlEnable|MC_TxFlowControlEnable);
	v |= sc->sc_MACCtrl;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	if (((ac ^ sc->sc_MACCtrl) & MC_DuplexSelect) != 0) {
		/* Duplex setting changed, reset Tx/Rx functions. */
		ac = CSR_READ_4(sc, STGE_AsicCtrl);
		ac |= AC_TxReset | AC_RxReset;
		CSR_WRITE_4(sc, STGE_AsicCtrl, ac);
		for (i = 0; i < STGE_TIMEOUT; i++) {
			DELAY(100);
			if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
				break;
		}
		if (i == STGE_TIMEOUT)
			device_printf(sc->sc_dev, "reset failed to complete\n");
	}
	STGE_UNLOCK(sc);
}

static __inline int
stge_tx_error(struct stge_softc *sc)
{
	uint32_t txstat;
	int error;

	for (error = 0;;) {
		txstat = CSR_READ_4(sc, STGE_TxStatus);
		if ((txstat & TS_TxComplete) == 0)
			break;
		/* Tx underrun */
		if ((txstat & TS_TxUnderrun) != 0) {
			/*
			 * XXX
			 * There should be a better way to recover
			 * from a Tx underrun than a full reset.
			 */
			if (sc->sc_nerr++ < STGE_MAXERR)
				device_printf(sc->sc_dev, "Tx underrun, "
				    "resetting...\n");
			if (sc->sc_nerr == STGE_MAXERR)
				device_printf(sc->sc_dev, "too many errors; "
				    "not reporting any more\n");
			error = -1;
			break;
		}
		/* Maximum/Late collisions, Re-enable Tx MAC. */
		if ((txstat & (TS_MaxCollisions|TS_LateCollision)) != 0)
			CSR_WRITE_4(sc, STGE_MACCtrl,
			    (CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK) |
			    MC_TxEnable);
	}

	return (error);
}

/*
 * stge_intr:
 *
 *	Interrupt service routine.
 */
static void
stge_intr(void *arg)
{
	struct stge_softc *sc;
	struct ifnet *ifp;
	int reinit;
	uint16_t status;

	sc = (struct stge_softc *)arg;
	ifp = sc->sc_ifp;

	STGE_LOCK(sc);

#ifdef DEVICE_POLLING
	if ((ifp->if_capenable & IFCAP_POLLING) != 0)
		goto done_locked;
#endif
	status = CSR_READ_2(sc, STGE_IntStatus);
	if (sc->sc_suspended || (status & IS_InterruptStatus) == 0)
		goto done_locked;

	/* Disable interrupts. */
	for (reinit = 0;;) {
		status = CSR_READ_2(sc, STGE_IntStatusAck);
		status &= sc->sc_IntEnable;
		if (status == 0)
			break;
		/* Host interface errors. */
		if ((status & IS_HostError) != 0) {
			device_printf(sc->sc_dev,
			    "Host interface error, resetting...\n");
			reinit = 1;
			goto force_init;
		}

		/* Receive interrupts. */
		if ((status & IS_RxDMAComplete) != 0) {
			stge_rxeof(sc);
			if ((status & IS_RFDListEnd) != 0)
				CSR_WRITE_4(sc, STGE_DMACtrl,
				    DMAC_RxDMAPollNow);
		}

		/* Transmit interrupts. */
		if ((status & (IS_TxDMAComplete | IS_TxComplete)) != 0)
			stge_txeof(sc);

		/* Transmission errors. */
		if ((status & IS_TxComplete) != 0) {
			if ((reinit = stge_tx_error(sc)) != 0)
				break;
		}
	}

force_init:
	if (reinit != 0) {
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		stge_init_locked(sc);
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);

	/* Try to get more packets going. */
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		stge_start_locked(ifp);

done_locked:
	STGE_UNLOCK(sc);
}

/*
 * stge_txeof:
 *
 *	Helper; handle transmit interrupts.
 */
static void
stge_txeof(struct stge_softc *sc)
{
	struct ifnet *ifp;
	struct stge_txdesc *txd;
	uint64_t control;
	int cons;

	STGE_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;

	txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
	if (txd == NULL)
		return;
	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map, BUS_DMASYNC_POSTREAD);

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (cons = sc->sc_cdata.stge_tx_cons;;
	    cons = (cons + 1) % STGE_TX_RING_CNT) {
		if (sc->sc_cdata.stge_tx_cnt <= 0)
			break;
		control = le64toh(sc->sc_rdata.stge_tx_ring[cons].tfd_control);
		if ((control & TFD_TFDDone) == 0)
			break;
		sc->sc_cdata.stge_tx_cnt--;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

		bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap);

		/* Output counter is updated with statistics register */
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
		STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txbusyq, tx_q);
		STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
		txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq);
	}
	sc->sc_cdata.stge_tx_cons = cons;
	if (sc->sc_cdata.stge_tx_cnt == 0)
		sc->sc_watchdog_timer = 0;

	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static __inline void
stge_discard_rxbuf(struct stge_softc *sc, int idx)
{
	struct stge_rfd *rfd;

	rfd = &sc->sc_rdata.stge_rx_ring[idx];
	rfd->rfd_status = 0;
}

#ifndef __NO_STRICT_ALIGNMENT
/*
 * It seems that the TC9021's DMA engine has alignment restrictions in
 * DMA scatter operations.  The first DMA segment has no address
 * alignment restrictions, but the rest should be aligned on a 4(?) byte
 * boundary.  Otherwise it would corrupt random memory.  Since we don't
 * know which one is used for the first segment in advance we simply
 * don't align at all.
 * To avoid copying over an entire frame to align, we allocate a new
 * mbuf and copy the ethernet header to the new mbuf.  The new mbuf is
 * prepended to the existing mbuf chain.
 */
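/*
 * In the common case the frame is simply slid up by ETHER_HDR_LEN
 * bytes within its cluster; only when there is no room for that do we
 * allocate a separate header mbuf and link the two together.
 */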
static __inline struct mbuf *
stge_fixup_rx(struct stge_softc *sc, struct mbuf *m)
{
	struct mbuf *n;

	n = NULL;
	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
		m->m_data += ETHER_HDR_LEN;
		n = m;
	} else {
		MGETHDR(n, M_DONTWAIT, MT_DATA);
		if (n != NULL) {
			bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
			m->m_data += ETHER_HDR_LEN;
			m->m_len -= ETHER_HDR_LEN;
			n->m_len = ETHER_HDR_LEN;
			M_MOVE_PKTHDR(n, m);
			n->m_next = m;
		} else
			m_freem(m);
	}

	return (n);
}
#endif

/*
 * stge_rxeof:
 *
 *	Helper; handle receive interrupts.
 */
static int
stge_rxeof(struct stge_softc *sc)
{
	struct ifnet *ifp;
	struct stge_rxdesc *rxd;
	struct mbuf *mp, *m;
	uint64_t status64;
	uint32_t status;
	int cons, prog, rx_npkts;

	STGE_LOCK_ASSERT(sc);

	rx_npkts = 0;
	ifp = sc->sc_ifp;

	bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
	    sc->sc_cdata.stge_rx_ring_map, BUS_DMASYNC_POSTREAD);

	prog = 0;
	for (cons = sc->sc_cdata.stge_rx_cons; prog < STGE_RX_RING_CNT;
	    prog++, cons = (cons + 1) % STGE_RX_RING_CNT) {
		status64 = le64toh(sc->sc_rdata.stge_rx_ring[cons].rfd_status);
		status = RFD_RxStatus(status64);
		if ((status & RFD_RFDDone) == 0)
			break;
#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING) {
			if (sc->sc_cdata.stge_rxcycles <= 0)
				break;
			sc->sc_cdata.stge_rxcycles--;
		}
#endif
		prog++;
		rxd = &sc->sc_cdata.stge_rxdesc[cons];
		mp = rxd->rx_m;

		/*
		 * If the packet had an error, drop it.  Note we count
		 * the error later in the periodic stats update.
		 */
		if ((status & RFD_FrameEnd) != 0 && (status &
		    (RFD_RxFIFOOverrun | RFD_RxRuntFrame |
		    RFD_RxAlignmentError | RFD_RxFCSError |
		    RFD_RxLengthError)) != 0) {
			stge_discard_rxbuf(sc, cons);
			if (sc->sc_cdata.stge_rxhead != NULL) {
				m_freem(sc->sc_cdata.stge_rxhead);
				STGE_RXCHAIN_RESET(sc);
			}
			continue;
		}
		/*
		 * Add a new receive buffer to the ring.
		 */
		if (stge_newbuf(sc, cons) != 0) {
			ifp->if_iqdrops++;
			stge_discard_rxbuf(sc, cons);
			if (sc->sc_cdata.stge_rxhead != NULL) {
				m_freem(sc->sc_cdata.stge_rxhead);
				STGE_RXCHAIN_RESET(sc);
			}
			continue;
		}

		if ((status & RFD_FrameEnd) != 0)
			mp->m_len = RFD_RxDMAFrameLen(status) -
			    sc->sc_cdata.stge_rxlen;
		sc->sc_cdata.stge_rxlen += mp->m_len;

		/* Chain mbufs. */
		if (sc->sc_cdata.stge_rxhead == NULL) {
			sc->sc_cdata.stge_rxhead = mp;
			sc->sc_cdata.stge_rxtail = mp;
		} else {
			mp->m_flags &= ~M_PKTHDR;
			sc->sc_cdata.stge_rxtail->m_next = mp;
			sc->sc_cdata.stge_rxtail = mp;
		}

		if ((status & RFD_FrameEnd) != 0) {
			m = sc->sc_cdata.stge_rxhead;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = sc->sc_cdata.stge_rxlen;

			if (m->m_pkthdr.len > sc->sc_if_framesize) {
				m_freem(m);
				STGE_RXCHAIN_RESET(sc);
				continue;
			}
			/*
			 * Set the incoming checksum information for
			 * the packet.
			 */
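			/*
			 * CSUM_DATA_VALID | CSUM_PSEUDO_HDR together
			 * with csum_data set to 0xffff tells the stack
			 * that the TCP/UDP checksum has already been
			 * verified, so no software check is repeated.
			 */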
			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
				if ((status & RFD_IPDetected) != 0) {
					m->m_pkthdr.csum_flags |=
					    CSUM_IP_CHECKED;
					if ((status & RFD_IPError) == 0)
						m->m_pkthdr.csum_flags |=
						    CSUM_IP_VALID;
				}
				if (((status & RFD_TCPDetected) != 0 &&
				    (status & RFD_TCPError) == 0) ||
				    ((status & RFD_UDPDetected) != 0 &&
				    (status & RFD_UDPError) == 0)) {
					m->m_pkthdr.csum_flags |=
					    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

#ifndef __NO_STRICT_ALIGNMENT
			if (sc->sc_if_framesize > (MCLBYTES - ETHER_ALIGN)) {
				if ((m = stge_fixup_rx(sc, m)) == NULL) {
					STGE_RXCHAIN_RESET(sc);
					continue;
				}
			}
#endif
			/* Check for VLAN tagged packets. */
			if ((status & RFD_VLANDetected) != 0 &&
			    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
				m->m_pkthdr.ether_vtag = RFD_TCI(status64);
				m->m_flags |= M_VLANTAG;
			}

			STGE_UNLOCK(sc);
			/* Pass it on. */
			(*ifp->if_input)(ifp, m);
			STGE_LOCK(sc);
			rx_npkts++;

			STGE_RXCHAIN_RESET(sc);
		}
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->sc_cdata.stge_rx_cons = cons;
		bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
		    sc->sc_cdata.stge_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	return (rx_npkts);
}

#ifdef DEVICE_POLLING
static int
stge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct stge_softc *sc;
	uint16_t status;
	int rx_npkts;

	rx_npkts = 0;
	sc = ifp->if_softc;
	STGE_LOCK(sc);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		STGE_UNLOCK(sc);
		return (rx_npkts);
	}

	sc->sc_cdata.stge_rxcycles = count;
	rx_npkts = stge_rxeof(sc);
	stge_txeof(sc);

	if (cmd == POLL_AND_CHECK_STATUS) {
		status = CSR_READ_2(sc, STGE_IntStatus);
		status &= sc->sc_IntEnable;
		if (status != 0) {
			if ((status & IS_HostError) != 0) {
				device_printf(sc->sc_dev,
				    "Host interface error, resetting...\n");
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				stge_init_locked(sc);
			}
			if ((status & IS_TxComplete) != 0) {
				if (stge_tx_error(sc) != 0) {
					ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
					stge_init_locked(sc);
				}
			}
		}
	}

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		stge_start_locked(ifp);

	STGE_UNLOCK(sc);
	return (rx_npkts);
}
#endif	/* DEVICE_POLLING */

/*
 * stge_tick:
 *
 *	One second timer, used to tick the MII.
 */
static void
stge_tick(void *arg)
{
	struct stge_softc *sc;
	struct mii_data *mii;

	sc = (struct stge_softc *)arg;

	STGE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->sc_miibus);
	mii_tick(mii);

	/* Update statistics counters. */
	stge_stats_update(sc);

	/*
	 * Reclaim any pending Tx descriptors to release mbufs in a
	 * timely manner as we don't generate Tx completion interrupts
	 * for every frame.  This limits the delay to a maximum of one
	 * second.
	 */
	if (sc->sc_cdata.stge_tx_cnt != 0)
		stge_txeof(sc);

	stge_watchdog(sc);

	callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);
}

/*
 * stge_stats_update:
 *
 *	Read the TC9021 statistics counters.
 */
static void
stge_stats_update(struct stge_softc *sc)
{
	struct ifnet *ifp;

	STGE_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;

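	/*
	 * The octet counters below are read only for their side
	 * effect: the statistics registers appear to clear on read,
	 * so polling them keeps the hardware counters from saturating
	 * even though the values themselves are discarded.
	 */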
	CSR_READ_4(sc, STGE_OctetRcvOk);

	ifp->if_ipackets += CSR_READ_4(sc, STGE_FramesRcvdOk);

	ifp->if_ierrors += CSR_READ_2(sc, STGE_FramesLostRxErrors);

	CSR_READ_4(sc, STGE_OctetXmtdOk);

	ifp->if_opackets += CSR_READ_4(sc, STGE_FramesXmtdOk);

	ifp->if_collisions +=
	    CSR_READ_4(sc, STGE_LateCollisions) +
	    CSR_READ_4(sc, STGE_MultiColFrames) +
	    CSR_READ_4(sc, STGE_SingleColFrames);

	ifp->if_oerrors +=
	    CSR_READ_2(sc, STGE_FramesAbortXSColls) +
	    CSR_READ_2(sc, STGE_FramesWEXDeferal);
}

/*
 * stge_reset:
 *
 *	Perform a soft reset on the TC9021.
 */
static void
stge_reset(struct stge_softc *sc, uint32_t how)
{
	uint32_t ac;
	uint8_t v;
	int i, dv;

	STGE_LOCK_ASSERT(sc);

	dv = 5000;
	ac = CSR_READ_4(sc, STGE_AsicCtrl);
	switch (how) {
	case STGE_RESET_TX:
		ac |= AC_TxReset | AC_FIFO;
		dv = 100;
		break;
	case STGE_RESET_RX:
		ac |= AC_RxReset | AC_FIFO;
		dv = 100;
		break;
	case STGE_RESET_FULL:
	default:
		/*
		 * Only assert RstOut if we're fiber.  We need GMII clocks
		 * to be present in order for the reset to complete on fiber
		 * cards.
		 */
		ac |= AC_GlobalReset | AC_RxReset | AC_TxReset |
		    AC_DMA | AC_FIFO | AC_Network | AC_Host | AC_AutoInit |
		    (sc->sc_usefiber ? AC_RstOut : 0);
		break;
	}

	CSR_WRITE_4(sc, STGE_AsicCtrl, ac);

	/* Account for reset problem at 10Mbps. */
	DELAY(dv);

	for (i = 0; i < STGE_TIMEOUT; i++) {
		if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0)
			break;
		DELAY(dv);
	}

	if (i == STGE_TIMEOUT)
		device_printf(sc->sc_dev, "reset failed to complete\n");

	/* Set LED, from Linux IPG driver. */
	ac = CSR_READ_4(sc, STGE_AsicCtrl);
	ac &= ~(AC_LEDMode | AC_LEDSpeed | AC_LEDModeBit1);
	if ((sc->sc_led & 0x01) != 0)
		ac |= AC_LEDMode;
	if ((sc->sc_led & 0x03) != 0)
		ac |= AC_LEDModeBit1;
	if ((sc->sc_led & 0x08) != 0)
		ac |= AC_LEDSpeed;
	CSR_WRITE_4(sc, STGE_AsicCtrl, ac);

	/* Set PHY, from Linux IPG driver */
	v = CSR_READ_1(sc, STGE_PhySet);
	v &= ~(PS_MemLenb9b | PS_MemLen | PS_NonCompdet);
	v |= ((sc->sc_led & 0x70) >> 4);
	CSR_WRITE_1(sc, STGE_PhySet, v);
}

/*
 * stge_init:		[ifnet interface function]
 *
 *	Initialize the interface.
 */
static void
stge_init(void *xsc)
{
	struct stge_softc *sc;

	sc = (struct stge_softc *)xsc;
	STGE_LOCK(sc);
	stge_init_locked(sc);
	STGE_UNLOCK(sc);
}

static void
stge_init_locked(struct stge_softc *sc)
{
	struct ifnet *ifp;
	struct mii_data *mii;
	uint16_t eaddr[3];
	uint32_t v;
	int error;

	STGE_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;
	mii = device_get_softc(sc->sc_miibus);

	/*
	 * Cancel any pending I/O.
	 */
	stge_stop(sc);

	/*
	 * Reset the chip to a known state.
	 */
	stge_reset(sc, STGE_RESET_FULL);
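	/*
	 * From here the bring-up order is: descriptor rings, station
	 * address, statistics masks, receive filters, ring base
	 * addresses, DMA/FIFO thresholds, interrupt coalescing and
	 * mask, and finally the MAC itself.
	 */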
	/* Init descriptors. */
	error = stge_init_rx_ring(sc);
	if (error != 0) {
		device_printf(sc->sc_dev,
		    "initialization failed: no memory for rx buffers\n");
		stge_stop(sc);
		goto out;
	}
	stge_init_tx_ring(sc);

	/* Set the station address. */
	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_2(sc, STGE_StationAddress0, htole16(eaddr[0]));
	CSR_WRITE_2(sc, STGE_StationAddress1, htole16(eaddr[1]));
	CSR_WRITE_2(sc, STGE_StationAddress2, htole16(eaddr[2]));

	/*
	 * Set the statistics masks.  Disable all the RMON stats,
	 * and disable selected stats in the non-RMON stats registers.
	 */
	CSR_WRITE_4(sc, STGE_RMONStatisticsMask, 0xffffffff);
	CSR_WRITE_4(sc, STGE_StatisticsMask,
	    (1U << 1) | (1U << 2) | (1U << 3) | (1U << 4) | (1U << 5) |
	    (1U << 6) | (1U << 7) | (1U << 8) | (1U << 9) | (1U << 10) |
	    (1U << 13) | (1U << 14) | (1U << 15) | (1U << 19) | (1U << 20) |
	    (1U << 21));

	/* Set up the receive filter. */
	stge_set_filter(sc);
	/* Program multicast filter. */
	stge_set_multi(sc);

	/*
	 * Give the transmit and receive ring to the chip.
	 */
	CSR_WRITE_4(sc, STGE_TFDListPtrHi,
	    STGE_ADDR_HI(STGE_TX_RING_ADDR(sc, 0)));
	CSR_WRITE_4(sc, STGE_TFDListPtrLo,
	    STGE_ADDR_LO(STGE_TX_RING_ADDR(sc, 0)));

	CSR_WRITE_4(sc, STGE_RFDListPtrHi,
	    STGE_ADDR_HI(STGE_RX_RING_ADDR(sc, 0)));
	CSR_WRITE_4(sc, STGE_RFDListPtrLo,
	    STGE_ADDR_LO(STGE_RX_RING_ADDR(sc, 0)));

	/*
	 * Initialize the Tx auto-poll period.  It's OK to make this number
	 * large (255 is the max, but we use 127) -- we explicitly kick the
	 * transmit engine when there's actually a packet.
	 */
	CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);

	/* ..and the Rx auto-poll period. */
	CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);

	/* Initialize the Tx start threshold. */
	CSR_WRITE_2(sc, STGE_TxStartThresh, sc->sc_txthresh);

	/* Rx DMA thresholds, from Linux */
	CSR_WRITE_1(sc, STGE_RxDMABurstThresh, 0x30);
	CSR_WRITE_1(sc, STGE_RxDMAUrgentThresh, 0x30);

	/* Rx early threshold, from Linux */
	CSR_WRITE_2(sc, STGE_RxEarlyThresh, 0x7ff);

	/* Tx DMA thresholds, from Linux */
	CSR_WRITE_1(sc, STGE_TxDMABurstThresh, 0x30);
	CSR_WRITE_1(sc, STGE_TxDMAUrgentThresh, 0x04);

	/*
	 * Initialize the Rx DMA interrupt control register.  We
	 * request an interrupt after every incoming packet, but
	 * defer it for sc_rxint_dmawait us.  When the number of
	 * interrupts pending reaches STGE_RXINT_NFRAME, we stop
	 * deferring the interrupt, and signal it immediately.
	 */
	CSR_WRITE_4(sc, STGE_RxDMAIntCtrl,
	    RDIC_RxFrameCount(sc->sc_rxint_nframe) |
	    RDIC_RxDMAWaitTime(STGE_RXINT_USECS2TICK(sc->sc_rxint_dmawait)));
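	/*
	 * In other words, the Rx interrupt fires once sc_rxint_nframe
	 * frames have accumulated, or once the oldest pending frame
	 * has waited sc_rxint_dmawait microseconds, whichever happens
	 * first; both knobs are the tunables set up in stge_attach().
	 */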
	/*
	 * Initialize the interrupt mask.
	 */
	sc->sc_IntEnable = IS_HostError | IS_TxComplete |
	    IS_TxDMAComplete | IS_RxDMAComplete | IS_RFDListEnd;
#ifdef DEVICE_POLLING
	/* Disable interrupts if we are polling. */
	if ((ifp->if_capenable & IFCAP_POLLING) != 0)
		CSR_WRITE_2(sc, STGE_IntEnable, 0);
	else
#endif
	CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable);

	/*
	 * Configure the DMA engine.
	 * XXX Should auto-tune TxBurstLimit.
	 */
	CSR_WRITE_4(sc, STGE_DMACtrl, sc->sc_DMACtrl | DMAC_TxBurstLimit(3));

	/*
	 * Send a PAUSE frame when we reach 29,696 bytes in the Rx
	 * FIFO, and send an un-PAUSE frame when we drain to 3,056
	 * bytes in the Rx FIFO.  Note the threshold registers are
	 * written in units of 16 bytes, hence the division by 16.
	 */
	CSR_WRITE_2(sc, STGE_FlowOnTresh, 29696 / 16);
	CSR_WRITE_2(sc, STGE_FlowOffThresh, 3056 / 16);

	/*
	 * Set the maximum frame size.
	 */
	sc->sc_if_framesize = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
	CSR_WRITE_2(sc, STGE_MaxFrameSize, sc->sc_if_framesize);

	/*
	 * Initialize MacCtrl -- do it before setting the media,
	 * as setting the media will actually program the register.
	 *
	 * Note: We have to poke the IFS value before poking
	 * anything else.
	 */
	/* Tx/Rx MAC should be disabled before programming IFS. */
	CSR_WRITE_4(sc, STGE_MACCtrl, MC_IFSSelect(MC_IFS96bit));

	stge_vlan_setup(sc);

	if (sc->sc_rev >= 6) {		/* >= B.2 */
		/* Multi-frag frame bug work-around. */
		CSR_WRITE_2(sc, STGE_DebugCtrl,
		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0200);

		/* Tx Poll Now bug work-around. */
		CSR_WRITE_2(sc, STGE_DebugCtrl,
		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0010);
		/* Tx Poll Now bug work-around. */
		CSR_WRITE_2(sc, STGE_DebugCtrl,
		    CSR_READ_2(sc, STGE_DebugCtrl) | 0x0020);
	}

	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	v |= MC_StatisticsEnable | MC_TxEnable | MC_RxEnable;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	/*
	 * It seems that transmitting frames without checking the state of
	 * the Rx/Tx MAC wedges the hardware.
	 */
	stge_start_tx(sc);
	stge_start_rx(sc);

	sc->sc_link = 0;
	/*
	 * Set the current media.
	 */
	mii_mediachg(mii);

	/*
	 * Start the one second MII clock.
	 */
	callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc);

	/*
	 * ...all done!
	 */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

out:
	if (error != 0)
		device_printf(sc->sc_dev, "interface not running\n");
}

static void
stge_vlan_setup(struct stge_softc *sc)
{
	struct ifnet *ifp;
	uint32_t v;

	ifp = sc->sc_ifp;
	/*
	 * The NIC always copies a received VLAN tag regardless of the
	 * STGE_MACCtrl MC_AutoVLANuntagging bit.  The MC_AutoVLANtagging
	 * bit selects which VLAN source to use between STGE_VLANTag and
	 * the TFC, but the TFC's TFD_VLANTagInsert bit has priority over
	 * MC_AutoVLANtagging, so we always use the TFC instead of the
	 * STGE_VLANTag register.
	 */
	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
		v |= MC_AutoVLANuntagging;
	else
		v &= ~MC_AutoVLANuntagging;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
}

/*
 * Stop transmission on the interface.
 */
static void
stge_stop(struct stge_softc *sc)
{
	struct ifnet *ifp;
	struct stge_txdesc *txd;
	struct stge_rxdesc *rxd;
	uint32_t v;
	int i;

	STGE_LOCK_ASSERT(sc);
	/*
	 * Stop the one second clock.
	 */
	callout_stop(&sc->sc_tick_ch);
	sc->sc_watchdog_timer = 0;

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE_2(sc, STGE_IntEnable, 0);
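	/*
	 * Teardown order: with interrupts now masked, stop the Rx/Tx
	 * MACs, wait for the DMA engines to idle, and only then clear
	 * the list pointers and unload the mbuf DMA maps, so the chip
	 * can no longer touch the buffers we are about to free.
	 */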
	/*
	 * Stop receiver, transmitter, and stats update.
	 */
	stge_stop_rx(sc);
	stge_stop_tx(sc);
	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	v |= MC_StatisticsDisable;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);

	/*
	 * Stop the transmit and receive DMA.
	 */
	stge_dma_wait(sc);
	CSR_WRITE_4(sc, STGE_TFDListPtrHi, 0);
	CSR_WRITE_4(sc, STGE_TFDListPtrLo, 0);
	CSR_WRITE_4(sc, STGE_RFDListPtrHi, 0);
	CSR_WRITE_4(sc, STGE_RFDListPtrLo, 0);

	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (i = 0; i < STGE_RX_RING_CNT; i++) {
		rxd = &sc->sc_cdata.stge_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->sc_cdata.stge_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_cdata.stge_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < STGE_TX_RING_CNT; i++) {
		txd = &sc->sc_cdata.stge_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->sc_cdata.stge_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_cdata.stge_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp = sc->sc_ifp;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->sc_link = 0;
}

static void
stge_start_tx(struct stge_softc *sc)
{
	uint32_t v;
	int i;

	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	if ((v & MC_TxEnabled) != 0)
		return;
	v |= MC_TxEnable;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127);
	for (i = STGE_TIMEOUT; i > 0; i--) {
		DELAY(10);
		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
		if ((v & MC_TxEnabled) != 0)
			break;
	}
	if (i == 0)
		device_printf(sc->sc_dev, "Starting Tx MAC timed out\n");
}

static void
stge_start_rx(struct stge_softc *sc)
{
	uint32_t v;
	int i;

	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	if ((v & MC_RxEnabled) != 0)
		return;
	v |= MC_RxEnable;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1);
	for (i = STGE_TIMEOUT; i > 0; i--) {
		DELAY(10);
		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
		if ((v & MC_RxEnabled) != 0)
			break;
	}
	if (i == 0)
		device_printf(sc->sc_dev, "Starting Rx MAC timed out\n");
}

static void
stge_stop_tx(struct stge_softc *sc)
{
	uint32_t v;
	int i;

	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	if ((v & MC_TxEnabled) == 0)
		return;
	v |= MC_TxDisable;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	for (i = STGE_TIMEOUT; i > 0; i--) {
		DELAY(10);
		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
		if ((v & MC_TxEnabled) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->sc_dev, "Stopping Tx MAC timed out\n");
}

static void
stge_stop_rx(struct stge_softc *sc)
{
	uint32_t v;
	int i;

	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	if ((v & MC_RxEnabled) == 0)
		return;
	v |= MC_RxDisable;
	CSR_WRITE_4(sc, STGE_MACCtrl, v);
	for (i = STGE_TIMEOUT; i > 0; i--) {
		DELAY(10);
		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
		if ((v & MC_RxEnabled) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->sc_dev, "Stopping Rx MAC timed out\n");
}
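/*
 * The four MAC start/stop routines above share the same shape: OR a
 * self-clearing command bit into MACCtrl, then poll the matching
 * status bit.  A hypothetical helper (a sketch only, not used by this
 * driver; it omits the early-return checks and poll-period writes the
 * real routines do) could fold the common part together -- e.g.
 * stge_start_tx() would reduce to roughly
 * stge_mac_cmd_wait(sc, MC_TxEnable, MC_TxEnabled, 1) plus its
 * poll-period write.
 */
#if 0
static int
stge_mac_cmd_wait(struct stge_softc *sc, uint32_t cmd, uint32_t status,
    int set)
{
	uint32_t v;
	int i;

	/* Issue the command, preserving the other MACCtrl bits. */
	v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
	CSR_WRITE_4(sc, STGE_MACCtrl, v | cmd);
	/* Poll until the status bit reaches the requested state. */
	for (i = STGE_TIMEOUT; i > 0; i--) {
		DELAY(10);
		v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK;
		if (((v & status) != 0) == set)
			return (0);
	}
	return (ETIMEDOUT);
}
#endif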
static void
stge_init_tx_ring(struct stge_softc *sc)
{
	struct stge_ring_data *rd;
	struct stge_txdesc *txd;
	bus_addr_t addr;
	int i;

	STAILQ_INIT(&sc->sc_cdata.stge_txfreeq);
	STAILQ_INIT(&sc->sc_cdata.stge_txbusyq);

	sc->sc_cdata.stge_tx_prod = 0;
	sc->sc_cdata.stge_tx_cons = 0;
	sc->sc_cdata.stge_tx_cnt = 0;

	rd = &sc->sc_rdata;
	bzero(rd->stge_tx_ring, STGE_TX_RING_SZ);
	for (i = 0; i < STGE_TX_RING_CNT; i++) {
		/* Chain the descriptors into a circular list. */
		if (i == (STGE_TX_RING_CNT - 1))
			addr = STGE_TX_RING_ADDR(sc, 0);
		else
			addr = STGE_TX_RING_ADDR(sc, i + 1);
		rd->stge_tx_ring[i].tfd_next = htole64(addr);
		rd->stge_tx_ring[i].tfd_control = htole64(TFD_TFDDone);
		txd = &sc->sc_cdata.stge_txdesc[i];
		STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q);
	}

	bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag,
	    sc->sc_cdata.stge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static int
stge_init_rx_ring(struct stge_softc *sc)
{
	struct stge_ring_data *rd;
	bus_addr_t addr;
	int i;

	sc->sc_cdata.stge_rx_cons = 0;
	STGE_RXCHAIN_RESET(sc);

	rd = &sc->sc_rdata;
	bzero(rd->stge_rx_ring, STGE_RX_RING_SZ);
	for (i = 0; i < STGE_RX_RING_CNT; i++) {
		if (stge_newbuf(sc, i) != 0)
			return (ENOBUFS);
		/* Chain the descriptors into a circular list. */
		if (i == (STGE_RX_RING_CNT - 1))
			addr = STGE_RX_RING_ADDR(sc, 0);
		else
			addr = STGE_RX_RING_ADDR(sc, i + 1);
		rd->stge_rx_ring[i].rfd_next = htole64(addr);
		rd->stge_rx_ring[i].rfd_status = 0;
	}

	bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag,
	    sc->sc_cdata.stge_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * stge_newbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
static int
stge_newbuf(struct stge_softc *sc, int idx)
{
	struct stge_rxdesc *rxd;
	struct stge_rfd *rfd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	/*
	 * The hardware requires a 4-byte aligned DMA address when
	 * jumbo frames are used.
	 */
	if (sc->sc_if_framesize <= (MCLBYTES - ETHER_ALIGN))
		m_adj(m, ETHER_ALIGN);

	if (bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_rx_tag,
	    sc->sc_cdata.stge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc->sc_cdata.stge_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap);
	}
	/* Swap in the spare DMA map for the newly loaded mbuf. */
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->sc_cdata.stge_rx_sparemap;
	sc->sc_cdata.stge_rx_sparemap = map;
	bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;

	rfd = &sc->sc_rdata.stge_rx_ring[idx];
	rfd->rfd_frag.frag_word0 =
	    htole64(FRAG_ADDR(segs[0].ds_addr) | FRAG_LEN(segs[0].ds_len));
	rfd->rfd_status = 0;

	return (0);
}
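/*
 * Illustration of the ETHER_ALIGN adjustment in stge_newbuf(),
 * assuming the usual 2048-byte mbuf cluster and ETHER_ALIGN == 2:
 * shifting the buffer start by two bytes puts the 14-byte Ethernet
 * header at offset 2, so the IP header behind it lands on a 4-byte
 * boundary.  With jumbo frames the shift is skipped because the
 * controller wants the buffer DMA address itself 4-byte aligned.
 */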
/*
 * stge_set_filter:
 *
 *	Set up the receive filter.
 */
static void
stge_set_filter(struct stge_softc *sc)
{
	struct ifnet *ifp;
	uint16_t mode;

	STGE_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;

	mode = CSR_READ_2(sc, STGE_ReceiveMode);
	mode |= RM_ReceiveUnicast;
	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		mode |= RM_ReceiveBroadcast;
	else
		mode &= ~RM_ReceiveBroadcast;
	if ((ifp->if_flags & IFF_PROMISC) != 0)
		mode |= RM_ReceiveAllFrames;
	else
		mode &= ~RM_ReceiveAllFrames;

	CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
}

static void
stge_set_multi(struct stge_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t crc;
	uint32_t mchash[2];
	uint16_t mode;
	int count;

	STGE_LOCK_ASSERT(sc);

	ifp = sc->sc_ifp;

	mode = CSR_READ_2(sc, STGE_ReceiveMode);
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			mode |= RM_ReceiveAllFrames;
		else if ((ifp->if_flags & IFF_ALLMULTI) != 0)
			mode |= RM_ReceiveMulticast;
		CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
		return;
	}

	/* Clear existing filters. */
	CSR_WRITE_4(sc, STGE_HashTable0, 0);
	CSR_WRITE_4(sc, STGE_HashTable1, 0);

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator, and then using the low-order
	 * 6 bits as an index into the 64-bit multicast hash table.  The
	 * high-order bit of that index selects the 32-bit hash register,
	 * while the low-order 5 bits select the bit within the register.
	 */
	bzero(mchash, sizeof(mchash));

	count = 0;
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN);

		/* Just want the 6 least significant bits. */
		crc &= 0x3f;

		/* Set the corresponding bit in the hash table. */
		mchash[crc >> 5] |= 1 << (crc & 0x1f);
		count++;
	}
	if_maddr_runlock(ifp);

	mode &= ~(RM_ReceiveMulticast | RM_ReceiveAllFrames);
	if (count > 0)
		mode |= RM_ReceiveMulticastHash;
	else
		mode &= ~RM_ReceiveMulticastHash;

	CSR_WRITE_4(sc, STGE_HashTable0, mchash[0]);
	CSR_WRITE_4(sc, STGE_HashTable1, mchash[1]);
	CSR_WRITE_2(sc, STGE_ReceiveMode, mode);
}

static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (arg1 == NULL)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;

	return (0);
}

static int
sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    STGE_RXINT_NFRAME_MIN, STGE_RXINT_NFRAME_MAX));
}

static int
sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req,
	    STGE_RXINT_DMAWAIT_MIN, STGE_RXINT_DMAWAIT_MAX));
}
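/*
 * Example (hypothetical, not part of this driver): another
 * range-checked tunable would only need one more thin wrapper around
 * sysctl_int_range(), clamping to its own bounds:
 */
#if 0
static int
sysctl_hw_stge_example(SYSCTL_HANDLER_ARGS)
{

	/* Accept only values in [1, 100]; reject anything else. */
	return (sysctl_int_range(oidp, arg1, arg2, req, 1, 100));
}
#endif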