1 /* $NetBSD: if_stge.c,v 1.32 2005/12/11 12:22:49 christos Exp $ */ 2 3 /*- 4 * Copyright (c) 2001 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Jason R. Thorpe. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 /* 33 * Device driver for the Sundance Tech. TC9021 10/100/1000 34 * Ethernet controller. 35 */ 36 37 #include <sys/cdefs.h> 38 __FBSDID("$FreeBSD$"); 39 40 #ifdef HAVE_KERNEL_OPTION_HEADERS 41 #include "opt_device_polling.h" 42 #endif 43 44 #include <sys/param.h> 45 #include <sys/systm.h> 46 #include <sys/endian.h> 47 #include <sys/mbuf.h> 48 #include <sys/malloc.h> 49 #include <sys/kernel.h> 50 #include <sys/module.h> 51 #include <sys/socket.h> 52 #include <sys/sockio.h> 53 #include <sys/sysctl.h> 54 #include <sys/taskqueue.h> 55 56 #include <net/bpf.h> 57 #include <net/ethernet.h> 58 #include <net/if.h> 59 #include <net/if_dl.h> 60 #include <net/if_media.h> 61 #include <net/if_types.h> 62 #include <net/if_vlan_var.h> 63 64 #include <machine/bus.h> 65 #include <machine/resource.h> 66 #include <sys/bus.h> 67 #include <sys/rman.h> 68 69 #include <dev/mii/mii.h> 70 #include <dev/mii/miivar.h> 71 72 #include <dev/pci/pcireg.h> 73 #include <dev/pci/pcivar.h> 74 75 #include <dev/stge/if_stgereg.h> 76 77 #define STGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 78 79 MODULE_DEPEND(stge, pci, 1, 1, 1); 80 MODULE_DEPEND(stge, ether, 1, 1, 1); 81 MODULE_DEPEND(stge, miibus, 1, 1, 1); 82 83 /* "device miibus" required. See GENERIC if you get errors here. */ 84 #include "miibus_if.h" 85 86 /* 87 * Devices supported by this driver. 88 */ 89 static struct stge_product { 90 uint16_t stge_vendorid; 91 uint16_t stge_deviceid; 92 const char *stge_name; 93 } stge_products[] = { 94 { VENDOR_SUNDANCETI, DEVICEID_SUNDANCETI_ST1023, 95 "Sundance ST-1023 Gigabit Ethernet" }, 96 97 { VENDOR_SUNDANCETI, DEVICEID_SUNDANCETI_ST2021, 98 "Sundance ST-2021 Gigabit Ethernet" }, 99 100 { VENDOR_TAMARACK, DEVICEID_TAMARACK_TC9021, 101 "Tamarack TC9021 Gigabit Ethernet" }, 102 103 { VENDOR_TAMARACK, DEVICEID_TAMARACK_TC9021_ALT, 104 "Tamarack TC9021 Gigabit Ethernet" }, 105 106 /* 107 * The Sundance sample boards use the Sundance vendor ID, 108 * but the Tamarack product ID. 109 */ 110 { VENDOR_SUNDANCETI, DEVICEID_TAMARACK_TC9021, 111 "Sundance TC9021 Gigabit Ethernet" }, 112 113 { VENDOR_SUNDANCETI, DEVICEID_TAMARACK_TC9021_ALT, 114 "Sundance TC9021 Gigabit Ethernet" }, 115 116 { VENDOR_DLINK, DEVICEID_DLINK_DL4000, 117 "D-Link DL-4000 Gigabit Ethernet" }, 118 119 { VENDOR_ANTARES, DEVICEID_ANTARES_TC9021, 120 "Antares Gigabit Ethernet" } 121 }; 122 123 static int stge_probe(device_t); 124 static int stge_attach(device_t); 125 static int stge_detach(device_t); 126 static int stge_shutdown(device_t); 127 static int stge_suspend(device_t); 128 static int stge_resume(device_t); 129 130 static int stge_encap(struct stge_softc *, struct mbuf **); 131 static void stge_start(struct ifnet *); 132 static void stge_start_locked(struct ifnet *); 133 static void stge_watchdog(struct stge_softc *); 134 static int stge_ioctl(struct ifnet *, u_long, caddr_t); 135 static void stge_init(void *); 136 static void stge_init_locked(struct stge_softc *); 137 static void stge_vlan_setup(struct stge_softc *); 138 static void stge_stop(struct stge_softc *); 139 static void stge_start_tx(struct stge_softc *); 140 static void stge_start_rx(struct stge_softc *); 141 static void stge_stop_tx(struct stge_softc *); 142 static void stge_stop_rx(struct stge_softc *); 143 144 static void stge_reset(struct stge_softc *, uint32_t); 145 static int stge_eeprom_wait(struct stge_softc *); 146 static void stge_read_eeprom(struct stge_softc *, int, uint16_t *); 147 static void stge_tick(void *); 148 static void stge_stats_update(struct stge_softc *); 149 static void stge_set_filter(struct stge_softc *); 150 static void stge_set_multi(struct stge_softc *); 151 152 static void stge_link_task(void *, int); 153 static void stge_intr(void *); 154 static __inline int stge_tx_error(struct stge_softc *); 155 static void stge_txeof(struct stge_softc *); 156 static int stge_rxeof(struct stge_softc *); 157 static __inline void stge_discard_rxbuf(struct stge_softc *, int); 158 static int stge_newbuf(struct stge_softc *, int); 159 #ifndef __NO_STRICT_ALIGNMENT 160 static __inline struct mbuf *stge_fixup_rx(struct stge_softc *, struct mbuf *); 161 #endif 162 163 static void stge_mii_sync(struct stge_softc *); 164 static void stge_mii_send(struct stge_softc *, uint32_t, int); 165 static int stge_mii_readreg(struct stge_softc *, struct stge_mii_frame *); 166 static int stge_mii_writereg(struct stge_softc *, struct stge_mii_frame *); 167 static int stge_miibus_readreg(device_t, int, int); 168 static int stge_miibus_writereg(device_t, int, int, int); 169 static void stge_miibus_statchg(device_t); 170 static int stge_mediachange(struct ifnet *); 171 static void stge_mediastatus(struct ifnet *, struct ifmediareq *); 172 173 static void stge_dmamap_cb(void *, bus_dma_segment_t *, int, int); 174 static int stge_dma_alloc(struct stge_softc *); 175 static void stge_dma_free(struct stge_softc *); 176 static void stge_dma_wait(struct stge_softc *); 177 static void stge_init_tx_ring(struct stge_softc *); 178 static int stge_init_rx_ring(struct stge_softc *); 179 #ifdef DEVICE_POLLING 180 static int stge_poll(struct ifnet *, enum poll_cmd, int); 181 #endif 182 183 static void stge_setwol(struct stge_softc *); 184 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int); 185 static int sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS); 186 static int sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS); 187 188 static device_method_t stge_methods[] = { 189 /* Device interface */ 190 DEVMETHOD(device_probe, stge_probe), 191 DEVMETHOD(device_attach, stge_attach), 192 DEVMETHOD(device_detach, stge_detach), 193 DEVMETHOD(device_shutdown, stge_shutdown), 194 DEVMETHOD(device_suspend, stge_suspend), 195 DEVMETHOD(device_resume, stge_resume), 196 197 /* MII interface */ 198 DEVMETHOD(miibus_readreg, stge_miibus_readreg), 199 DEVMETHOD(miibus_writereg, stge_miibus_writereg), 200 DEVMETHOD(miibus_statchg, stge_miibus_statchg), 201 202 { 0, 0 } 203 204 }; 205 206 static driver_t stge_driver = { 207 "stge", 208 stge_methods, 209 sizeof(struct stge_softc) 210 }; 211 212 static devclass_t stge_devclass; 213 214 DRIVER_MODULE(stge, pci, stge_driver, stge_devclass, 0, 0); 215 DRIVER_MODULE(miibus, stge, miibus_driver, miibus_devclass, 0, 0); 216 217 static struct resource_spec stge_res_spec_io[] = { 218 { SYS_RES_IOPORT, PCIR_BAR(0), RF_ACTIVE }, 219 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, 220 { -1, 0, 0 } 221 }; 222 223 static struct resource_spec stge_res_spec_mem[] = { 224 { SYS_RES_MEMORY, PCIR_BAR(1), RF_ACTIVE }, 225 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, 226 { -1, 0, 0 } 227 }; 228 229 #define MII_SET(x) \ 230 CSR_WRITE_1(sc, STGE_PhyCtrl, CSR_READ_1(sc, STGE_PhyCtrl) | (x)) 231 #define MII_CLR(x) \ 232 CSR_WRITE_1(sc, STGE_PhyCtrl, CSR_READ_1(sc, STGE_PhyCtrl) & ~(x)) 233 234 /* 235 * Sync the PHYs by setting data bit and strobing the clock 32 times. 236 */ 237 static void 238 stge_mii_sync(struct stge_softc *sc) 239 { 240 int i; 241 242 MII_SET(PC_MgmtDir | PC_MgmtData); 243 244 for (i = 0; i < 32; i++) { 245 MII_SET(PC_MgmtClk); 246 DELAY(1); 247 MII_CLR(PC_MgmtClk); 248 DELAY(1); 249 } 250 } 251 252 /* 253 * Clock a series of bits through the MII. 254 */ 255 static void 256 stge_mii_send(struct stge_softc *sc, uint32_t bits, int cnt) 257 { 258 int i; 259 260 MII_CLR(PC_MgmtClk); 261 262 for (i = (0x1 << (cnt - 1)); i; i >>= 1) { 263 if (bits & i) 264 MII_SET(PC_MgmtData); 265 else 266 MII_CLR(PC_MgmtData); 267 DELAY(1); 268 MII_CLR(PC_MgmtClk); 269 DELAY(1); 270 MII_SET(PC_MgmtClk); 271 } 272 } 273 274 /* 275 * Read an PHY register through the MII. 276 */ 277 static int 278 stge_mii_readreg(struct stge_softc *sc, struct stge_mii_frame *frame) 279 { 280 int i, ack; 281 282 /* 283 * Set up frame for RX. 284 */ 285 frame->mii_stdelim = STGE_MII_STARTDELIM; 286 frame->mii_opcode = STGE_MII_READOP; 287 frame->mii_turnaround = 0; 288 frame->mii_data = 0; 289 290 CSR_WRITE_1(sc, STGE_PhyCtrl, 0 | sc->sc_PhyCtrl); 291 /* 292 * Turn on data xmit. 293 */ 294 MII_SET(PC_MgmtDir); 295 296 stge_mii_sync(sc); 297 298 /* 299 * Send command/address info. 300 */ 301 stge_mii_send(sc, frame->mii_stdelim, 2); 302 stge_mii_send(sc, frame->mii_opcode, 2); 303 stge_mii_send(sc, frame->mii_phyaddr, 5); 304 stge_mii_send(sc, frame->mii_regaddr, 5); 305 306 /* Turn off xmit. */ 307 MII_CLR(PC_MgmtDir); 308 309 /* Idle bit */ 310 MII_CLR((PC_MgmtClk | PC_MgmtData)); 311 DELAY(1); 312 MII_SET(PC_MgmtClk); 313 DELAY(1); 314 315 /* Check for ack */ 316 MII_CLR(PC_MgmtClk); 317 DELAY(1); 318 ack = CSR_READ_1(sc, STGE_PhyCtrl) & PC_MgmtData; 319 MII_SET(PC_MgmtClk); 320 DELAY(1); 321 322 /* 323 * Now try reading data bits. If the ack failed, we still 324 * need to clock through 16 cycles to keep the PHY(s) in sync. 325 */ 326 if (ack) { 327 for(i = 0; i < 16; i++) { 328 MII_CLR(PC_MgmtClk); 329 DELAY(1); 330 MII_SET(PC_MgmtClk); 331 DELAY(1); 332 } 333 goto fail; 334 } 335 336 for (i = 0x8000; i; i >>= 1) { 337 MII_CLR(PC_MgmtClk); 338 DELAY(1); 339 if (!ack) { 340 if (CSR_READ_1(sc, STGE_PhyCtrl) & PC_MgmtData) 341 frame->mii_data |= i; 342 DELAY(1); 343 } 344 MII_SET(PC_MgmtClk); 345 DELAY(1); 346 } 347 348 fail: 349 MII_CLR(PC_MgmtClk); 350 DELAY(1); 351 MII_SET(PC_MgmtClk); 352 DELAY(1); 353 354 if (ack) 355 return(1); 356 return(0); 357 } 358 359 /* 360 * Write to a PHY register through the MII. 361 */ 362 static int 363 stge_mii_writereg(struct stge_softc *sc, struct stge_mii_frame *frame) 364 { 365 366 /* 367 * Set up frame for TX. 368 */ 369 frame->mii_stdelim = STGE_MII_STARTDELIM; 370 frame->mii_opcode = STGE_MII_WRITEOP; 371 frame->mii_turnaround = STGE_MII_TURNAROUND; 372 373 /* 374 * Turn on data output. 375 */ 376 MII_SET(PC_MgmtDir); 377 378 stge_mii_sync(sc); 379 380 stge_mii_send(sc, frame->mii_stdelim, 2); 381 stge_mii_send(sc, frame->mii_opcode, 2); 382 stge_mii_send(sc, frame->mii_phyaddr, 5); 383 stge_mii_send(sc, frame->mii_regaddr, 5); 384 stge_mii_send(sc, frame->mii_turnaround, 2); 385 stge_mii_send(sc, frame->mii_data, 16); 386 387 /* Idle bit. */ 388 MII_SET(PC_MgmtClk); 389 DELAY(1); 390 MII_CLR(PC_MgmtClk); 391 DELAY(1); 392 393 /* 394 * Turn off xmit. 395 */ 396 MII_CLR(PC_MgmtDir); 397 398 return(0); 399 } 400 401 /* 402 * sc_miibus_readreg: [mii interface function] 403 * 404 * Read a PHY register on the MII of the TC9021. 405 */ 406 static int 407 stge_miibus_readreg(device_t dev, int phy, int reg) 408 { 409 struct stge_softc *sc; 410 struct stge_mii_frame frame; 411 int error; 412 413 sc = device_get_softc(dev); 414 415 if (reg == STGE_PhyCtrl) { 416 /* XXX allow ip1000phy read STGE_PhyCtrl register. */ 417 STGE_MII_LOCK(sc); 418 error = CSR_READ_1(sc, STGE_PhyCtrl); 419 STGE_MII_UNLOCK(sc); 420 return (error); 421 } 422 bzero(&frame, sizeof(frame)); 423 frame.mii_phyaddr = phy; 424 frame.mii_regaddr = reg; 425 426 STGE_MII_LOCK(sc); 427 error = stge_mii_readreg(sc, &frame); 428 STGE_MII_UNLOCK(sc); 429 430 if (error != 0) { 431 /* Don't show errors for PHY probe request */ 432 if (reg != 1) 433 device_printf(sc->sc_dev, "phy read fail\n"); 434 return (0); 435 } 436 return (frame.mii_data); 437 } 438 439 /* 440 * stge_miibus_writereg: [mii interface function] 441 * 442 * Write a PHY register on the MII of the TC9021. 443 */ 444 static int 445 stge_miibus_writereg(device_t dev, int phy, int reg, int val) 446 { 447 struct stge_softc *sc; 448 struct stge_mii_frame frame; 449 int error; 450 451 sc = device_get_softc(dev); 452 453 bzero(&frame, sizeof(frame)); 454 frame.mii_phyaddr = phy; 455 frame.mii_regaddr = reg; 456 frame.mii_data = val; 457 458 STGE_MII_LOCK(sc); 459 error = stge_mii_writereg(sc, &frame); 460 STGE_MII_UNLOCK(sc); 461 462 if (error != 0) 463 device_printf(sc->sc_dev, "phy write fail\n"); 464 return (0); 465 } 466 467 /* 468 * stge_miibus_statchg: [mii interface function] 469 * 470 * Callback from MII layer when media changes. 471 */ 472 static void 473 stge_miibus_statchg(device_t dev) 474 { 475 struct stge_softc *sc; 476 477 sc = device_get_softc(dev); 478 taskqueue_enqueue(taskqueue_swi, &sc->sc_link_task); 479 } 480 481 /* 482 * stge_mediastatus: [ifmedia interface function] 483 * 484 * Get the current interface media status. 485 */ 486 static void 487 stge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 488 { 489 struct stge_softc *sc; 490 struct mii_data *mii; 491 492 sc = ifp->if_softc; 493 mii = device_get_softc(sc->sc_miibus); 494 495 mii_pollstat(mii); 496 ifmr->ifm_status = mii->mii_media_status; 497 ifmr->ifm_active = mii->mii_media_active; 498 } 499 500 /* 501 * stge_mediachange: [ifmedia interface function] 502 * 503 * Set hardware to newly-selected media. 504 */ 505 static int 506 stge_mediachange(struct ifnet *ifp) 507 { 508 struct stge_softc *sc; 509 struct mii_data *mii; 510 511 sc = ifp->if_softc; 512 mii = device_get_softc(sc->sc_miibus); 513 mii_mediachg(mii); 514 515 return (0); 516 } 517 518 static int 519 stge_eeprom_wait(struct stge_softc *sc) 520 { 521 int i; 522 523 for (i = 0; i < STGE_TIMEOUT; i++) { 524 DELAY(1000); 525 if ((CSR_READ_2(sc, STGE_EepromCtrl) & EC_EepromBusy) == 0) 526 return (0); 527 } 528 return (1); 529 } 530 531 /* 532 * stge_read_eeprom: 533 * 534 * Read data from the serial EEPROM. 535 */ 536 static void 537 stge_read_eeprom(struct stge_softc *sc, int offset, uint16_t *data) 538 { 539 540 if (stge_eeprom_wait(sc)) 541 device_printf(sc->sc_dev, "EEPROM failed to come ready\n"); 542 543 CSR_WRITE_2(sc, STGE_EepromCtrl, 544 EC_EepromAddress(offset) | EC_EepromOpcode(EC_OP_RR)); 545 if (stge_eeprom_wait(sc)) 546 device_printf(sc->sc_dev, "EEPROM read timed out\n"); 547 *data = CSR_READ_2(sc, STGE_EepromData); 548 } 549 550 551 static int 552 stge_probe(device_t dev) 553 { 554 struct stge_product *sp; 555 int i; 556 uint16_t vendor, devid; 557 558 vendor = pci_get_vendor(dev); 559 devid = pci_get_device(dev); 560 sp = stge_products; 561 for (i = 0; i < sizeof(stge_products)/sizeof(stge_products[0]); 562 i++, sp++) { 563 if (vendor == sp->stge_vendorid && 564 devid == sp->stge_deviceid) { 565 device_set_desc(dev, sp->stge_name); 566 return (BUS_PROBE_DEFAULT); 567 } 568 } 569 570 return (ENXIO); 571 } 572 573 static int 574 stge_attach(device_t dev) 575 { 576 struct stge_softc *sc; 577 struct ifnet *ifp; 578 uint8_t enaddr[ETHER_ADDR_LEN]; 579 int error, i; 580 uint16_t cmd; 581 uint32_t val; 582 583 error = 0; 584 sc = device_get_softc(dev); 585 sc->sc_dev = dev; 586 587 mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 588 MTX_DEF); 589 mtx_init(&sc->sc_mii_mtx, "stge_mii_mutex", NULL, MTX_DEF); 590 callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0); 591 TASK_INIT(&sc->sc_link_task, 0, stge_link_task, sc); 592 593 /* 594 * Map the device. 595 */ 596 pci_enable_busmaster(dev); 597 cmd = pci_read_config(dev, PCIR_COMMAND, 2); 598 val = pci_read_config(dev, PCIR_BAR(1), 4); 599 if ((val & 0x01) != 0) 600 sc->sc_spec = stge_res_spec_mem; 601 else { 602 val = pci_read_config(dev, PCIR_BAR(0), 4); 603 if ((val & 0x01) == 0) { 604 device_printf(sc->sc_dev, "couldn't locate IO BAR\n"); 605 error = ENXIO; 606 goto fail; 607 } 608 sc->sc_spec = stge_res_spec_io; 609 } 610 error = bus_alloc_resources(dev, sc->sc_spec, sc->sc_res); 611 if (error != 0) { 612 device_printf(dev, "couldn't allocate %s resources\n", 613 sc->sc_spec == stge_res_spec_mem ? "memory" : "I/O"); 614 goto fail; 615 } 616 sc->sc_rev = pci_get_revid(dev); 617 618 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 619 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, 620 "rxint_nframe", CTLTYPE_INT|CTLFLAG_RW, &sc->sc_rxint_nframe, 0, 621 sysctl_hw_stge_rxint_nframe, "I", "stge rx interrupt nframe"); 622 623 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 624 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, 625 "rxint_dmawait", CTLTYPE_INT|CTLFLAG_RW, &sc->sc_rxint_dmawait, 0, 626 sysctl_hw_stge_rxint_dmawait, "I", "stge rx interrupt dmawait"); 627 628 /* Pull in device tunables. */ 629 sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT; 630 error = resource_int_value(device_get_name(dev), device_get_unit(dev), 631 "rxint_nframe", &sc->sc_rxint_nframe); 632 if (error == 0) { 633 if (sc->sc_rxint_nframe < STGE_RXINT_NFRAME_MIN || 634 sc->sc_rxint_nframe > STGE_RXINT_NFRAME_MAX) { 635 device_printf(dev, "rxint_nframe value out of range; " 636 "using default: %d\n", STGE_RXINT_NFRAME_DEFAULT); 637 sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT; 638 } 639 } 640 641 sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT; 642 error = resource_int_value(device_get_name(dev), device_get_unit(dev), 643 "rxint_dmawait", &sc->sc_rxint_dmawait); 644 if (error == 0) { 645 if (sc->sc_rxint_dmawait < STGE_RXINT_DMAWAIT_MIN || 646 sc->sc_rxint_dmawait > STGE_RXINT_DMAWAIT_MAX) { 647 device_printf(dev, "rxint_dmawait value out of range; " 648 "using default: %d\n", STGE_RXINT_DMAWAIT_DEFAULT); 649 sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT; 650 } 651 } 652 653 if ((error = stge_dma_alloc(sc) != 0)) 654 goto fail; 655 656 /* 657 * Determine if we're copper or fiber. It affects how we 658 * reset the card. 659 */ 660 if (CSR_READ_4(sc, STGE_AsicCtrl) & AC_PhyMedia) 661 sc->sc_usefiber = 1; 662 else 663 sc->sc_usefiber = 0; 664 665 /* Load LED configuration from EEPROM. */ 666 stge_read_eeprom(sc, STGE_EEPROM_LEDMode, &sc->sc_led); 667 668 /* 669 * Reset the chip to a known state. 670 */ 671 STGE_LOCK(sc); 672 stge_reset(sc, STGE_RESET_FULL); 673 STGE_UNLOCK(sc); 674 675 /* 676 * Reading the station address from the EEPROM doesn't seem 677 * to work, at least on my sample boards. Instead, since 678 * the reset sequence does AutoInit, read it from the station 679 * address registers. For Sundance 1023 you can only read it 680 * from EEPROM. 681 */ 682 if (pci_get_device(dev) != DEVICEID_SUNDANCETI_ST1023) { 683 uint16_t v; 684 685 v = CSR_READ_2(sc, STGE_StationAddress0); 686 enaddr[0] = v & 0xff; 687 enaddr[1] = v >> 8; 688 v = CSR_READ_2(sc, STGE_StationAddress1); 689 enaddr[2] = v & 0xff; 690 enaddr[3] = v >> 8; 691 v = CSR_READ_2(sc, STGE_StationAddress2); 692 enaddr[4] = v & 0xff; 693 enaddr[5] = v >> 8; 694 sc->sc_stge1023 = 0; 695 } else { 696 uint16_t myaddr[ETHER_ADDR_LEN / 2]; 697 for (i = 0; i <ETHER_ADDR_LEN / 2; i++) { 698 stge_read_eeprom(sc, STGE_EEPROM_StationAddress0 + i, 699 &myaddr[i]); 700 myaddr[i] = le16toh(myaddr[i]); 701 } 702 bcopy(myaddr, enaddr, sizeof(enaddr)); 703 sc->sc_stge1023 = 1; 704 } 705 706 ifp = sc->sc_ifp = if_alloc(IFT_ETHER); 707 if (ifp == NULL) { 708 device_printf(sc->sc_dev, "failed to if_alloc()\n"); 709 error = ENXIO; 710 goto fail; 711 } 712 713 ifp->if_softc = sc; 714 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 715 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 716 ifp->if_ioctl = stge_ioctl; 717 ifp->if_start = stge_start; 718 ifp->if_init = stge_init; 719 ifp->if_mtu = ETHERMTU; 720 ifp->if_snd.ifq_drv_maxlen = STGE_TX_RING_CNT - 1; 721 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); 722 IFQ_SET_READY(&ifp->if_snd); 723 /* Revision B3 and earlier chips have checksum bug. */ 724 if (sc->sc_rev >= 0x0c) { 725 ifp->if_hwassist = STGE_CSUM_FEATURES; 726 ifp->if_capabilities = IFCAP_HWCSUM; 727 } else { 728 ifp->if_hwassist = 0; 729 ifp->if_capabilities = 0; 730 } 731 ifp->if_capabilities |= IFCAP_WOL_MAGIC; 732 ifp->if_capenable = ifp->if_capabilities; 733 734 /* 735 * Read some important bits from the PhyCtrl register. 736 */ 737 sc->sc_PhyCtrl = CSR_READ_1(sc, STGE_PhyCtrl) & 738 (PC_PhyDuplexPolarity | PC_PhyLnkPolarity); 739 740 /* Set up MII bus. */ 741 if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, stge_mediachange, 742 stge_mediastatus)) != 0) { 743 device_printf(sc->sc_dev, "no PHY found!\n"); 744 goto fail; 745 } 746 747 ether_ifattach(ifp, enaddr); 748 749 /* VLAN capability setup */ 750 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING; 751 if (sc->sc_rev >= 0x0c) 752 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM; 753 ifp->if_capenable = ifp->if_capabilities; 754 #ifdef DEVICE_POLLING 755 ifp->if_capabilities |= IFCAP_POLLING; 756 #endif 757 /* 758 * Tell the upper layer(s) we support long frames. 759 * Must appear after the call to ether_ifattach() because 760 * ether_ifattach() sets ifi_hdrlen to the default value. 761 */ 762 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 763 764 /* 765 * The manual recommends disabling early transmit, so we 766 * do. It's disabled anyway, if using IP checksumming, 767 * since the entire packet must be in the FIFO in order 768 * for the chip to perform the checksum. 769 */ 770 sc->sc_txthresh = 0x0fff; 771 772 /* 773 * Disable MWI if the PCI layer tells us to. 774 */ 775 sc->sc_DMACtrl = 0; 776 if ((cmd & PCIM_CMD_MWRICEN) == 0) 777 sc->sc_DMACtrl |= DMAC_MWIDisable; 778 779 /* 780 * Hookup IRQ 781 */ 782 error = bus_setup_intr(dev, sc->sc_res[1], INTR_TYPE_NET | INTR_MPSAFE, 783 NULL, stge_intr, sc, &sc->sc_ih); 784 if (error != 0) { 785 ether_ifdetach(ifp); 786 device_printf(sc->sc_dev, "couldn't set up IRQ\n"); 787 sc->sc_ifp = NULL; 788 goto fail; 789 } 790 791 fail: 792 if (error != 0) 793 stge_detach(dev); 794 795 return (error); 796 } 797 798 static int 799 stge_detach(device_t dev) 800 { 801 struct stge_softc *sc; 802 struct ifnet *ifp; 803 804 sc = device_get_softc(dev); 805 806 ifp = sc->sc_ifp; 807 #ifdef DEVICE_POLLING 808 if (ifp && ifp->if_capenable & IFCAP_POLLING) 809 ether_poll_deregister(ifp); 810 #endif 811 if (device_is_attached(dev)) { 812 STGE_LOCK(sc); 813 /* XXX */ 814 sc->sc_detach = 1; 815 stge_stop(sc); 816 STGE_UNLOCK(sc); 817 callout_drain(&sc->sc_tick_ch); 818 taskqueue_drain(taskqueue_swi, &sc->sc_link_task); 819 ether_ifdetach(ifp); 820 } 821 822 if (sc->sc_miibus != NULL) { 823 device_delete_child(dev, sc->sc_miibus); 824 sc->sc_miibus = NULL; 825 } 826 bus_generic_detach(dev); 827 stge_dma_free(sc); 828 829 if (ifp != NULL) { 830 if_free(ifp); 831 sc->sc_ifp = NULL; 832 } 833 834 if (sc->sc_ih) { 835 bus_teardown_intr(dev, sc->sc_res[1], sc->sc_ih); 836 sc->sc_ih = NULL; 837 } 838 bus_release_resources(dev, sc->sc_spec, sc->sc_res); 839 840 mtx_destroy(&sc->sc_mii_mtx); 841 mtx_destroy(&sc->sc_mtx); 842 843 return (0); 844 } 845 846 struct stge_dmamap_arg { 847 bus_addr_t stge_busaddr; 848 }; 849 850 static void 851 stge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 852 { 853 struct stge_dmamap_arg *ctx; 854 855 if (error != 0) 856 return; 857 858 ctx = (struct stge_dmamap_arg *)arg; 859 ctx->stge_busaddr = segs[0].ds_addr; 860 } 861 862 static int 863 stge_dma_alloc(struct stge_softc *sc) 864 { 865 struct stge_dmamap_arg ctx; 866 struct stge_txdesc *txd; 867 struct stge_rxdesc *rxd; 868 int error, i; 869 870 /* create parent tag. */ 871 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),/* parent */ 872 1, 0, /* algnmnt, boundary */ 873 STGE_DMA_MAXADDR, /* lowaddr */ 874 BUS_SPACE_MAXADDR, /* highaddr */ 875 NULL, NULL, /* filter, filterarg */ 876 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 877 0, /* nsegments */ 878 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 879 0, /* flags */ 880 NULL, NULL, /* lockfunc, lockarg */ 881 &sc->sc_cdata.stge_parent_tag); 882 if (error != 0) { 883 device_printf(sc->sc_dev, "failed to create parent DMA tag\n"); 884 goto fail; 885 } 886 /* create tag for Tx ring. */ 887 error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */ 888 STGE_RING_ALIGN, 0, /* algnmnt, boundary */ 889 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 890 BUS_SPACE_MAXADDR, /* highaddr */ 891 NULL, NULL, /* filter, filterarg */ 892 STGE_TX_RING_SZ, /* maxsize */ 893 1, /* nsegments */ 894 STGE_TX_RING_SZ, /* maxsegsize */ 895 0, /* flags */ 896 NULL, NULL, /* lockfunc, lockarg */ 897 &sc->sc_cdata.stge_tx_ring_tag); 898 if (error != 0) { 899 device_printf(sc->sc_dev, 900 "failed to allocate Tx ring DMA tag\n"); 901 goto fail; 902 } 903 904 /* create tag for Rx ring. */ 905 error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */ 906 STGE_RING_ALIGN, 0, /* algnmnt, boundary */ 907 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 908 BUS_SPACE_MAXADDR, /* highaddr */ 909 NULL, NULL, /* filter, filterarg */ 910 STGE_RX_RING_SZ, /* maxsize */ 911 1, /* nsegments */ 912 STGE_RX_RING_SZ, /* maxsegsize */ 913 0, /* flags */ 914 NULL, NULL, /* lockfunc, lockarg */ 915 &sc->sc_cdata.stge_rx_ring_tag); 916 if (error != 0) { 917 device_printf(sc->sc_dev, 918 "failed to allocate Rx ring DMA tag\n"); 919 goto fail; 920 } 921 922 /* create tag for Tx buffers. */ 923 error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */ 924 1, 0, /* algnmnt, boundary */ 925 BUS_SPACE_MAXADDR, /* lowaddr */ 926 BUS_SPACE_MAXADDR, /* highaddr */ 927 NULL, NULL, /* filter, filterarg */ 928 MCLBYTES * STGE_MAXTXSEGS, /* maxsize */ 929 STGE_MAXTXSEGS, /* nsegments */ 930 MCLBYTES, /* maxsegsize */ 931 0, /* flags */ 932 NULL, NULL, /* lockfunc, lockarg */ 933 &sc->sc_cdata.stge_tx_tag); 934 if (error != 0) { 935 device_printf(sc->sc_dev, "failed to allocate Tx DMA tag\n"); 936 goto fail; 937 } 938 939 /* create tag for Rx buffers. */ 940 error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */ 941 1, 0, /* algnmnt, boundary */ 942 BUS_SPACE_MAXADDR, /* lowaddr */ 943 BUS_SPACE_MAXADDR, /* highaddr */ 944 NULL, NULL, /* filter, filterarg */ 945 MCLBYTES, /* maxsize */ 946 1, /* nsegments */ 947 MCLBYTES, /* maxsegsize */ 948 0, /* flags */ 949 NULL, NULL, /* lockfunc, lockarg */ 950 &sc->sc_cdata.stge_rx_tag); 951 if (error != 0) { 952 device_printf(sc->sc_dev, "failed to allocate Rx DMA tag\n"); 953 goto fail; 954 } 955 956 /* allocate DMA'able memory and load the DMA map for Tx ring. */ 957 error = bus_dmamem_alloc(sc->sc_cdata.stge_tx_ring_tag, 958 (void **)&sc->sc_rdata.stge_tx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO, 959 &sc->sc_cdata.stge_tx_ring_map); 960 if (error != 0) { 961 device_printf(sc->sc_dev, 962 "failed to allocate DMA'able memory for Tx ring\n"); 963 goto fail; 964 } 965 966 ctx.stge_busaddr = 0; 967 error = bus_dmamap_load(sc->sc_cdata.stge_tx_ring_tag, 968 sc->sc_cdata.stge_tx_ring_map, sc->sc_rdata.stge_tx_ring, 969 STGE_TX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 970 if (error != 0 || ctx.stge_busaddr == 0) { 971 device_printf(sc->sc_dev, 972 "failed to load DMA'able memory for Tx ring\n"); 973 goto fail; 974 } 975 sc->sc_rdata.stge_tx_ring_paddr = ctx.stge_busaddr; 976 977 /* allocate DMA'able memory and load the DMA map for Rx ring. */ 978 error = bus_dmamem_alloc(sc->sc_cdata.stge_rx_ring_tag, 979 (void **)&sc->sc_rdata.stge_rx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO, 980 &sc->sc_cdata.stge_rx_ring_map); 981 if (error != 0) { 982 device_printf(sc->sc_dev, 983 "failed to allocate DMA'able memory for Rx ring\n"); 984 goto fail; 985 } 986 987 ctx.stge_busaddr = 0; 988 error = bus_dmamap_load(sc->sc_cdata.stge_rx_ring_tag, 989 sc->sc_cdata.stge_rx_ring_map, sc->sc_rdata.stge_rx_ring, 990 STGE_RX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT); 991 if (error != 0 || ctx.stge_busaddr == 0) { 992 device_printf(sc->sc_dev, 993 "failed to load DMA'able memory for Rx ring\n"); 994 goto fail; 995 } 996 sc->sc_rdata.stge_rx_ring_paddr = ctx.stge_busaddr; 997 998 /* create DMA maps for Tx buffers. */ 999 for (i = 0; i < STGE_TX_RING_CNT; i++) { 1000 txd = &sc->sc_cdata.stge_txdesc[i]; 1001 txd->tx_m = NULL; 1002 txd->tx_dmamap = 0; 1003 error = bus_dmamap_create(sc->sc_cdata.stge_tx_tag, 0, 1004 &txd->tx_dmamap); 1005 if (error != 0) { 1006 device_printf(sc->sc_dev, 1007 "failed to create Tx dmamap\n"); 1008 goto fail; 1009 } 1010 } 1011 /* create DMA maps for Rx buffers. */ 1012 if ((error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0, 1013 &sc->sc_cdata.stge_rx_sparemap)) != 0) { 1014 device_printf(sc->sc_dev, "failed to create spare Rx dmamap\n"); 1015 goto fail; 1016 } 1017 for (i = 0; i < STGE_RX_RING_CNT; i++) { 1018 rxd = &sc->sc_cdata.stge_rxdesc[i]; 1019 rxd->rx_m = NULL; 1020 rxd->rx_dmamap = 0; 1021 error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0, 1022 &rxd->rx_dmamap); 1023 if (error != 0) { 1024 device_printf(sc->sc_dev, 1025 "failed to create Rx dmamap\n"); 1026 goto fail; 1027 } 1028 } 1029 1030 fail: 1031 return (error); 1032 } 1033 1034 static void 1035 stge_dma_free(struct stge_softc *sc) 1036 { 1037 struct stge_txdesc *txd; 1038 struct stge_rxdesc *rxd; 1039 int i; 1040 1041 /* Tx ring */ 1042 if (sc->sc_cdata.stge_tx_ring_tag) { 1043 if (sc->sc_cdata.stge_tx_ring_map) 1044 bus_dmamap_unload(sc->sc_cdata.stge_tx_ring_tag, 1045 sc->sc_cdata.stge_tx_ring_map); 1046 if (sc->sc_cdata.stge_tx_ring_map && 1047 sc->sc_rdata.stge_tx_ring) 1048 bus_dmamem_free(sc->sc_cdata.stge_tx_ring_tag, 1049 sc->sc_rdata.stge_tx_ring, 1050 sc->sc_cdata.stge_tx_ring_map); 1051 sc->sc_rdata.stge_tx_ring = NULL; 1052 sc->sc_cdata.stge_tx_ring_map = 0; 1053 bus_dma_tag_destroy(sc->sc_cdata.stge_tx_ring_tag); 1054 sc->sc_cdata.stge_tx_ring_tag = NULL; 1055 } 1056 /* Rx ring */ 1057 if (sc->sc_cdata.stge_rx_ring_tag) { 1058 if (sc->sc_cdata.stge_rx_ring_map) 1059 bus_dmamap_unload(sc->sc_cdata.stge_rx_ring_tag, 1060 sc->sc_cdata.stge_rx_ring_map); 1061 if (sc->sc_cdata.stge_rx_ring_map && 1062 sc->sc_rdata.stge_rx_ring) 1063 bus_dmamem_free(sc->sc_cdata.stge_rx_ring_tag, 1064 sc->sc_rdata.stge_rx_ring, 1065 sc->sc_cdata.stge_rx_ring_map); 1066 sc->sc_rdata.stge_rx_ring = NULL; 1067 sc->sc_cdata.stge_rx_ring_map = 0; 1068 bus_dma_tag_destroy(sc->sc_cdata.stge_rx_ring_tag); 1069 sc->sc_cdata.stge_rx_ring_tag = NULL; 1070 } 1071 /* Tx buffers */ 1072 if (sc->sc_cdata.stge_tx_tag) { 1073 for (i = 0; i < STGE_TX_RING_CNT; i++) { 1074 txd = &sc->sc_cdata.stge_txdesc[i]; 1075 if (txd->tx_dmamap) { 1076 bus_dmamap_destroy(sc->sc_cdata.stge_tx_tag, 1077 txd->tx_dmamap); 1078 txd->tx_dmamap = 0; 1079 } 1080 } 1081 bus_dma_tag_destroy(sc->sc_cdata.stge_tx_tag); 1082 sc->sc_cdata.stge_tx_tag = NULL; 1083 } 1084 /* Rx buffers */ 1085 if (sc->sc_cdata.stge_rx_tag) { 1086 for (i = 0; i < STGE_RX_RING_CNT; i++) { 1087 rxd = &sc->sc_cdata.stge_rxdesc[i]; 1088 if (rxd->rx_dmamap) { 1089 bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag, 1090 rxd->rx_dmamap); 1091 rxd->rx_dmamap = 0; 1092 } 1093 } 1094 if (sc->sc_cdata.stge_rx_sparemap) { 1095 bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag, 1096 sc->sc_cdata.stge_rx_sparemap); 1097 sc->sc_cdata.stge_rx_sparemap = 0; 1098 } 1099 bus_dma_tag_destroy(sc->sc_cdata.stge_rx_tag); 1100 sc->sc_cdata.stge_rx_tag = NULL; 1101 } 1102 1103 if (sc->sc_cdata.stge_parent_tag) { 1104 bus_dma_tag_destroy(sc->sc_cdata.stge_parent_tag); 1105 sc->sc_cdata.stge_parent_tag = NULL; 1106 } 1107 } 1108 1109 /* 1110 * stge_shutdown: 1111 * 1112 * Make sure the interface is stopped at reboot time. 1113 */ 1114 static int 1115 stge_shutdown(device_t dev) 1116 { 1117 1118 return (stge_suspend(dev)); 1119 } 1120 1121 static void 1122 stge_setwol(struct stge_softc *sc) 1123 { 1124 struct ifnet *ifp; 1125 uint8_t v; 1126 1127 STGE_LOCK_ASSERT(sc); 1128 1129 ifp = sc->sc_ifp; 1130 v = CSR_READ_1(sc, STGE_WakeEvent); 1131 /* Disable all WOL bits. */ 1132 v &= ~(WE_WakePktEnable | WE_MagicPktEnable | WE_LinkEventEnable | 1133 WE_WakeOnLanEnable); 1134 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) 1135 v |= WE_MagicPktEnable | WE_WakeOnLanEnable; 1136 CSR_WRITE_1(sc, STGE_WakeEvent, v); 1137 /* Reset Tx and prevent transmission. */ 1138 CSR_WRITE_4(sc, STGE_AsicCtrl, 1139 CSR_READ_4(sc, STGE_AsicCtrl) | AC_TxReset); 1140 /* 1141 * TC9021 automatically reset link speed to 100Mbps when it's put 1142 * into sleep so there is no need to try to resetting link speed. 1143 */ 1144 } 1145 1146 static int 1147 stge_suspend(device_t dev) 1148 { 1149 struct stge_softc *sc; 1150 1151 sc = device_get_softc(dev); 1152 1153 STGE_LOCK(sc); 1154 stge_stop(sc); 1155 sc->sc_suspended = 1; 1156 stge_setwol(sc); 1157 STGE_UNLOCK(sc); 1158 1159 return (0); 1160 } 1161 1162 static int 1163 stge_resume(device_t dev) 1164 { 1165 struct stge_softc *sc; 1166 struct ifnet *ifp; 1167 uint8_t v; 1168 1169 sc = device_get_softc(dev); 1170 1171 STGE_LOCK(sc); 1172 /* 1173 * Clear WOL bits, so special frames wouldn't interfere 1174 * normal Rx operation anymore. 1175 */ 1176 v = CSR_READ_1(sc, STGE_WakeEvent); 1177 v &= ~(WE_WakePktEnable | WE_MagicPktEnable | WE_LinkEventEnable | 1178 WE_WakeOnLanEnable); 1179 CSR_WRITE_1(sc, STGE_WakeEvent, v); 1180 ifp = sc->sc_ifp; 1181 if (ifp->if_flags & IFF_UP) 1182 stge_init_locked(sc); 1183 1184 sc->sc_suspended = 0; 1185 STGE_UNLOCK(sc); 1186 1187 return (0); 1188 } 1189 1190 static void 1191 stge_dma_wait(struct stge_softc *sc) 1192 { 1193 int i; 1194 1195 for (i = 0; i < STGE_TIMEOUT; i++) { 1196 DELAY(2); 1197 if ((CSR_READ_4(sc, STGE_DMACtrl) & DMAC_TxDMAInProg) == 0) 1198 break; 1199 } 1200 1201 if (i == STGE_TIMEOUT) 1202 device_printf(sc->sc_dev, "DMA wait timed out\n"); 1203 } 1204 1205 static int 1206 stge_encap(struct stge_softc *sc, struct mbuf **m_head) 1207 { 1208 struct stge_txdesc *txd; 1209 struct stge_tfd *tfd; 1210 struct mbuf *m; 1211 bus_dma_segment_t txsegs[STGE_MAXTXSEGS]; 1212 int error, i, nsegs, si; 1213 uint64_t csum_flags, tfc; 1214 1215 STGE_LOCK_ASSERT(sc); 1216 1217 if ((txd = STAILQ_FIRST(&sc->sc_cdata.stge_txfreeq)) == NULL) 1218 return (ENOBUFS); 1219 1220 error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_tx_tag, 1221 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0); 1222 if (error == EFBIG) { 1223 m = m_collapse(*m_head, M_DONTWAIT, STGE_MAXTXSEGS); 1224 if (m == NULL) { 1225 m_freem(*m_head); 1226 *m_head = NULL; 1227 return (ENOMEM); 1228 } 1229 *m_head = m; 1230 error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_tx_tag, 1231 txd->tx_dmamap, *m_head, txsegs, &nsegs, 0); 1232 if (error != 0) { 1233 m_freem(*m_head); 1234 *m_head = NULL; 1235 return (error); 1236 } 1237 } else if (error != 0) 1238 return (error); 1239 if (nsegs == 0) { 1240 m_freem(*m_head); 1241 *m_head = NULL; 1242 return (EIO); 1243 } 1244 1245 m = *m_head; 1246 csum_flags = 0; 1247 if ((m->m_pkthdr.csum_flags & STGE_CSUM_FEATURES) != 0) { 1248 if (m->m_pkthdr.csum_flags & CSUM_IP) 1249 csum_flags |= TFD_IPChecksumEnable; 1250 if (m->m_pkthdr.csum_flags & CSUM_TCP) 1251 csum_flags |= TFD_TCPChecksumEnable; 1252 else if (m->m_pkthdr.csum_flags & CSUM_UDP) 1253 csum_flags |= TFD_UDPChecksumEnable; 1254 } 1255 1256 si = sc->sc_cdata.stge_tx_prod; 1257 tfd = &sc->sc_rdata.stge_tx_ring[si]; 1258 for (i = 0; i < nsegs; i++) 1259 tfd->tfd_frags[i].frag_word0 = 1260 htole64(FRAG_ADDR(txsegs[i].ds_addr) | 1261 FRAG_LEN(txsegs[i].ds_len)); 1262 sc->sc_cdata.stge_tx_cnt++; 1263 1264 tfc = TFD_FrameId(si) | TFD_WordAlign(TFD_WordAlign_disable) | 1265 TFD_FragCount(nsegs) | csum_flags; 1266 if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT) 1267 tfc |= TFD_TxDMAIndicate; 1268 1269 /* Update producer index. */ 1270 sc->sc_cdata.stge_tx_prod = (si + 1) % STGE_TX_RING_CNT; 1271 1272 /* Check if we have a VLAN tag to insert. */ 1273 if (m->m_flags & M_VLANTAG) 1274 tfc |= (TFD_VLANTagInsert | TFD_VID(m->m_pkthdr.ether_vtag)); 1275 tfd->tfd_control = htole64(tfc); 1276 1277 /* Update Tx Queue. */ 1278 STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txfreeq, tx_q); 1279 STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txbusyq, txd, tx_q); 1280 txd->tx_m = m; 1281 1282 /* Sync descriptors. */ 1283 bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap, 1284 BUS_DMASYNC_PREWRITE); 1285 bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag, 1286 sc->sc_cdata.stge_tx_ring_map, 1287 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1288 1289 return (0); 1290 } 1291 1292 /* 1293 * stge_start: [ifnet interface function] 1294 * 1295 * Start packet transmission on the interface. 1296 */ 1297 static void 1298 stge_start(struct ifnet *ifp) 1299 { 1300 struct stge_softc *sc; 1301 1302 sc = ifp->if_softc; 1303 STGE_LOCK(sc); 1304 stge_start_locked(ifp); 1305 STGE_UNLOCK(sc); 1306 } 1307 1308 static void 1309 stge_start_locked(struct ifnet *ifp) 1310 { 1311 struct stge_softc *sc; 1312 struct mbuf *m_head; 1313 int enq; 1314 1315 sc = ifp->if_softc; 1316 1317 STGE_LOCK_ASSERT(sc); 1318 1319 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) != 1320 IFF_DRV_RUNNING || sc->sc_link == 0) 1321 return; 1322 1323 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) { 1324 if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT) { 1325 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1326 break; 1327 } 1328 1329 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 1330 if (m_head == NULL) 1331 break; 1332 /* 1333 * Pack the data into the transmit ring. If we 1334 * don't have room, set the OACTIVE flag and wait 1335 * for the NIC to drain the ring. 1336 */ 1337 if (stge_encap(sc, &m_head)) { 1338 if (m_head == NULL) 1339 break; 1340 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 1341 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1342 break; 1343 } 1344 1345 enq++; 1346 /* 1347 * If there's a BPF listener, bounce a copy of this frame 1348 * to him. 1349 */ 1350 ETHER_BPF_MTAP(ifp, m_head); 1351 } 1352 1353 if (enq > 0) { 1354 /* Transmit */ 1355 CSR_WRITE_4(sc, STGE_DMACtrl, DMAC_TxDMAPollNow); 1356 1357 /* Set a timeout in case the chip goes out to lunch. */ 1358 sc->sc_watchdog_timer = 5; 1359 } 1360 } 1361 1362 /* 1363 * stge_watchdog: 1364 * 1365 * Watchdog timer handler. 1366 */ 1367 static void 1368 stge_watchdog(struct stge_softc *sc) 1369 { 1370 struct ifnet *ifp; 1371 1372 STGE_LOCK_ASSERT(sc); 1373 1374 if (sc->sc_watchdog_timer == 0 || --sc->sc_watchdog_timer) 1375 return; 1376 1377 ifp = sc->sc_ifp; 1378 if_printf(sc->sc_ifp, "device timeout\n"); 1379 ifp->if_oerrors++; 1380 stge_init_locked(sc); 1381 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1382 stge_start_locked(ifp); 1383 } 1384 1385 /* 1386 * stge_ioctl: [ifnet interface function] 1387 * 1388 * Handle control requests from the operator. 1389 */ 1390 static int 1391 stge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1392 { 1393 struct stge_softc *sc; 1394 struct ifreq *ifr; 1395 struct mii_data *mii; 1396 int error, mask; 1397 1398 sc = ifp->if_softc; 1399 ifr = (struct ifreq *)data; 1400 error = 0; 1401 switch (cmd) { 1402 case SIOCSIFMTU: 1403 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > STGE_JUMBO_MTU) 1404 error = EINVAL; 1405 else if (ifp->if_mtu != ifr->ifr_mtu) { 1406 ifp->if_mtu = ifr->ifr_mtu; 1407 STGE_LOCK(sc); 1408 stge_init_locked(sc); 1409 STGE_UNLOCK(sc); 1410 } 1411 break; 1412 case SIOCSIFFLAGS: 1413 STGE_LOCK(sc); 1414 if ((ifp->if_flags & IFF_UP) != 0) { 1415 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1416 if (((ifp->if_flags ^ sc->sc_if_flags) 1417 & IFF_PROMISC) != 0) 1418 stge_set_filter(sc); 1419 } else { 1420 if (sc->sc_detach == 0) 1421 stge_init_locked(sc); 1422 } 1423 } else { 1424 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1425 stge_stop(sc); 1426 } 1427 sc->sc_if_flags = ifp->if_flags; 1428 STGE_UNLOCK(sc); 1429 break; 1430 case SIOCADDMULTI: 1431 case SIOCDELMULTI: 1432 STGE_LOCK(sc); 1433 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1434 stge_set_multi(sc); 1435 STGE_UNLOCK(sc); 1436 break; 1437 case SIOCSIFMEDIA: 1438 case SIOCGIFMEDIA: 1439 mii = device_get_softc(sc->sc_miibus); 1440 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 1441 break; 1442 case SIOCSIFCAP: 1443 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1444 #ifdef DEVICE_POLLING 1445 if ((mask & IFCAP_POLLING) != 0) { 1446 if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) { 1447 error = ether_poll_register(stge_poll, ifp); 1448 if (error != 0) 1449 break; 1450 STGE_LOCK(sc); 1451 CSR_WRITE_2(sc, STGE_IntEnable, 0); 1452 ifp->if_capenable |= IFCAP_POLLING; 1453 STGE_UNLOCK(sc); 1454 } else { 1455 error = ether_poll_deregister(ifp); 1456 if (error != 0) 1457 break; 1458 STGE_LOCK(sc); 1459 CSR_WRITE_2(sc, STGE_IntEnable, 1460 sc->sc_IntEnable); 1461 ifp->if_capenable &= ~IFCAP_POLLING; 1462 STGE_UNLOCK(sc); 1463 } 1464 } 1465 #endif 1466 if ((mask & IFCAP_HWCSUM) != 0) { 1467 ifp->if_capenable ^= IFCAP_HWCSUM; 1468 if ((IFCAP_HWCSUM & ifp->if_capenable) != 0 && 1469 (IFCAP_HWCSUM & ifp->if_capabilities) != 0) 1470 ifp->if_hwassist = STGE_CSUM_FEATURES; 1471 else 1472 ifp->if_hwassist = 0; 1473 } 1474 if ((mask & IFCAP_WOL) != 0 && 1475 (ifp->if_capabilities & IFCAP_WOL) != 0) { 1476 if ((mask & IFCAP_WOL_MAGIC) != 0) 1477 ifp->if_capenable ^= IFCAP_WOL_MAGIC; 1478 } 1479 if ((mask & IFCAP_VLAN_HWTAGGING) != 0) { 1480 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 1481 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 1482 STGE_LOCK(sc); 1483 stge_vlan_setup(sc); 1484 STGE_UNLOCK(sc); 1485 } 1486 } 1487 VLAN_CAPABILITIES(ifp); 1488 break; 1489 default: 1490 error = ether_ioctl(ifp, cmd, data); 1491 break; 1492 } 1493 1494 return (error); 1495 } 1496 1497 static void 1498 stge_link_task(void *arg, int pending) 1499 { 1500 struct stge_softc *sc; 1501 struct mii_data *mii; 1502 uint32_t v, ac; 1503 int i; 1504 1505 sc = (struct stge_softc *)arg; 1506 STGE_LOCK(sc); 1507 1508 mii = device_get_softc(sc->sc_miibus); 1509 if (mii->mii_media_status & IFM_ACTIVE) { 1510 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) 1511 sc->sc_link = 1; 1512 } else 1513 sc->sc_link = 0; 1514 1515 sc->sc_MACCtrl = 0; 1516 if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0) 1517 sc->sc_MACCtrl |= MC_DuplexSelect; 1518 if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) != 0) 1519 sc->sc_MACCtrl |= MC_RxFlowControlEnable; 1520 if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) != 0) 1521 sc->sc_MACCtrl |= MC_TxFlowControlEnable; 1522 /* 1523 * Update STGE_MACCtrl register depending on link status. 1524 * (duplex, flow control etc) 1525 */ 1526 v = ac = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK; 1527 v &= ~(MC_DuplexSelect|MC_RxFlowControlEnable|MC_TxFlowControlEnable); 1528 v |= sc->sc_MACCtrl; 1529 CSR_WRITE_4(sc, STGE_MACCtrl, v); 1530 if (((ac ^ sc->sc_MACCtrl) & MC_DuplexSelect) != 0) { 1531 /* Duplex setting changed, reset Tx/Rx functions. */ 1532 ac = CSR_READ_4(sc, STGE_AsicCtrl); 1533 ac |= AC_TxReset | AC_RxReset; 1534 CSR_WRITE_4(sc, STGE_AsicCtrl, ac); 1535 for (i = 0; i < STGE_TIMEOUT; i++) { 1536 DELAY(100); 1537 if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0) 1538 break; 1539 } 1540 if (i == STGE_TIMEOUT) 1541 device_printf(sc->sc_dev, "reset failed to complete\n"); 1542 } 1543 STGE_UNLOCK(sc); 1544 } 1545 1546 static __inline int 1547 stge_tx_error(struct stge_softc *sc) 1548 { 1549 uint32_t txstat; 1550 int error; 1551 1552 for (error = 0;;) { 1553 txstat = CSR_READ_4(sc, STGE_TxStatus); 1554 if ((txstat & TS_TxComplete) == 0) 1555 break; 1556 /* Tx underrun */ 1557 if ((txstat & TS_TxUnderrun) != 0) { 1558 /* 1559 * XXX 1560 * There should be a more better way to recover 1561 * from Tx underrun instead of a full reset. 1562 */ 1563 if (sc->sc_nerr++ < STGE_MAXERR) 1564 device_printf(sc->sc_dev, "Tx underrun, " 1565 "resetting...\n"); 1566 if (sc->sc_nerr == STGE_MAXERR) 1567 device_printf(sc->sc_dev, "too many errors; " 1568 "not reporting any more\n"); 1569 error = -1; 1570 break; 1571 } 1572 /* Maximum/Late collisions, Re-enable Tx MAC. */ 1573 if ((txstat & (TS_MaxCollisions|TS_LateCollision)) != 0) 1574 CSR_WRITE_4(sc, STGE_MACCtrl, 1575 (CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK) | 1576 MC_TxEnable); 1577 } 1578 1579 return (error); 1580 } 1581 1582 /* 1583 * stge_intr: 1584 * 1585 * Interrupt service routine. 1586 */ 1587 static void 1588 stge_intr(void *arg) 1589 { 1590 struct stge_softc *sc; 1591 struct ifnet *ifp; 1592 int reinit; 1593 uint16_t status; 1594 1595 sc = (struct stge_softc *)arg; 1596 ifp = sc->sc_ifp; 1597 1598 STGE_LOCK(sc); 1599 1600 #ifdef DEVICE_POLLING 1601 if ((ifp->if_capenable & IFCAP_POLLING) != 0) 1602 goto done_locked; 1603 #endif 1604 status = CSR_READ_2(sc, STGE_IntStatus); 1605 if (sc->sc_suspended || (status & IS_InterruptStatus) == 0) 1606 goto done_locked; 1607 1608 /* Disable interrupts. */ 1609 for (reinit = 0;;) { 1610 status = CSR_READ_2(sc, STGE_IntStatusAck); 1611 status &= sc->sc_IntEnable; 1612 if (status == 0) 1613 break; 1614 /* Host interface errors. */ 1615 if ((status & IS_HostError) != 0) { 1616 device_printf(sc->sc_dev, 1617 "Host interface error, resetting...\n"); 1618 reinit = 1; 1619 goto force_init; 1620 } 1621 1622 /* Receive interrupts. */ 1623 if ((status & IS_RxDMAComplete) != 0) { 1624 stge_rxeof(sc); 1625 if ((status & IS_RFDListEnd) != 0) 1626 CSR_WRITE_4(sc, STGE_DMACtrl, 1627 DMAC_RxDMAPollNow); 1628 } 1629 1630 /* Transmit interrupts. */ 1631 if ((status & (IS_TxDMAComplete | IS_TxComplete)) != 0) 1632 stge_txeof(sc); 1633 1634 /* Transmission errors.*/ 1635 if ((status & IS_TxComplete) != 0) { 1636 if ((reinit = stge_tx_error(sc)) != 0) 1637 break; 1638 } 1639 } 1640 1641 force_init: 1642 if (reinit != 0) 1643 stge_init_locked(sc); 1644 1645 /* Re-enable interrupts. */ 1646 CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable); 1647 1648 /* Try to get more packets going. */ 1649 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1650 stge_start_locked(ifp); 1651 1652 done_locked: 1653 STGE_UNLOCK(sc); 1654 } 1655 1656 /* 1657 * stge_txeof: 1658 * 1659 * Helper; handle transmit interrupts. 1660 */ 1661 static void 1662 stge_txeof(struct stge_softc *sc) 1663 { 1664 struct ifnet *ifp; 1665 struct stge_txdesc *txd; 1666 uint64_t control; 1667 int cons; 1668 1669 STGE_LOCK_ASSERT(sc); 1670 1671 ifp = sc->sc_ifp; 1672 1673 txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq); 1674 if (txd == NULL) 1675 return; 1676 bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag, 1677 sc->sc_cdata.stge_tx_ring_map, BUS_DMASYNC_POSTREAD); 1678 1679 /* 1680 * Go through our Tx list and free mbufs for those 1681 * frames which have been transmitted. 1682 */ 1683 for (cons = sc->sc_cdata.stge_tx_cons;; 1684 cons = (cons + 1) % STGE_TX_RING_CNT) { 1685 if (sc->sc_cdata.stge_tx_cnt <= 0) 1686 break; 1687 control = le64toh(sc->sc_rdata.stge_tx_ring[cons].tfd_control); 1688 if ((control & TFD_TFDDone) == 0) 1689 break; 1690 sc->sc_cdata.stge_tx_cnt--; 1691 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1692 1693 bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap, 1694 BUS_DMASYNC_POSTWRITE); 1695 bus_dmamap_unload(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap); 1696 1697 /* Output counter is updated with statistics register */ 1698 m_freem(txd->tx_m); 1699 txd->tx_m = NULL; 1700 STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txbusyq, tx_q); 1701 STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q); 1702 txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq); 1703 } 1704 sc->sc_cdata.stge_tx_cons = cons; 1705 if (sc->sc_cdata.stge_tx_cnt == 0) 1706 sc->sc_watchdog_timer = 0; 1707 1708 bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag, 1709 sc->sc_cdata.stge_tx_ring_map, 1710 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1711 } 1712 1713 static __inline void 1714 stge_discard_rxbuf(struct stge_softc *sc, int idx) 1715 { 1716 struct stge_rfd *rfd; 1717 1718 rfd = &sc->sc_rdata.stge_rx_ring[idx]; 1719 rfd->rfd_status = 0; 1720 } 1721 1722 #ifndef __NO_STRICT_ALIGNMENT 1723 /* 1724 * It seems that TC9021's DMA engine has alignment restrictions in 1725 * DMA scatter operations. The first DMA segment has no address 1726 * alignment restrictins but the rest should be aligned on 4(?) bytes 1727 * boundary. Otherwise it would corrupt random memory. Since we don't 1728 * know which one is used for the first segment in advance we simply 1729 * don't align at all. 1730 * To avoid copying over an entire frame to align, we allocate a new 1731 * mbuf and copy ethernet header to the new mbuf. The new mbuf is 1732 * prepended into the existing mbuf chain. 1733 */ 1734 static __inline struct mbuf * 1735 stge_fixup_rx(struct stge_softc *sc, struct mbuf *m) 1736 { 1737 struct mbuf *n; 1738 1739 n = NULL; 1740 if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) { 1741 bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len); 1742 m->m_data += ETHER_HDR_LEN; 1743 n = m; 1744 } else { 1745 MGETHDR(n, M_DONTWAIT, MT_DATA); 1746 if (n != NULL) { 1747 bcopy(m->m_data, n->m_data, ETHER_HDR_LEN); 1748 m->m_data += ETHER_HDR_LEN; 1749 m->m_len -= ETHER_HDR_LEN; 1750 n->m_len = ETHER_HDR_LEN; 1751 M_MOVE_PKTHDR(n, m); 1752 n->m_next = m; 1753 } else 1754 m_freem(m); 1755 } 1756 1757 return (n); 1758 } 1759 #endif 1760 1761 /* 1762 * stge_rxeof: 1763 * 1764 * Helper; handle receive interrupts. 1765 */ 1766 static int 1767 stge_rxeof(struct stge_softc *sc) 1768 { 1769 struct ifnet *ifp; 1770 struct stge_rxdesc *rxd; 1771 struct mbuf *mp, *m; 1772 uint64_t status64; 1773 uint32_t status; 1774 int cons, prog, rx_npkts; 1775 1776 STGE_LOCK_ASSERT(sc); 1777 1778 rx_npkts = 0; 1779 ifp = sc->sc_ifp; 1780 1781 bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag, 1782 sc->sc_cdata.stge_rx_ring_map, BUS_DMASYNC_POSTREAD); 1783 1784 prog = 0; 1785 for (cons = sc->sc_cdata.stge_rx_cons; prog < STGE_RX_RING_CNT; 1786 prog++, cons = (cons + 1) % STGE_RX_RING_CNT) { 1787 status64 = le64toh(sc->sc_rdata.stge_rx_ring[cons].rfd_status); 1788 status = RFD_RxStatus(status64); 1789 if ((status & RFD_RFDDone) == 0) 1790 break; 1791 #ifdef DEVICE_POLLING 1792 if (ifp->if_capenable & IFCAP_POLLING) { 1793 if (sc->sc_cdata.stge_rxcycles <= 0) 1794 break; 1795 sc->sc_cdata.stge_rxcycles--; 1796 } 1797 #endif 1798 prog++; 1799 rxd = &sc->sc_cdata.stge_rxdesc[cons]; 1800 mp = rxd->rx_m; 1801 1802 /* 1803 * If the packet had an error, drop it. Note we count 1804 * the error later in the periodic stats update. 1805 */ 1806 if ((status & RFD_FrameEnd) != 0 && (status & 1807 (RFD_RxFIFOOverrun | RFD_RxRuntFrame | 1808 RFD_RxAlignmentError | RFD_RxFCSError | 1809 RFD_RxLengthError)) != 0) { 1810 stge_discard_rxbuf(sc, cons); 1811 if (sc->sc_cdata.stge_rxhead != NULL) { 1812 m_freem(sc->sc_cdata.stge_rxhead); 1813 STGE_RXCHAIN_RESET(sc); 1814 } 1815 continue; 1816 } 1817 /* 1818 * Add a new receive buffer to the ring. 1819 */ 1820 if (stge_newbuf(sc, cons) != 0) { 1821 ifp->if_iqdrops++; 1822 stge_discard_rxbuf(sc, cons); 1823 if (sc->sc_cdata.stge_rxhead != NULL) { 1824 m_freem(sc->sc_cdata.stge_rxhead); 1825 STGE_RXCHAIN_RESET(sc); 1826 } 1827 continue; 1828 } 1829 1830 if ((status & RFD_FrameEnd) != 0) 1831 mp->m_len = RFD_RxDMAFrameLen(status) - 1832 sc->sc_cdata.stge_rxlen; 1833 sc->sc_cdata.stge_rxlen += mp->m_len; 1834 1835 /* Chain mbufs. */ 1836 if (sc->sc_cdata.stge_rxhead == NULL) { 1837 sc->sc_cdata.stge_rxhead = mp; 1838 sc->sc_cdata.stge_rxtail = mp; 1839 } else { 1840 mp->m_flags &= ~M_PKTHDR; 1841 sc->sc_cdata.stge_rxtail->m_next = mp; 1842 sc->sc_cdata.stge_rxtail = mp; 1843 } 1844 1845 if ((status & RFD_FrameEnd) != 0) { 1846 m = sc->sc_cdata.stge_rxhead; 1847 m->m_pkthdr.rcvif = ifp; 1848 m->m_pkthdr.len = sc->sc_cdata.stge_rxlen; 1849 1850 if (m->m_pkthdr.len > sc->sc_if_framesize) { 1851 m_freem(m); 1852 STGE_RXCHAIN_RESET(sc); 1853 continue; 1854 } 1855 /* 1856 * Set the incoming checksum information for 1857 * the packet. 1858 */ 1859 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) { 1860 if ((status & RFD_IPDetected) != 0) { 1861 m->m_pkthdr.csum_flags |= 1862 CSUM_IP_CHECKED; 1863 if ((status & RFD_IPError) == 0) 1864 m->m_pkthdr.csum_flags |= 1865 CSUM_IP_VALID; 1866 } 1867 if (((status & RFD_TCPDetected) != 0 && 1868 (status & RFD_TCPError) == 0) || 1869 ((status & RFD_UDPDetected) != 0 && 1870 (status & RFD_UDPError) == 0)) { 1871 m->m_pkthdr.csum_flags |= 1872 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 1873 m->m_pkthdr.csum_data = 0xffff; 1874 } 1875 } 1876 1877 #ifndef __NO_STRICT_ALIGNMENT 1878 if (sc->sc_if_framesize > (MCLBYTES - ETHER_ALIGN)) { 1879 if ((m = stge_fixup_rx(sc, m)) == NULL) { 1880 STGE_RXCHAIN_RESET(sc); 1881 continue; 1882 } 1883 } 1884 #endif 1885 /* Check for VLAN tagged packets. */ 1886 if ((status & RFD_VLANDetected) != 0 && 1887 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 1888 m->m_pkthdr.ether_vtag = RFD_TCI(status64); 1889 m->m_flags |= M_VLANTAG; 1890 } 1891 1892 STGE_UNLOCK(sc); 1893 /* Pass it on. */ 1894 (*ifp->if_input)(ifp, m); 1895 STGE_LOCK(sc); 1896 rx_npkts++; 1897 1898 STGE_RXCHAIN_RESET(sc); 1899 } 1900 } 1901 1902 if (prog > 0) { 1903 /* Update the consumer index. */ 1904 sc->sc_cdata.stge_rx_cons = cons; 1905 bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag, 1906 sc->sc_cdata.stge_rx_ring_map, 1907 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1908 } 1909 return (rx_npkts); 1910 } 1911 1912 #ifdef DEVICE_POLLING 1913 static int 1914 stge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 1915 { 1916 struct stge_softc *sc; 1917 uint16_t status; 1918 int rx_npkts; 1919 1920 rx_npkts = 0; 1921 sc = ifp->if_softc; 1922 STGE_LOCK(sc); 1923 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1924 STGE_UNLOCK(sc); 1925 return (rx_npkts); 1926 } 1927 1928 sc->sc_cdata.stge_rxcycles = count; 1929 rx_npkts = stge_rxeof(sc); 1930 stge_txeof(sc); 1931 1932 if (cmd == POLL_AND_CHECK_STATUS) { 1933 status = CSR_READ_2(sc, STGE_IntStatus); 1934 status &= sc->sc_IntEnable; 1935 if (status != 0) { 1936 if ((status & IS_HostError) != 0) { 1937 device_printf(sc->sc_dev, 1938 "Host interface error, resetting...\n"); 1939 stge_init_locked(sc); 1940 } 1941 if ((status & IS_TxComplete) != 0) { 1942 if (stge_tx_error(sc) != 0) 1943 stge_init_locked(sc); 1944 } 1945 } 1946 1947 } 1948 1949 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1950 stge_start_locked(ifp); 1951 1952 STGE_UNLOCK(sc); 1953 return (rx_npkts); 1954 } 1955 #endif /* DEVICE_POLLING */ 1956 1957 /* 1958 * stge_tick: 1959 * 1960 * One second timer, used to tick the MII. 1961 */ 1962 static void 1963 stge_tick(void *arg) 1964 { 1965 struct stge_softc *sc; 1966 struct mii_data *mii; 1967 1968 sc = (struct stge_softc *)arg; 1969 1970 STGE_LOCK_ASSERT(sc); 1971 1972 mii = device_get_softc(sc->sc_miibus); 1973 mii_tick(mii); 1974 1975 /* Update statistics counters. */ 1976 stge_stats_update(sc); 1977 1978 /* 1979 * Relcaim any pending Tx descriptors to release mbufs in a 1980 * timely manner as we don't generate Tx completion interrupts 1981 * for every frame. This limits the delay to a maximum of one 1982 * second. 1983 */ 1984 if (sc->sc_cdata.stge_tx_cnt != 0) 1985 stge_txeof(sc); 1986 1987 stge_watchdog(sc); 1988 1989 callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc); 1990 } 1991 1992 /* 1993 * stge_stats_update: 1994 * 1995 * Read the TC9021 statistics counters. 1996 */ 1997 static void 1998 stge_stats_update(struct stge_softc *sc) 1999 { 2000 struct ifnet *ifp; 2001 2002 STGE_LOCK_ASSERT(sc); 2003 2004 ifp = sc->sc_ifp; 2005 2006 CSR_READ_4(sc,STGE_OctetRcvOk); 2007 2008 ifp->if_ipackets += CSR_READ_4(sc, STGE_FramesRcvdOk); 2009 2010 ifp->if_ierrors += CSR_READ_2(sc, STGE_FramesLostRxErrors); 2011 2012 CSR_READ_4(sc, STGE_OctetXmtdOk); 2013 2014 ifp->if_opackets += CSR_READ_4(sc, STGE_FramesXmtdOk); 2015 2016 ifp->if_collisions += 2017 CSR_READ_4(sc, STGE_LateCollisions) + 2018 CSR_READ_4(sc, STGE_MultiColFrames) + 2019 CSR_READ_4(sc, STGE_SingleColFrames); 2020 2021 ifp->if_oerrors += 2022 CSR_READ_2(sc, STGE_FramesAbortXSColls) + 2023 CSR_READ_2(sc, STGE_FramesWEXDeferal); 2024 } 2025 2026 /* 2027 * stge_reset: 2028 * 2029 * Perform a soft reset on the TC9021. 2030 */ 2031 static void 2032 stge_reset(struct stge_softc *sc, uint32_t how) 2033 { 2034 uint32_t ac; 2035 uint8_t v; 2036 int i, dv; 2037 2038 STGE_LOCK_ASSERT(sc); 2039 2040 dv = 5000; 2041 ac = CSR_READ_4(sc, STGE_AsicCtrl); 2042 switch (how) { 2043 case STGE_RESET_TX: 2044 ac |= AC_TxReset | AC_FIFO; 2045 dv = 100; 2046 break; 2047 case STGE_RESET_RX: 2048 ac |= AC_RxReset | AC_FIFO; 2049 dv = 100; 2050 break; 2051 case STGE_RESET_FULL: 2052 default: 2053 /* 2054 * Only assert RstOut if we're fiber. We need GMII clocks 2055 * to be present in order for the reset to complete on fiber 2056 * cards. 2057 */ 2058 ac |= AC_GlobalReset | AC_RxReset | AC_TxReset | 2059 AC_DMA | AC_FIFO | AC_Network | AC_Host | AC_AutoInit | 2060 (sc->sc_usefiber ? AC_RstOut : 0); 2061 break; 2062 } 2063 2064 CSR_WRITE_4(sc, STGE_AsicCtrl, ac); 2065 2066 /* Account for reset problem at 10Mbps. */ 2067 DELAY(dv); 2068 2069 for (i = 0; i < STGE_TIMEOUT; i++) { 2070 if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0) 2071 break; 2072 DELAY(dv); 2073 } 2074 2075 if (i == STGE_TIMEOUT) 2076 device_printf(sc->sc_dev, "reset failed to complete\n"); 2077 2078 /* Set LED, from Linux IPG driver. */ 2079 ac = CSR_READ_4(sc, STGE_AsicCtrl); 2080 ac &= ~(AC_LEDMode | AC_LEDSpeed | AC_LEDModeBit1); 2081 if ((sc->sc_led & 0x01) != 0) 2082 ac |= AC_LEDMode; 2083 if ((sc->sc_led & 0x03) != 0) 2084 ac |= AC_LEDModeBit1; 2085 if ((sc->sc_led & 0x08) != 0) 2086 ac |= AC_LEDSpeed; 2087 CSR_WRITE_4(sc, STGE_AsicCtrl, ac); 2088 2089 /* Set PHY, from Linux IPG driver */ 2090 v = CSR_READ_1(sc, STGE_PhySet); 2091 v &= ~(PS_MemLenb9b | PS_MemLen | PS_NonCompdet); 2092 v |= ((sc->sc_led & 0x70) >> 4); 2093 CSR_WRITE_1(sc, STGE_PhySet, v); 2094 } 2095 2096 /* 2097 * stge_init: [ ifnet interface function ] 2098 * 2099 * Initialize the interface. 2100 */ 2101 static void 2102 stge_init(void *xsc) 2103 { 2104 struct stge_softc *sc; 2105 2106 sc = (struct stge_softc *)xsc; 2107 STGE_LOCK(sc); 2108 stge_init_locked(sc); 2109 STGE_UNLOCK(sc); 2110 } 2111 2112 static void 2113 stge_init_locked(struct stge_softc *sc) 2114 { 2115 struct ifnet *ifp; 2116 struct mii_data *mii; 2117 uint16_t eaddr[3]; 2118 uint32_t v; 2119 int error; 2120 2121 STGE_LOCK_ASSERT(sc); 2122 2123 ifp = sc->sc_ifp; 2124 mii = device_get_softc(sc->sc_miibus); 2125 2126 /* 2127 * Cancel any pending I/O. 2128 */ 2129 stge_stop(sc); 2130 2131 /* 2132 * Reset the chip to a known state. 2133 */ 2134 stge_reset(sc, STGE_RESET_FULL); 2135 2136 /* Init descriptors. */ 2137 error = stge_init_rx_ring(sc); 2138 if (error != 0) { 2139 device_printf(sc->sc_dev, 2140 "initialization failed: no memory for rx buffers\n"); 2141 stge_stop(sc); 2142 goto out; 2143 } 2144 stge_init_tx_ring(sc); 2145 2146 /* Set the station address. */ 2147 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN); 2148 CSR_WRITE_2(sc, STGE_StationAddress0, htole16(eaddr[0])); 2149 CSR_WRITE_2(sc, STGE_StationAddress1, htole16(eaddr[1])); 2150 CSR_WRITE_2(sc, STGE_StationAddress2, htole16(eaddr[2])); 2151 2152 /* 2153 * Set the statistics masks. Disable all the RMON stats, 2154 * and disable selected stats in the non-RMON stats registers. 2155 */ 2156 CSR_WRITE_4(sc, STGE_RMONStatisticsMask, 0xffffffff); 2157 CSR_WRITE_4(sc, STGE_StatisticsMask, 2158 (1U << 1) | (1U << 2) | (1U << 3) | (1U << 4) | (1U << 5) | 2159 (1U << 6) | (1U << 7) | (1U << 8) | (1U << 9) | (1U << 10) | 2160 (1U << 13) | (1U << 14) | (1U << 15) | (1U << 19) | (1U << 20) | 2161 (1U << 21)); 2162 2163 /* Set up the receive filter. */ 2164 stge_set_filter(sc); 2165 /* Program multicast filter. */ 2166 stge_set_multi(sc); 2167 2168 /* 2169 * Give the transmit and receive ring to the chip. 2170 */ 2171 CSR_WRITE_4(sc, STGE_TFDListPtrHi, 2172 STGE_ADDR_HI(STGE_TX_RING_ADDR(sc, 0))); 2173 CSR_WRITE_4(sc, STGE_TFDListPtrLo, 2174 STGE_ADDR_LO(STGE_TX_RING_ADDR(sc, 0))); 2175 2176 CSR_WRITE_4(sc, STGE_RFDListPtrHi, 2177 STGE_ADDR_HI(STGE_RX_RING_ADDR(sc, 0))); 2178 CSR_WRITE_4(sc, STGE_RFDListPtrLo, 2179 STGE_ADDR_LO(STGE_RX_RING_ADDR(sc, 0))); 2180 2181 /* 2182 * Initialize the Tx auto-poll period. It's OK to make this number 2183 * large (255 is the max, but we use 127) -- we explicitly kick the 2184 * transmit engine when there's actually a packet. 2185 */ 2186 CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127); 2187 2188 /* ..and the Rx auto-poll period. */ 2189 CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1); 2190 2191 /* Initialize the Tx start threshold. */ 2192 CSR_WRITE_2(sc, STGE_TxStartThresh, sc->sc_txthresh); 2193 2194 /* Rx DMA thresholds, from Linux */ 2195 CSR_WRITE_1(sc, STGE_RxDMABurstThresh, 0x30); 2196 CSR_WRITE_1(sc, STGE_RxDMAUrgentThresh, 0x30); 2197 2198 /* Rx early threhold, from Linux */ 2199 CSR_WRITE_2(sc, STGE_RxEarlyThresh, 0x7ff); 2200 2201 /* Tx DMA thresholds, from Linux */ 2202 CSR_WRITE_1(sc, STGE_TxDMABurstThresh, 0x30); 2203 CSR_WRITE_1(sc, STGE_TxDMAUrgentThresh, 0x04); 2204 2205 /* 2206 * Initialize the Rx DMA interrupt control register. We 2207 * request an interrupt after every incoming packet, but 2208 * defer it for sc_rxint_dmawait us. When the number of 2209 * interrupts pending reaches STGE_RXINT_NFRAME, we stop 2210 * deferring the interrupt, and signal it immediately. 2211 */ 2212 CSR_WRITE_4(sc, STGE_RxDMAIntCtrl, 2213 RDIC_RxFrameCount(sc->sc_rxint_nframe) | 2214 RDIC_RxDMAWaitTime(STGE_RXINT_USECS2TICK(sc->sc_rxint_dmawait))); 2215 2216 /* 2217 * Initialize the interrupt mask. 2218 */ 2219 sc->sc_IntEnable = IS_HostError | IS_TxComplete | 2220 IS_TxDMAComplete | IS_RxDMAComplete | IS_RFDListEnd; 2221 #ifdef DEVICE_POLLING 2222 /* Disable interrupts if we are polling. */ 2223 if ((ifp->if_capenable & IFCAP_POLLING) != 0) 2224 CSR_WRITE_2(sc, STGE_IntEnable, 0); 2225 else 2226 #endif 2227 CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable); 2228 2229 /* 2230 * Configure the DMA engine. 2231 * XXX Should auto-tune TxBurstLimit. 2232 */ 2233 CSR_WRITE_4(sc, STGE_DMACtrl, sc->sc_DMACtrl | DMAC_TxBurstLimit(3)); 2234 2235 /* 2236 * Send a PAUSE frame when we reach 29,696 bytes in the Rx 2237 * FIFO, and send an un-PAUSE frame when we reach 3056 bytes 2238 * in the Rx FIFO. 2239 */ 2240 CSR_WRITE_2(sc, STGE_FlowOnTresh, 29696 / 16); 2241 CSR_WRITE_2(sc, STGE_FlowOffThresh, 3056 / 16); 2242 2243 /* 2244 * Set the maximum frame size. 2245 */ 2246 sc->sc_if_framesize = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 2247 CSR_WRITE_2(sc, STGE_MaxFrameSize, sc->sc_if_framesize); 2248 2249 /* 2250 * Initialize MacCtrl -- do it before setting the media, 2251 * as setting the media will actually program the register. 2252 * 2253 * Note: We have to poke the IFS value before poking 2254 * anything else. 2255 */ 2256 /* Tx/Rx MAC should be disabled before programming IFS.*/ 2257 CSR_WRITE_4(sc, STGE_MACCtrl, MC_IFSSelect(MC_IFS96bit)); 2258 2259 stge_vlan_setup(sc); 2260 2261 if (sc->sc_rev >= 6) { /* >= B.2 */ 2262 /* Multi-frag frame bug work-around. */ 2263 CSR_WRITE_2(sc, STGE_DebugCtrl, 2264 CSR_READ_2(sc, STGE_DebugCtrl) | 0x0200); 2265 2266 /* Tx Poll Now bug work-around. */ 2267 CSR_WRITE_2(sc, STGE_DebugCtrl, 2268 CSR_READ_2(sc, STGE_DebugCtrl) | 0x0010); 2269 /* Tx Poll Now bug work-around. */ 2270 CSR_WRITE_2(sc, STGE_DebugCtrl, 2271 CSR_READ_2(sc, STGE_DebugCtrl) | 0x0020); 2272 } 2273 2274 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK; 2275 v |= MC_StatisticsEnable | MC_TxEnable | MC_RxEnable; 2276 CSR_WRITE_4(sc, STGE_MACCtrl, v); 2277 /* 2278 * It seems that transmitting frames without checking the state of 2279 * Rx/Tx MAC wedge the hardware. 2280 */ 2281 stge_start_tx(sc); 2282 stge_start_rx(sc); 2283 2284 sc->sc_link = 0; 2285 /* 2286 * Set the current media. 2287 */ 2288 mii_mediachg(mii); 2289 2290 /* 2291 * Start the one second MII clock. 2292 */ 2293 callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc); 2294 2295 /* 2296 * ...all done! 2297 */ 2298 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2299 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2300 2301 out: 2302 if (error != 0) 2303 device_printf(sc->sc_dev, "interface not running\n"); 2304 } 2305 2306 static void 2307 stge_vlan_setup(struct stge_softc *sc) 2308 { 2309 struct ifnet *ifp; 2310 uint32_t v; 2311 2312 ifp = sc->sc_ifp; 2313 /* 2314 * The NIC always copy a VLAN tag regardless of STGE_MACCtrl 2315 * MC_AutoVLANuntagging bit. 2316 * MC_AutoVLANtagging bit selects which VLAN source to use 2317 * between STGE_VLANTag and TFC. However TFC TFD_VLANTagInsert 2318 * bit has priority over MC_AutoVLANtagging bit. So we always 2319 * use TFC instead of STGE_VLANTag register. 2320 */ 2321 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK; 2322 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 2323 v |= MC_AutoVLANuntagging; 2324 else 2325 v &= ~MC_AutoVLANuntagging; 2326 CSR_WRITE_4(sc, STGE_MACCtrl, v); 2327 } 2328 2329 /* 2330 * Stop transmission on the interface. 2331 */ 2332 static void 2333 stge_stop(struct stge_softc *sc) 2334 { 2335 struct ifnet *ifp; 2336 struct stge_txdesc *txd; 2337 struct stge_rxdesc *rxd; 2338 uint32_t v; 2339 int i; 2340 2341 STGE_LOCK_ASSERT(sc); 2342 /* 2343 * Stop the one second clock. 2344 */ 2345 callout_stop(&sc->sc_tick_ch); 2346 sc->sc_watchdog_timer = 0; 2347 2348 /* 2349 * Disable interrupts. 2350 */ 2351 CSR_WRITE_2(sc, STGE_IntEnable, 0); 2352 2353 /* 2354 * Stop receiver, transmitter, and stats update. 2355 */ 2356 stge_stop_rx(sc); 2357 stge_stop_tx(sc); 2358 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK; 2359 v |= MC_StatisticsDisable; 2360 CSR_WRITE_4(sc, STGE_MACCtrl, v); 2361 2362 /* 2363 * Stop the transmit and receive DMA. 2364 */ 2365 stge_dma_wait(sc); 2366 CSR_WRITE_4(sc, STGE_TFDListPtrHi, 0); 2367 CSR_WRITE_4(sc, STGE_TFDListPtrLo, 0); 2368 CSR_WRITE_4(sc, STGE_RFDListPtrHi, 0); 2369 CSR_WRITE_4(sc, STGE_RFDListPtrLo, 0); 2370 2371 /* 2372 * Free RX and TX mbufs still in the queues. 2373 */ 2374 for (i = 0; i < STGE_RX_RING_CNT; i++) { 2375 rxd = &sc->sc_cdata.stge_rxdesc[i]; 2376 if (rxd->rx_m != NULL) { 2377 bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, 2378 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 2379 bus_dmamap_unload(sc->sc_cdata.stge_rx_tag, 2380 rxd->rx_dmamap); 2381 m_freem(rxd->rx_m); 2382 rxd->rx_m = NULL; 2383 } 2384 } 2385 for (i = 0; i < STGE_TX_RING_CNT; i++) { 2386 txd = &sc->sc_cdata.stge_txdesc[i]; 2387 if (txd->tx_m != NULL) { 2388 bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, 2389 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 2390 bus_dmamap_unload(sc->sc_cdata.stge_tx_tag, 2391 txd->tx_dmamap); 2392 m_freem(txd->tx_m); 2393 txd->tx_m = NULL; 2394 } 2395 } 2396 2397 /* 2398 * Mark the interface down and cancel the watchdog timer. 2399 */ 2400 ifp = sc->sc_ifp; 2401 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 2402 sc->sc_link = 0; 2403 } 2404 2405 static void 2406 stge_start_tx(struct stge_softc *sc) 2407 { 2408 uint32_t v; 2409 int i; 2410 2411 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK; 2412 if ((v & MC_TxEnabled) != 0) 2413 return; 2414 v |= MC_TxEnable; 2415 CSR_WRITE_4(sc, STGE_MACCtrl, v); 2416 CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127); 2417 for (i = STGE_TIMEOUT; i > 0; i--) { 2418 DELAY(10); 2419 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK; 2420 if ((v & MC_TxEnabled) != 0) 2421 break; 2422 } 2423 if (i == 0) 2424 device_printf(sc->sc_dev, "Starting Tx MAC timed out\n"); 2425 } 2426 2427 static void 2428 stge_start_rx(struct stge_softc *sc) 2429 { 2430 uint32_t v; 2431 int i; 2432 2433 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK; 2434 if ((v & MC_RxEnabled) != 0) 2435 return; 2436 v |= MC_RxEnable; 2437 CSR_WRITE_4(sc, STGE_MACCtrl, v); 2438 CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1); 2439 for (i = STGE_TIMEOUT; i > 0; i--) { 2440 DELAY(10); 2441 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK; 2442 if ((v & MC_RxEnabled) != 0) 2443 break; 2444 } 2445 if (i == 0) 2446 device_printf(sc->sc_dev, "Starting Rx MAC timed out\n"); 2447 } 2448 2449 static void 2450 stge_stop_tx(struct stge_softc *sc) 2451 { 2452 uint32_t v; 2453 int i; 2454 2455 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK; 2456 if ((v & MC_TxEnabled) == 0) 2457 return; 2458 v |= MC_TxDisable; 2459 CSR_WRITE_4(sc, STGE_MACCtrl, v); 2460 for (i = STGE_TIMEOUT; i > 0; i--) { 2461 DELAY(10); 2462 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK; 2463 if ((v & MC_TxEnabled) == 0) 2464 break; 2465 } 2466 if (i == 0) 2467 device_printf(sc->sc_dev, "Stopping Tx MAC timed out\n"); 2468 } 2469 2470 static void 2471 stge_stop_rx(struct stge_softc *sc) 2472 { 2473 uint32_t v; 2474 int i; 2475 2476 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK; 2477 if ((v & MC_RxEnabled) == 0) 2478 return; 2479 v |= MC_RxDisable; 2480 CSR_WRITE_4(sc, STGE_MACCtrl, v); 2481 for (i = STGE_TIMEOUT; i > 0; i--) { 2482 DELAY(10); 2483 v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK; 2484 if ((v & MC_RxEnabled) == 0) 2485 break; 2486 } 2487 if (i == 0) 2488 device_printf(sc->sc_dev, "Stopping Rx MAC timed out\n"); 2489 } 2490 2491 static void 2492 stge_init_tx_ring(struct stge_softc *sc) 2493 { 2494 struct stge_ring_data *rd; 2495 struct stge_txdesc *txd; 2496 bus_addr_t addr; 2497 int i; 2498 2499 STAILQ_INIT(&sc->sc_cdata.stge_txfreeq); 2500 STAILQ_INIT(&sc->sc_cdata.stge_txbusyq); 2501 2502 sc->sc_cdata.stge_tx_prod = 0; 2503 sc->sc_cdata.stge_tx_cons = 0; 2504 sc->sc_cdata.stge_tx_cnt = 0; 2505 2506 rd = &sc->sc_rdata; 2507 bzero(rd->stge_tx_ring, STGE_TX_RING_SZ); 2508 for (i = 0; i < STGE_TX_RING_CNT; i++) { 2509 if (i == (STGE_TX_RING_CNT - 1)) 2510 addr = STGE_TX_RING_ADDR(sc, 0); 2511 else 2512 addr = STGE_TX_RING_ADDR(sc, i + 1); 2513 rd->stge_tx_ring[i].tfd_next = htole64(addr); 2514 rd->stge_tx_ring[i].tfd_control = htole64(TFD_TFDDone); 2515 txd = &sc->sc_cdata.stge_txdesc[i]; 2516 STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q); 2517 } 2518 2519 bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag, 2520 sc->sc_cdata.stge_tx_ring_map, 2521 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2522 2523 } 2524 2525 static int 2526 stge_init_rx_ring(struct stge_softc *sc) 2527 { 2528 struct stge_ring_data *rd; 2529 bus_addr_t addr; 2530 int i; 2531 2532 sc->sc_cdata.stge_rx_cons = 0; 2533 STGE_RXCHAIN_RESET(sc); 2534 2535 rd = &sc->sc_rdata; 2536 bzero(rd->stge_rx_ring, STGE_RX_RING_SZ); 2537 for (i = 0; i < STGE_RX_RING_CNT; i++) { 2538 if (stge_newbuf(sc, i) != 0) 2539 return (ENOBUFS); 2540 if (i == (STGE_RX_RING_CNT - 1)) 2541 addr = STGE_RX_RING_ADDR(sc, 0); 2542 else 2543 addr = STGE_RX_RING_ADDR(sc, i + 1); 2544 rd->stge_rx_ring[i].rfd_next = htole64(addr); 2545 rd->stge_rx_ring[i].rfd_status = 0; 2546 } 2547 2548 bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag, 2549 sc->sc_cdata.stge_rx_ring_map, 2550 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2551 2552 return (0); 2553 } 2554 2555 /* 2556 * stge_newbuf: 2557 * 2558 * Add a receive buffer to the indicated descriptor. 2559 */ 2560 static int 2561 stge_newbuf(struct stge_softc *sc, int idx) 2562 { 2563 struct stge_rxdesc *rxd; 2564 struct stge_rfd *rfd; 2565 struct mbuf *m; 2566 bus_dma_segment_t segs[1]; 2567 bus_dmamap_t map; 2568 int nsegs; 2569 2570 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 2571 if (m == NULL) 2572 return (ENOBUFS); 2573 m->m_len = m->m_pkthdr.len = MCLBYTES; 2574 /* 2575 * The hardware requires 4bytes aligned DMA address when JUMBO 2576 * frame is used. 2577 */ 2578 if (sc->sc_if_framesize <= (MCLBYTES - ETHER_ALIGN)) 2579 m_adj(m, ETHER_ALIGN); 2580 2581 if (bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_rx_tag, 2582 sc->sc_cdata.stge_rx_sparemap, m, segs, &nsegs, 0) != 0) { 2583 m_freem(m); 2584 return (ENOBUFS); 2585 } 2586 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 2587 2588 rxd = &sc->sc_cdata.stge_rxdesc[idx]; 2589 if (rxd->rx_m != NULL) { 2590 bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap, 2591 BUS_DMASYNC_POSTREAD); 2592 bus_dmamap_unload(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap); 2593 } 2594 map = rxd->rx_dmamap; 2595 rxd->rx_dmamap = sc->sc_cdata.stge_rx_sparemap; 2596 sc->sc_cdata.stge_rx_sparemap = map; 2597 bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap, 2598 BUS_DMASYNC_PREREAD); 2599 rxd->rx_m = m; 2600 2601 rfd = &sc->sc_rdata.stge_rx_ring[idx]; 2602 rfd->rfd_frag.frag_word0 = 2603 htole64(FRAG_ADDR(segs[0].ds_addr) | FRAG_LEN(segs[0].ds_len)); 2604 rfd->rfd_status = 0; 2605 2606 return (0); 2607 } 2608 2609 /* 2610 * stge_set_filter: 2611 * 2612 * Set up the receive filter. 2613 */ 2614 static void 2615 stge_set_filter(struct stge_softc *sc) 2616 { 2617 struct ifnet *ifp; 2618 uint16_t mode; 2619 2620 STGE_LOCK_ASSERT(sc); 2621 2622 ifp = sc->sc_ifp; 2623 2624 mode = CSR_READ_2(sc, STGE_ReceiveMode); 2625 mode |= RM_ReceiveUnicast; 2626 if ((ifp->if_flags & IFF_BROADCAST) != 0) 2627 mode |= RM_ReceiveBroadcast; 2628 else 2629 mode &= ~RM_ReceiveBroadcast; 2630 if ((ifp->if_flags & IFF_PROMISC) != 0) 2631 mode |= RM_ReceiveAllFrames; 2632 else 2633 mode &= ~RM_ReceiveAllFrames; 2634 2635 CSR_WRITE_2(sc, STGE_ReceiveMode, mode); 2636 } 2637 2638 static void 2639 stge_set_multi(struct stge_softc *sc) 2640 { 2641 struct ifnet *ifp; 2642 struct ifmultiaddr *ifma; 2643 uint32_t crc; 2644 uint32_t mchash[2]; 2645 uint16_t mode; 2646 int count; 2647 2648 STGE_LOCK_ASSERT(sc); 2649 2650 ifp = sc->sc_ifp; 2651 2652 mode = CSR_READ_2(sc, STGE_ReceiveMode); 2653 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { 2654 if ((ifp->if_flags & IFF_PROMISC) != 0) 2655 mode |= RM_ReceiveAllFrames; 2656 else if ((ifp->if_flags & IFF_ALLMULTI) != 0) 2657 mode |= RM_ReceiveMulticast; 2658 CSR_WRITE_2(sc, STGE_ReceiveMode, mode); 2659 return; 2660 } 2661 2662 /* clear existing filters. */ 2663 CSR_WRITE_4(sc, STGE_HashTable0, 0); 2664 CSR_WRITE_4(sc, STGE_HashTable1, 0); 2665 2666 /* 2667 * Set up the multicast address filter by passing all multicast 2668 * addresses through a CRC generator, and then using the low-order 2669 * 6 bits as an index into the 64 bit multicast hash table. The 2670 * high order bits select the register, while the rest of the bits 2671 * select the bit within the register. 2672 */ 2673 2674 bzero(mchash, sizeof(mchash)); 2675 2676 count = 0; 2677 if_maddr_rlock(sc->sc_ifp); 2678 TAILQ_FOREACH(ifma, &sc->sc_ifp->if_multiaddrs, ifma_link) { 2679 if (ifma->ifma_addr->sa_family != AF_LINK) 2680 continue; 2681 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *) 2682 ifma->ifma_addr), ETHER_ADDR_LEN); 2683 2684 /* Just want the 6 least significant bits. */ 2685 crc &= 0x3f; 2686 2687 /* Set the corresponding bit in the hash table. */ 2688 mchash[crc >> 5] |= 1 << (crc & 0x1f); 2689 count++; 2690 } 2691 if_maddr_runlock(ifp); 2692 2693 mode &= ~(RM_ReceiveMulticast | RM_ReceiveAllFrames); 2694 if (count > 0) 2695 mode |= RM_ReceiveMulticastHash; 2696 else 2697 mode &= ~RM_ReceiveMulticastHash; 2698 2699 CSR_WRITE_4(sc, STGE_HashTable0, mchash[0]); 2700 CSR_WRITE_4(sc, STGE_HashTable1, mchash[1]); 2701 CSR_WRITE_2(sc, STGE_ReceiveMode, mode); 2702 } 2703 2704 static int 2705 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) 2706 { 2707 int error, value; 2708 2709 if (!arg1) 2710 return (EINVAL); 2711 value = *(int *)arg1; 2712 error = sysctl_handle_int(oidp, &value, 0, req); 2713 if (error || !req->newptr) 2714 return (error); 2715 if (value < low || value > high) 2716 return (EINVAL); 2717 *(int *)arg1 = value; 2718 2719 return (0); 2720 } 2721 2722 static int 2723 sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS) 2724 { 2725 return (sysctl_int_range(oidp, arg1, arg2, req, 2726 STGE_RXINT_NFRAME_MIN, STGE_RXINT_NFRAME_MAX)); 2727 } 2728 2729 static int 2730 sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS) 2731 { 2732 return (sysctl_int_range(oidp, arg1, arg2, req, 2733 STGE_RXINT_DMAWAIT_MIN, STGE_RXINT_DMAWAIT_MAX)); 2734 } 2735