1 /*- 2 * Copyright (c) 1997, 1998, 1999 3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Bill Paul. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 #ifdef HAVE_KERNEL_OPTION_HEADERS 37 #include "opt_device_polling.h" 38 #endif 39 40 #include <sys/param.h> 41 #include <sys/systm.h> 42 #include <sys/bus.h> 43 #include <sys/endian.h> 44 #include <sys/kernel.h> 45 #include <sys/lock.h> 46 #include <sys/malloc.h> 47 #include <sys/mbuf.h> 48 #include <sys/module.h> 49 #include <sys/rman.h> 50 #include <sys/socket.h> 51 #include <sys/sockio.h> 52 #include <sys/sysctl.h> 53 54 #include <net/bpf.h> 55 #include <net/if.h> 56 #include <net/if_arp.h> 57 #include <net/ethernet.h> 58 #include <net/if_dl.h> 59 #include <net/if_media.h> 60 #include <net/if_types.h> 61 #include <net/if_vlan_var.h> 62 63 #include <machine/bus.h> 64 #include <machine/resource.h> 65 66 #include <dev/mii/mii.h> 67 #include <dev/mii/mii_bitbang.h> 68 #include <dev/mii/miivar.h> 69 70 #include <dev/pci/pcireg.h> 71 #include <dev/pci/pcivar.h> 72 73 #include <dev/ste/if_stereg.h> 74 75 /* "device miibus" required. See GENERIC if you get errors here. */ 76 #include "miibus_if.h" 77 78 MODULE_DEPEND(ste, pci, 1, 1, 1); 79 MODULE_DEPEND(ste, ether, 1, 1, 1); 80 MODULE_DEPEND(ste, miibus, 1, 1, 1); 81 82 /* Define to show Tx error status. */ 83 #define STE_SHOW_TXERRORS 84 85 /* 86 * Various supported device vendors/types and their names. 87 */ 88 static const struct ste_type const ste_devs[] = { 89 { ST_VENDORID, ST_DEVICEID_ST201_1, "Sundance ST201 10/100BaseTX" }, 90 { ST_VENDORID, ST_DEVICEID_ST201_2, "Sundance ST201 10/100BaseTX" }, 91 { DL_VENDORID, DL_DEVICEID_DL10050, "D-Link DL10050 10/100BaseTX" }, 92 { 0, 0, NULL } 93 }; 94 95 static int ste_attach(device_t); 96 static int ste_detach(device_t); 97 static int ste_probe(device_t); 98 static int ste_resume(device_t); 99 static int ste_shutdown(device_t); 100 static int ste_suspend(device_t); 101 102 static int ste_dma_alloc(struct ste_softc *); 103 static void ste_dma_free(struct ste_softc *); 104 static void ste_dmamap_cb(void *, bus_dma_segment_t *, int, int); 105 static int ste_eeprom_wait(struct ste_softc *); 106 static int ste_encap(struct ste_softc *, struct mbuf **, 107 struct ste_chain *); 108 static int ste_ifmedia_upd(struct ifnet *); 109 static void ste_ifmedia_sts(struct ifnet *, struct ifmediareq *); 110 static void ste_init(void *); 111 static void ste_init_locked(struct ste_softc *); 112 static int ste_init_rx_list(struct ste_softc *); 113 static void ste_init_tx_list(struct ste_softc *); 114 static void ste_intr(void *); 115 static int ste_ioctl(struct ifnet *, u_long, caddr_t); 116 static uint32_t ste_mii_bitbang_read(device_t); 117 static void ste_mii_bitbang_write(device_t, uint32_t); 118 static int ste_miibus_readreg(device_t, int, int); 119 static void ste_miibus_statchg(device_t); 120 static int ste_miibus_writereg(device_t, int, int, int); 121 static int ste_newbuf(struct ste_softc *, struct ste_chain_onefrag *); 122 static int ste_read_eeprom(struct ste_softc *, uint16_t *, int, int); 123 static void ste_reset(struct ste_softc *); 124 static void ste_restart_tx(struct ste_softc *); 125 static int ste_rxeof(struct ste_softc *, int); 126 static void ste_rxfilter(struct ste_softc *); 127 static void ste_setwol(struct ste_softc *); 128 static void ste_start(struct ifnet *); 129 static void ste_start_locked(struct ifnet *); 130 static void ste_stats_clear(struct ste_softc *); 131 static void ste_stats_update(struct ste_softc *); 132 static void ste_stop(struct ste_softc *); 133 static void ste_sysctl_node(struct ste_softc *); 134 static void ste_tick(void *); 135 static void ste_txeoc(struct ste_softc *); 136 static void ste_txeof(struct ste_softc *); 137 static void ste_wait(struct ste_softc *); 138 static void ste_watchdog(struct ste_softc *); 139 140 /* 141 * MII bit-bang glue 142 */ 143 static const struct mii_bitbang_ops ste_mii_bitbang_ops = { 144 ste_mii_bitbang_read, 145 ste_mii_bitbang_write, 146 { 147 STE_PHYCTL_MDATA, /* MII_BIT_MDO */ 148 STE_PHYCTL_MDATA, /* MII_BIT_MDI */ 149 STE_PHYCTL_MCLK, /* MII_BIT_MDC */ 150 STE_PHYCTL_MDIR, /* MII_BIT_DIR_HOST_PHY */ 151 0, /* MII_BIT_DIR_PHY_HOST */ 152 } 153 }; 154 155 static device_method_t ste_methods[] = { 156 /* Device interface */ 157 DEVMETHOD(device_probe, ste_probe), 158 DEVMETHOD(device_attach, ste_attach), 159 DEVMETHOD(device_detach, ste_detach), 160 DEVMETHOD(device_shutdown, ste_shutdown), 161 DEVMETHOD(device_suspend, ste_suspend), 162 DEVMETHOD(device_resume, ste_resume), 163 164 /* bus interface */ 165 DEVMETHOD(bus_print_child, bus_generic_print_child), 166 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 167 168 /* MII interface */ 169 DEVMETHOD(miibus_readreg, ste_miibus_readreg), 170 DEVMETHOD(miibus_writereg, ste_miibus_writereg), 171 DEVMETHOD(miibus_statchg, ste_miibus_statchg), 172 173 { 0, 0 } 174 }; 175 176 static driver_t ste_driver = { 177 "ste", 178 ste_methods, 179 sizeof(struct ste_softc) 180 }; 181 182 static devclass_t ste_devclass; 183 184 DRIVER_MODULE(ste, pci, ste_driver, ste_devclass, 0, 0); 185 DRIVER_MODULE(miibus, ste, miibus_driver, miibus_devclass, 0, 0); 186 187 #define STE_SETBIT4(sc, reg, x) \ 188 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x)) 189 190 #define STE_CLRBIT4(sc, reg, x) \ 191 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x)) 192 193 #define STE_SETBIT2(sc, reg, x) \ 194 CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) | (x)) 195 196 #define STE_CLRBIT2(sc, reg, x) \ 197 CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) & ~(x)) 198 199 #define STE_SETBIT1(sc, reg, x) \ 200 CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) | (x)) 201 202 #define STE_CLRBIT1(sc, reg, x) \ 203 CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) & ~(x)) 204 205 /* 206 * Read the MII serial port for the MII bit-bang module. 207 */ 208 static uint32_t 209 ste_mii_bitbang_read(device_t dev) 210 { 211 struct ste_softc *sc; 212 uint32_t val; 213 214 sc = device_get_softc(dev); 215 216 val = CSR_READ_1(sc, STE_PHYCTL); 217 CSR_BARRIER(sc, STE_PHYCTL, 1, 218 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 219 220 return (val); 221 } 222 223 /* 224 * Write the MII serial port for the MII bit-bang module. 225 */ 226 static void 227 ste_mii_bitbang_write(device_t dev, uint32_t val) 228 { 229 struct ste_softc *sc; 230 231 sc = device_get_softc(dev); 232 233 CSR_WRITE_1(sc, STE_PHYCTL, val); 234 CSR_BARRIER(sc, STE_PHYCTL, 1, 235 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 236 } 237 238 static int 239 ste_miibus_readreg(device_t dev, int phy, int reg) 240 { 241 242 return (mii_bitbang_readreg(dev, &ste_mii_bitbang_ops, phy, reg)); 243 } 244 245 static int 246 ste_miibus_writereg(device_t dev, int phy, int reg, int data) 247 { 248 249 mii_bitbang_writereg(dev, &ste_mii_bitbang_ops, phy, reg, data); 250 251 return (0); 252 } 253 254 static void 255 ste_miibus_statchg(device_t dev) 256 { 257 struct ste_softc *sc; 258 struct mii_data *mii; 259 struct ifnet *ifp; 260 uint16_t cfg; 261 262 sc = device_get_softc(dev); 263 264 mii = device_get_softc(sc->ste_miibus); 265 ifp = sc->ste_ifp; 266 if (mii == NULL || ifp == NULL || 267 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 268 return; 269 270 sc->ste_flags &= ~STE_FLAG_LINK; 271 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 272 (IFM_ACTIVE | IFM_AVALID)) { 273 switch (IFM_SUBTYPE(mii->mii_media_active)) { 274 case IFM_10_T: 275 case IFM_100_TX: 276 case IFM_100_FX: 277 case IFM_100_T4: 278 sc->ste_flags |= STE_FLAG_LINK; 279 default: 280 break; 281 } 282 } 283 284 /* Program MACs with resolved speed/duplex/flow-control. */ 285 if ((sc->ste_flags & STE_FLAG_LINK) != 0) { 286 cfg = CSR_READ_2(sc, STE_MACCTL0); 287 cfg &= ~(STE_MACCTL0_FLOWCTL_ENABLE | STE_MACCTL0_FULLDUPLEX); 288 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 289 /* 290 * ST201 data sheet says driver should enable receiving 291 * MAC control frames bit of receive mode register to 292 * receive flow-control frames but the register has no 293 * such bits. In addition the controller has no ability 294 * to send pause frames so it should be handled in 295 * driver. Implementing pause timer handling in driver 296 * layer is not trivial, so don't enable flow-control 297 * here. 298 */ 299 cfg |= STE_MACCTL0_FULLDUPLEX; 300 } 301 CSR_WRITE_2(sc, STE_MACCTL0, cfg); 302 } 303 } 304 305 static int 306 ste_ifmedia_upd(struct ifnet *ifp) 307 { 308 struct ste_softc *sc; 309 struct mii_data *mii; 310 struct mii_softc *miisc; 311 int error; 312 313 sc = ifp->if_softc; 314 STE_LOCK(sc); 315 mii = device_get_softc(sc->ste_miibus); 316 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 317 PHY_RESET(miisc); 318 error = mii_mediachg(mii); 319 STE_UNLOCK(sc); 320 321 return (error); 322 } 323 324 static void 325 ste_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 326 { 327 struct ste_softc *sc; 328 struct mii_data *mii; 329 330 sc = ifp->if_softc; 331 mii = device_get_softc(sc->ste_miibus); 332 333 STE_LOCK(sc); 334 if ((ifp->if_flags & IFF_UP) == 0) { 335 STE_UNLOCK(sc); 336 return; 337 } 338 mii_pollstat(mii); 339 ifmr->ifm_active = mii->mii_media_active; 340 ifmr->ifm_status = mii->mii_media_status; 341 STE_UNLOCK(sc); 342 } 343 344 static void 345 ste_wait(struct ste_softc *sc) 346 { 347 int i; 348 349 for (i = 0; i < STE_TIMEOUT; i++) { 350 if (!(CSR_READ_4(sc, STE_DMACTL) & STE_DMACTL_DMA_HALTINPROG)) 351 break; 352 DELAY(1); 353 } 354 355 if (i == STE_TIMEOUT) 356 device_printf(sc->ste_dev, "command never completed!\n"); 357 } 358 359 /* 360 * The EEPROM is slow: give it time to come ready after issuing 361 * it a command. 362 */ 363 static int 364 ste_eeprom_wait(struct ste_softc *sc) 365 { 366 int i; 367 368 DELAY(1000); 369 370 for (i = 0; i < 100; i++) { 371 if (CSR_READ_2(sc, STE_EEPROM_CTL) & STE_EECTL_BUSY) 372 DELAY(1000); 373 else 374 break; 375 } 376 377 if (i == 100) { 378 device_printf(sc->ste_dev, "eeprom failed to come ready\n"); 379 return (1); 380 } 381 382 return (0); 383 } 384 385 /* 386 * Read a sequence of words from the EEPROM. Note that ethernet address 387 * data is stored in the EEPROM in network byte order. 388 */ 389 static int 390 ste_read_eeprom(struct ste_softc *sc, uint16_t *dest, int off, int cnt) 391 { 392 int err = 0, i; 393 394 if (ste_eeprom_wait(sc)) 395 return (1); 396 397 for (i = 0; i < cnt; i++) { 398 CSR_WRITE_2(sc, STE_EEPROM_CTL, STE_EEOPCODE_READ | (off + i)); 399 err = ste_eeprom_wait(sc); 400 if (err) 401 break; 402 *dest = le16toh(CSR_READ_2(sc, STE_EEPROM_DATA)); 403 dest++; 404 } 405 406 return (err ? 1 : 0); 407 } 408 409 static void 410 ste_rxfilter(struct ste_softc *sc) 411 { 412 struct ifnet *ifp; 413 struct ifmultiaddr *ifma; 414 uint32_t hashes[2] = { 0, 0 }; 415 uint8_t rxcfg; 416 int h; 417 418 STE_LOCK_ASSERT(sc); 419 420 ifp = sc->ste_ifp; 421 rxcfg = CSR_READ_1(sc, STE_RX_MODE); 422 rxcfg |= STE_RXMODE_UNICAST; 423 rxcfg &= ~(STE_RXMODE_ALLMULTI | STE_RXMODE_MULTIHASH | 424 STE_RXMODE_BROADCAST | STE_RXMODE_PROMISC); 425 if (ifp->if_flags & IFF_BROADCAST) 426 rxcfg |= STE_RXMODE_BROADCAST; 427 if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) { 428 if ((ifp->if_flags & IFF_ALLMULTI) != 0) 429 rxcfg |= STE_RXMODE_ALLMULTI; 430 if ((ifp->if_flags & IFF_PROMISC) != 0) 431 rxcfg |= STE_RXMODE_PROMISC; 432 goto chipit; 433 } 434 435 rxcfg |= STE_RXMODE_MULTIHASH; 436 /* Now program new ones. */ 437 if_maddr_rlock(ifp); 438 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 439 if (ifma->ifma_addr->sa_family != AF_LINK) 440 continue; 441 h = ether_crc32_be(LLADDR((struct sockaddr_dl *) 442 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x3F; 443 if (h < 32) 444 hashes[0] |= (1 << h); 445 else 446 hashes[1] |= (1 << (h - 32)); 447 } 448 if_maddr_runlock(ifp); 449 450 chipit: 451 CSR_WRITE_2(sc, STE_MAR0, hashes[0] & 0xFFFF); 452 CSR_WRITE_2(sc, STE_MAR1, (hashes[0] >> 16) & 0xFFFF); 453 CSR_WRITE_2(sc, STE_MAR2, hashes[1] & 0xFFFF); 454 CSR_WRITE_2(sc, STE_MAR3, (hashes[1] >> 16) & 0xFFFF); 455 CSR_WRITE_1(sc, STE_RX_MODE, rxcfg); 456 CSR_READ_1(sc, STE_RX_MODE); 457 } 458 459 #ifdef DEVICE_POLLING 460 static poll_handler_t ste_poll, ste_poll_locked; 461 462 static int 463 ste_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 464 { 465 struct ste_softc *sc = ifp->if_softc; 466 int rx_npkts = 0; 467 468 STE_LOCK(sc); 469 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 470 rx_npkts = ste_poll_locked(ifp, cmd, count); 471 STE_UNLOCK(sc); 472 return (rx_npkts); 473 } 474 475 static int 476 ste_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count) 477 { 478 struct ste_softc *sc = ifp->if_softc; 479 int rx_npkts; 480 481 STE_LOCK_ASSERT(sc); 482 483 rx_npkts = ste_rxeof(sc, count); 484 ste_txeof(sc); 485 ste_txeoc(sc); 486 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 487 ste_start_locked(ifp); 488 489 if (cmd == POLL_AND_CHECK_STATUS) { 490 uint16_t status; 491 492 status = CSR_READ_2(sc, STE_ISR_ACK); 493 494 if (status & STE_ISR_STATS_OFLOW) 495 ste_stats_update(sc); 496 497 if (status & STE_ISR_HOSTERR) { 498 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 499 ste_init_locked(sc); 500 } 501 } 502 return (rx_npkts); 503 } 504 #endif /* DEVICE_POLLING */ 505 506 static void 507 ste_intr(void *xsc) 508 { 509 struct ste_softc *sc; 510 struct ifnet *ifp; 511 uint16_t intrs, status; 512 513 sc = xsc; 514 STE_LOCK(sc); 515 ifp = sc->ste_ifp; 516 517 #ifdef DEVICE_POLLING 518 if (ifp->if_capenable & IFCAP_POLLING) { 519 STE_UNLOCK(sc); 520 return; 521 } 522 #endif 523 /* Reading STE_ISR_ACK clears STE_IMR register. */ 524 status = CSR_READ_2(sc, STE_ISR_ACK); 525 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 526 STE_UNLOCK(sc); 527 return; 528 } 529 530 intrs = STE_INTRS; 531 if (status == 0xFFFF || (status & intrs) == 0) 532 goto done; 533 534 if (sc->ste_int_rx_act > 0) { 535 status &= ~STE_ISR_RX_DMADONE; 536 intrs &= ~STE_IMR_RX_DMADONE; 537 } 538 539 if ((status & (STE_ISR_SOFTINTR | STE_ISR_RX_DMADONE)) != 0) { 540 ste_rxeof(sc, -1); 541 /* 542 * The controller has no ability to Rx interrupt 543 * moderation feature. Receiving 64 bytes frames 544 * from wire generates too many interrupts which in 545 * turn make system useless to process other useful 546 * things. Fortunately ST201 supports single shot 547 * timer so use the timer to implement Rx interrupt 548 * moderation in driver. This adds more register 549 * access but it greatly reduces number of Rx 550 * interrupts under high network load. 551 */ 552 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 && 553 (sc->ste_int_rx_mod != 0)) { 554 if ((status & STE_ISR_RX_DMADONE) != 0) { 555 CSR_WRITE_2(sc, STE_COUNTDOWN, 556 STE_TIMER_USECS(sc->ste_int_rx_mod)); 557 intrs &= ~STE_IMR_RX_DMADONE; 558 sc->ste_int_rx_act = 1; 559 } else { 560 intrs |= STE_IMR_RX_DMADONE; 561 sc->ste_int_rx_act = 0; 562 } 563 } 564 } 565 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 566 if ((status & STE_ISR_TX_DMADONE) != 0) 567 ste_txeof(sc); 568 if ((status & STE_ISR_TX_DONE) != 0) 569 ste_txeoc(sc); 570 if ((status & STE_ISR_STATS_OFLOW) != 0) 571 ste_stats_update(sc); 572 if ((status & STE_ISR_HOSTERR) != 0) { 573 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 574 ste_init_locked(sc); 575 STE_UNLOCK(sc); 576 return; 577 } 578 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 579 ste_start_locked(ifp); 580 done: 581 /* Re-enable interrupts */ 582 CSR_WRITE_2(sc, STE_IMR, intrs); 583 } 584 STE_UNLOCK(sc); 585 } 586 587 /* 588 * A frame has been uploaded: pass the resulting mbuf chain up to 589 * the higher level protocols. 590 */ 591 static int 592 ste_rxeof(struct ste_softc *sc, int count) 593 { 594 struct mbuf *m; 595 struct ifnet *ifp; 596 struct ste_chain_onefrag *cur_rx; 597 uint32_t rxstat; 598 int total_len, rx_npkts; 599 600 ifp = sc->ste_ifp; 601 602 bus_dmamap_sync(sc->ste_cdata.ste_rx_list_tag, 603 sc->ste_cdata.ste_rx_list_map, 604 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 605 606 cur_rx = sc->ste_cdata.ste_rx_head; 607 for (rx_npkts = 0; rx_npkts < STE_RX_LIST_CNT; rx_npkts++, 608 cur_rx = cur_rx->ste_next) { 609 rxstat = le32toh(cur_rx->ste_ptr->ste_status); 610 if ((rxstat & STE_RXSTAT_DMADONE) == 0) 611 break; 612 #ifdef DEVICE_POLLING 613 if (ifp->if_capenable & IFCAP_POLLING) { 614 if (count == 0) 615 break; 616 count--; 617 } 618 #endif 619 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 620 break; 621 /* 622 * If an error occurs, update stats, clear the 623 * status word and leave the mbuf cluster in place: 624 * it should simply get re-used next time this descriptor 625 * comes up in the ring. 626 */ 627 if (rxstat & STE_RXSTAT_FRAME_ERR) { 628 ifp->if_ierrors++; 629 cur_rx->ste_ptr->ste_status = 0; 630 continue; 631 } 632 633 /* No errors; receive the packet. */ 634 m = cur_rx->ste_mbuf; 635 total_len = STE_RX_BYTES(rxstat); 636 637 /* 638 * Try to conjure up a new mbuf cluster. If that 639 * fails, it means we have an out of memory condition and 640 * should leave the buffer in place and continue. This will 641 * result in a lost packet, but there's little else we 642 * can do in this situation. 643 */ 644 if (ste_newbuf(sc, cur_rx) != 0) { 645 ifp->if_iqdrops++; 646 cur_rx->ste_ptr->ste_status = 0; 647 continue; 648 } 649 650 m->m_pkthdr.rcvif = ifp; 651 m->m_pkthdr.len = m->m_len = total_len; 652 653 ifp->if_ipackets++; 654 STE_UNLOCK(sc); 655 (*ifp->if_input)(ifp, m); 656 STE_LOCK(sc); 657 } 658 659 if (rx_npkts > 0) { 660 sc->ste_cdata.ste_rx_head = cur_rx; 661 bus_dmamap_sync(sc->ste_cdata.ste_rx_list_tag, 662 sc->ste_cdata.ste_rx_list_map, 663 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 664 } 665 666 return (rx_npkts); 667 } 668 669 static void 670 ste_txeoc(struct ste_softc *sc) 671 { 672 uint16_t txstat; 673 struct ifnet *ifp; 674 675 STE_LOCK_ASSERT(sc); 676 677 ifp = sc->ste_ifp; 678 679 /* 680 * STE_TX_STATUS register implements a queue of up to 31 681 * transmit status byte. Writing an arbitrary value to the 682 * register will advance the queue to the next transmit 683 * status byte. This means if driver does not read 684 * STE_TX_STATUS register after completing sending more 685 * than 31 frames the controller would be stalled so driver 686 * should re-wake the Tx MAC. This is the most severe 687 * limitation of ST201 based controller. 688 */ 689 for (;;) { 690 txstat = CSR_READ_2(sc, STE_TX_STATUS); 691 if ((txstat & STE_TXSTATUS_TXDONE) == 0) 692 break; 693 if ((txstat & (STE_TXSTATUS_UNDERRUN | 694 STE_TXSTATUS_EXCESSCOLLS | STE_TXSTATUS_RECLAIMERR | 695 STE_TXSTATUS_STATSOFLOW)) != 0) { 696 ifp->if_oerrors++; 697 #ifdef STE_SHOW_TXERRORS 698 device_printf(sc->ste_dev, "TX error : 0x%b\n", 699 txstat & 0xFF, STE_ERR_BITS); 700 #endif 701 if ((txstat & STE_TXSTATUS_UNDERRUN) != 0 && 702 sc->ste_tx_thresh < STE_PACKET_SIZE) { 703 sc->ste_tx_thresh += STE_MIN_FRAMELEN; 704 if (sc->ste_tx_thresh > STE_PACKET_SIZE) 705 sc->ste_tx_thresh = STE_PACKET_SIZE; 706 device_printf(sc->ste_dev, 707 "TX underrun, increasing TX" 708 " start threshold to %d bytes\n", 709 sc->ste_tx_thresh); 710 /* Make sure to disable active DMA cycles. */ 711 STE_SETBIT4(sc, STE_DMACTL, 712 STE_DMACTL_TXDMA_STALL); 713 ste_wait(sc); 714 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 715 ste_init_locked(sc); 716 break; 717 } 718 /* Restart Tx. */ 719 ste_restart_tx(sc); 720 } 721 /* 722 * Advance to next status and ACK TxComplete 723 * interrupt. ST201 data sheet was wrong here, to 724 * get next Tx status, we have to write both 725 * STE_TX_STATUS and STE_TX_FRAMEID register. 726 * Otherwise controller returns the same status 727 * as well as not acknowledge Tx completion 728 * interrupt. 729 */ 730 CSR_WRITE_2(sc, STE_TX_STATUS, txstat); 731 } 732 } 733 734 static void 735 ste_tick(void *arg) 736 { 737 struct ste_softc *sc; 738 struct mii_data *mii; 739 740 sc = (struct ste_softc *)arg; 741 742 STE_LOCK_ASSERT(sc); 743 744 mii = device_get_softc(sc->ste_miibus); 745 mii_tick(mii); 746 /* 747 * ukphy(4) does not seem to generate CB that reports 748 * resolved link state so if we know we lost a link, 749 * explicitly check the link state. 750 */ 751 if ((sc->ste_flags & STE_FLAG_LINK) == 0) 752 ste_miibus_statchg(sc->ste_dev); 753 /* 754 * Because we are not generating Tx completion 755 * interrupt for every frame, reclaim transmitted 756 * buffers here. 757 */ 758 ste_txeof(sc); 759 ste_txeoc(sc); 760 ste_stats_update(sc); 761 ste_watchdog(sc); 762 callout_reset(&sc->ste_callout, hz, ste_tick, sc); 763 } 764 765 static void 766 ste_txeof(struct ste_softc *sc) 767 { 768 struct ifnet *ifp; 769 struct ste_chain *cur_tx; 770 uint32_t txstat; 771 int idx; 772 773 STE_LOCK_ASSERT(sc); 774 775 ifp = sc->ste_ifp; 776 idx = sc->ste_cdata.ste_tx_cons; 777 if (idx == sc->ste_cdata.ste_tx_prod) 778 return; 779 780 bus_dmamap_sync(sc->ste_cdata.ste_tx_list_tag, 781 sc->ste_cdata.ste_tx_list_map, 782 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 783 784 while (idx != sc->ste_cdata.ste_tx_prod) { 785 cur_tx = &sc->ste_cdata.ste_tx_chain[idx]; 786 txstat = le32toh(cur_tx->ste_ptr->ste_ctl); 787 if ((txstat & STE_TXCTL_DMADONE) == 0) 788 break; 789 bus_dmamap_sync(sc->ste_cdata.ste_tx_tag, cur_tx->ste_map, 790 BUS_DMASYNC_POSTWRITE); 791 bus_dmamap_unload(sc->ste_cdata.ste_tx_tag, cur_tx->ste_map); 792 KASSERT(cur_tx->ste_mbuf != NULL, 793 ("%s: freeing NULL mbuf!\n", __func__)); 794 m_freem(cur_tx->ste_mbuf); 795 cur_tx->ste_mbuf = NULL; 796 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 797 ifp->if_opackets++; 798 sc->ste_cdata.ste_tx_cnt--; 799 STE_INC(idx, STE_TX_LIST_CNT); 800 } 801 802 sc->ste_cdata.ste_tx_cons = idx; 803 if (sc->ste_cdata.ste_tx_cnt == 0) 804 sc->ste_timer = 0; 805 } 806 807 static void 808 ste_stats_clear(struct ste_softc *sc) 809 { 810 811 STE_LOCK_ASSERT(sc); 812 813 /* Rx stats. */ 814 CSR_READ_2(sc, STE_STAT_RX_OCTETS_LO); 815 CSR_READ_2(sc, STE_STAT_RX_OCTETS_HI); 816 CSR_READ_2(sc, STE_STAT_RX_FRAMES); 817 CSR_READ_1(sc, STE_STAT_RX_BCAST); 818 CSR_READ_1(sc, STE_STAT_RX_MCAST); 819 CSR_READ_1(sc, STE_STAT_RX_LOST); 820 /* Tx stats. */ 821 CSR_READ_2(sc, STE_STAT_TX_OCTETS_LO); 822 CSR_READ_2(sc, STE_STAT_TX_OCTETS_HI); 823 CSR_READ_2(sc, STE_STAT_TX_FRAMES); 824 CSR_READ_1(sc, STE_STAT_TX_BCAST); 825 CSR_READ_1(sc, STE_STAT_TX_MCAST); 826 CSR_READ_1(sc, STE_STAT_CARRIER_ERR); 827 CSR_READ_1(sc, STE_STAT_SINGLE_COLLS); 828 CSR_READ_1(sc, STE_STAT_MULTI_COLLS); 829 CSR_READ_1(sc, STE_STAT_LATE_COLLS); 830 CSR_READ_1(sc, STE_STAT_TX_DEFER); 831 CSR_READ_1(sc, STE_STAT_TX_EXDEFER); 832 CSR_READ_1(sc, STE_STAT_TX_ABORT); 833 } 834 835 static void 836 ste_stats_update(struct ste_softc *sc) 837 { 838 struct ifnet *ifp; 839 struct ste_hw_stats *stats; 840 uint32_t val; 841 842 STE_LOCK_ASSERT(sc); 843 844 ifp = sc->ste_ifp; 845 stats = &sc->ste_stats; 846 /* Rx stats. */ 847 val = (uint32_t)CSR_READ_2(sc, STE_STAT_RX_OCTETS_LO) | 848 ((uint32_t)CSR_READ_2(sc, STE_STAT_RX_OCTETS_HI)) << 16; 849 val &= 0x000FFFFF; 850 stats->rx_bytes += val; 851 stats->rx_frames += CSR_READ_2(sc, STE_STAT_RX_FRAMES); 852 stats->rx_bcast_frames += CSR_READ_1(sc, STE_STAT_RX_BCAST); 853 stats->rx_mcast_frames += CSR_READ_1(sc, STE_STAT_RX_MCAST); 854 stats->rx_lost_frames += CSR_READ_1(sc, STE_STAT_RX_LOST); 855 /* Tx stats. */ 856 val = (uint32_t)CSR_READ_2(sc, STE_STAT_TX_OCTETS_LO) | 857 ((uint32_t)CSR_READ_2(sc, STE_STAT_TX_OCTETS_HI)) << 16; 858 val &= 0x000FFFFF; 859 stats->tx_bytes += val; 860 stats->tx_frames += CSR_READ_2(sc, STE_STAT_TX_FRAMES); 861 stats->tx_bcast_frames += CSR_READ_1(sc, STE_STAT_TX_BCAST); 862 stats->tx_mcast_frames += CSR_READ_1(sc, STE_STAT_TX_MCAST); 863 stats->tx_carrsense_errs += CSR_READ_1(sc, STE_STAT_CARRIER_ERR); 864 val = CSR_READ_1(sc, STE_STAT_SINGLE_COLLS); 865 stats->tx_single_colls += val; 866 ifp->if_collisions += val; 867 val = CSR_READ_1(sc, STE_STAT_MULTI_COLLS); 868 stats->tx_multi_colls += val; 869 ifp->if_collisions += val; 870 val += CSR_READ_1(sc, STE_STAT_LATE_COLLS); 871 stats->tx_late_colls += val; 872 ifp->if_collisions += val; 873 stats->tx_frames_defered += CSR_READ_1(sc, STE_STAT_TX_DEFER); 874 stats->tx_excess_defers += CSR_READ_1(sc, STE_STAT_TX_EXDEFER); 875 stats->tx_abort += CSR_READ_1(sc, STE_STAT_TX_ABORT); 876 } 877 878 /* 879 * Probe for a Sundance ST201 chip. Check the PCI vendor and device 880 * IDs against our list and return a device name if we find a match. 881 */ 882 static int 883 ste_probe(device_t dev) 884 { 885 const struct ste_type *t; 886 887 t = ste_devs; 888 889 while (t->ste_name != NULL) { 890 if ((pci_get_vendor(dev) == t->ste_vid) && 891 (pci_get_device(dev) == t->ste_did)) { 892 device_set_desc(dev, t->ste_name); 893 return (BUS_PROBE_DEFAULT); 894 } 895 t++; 896 } 897 898 return (ENXIO); 899 } 900 901 /* 902 * Attach the interface. Allocate softc structures, do ifmedia 903 * setup and ethernet/BPF attach. 904 */ 905 static int 906 ste_attach(device_t dev) 907 { 908 struct ste_softc *sc; 909 struct ifnet *ifp; 910 uint16_t eaddr[ETHER_ADDR_LEN / 2]; 911 int error = 0, phy, pmc, prefer_iomap, rid; 912 913 sc = device_get_softc(dev); 914 sc->ste_dev = dev; 915 916 /* 917 * Only use one PHY since this chip reports multiple 918 * Note on the DFE-550 the PHY is at 1 on the DFE-580 919 * it is at 0 & 1. It is rev 0x12. 920 */ 921 if (pci_get_vendor(dev) == DL_VENDORID && 922 pci_get_device(dev) == DL_DEVICEID_DL10050 && 923 pci_get_revid(dev) == 0x12 ) 924 sc->ste_flags |= STE_FLAG_ONE_PHY; 925 926 mtx_init(&sc->ste_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 927 MTX_DEF); 928 /* 929 * Map control/status registers. 930 */ 931 pci_enable_busmaster(dev); 932 933 /* 934 * Prefer memory space register mapping over IO space but use 935 * IO space for a device that is known to have issues on memory 936 * mapping. 937 */ 938 prefer_iomap = 0; 939 if (pci_get_device(dev) == ST_DEVICEID_ST201_1) 940 prefer_iomap = 1; 941 else 942 resource_int_value(device_get_name(sc->ste_dev), 943 device_get_unit(sc->ste_dev), "prefer_iomap", 944 &prefer_iomap); 945 if (prefer_iomap == 0) { 946 sc->ste_res_id = PCIR_BAR(1); 947 sc->ste_res_type = SYS_RES_MEMORY; 948 sc->ste_res = bus_alloc_resource_any(dev, sc->ste_res_type, 949 &sc->ste_res_id, RF_ACTIVE); 950 } 951 if (prefer_iomap || sc->ste_res == NULL) { 952 sc->ste_res_id = PCIR_BAR(0); 953 sc->ste_res_type = SYS_RES_IOPORT; 954 sc->ste_res = bus_alloc_resource_any(dev, sc->ste_res_type, 955 &sc->ste_res_id, RF_ACTIVE); 956 } 957 if (sc->ste_res == NULL) { 958 device_printf(dev, "couldn't map ports/memory\n"); 959 error = ENXIO; 960 goto fail; 961 } 962 963 /* Allocate interrupt */ 964 rid = 0; 965 sc->ste_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 966 RF_SHAREABLE | RF_ACTIVE); 967 968 if (sc->ste_irq == NULL) { 969 device_printf(dev, "couldn't map interrupt\n"); 970 error = ENXIO; 971 goto fail; 972 } 973 974 callout_init_mtx(&sc->ste_callout, &sc->ste_mtx, 0); 975 976 /* Reset the adapter. */ 977 ste_reset(sc); 978 979 /* 980 * Get station address from the EEPROM. 981 */ 982 if (ste_read_eeprom(sc, eaddr, STE_EEADDR_NODE0, ETHER_ADDR_LEN / 2)) { 983 device_printf(dev, "failed to read station address\n"); 984 error = ENXIO; 985 goto fail; 986 } 987 ste_sysctl_node(sc); 988 989 if ((error = ste_dma_alloc(sc)) != 0) 990 goto fail; 991 992 ifp = sc->ste_ifp = if_alloc(IFT_ETHER); 993 if (ifp == NULL) { 994 device_printf(dev, "can not if_alloc()\n"); 995 error = ENOSPC; 996 goto fail; 997 } 998 999 /* Do MII setup. */ 1000 phy = MII_PHY_ANY; 1001 if ((sc->ste_flags & STE_FLAG_ONE_PHY) != 0) 1002 phy = 0; 1003 error = mii_attach(dev, &sc->ste_miibus, ifp, ste_ifmedia_upd, 1004 ste_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0); 1005 if (error != 0) { 1006 device_printf(dev, "attaching PHYs failed\n"); 1007 goto fail; 1008 } 1009 1010 ifp->if_softc = sc; 1011 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1012 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1013 ifp->if_ioctl = ste_ioctl; 1014 ifp->if_start = ste_start; 1015 ifp->if_init = ste_init; 1016 IFQ_SET_MAXLEN(&ifp->if_snd, STE_TX_LIST_CNT - 1); 1017 ifp->if_snd.ifq_drv_maxlen = STE_TX_LIST_CNT - 1; 1018 IFQ_SET_READY(&ifp->if_snd); 1019 1020 sc->ste_tx_thresh = STE_TXSTART_THRESH; 1021 1022 /* 1023 * Call MI attach routine. 1024 */ 1025 ether_ifattach(ifp, (uint8_t *)eaddr); 1026 1027 /* 1028 * Tell the upper layer(s) we support long frames. 1029 */ 1030 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 1031 ifp->if_capabilities |= IFCAP_VLAN_MTU; 1032 if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0) 1033 ifp->if_capabilities |= IFCAP_WOL_MAGIC; 1034 ifp->if_capenable = ifp->if_capabilities; 1035 #ifdef DEVICE_POLLING 1036 ifp->if_capabilities |= IFCAP_POLLING; 1037 #endif 1038 1039 /* Hook interrupt last to avoid having to lock softc */ 1040 error = bus_setup_intr(dev, sc->ste_irq, INTR_TYPE_NET | INTR_MPSAFE, 1041 NULL, ste_intr, sc, &sc->ste_intrhand); 1042 1043 if (error) { 1044 device_printf(dev, "couldn't set up irq\n"); 1045 ether_ifdetach(ifp); 1046 goto fail; 1047 } 1048 1049 fail: 1050 if (error) 1051 ste_detach(dev); 1052 1053 return (error); 1054 } 1055 1056 /* 1057 * Shutdown hardware and free up resources. This can be called any 1058 * time after the mutex has been initialized. It is called in both 1059 * the error case in attach and the normal detach case so it needs 1060 * to be careful about only freeing resources that have actually been 1061 * allocated. 1062 */ 1063 static int 1064 ste_detach(device_t dev) 1065 { 1066 struct ste_softc *sc; 1067 struct ifnet *ifp; 1068 1069 sc = device_get_softc(dev); 1070 KASSERT(mtx_initialized(&sc->ste_mtx), ("ste mutex not initialized")); 1071 ifp = sc->ste_ifp; 1072 1073 #ifdef DEVICE_POLLING 1074 if (ifp->if_capenable & IFCAP_POLLING) 1075 ether_poll_deregister(ifp); 1076 #endif 1077 1078 /* These should only be active if attach succeeded */ 1079 if (device_is_attached(dev)) { 1080 ether_ifdetach(ifp); 1081 STE_LOCK(sc); 1082 ste_stop(sc); 1083 STE_UNLOCK(sc); 1084 callout_drain(&sc->ste_callout); 1085 } 1086 if (sc->ste_miibus) 1087 device_delete_child(dev, sc->ste_miibus); 1088 bus_generic_detach(dev); 1089 1090 if (sc->ste_intrhand) 1091 bus_teardown_intr(dev, sc->ste_irq, sc->ste_intrhand); 1092 if (sc->ste_irq) 1093 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ste_irq); 1094 if (sc->ste_res) 1095 bus_release_resource(dev, sc->ste_res_type, sc->ste_res_id, 1096 sc->ste_res); 1097 1098 if (ifp) 1099 if_free(ifp); 1100 1101 ste_dma_free(sc); 1102 mtx_destroy(&sc->ste_mtx); 1103 1104 return (0); 1105 } 1106 1107 struct ste_dmamap_arg { 1108 bus_addr_t ste_busaddr; 1109 }; 1110 1111 static void 1112 ste_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 1113 { 1114 struct ste_dmamap_arg *ctx; 1115 1116 if (error != 0) 1117 return; 1118 1119 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1120 1121 ctx = (struct ste_dmamap_arg *)arg; 1122 ctx->ste_busaddr = segs[0].ds_addr; 1123 } 1124 1125 static int 1126 ste_dma_alloc(struct ste_softc *sc) 1127 { 1128 struct ste_chain *txc; 1129 struct ste_chain_onefrag *rxc; 1130 struct ste_dmamap_arg ctx; 1131 int error, i; 1132 1133 /* Create parent DMA tag. */ 1134 error = bus_dma_tag_create( 1135 bus_get_dma_tag(sc->ste_dev), /* parent */ 1136 1, 0, /* alignment, boundary */ 1137 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1138 BUS_SPACE_MAXADDR, /* highaddr */ 1139 NULL, NULL, /* filter, filterarg */ 1140 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 1141 0, /* nsegments */ 1142 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 1143 0, /* flags */ 1144 NULL, NULL, /* lockfunc, lockarg */ 1145 &sc->ste_cdata.ste_parent_tag); 1146 if (error != 0) { 1147 device_printf(sc->ste_dev, 1148 "could not create parent DMA tag.\n"); 1149 goto fail; 1150 } 1151 1152 /* Create DMA tag for Tx descriptor list. */ 1153 error = bus_dma_tag_create( 1154 sc->ste_cdata.ste_parent_tag, /* parent */ 1155 STE_DESC_ALIGN, 0, /* alignment, boundary */ 1156 BUS_SPACE_MAXADDR, /* lowaddr */ 1157 BUS_SPACE_MAXADDR, /* highaddr */ 1158 NULL, NULL, /* filter, filterarg */ 1159 STE_TX_LIST_SZ, /* maxsize */ 1160 1, /* nsegments */ 1161 STE_TX_LIST_SZ, /* maxsegsize */ 1162 0, /* flags */ 1163 NULL, NULL, /* lockfunc, lockarg */ 1164 &sc->ste_cdata.ste_tx_list_tag); 1165 if (error != 0) { 1166 device_printf(sc->ste_dev, 1167 "could not create Tx list DMA tag.\n"); 1168 goto fail; 1169 } 1170 1171 /* Create DMA tag for Rx descriptor list. */ 1172 error = bus_dma_tag_create( 1173 sc->ste_cdata.ste_parent_tag, /* parent */ 1174 STE_DESC_ALIGN, 0, /* alignment, boundary */ 1175 BUS_SPACE_MAXADDR, /* lowaddr */ 1176 BUS_SPACE_MAXADDR, /* highaddr */ 1177 NULL, NULL, /* filter, filterarg */ 1178 STE_RX_LIST_SZ, /* maxsize */ 1179 1, /* nsegments */ 1180 STE_RX_LIST_SZ, /* maxsegsize */ 1181 0, /* flags */ 1182 NULL, NULL, /* lockfunc, lockarg */ 1183 &sc->ste_cdata.ste_rx_list_tag); 1184 if (error != 0) { 1185 device_printf(sc->ste_dev, 1186 "could not create Rx list DMA tag.\n"); 1187 goto fail; 1188 } 1189 1190 /* Create DMA tag for Tx buffers. */ 1191 error = bus_dma_tag_create( 1192 sc->ste_cdata.ste_parent_tag, /* parent */ 1193 1, 0, /* alignment, boundary */ 1194 BUS_SPACE_MAXADDR, /* lowaddr */ 1195 BUS_SPACE_MAXADDR, /* highaddr */ 1196 NULL, NULL, /* filter, filterarg */ 1197 MCLBYTES * STE_MAXFRAGS, /* maxsize */ 1198 STE_MAXFRAGS, /* nsegments */ 1199 MCLBYTES, /* maxsegsize */ 1200 0, /* flags */ 1201 NULL, NULL, /* lockfunc, lockarg */ 1202 &sc->ste_cdata.ste_tx_tag); 1203 if (error != 0) { 1204 device_printf(sc->ste_dev, "could not create Tx DMA tag.\n"); 1205 goto fail; 1206 } 1207 1208 /* Create DMA tag for Rx buffers. */ 1209 error = bus_dma_tag_create( 1210 sc->ste_cdata.ste_parent_tag, /* parent */ 1211 1, 0, /* alignment, boundary */ 1212 BUS_SPACE_MAXADDR, /* lowaddr */ 1213 BUS_SPACE_MAXADDR, /* highaddr */ 1214 NULL, NULL, /* filter, filterarg */ 1215 MCLBYTES, /* maxsize */ 1216 1, /* nsegments */ 1217 MCLBYTES, /* maxsegsize */ 1218 0, /* flags */ 1219 NULL, NULL, /* lockfunc, lockarg */ 1220 &sc->ste_cdata.ste_rx_tag); 1221 if (error != 0) { 1222 device_printf(sc->ste_dev, "could not create Rx DMA tag.\n"); 1223 goto fail; 1224 } 1225 1226 /* Allocate DMA'able memory and load the DMA map for Tx list. */ 1227 error = bus_dmamem_alloc(sc->ste_cdata.ste_tx_list_tag, 1228 (void **)&sc->ste_ldata.ste_tx_list, 1229 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 1230 &sc->ste_cdata.ste_tx_list_map); 1231 if (error != 0) { 1232 device_printf(sc->ste_dev, 1233 "could not allocate DMA'able memory for Tx list.\n"); 1234 goto fail; 1235 } 1236 ctx.ste_busaddr = 0; 1237 error = bus_dmamap_load(sc->ste_cdata.ste_tx_list_tag, 1238 sc->ste_cdata.ste_tx_list_map, sc->ste_ldata.ste_tx_list, 1239 STE_TX_LIST_SZ, ste_dmamap_cb, &ctx, 0); 1240 if (error != 0 || ctx.ste_busaddr == 0) { 1241 device_printf(sc->ste_dev, 1242 "could not load DMA'able memory for Tx list.\n"); 1243 goto fail; 1244 } 1245 sc->ste_ldata.ste_tx_list_paddr = ctx.ste_busaddr; 1246 1247 /* Allocate DMA'able memory and load the DMA map for Rx list. */ 1248 error = bus_dmamem_alloc(sc->ste_cdata.ste_rx_list_tag, 1249 (void **)&sc->ste_ldata.ste_rx_list, 1250 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 1251 &sc->ste_cdata.ste_rx_list_map); 1252 if (error != 0) { 1253 device_printf(sc->ste_dev, 1254 "could not allocate DMA'able memory for Rx list.\n"); 1255 goto fail; 1256 } 1257 ctx.ste_busaddr = 0; 1258 error = bus_dmamap_load(sc->ste_cdata.ste_rx_list_tag, 1259 sc->ste_cdata.ste_rx_list_map, sc->ste_ldata.ste_rx_list, 1260 STE_RX_LIST_SZ, ste_dmamap_cb, &ctx, 0); 1261 if (error != 0 || ctx.ste_busaddr == 0) { 1262 device_printf(sc->ste_dev, 1263 "could not load DMA'able memory for Rx list.\n"); 1264 goto fail; 1265 } 1266 sc->ste_ldata.ste_rx_list_paddr = ctx.ste_busaddr; 1267 1268 /* Create DMA maps for Tx buffers. */ 1269 for (i = 0; i < STE_TX_LIST_CNT; i++) { 1270 txc = &sc->ste_cdata.ste_tx_chain[i]; 1271 txc->ste_ptr = NULL; 1272 txc->ste_mbuf = NULL; 1273 txc->ste_next = NULL; 1274 txc->ste_phys = 0; 1275 txc->ste_map = NULL; 1276 error = bus_dmamap_create(sc->ste_cdata.ste_tx_tag, 0, 1277 &txc->ste_map); 1278 if (error != 0) { 1279 device_printf(sc->ste_dev, 1280 "could not create Tx dmamap.\n"); 1281 goto fail; 1282 } 1283 } 1284 /* Create DMA maps for Rx buffers. */ 1285 if ((error = bus_dmamap_create(sc->ste_cdata.ste_rx_tag, 0, 1286 &sc->ste_cdata.ste_rx_sparemap)) != 0) { 1287 device_printf(sc->ste_dev, 1288 "could not create spare Rx dmamap.\n"); 1289 goto fail; 1290 } 1291 for (i = 0; i < STE_RX_LIST_CNT; i++) { 1292 rxc = &sc->ste_cdata.ste_rx_chain[i]; 1293 rxc->ste_ptr = NULL; 1294 rxc->ste_mbuf = NULL; 1295 rxc->ste_next = NULL; 1296 rxc->ste_map = NULL; 1297 error = bus_dmamap_create(sc->ste_cdata.ste_rx_tag, 0, 1298 &rxc->ste_map); 1299 if (error != 0) { 1300 device_printf(sc->ste_dev, 1301 "could not create Rx dmamap.\n"); 1302 goto fail; 1303 } 1304 } 1305 1306 fail: 1307 return (error); 1308 } 1309 1310 static void 1311 ste_dma_free(struct ste_softc *sc) 1312 { 1313 struct ste_chain *txc; 1314 struct ste_chain_onefrag *rxc; 1315 int i; 1316 1317 /* Tx buffers. */ 1318 if (sc->ste_cdata.ste_tx_tag != NULL) { 1319 for (i = 0; i < STE_TX_LIST_CNT; i++) { 1320 txc = &sc->ste_cdata.ste_tx_chain[i]; 1321 if (txc->ste_map != NULL) { 1322 bus_dmamap_destroy(sc->ste_cdata.ste_tx_tag, 1323 txc->ste_map); 1324 txc->ste_map = NULL; 1325 } 1326 } 1327 bus_dma_tag_destroy(sc->ste_cdata.ste_tx_tag); 1328 sc->ste_cdata.ste_tx_tag = NULL; 1329 } 1330 /* Rx buffers. */ 1331 if (sc->ste_cdata.ste_rx_tag != NULL) { 1332 for (i = 0; i < STE_RX_LIST_CNT; i++) { 1333 rxc = &sc->ste_cdata.ste_rx_chain[i]; 1334 if (rxc->ste_map != NULL) { 1335 bus_dmamap_destroy(sc->ste_cdata.ste_rx_tag, 1336 rxc->ste_map); 1337 rxc->ste_map = NULL; 1338 } 1339 } 1340 if (sc->ste_cdata.ste_rx_sparemap != NULL) { 1341 bus_dmamap_destroy(sc->ste_cdata.ste_rx_tag, 1342 sc->ste_cdata.ste_rx_sparemap); 1343 sc->ste_cdata.ste_rx_sparemap = NULL; 1344 } 1345 bus_dma_tag_destroy(sc->ste_cdata.ste_rx_tag); 1346 sc->ste_cdata.ste_rx_tag = NULL; 1347 } 1348 /* Tx descriptor list. */ 1349 if (sc->ste_cdata.ste_tx_list_tag != NULL) { 1350 if (sc->ste_cdata.ste_tx_list_map != NULL) 1351 bus_dmamap_unload(sc->ste_cdata.ste_tx_list_tag, 1352 sc->ste_cdata.ste_tx_list_map); 1353 if (sc->ste_cdata.ste_tx_list_map != NULL && 1354 sc->ste_ldata.ste_tx_list != NULL) 1355 bus_dmamem_free(sc->ste_cdata.ste_tx_list_tag, 1356 sc->ste_ldata.ste_tx_list, 1357 sc->ste_cdata.ste_tx_list_map); 1358 sc->ste_ldata.ste_tx_list = NULL; 1359 sc->ste_cdata.ste_tx_list_map = NULL; 1360 bus_dma_tag_destroy(sc->ste_cdata.ste_tx_list_tag); 1361 sc->ste_cdata.ste_tx_list_tag = NULL; 1362 } 1363 /* Rx descriptor list. */ 1364 if (sc->ste_cdata.ste_rx_list_tag != NULL) { 1365 if (sc->ste_cdata.ste_rx_list_map != NULL) 1366 bus_dmamap_unload(sc->ste_cdata.ste_rx_list_tag, 1367 sc->ste_cdata.ste_rx_list_map); 1368 if (sc->ste_cdata.ste_rx_list_map != NULL && 1369 sc->ste_ldata.ste_rx_list != NULL) 1370 bus_dmamem_free(sc->ste_cdata.ste_rx_list_tag, 1371 sc->ste_ldata.ste_rx_list, 1372 sc->ste_cdata.ste_rx_list_map); 1373 sc->ste_ldata.ste_rx_list = NULL; 1374 sc->ste_cdata.ste_rx_list_map = NULL; 1375 bus_dma_tag_destroy(sc->ste_cdata.ste_rx_list_tag); 1376 sc->ste_cdata.ste_rx_list_tag = NULL; 1377 } 1378 if (sc->ste_cdata.ste_parent_tag != NULL) { 1379 bus_dma_tag_destroy(sc->ste_cdata.ste_parent_tag); 1380 sc->ste_cdata.ste_parent_tag = NULL; 1381 } 1382 } 1383 1384 static int 1385 ste_newbuf(struct ste_softc *sc, struct ste_chain_onefrag *rxc) 1386 { 1387 struct mbuf *m; 1388 bus_dma_segment_t segs[1]; 1389 bus_dmamap_t map; 1390 int error, nsegs; 1391 1392 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1393 if (m == NULL) 1394 return (ENOBUFS); 1395 m->m_len = m->m_pkthdr.len = MCLBYTES; 1396 m_adj(m, ETHER_ALIGN); 1397 1398 if ((error = bus_dmamap_load_mbuf_sg(sc->ste_cdata.ste_rx_tag, 1399 sc->ste_cdata.ste_rx_sparemap, m, segs, &nsegs, 0)) != 0) { 1400 m_freem(m); 1401 return (error); 1402 } 1403 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1404 1405 if (rxc->ste_mbuf != NULL) { 1406 bus_dmamap_sync(sc->ste_cdata.ste_rx_tag, rxc->ste_map, 1407 BUS_DMASYNC_POSTREAD); 1408 bus_dmamap_unload(sc->ste_cdata.ste_rx_tag, rxc->ste_map); 1409 } 1410 map = rxc->ste_map; 1411 rxc->ste_map = sc->ste_cdata.ste_rx_sparemap; 1412 sc->ste_cdata.ste_rx_sparemap = map; 1413 bus_dmamap_sync(sc->ste_cdata.ste_rx_tag, rxc->ste_map, 1414 BUS_DMASYNC_PREREAD); 1415 rxc->ste_mbuf = m; 1416 rxc->ste_ptr->ste_status = 0; 1417 rxc->ste_ptr->ste_frag.ste_addr = htole32(segs[0].ds_addr); 1418 rxc->ste_ptr->ste_frag.ste_len = htole32(segs[0].ds_len | 1419 STE_FRAG_LAST); 1420 return (0); 1421 } 1422 1423 static int 1424 ste_init_rx_list(struct ste_softc *sc) 1425 { 1426 struct ste_chain_data *cd; 1427 struct ste_list_data *ld; 1428 int error, i; 1429 1430 sc->ste_int_rx_act = 0; 1431 cd = &sc->ste_cdata; 1432 ld = &sc->ste_ldata; 1433 bzero(ld->ste_rx_list, STE_RX_LIST_SZ); 1434 for (i = 0; i < STE_RX_LIST_CNT; i++) { 1435 cd->ste_rx_chain[i].ste_ptr = &ld->ste_rx_list[i]; 1436 error = ste_newbuf(sc, &cd->ste_rx_chain[i]); 1437 if (error != 0) 1438 return (error); 1439 if (i == (STE_RX_LIST_CNT - 1)) { 1440 cd->ste_rx_chain[i].ste_next = &cd->ste_rx_chain[0]; 1441 ld->ste_rx_list[i].ste_next = 1442 htole32(ld->ste_rx_list_paddr + 1443 (sizeof(struct ste_desc_onefrag) * 0)); 1444 } else { 1445 cd->ste_rx_chain[i].ste_next = &cd->ste_rx_chain[i + 1]; 1446 ld->ste_rx_list[i].ste_next = 1447 htole32(ld->ste_rx_list_paddr + 1448 (sizeof(struct ste_desc_onefrag) * (i + 1))); 1449 } 1450 } 1451 1452 cd->ste_rx_head = &cd->ste_rx_chain[0]; 1453 bus_dmamap_sync(sc->ste_cdata.ste_rx_list_tag, 1454 sc->ste_cdata.ste_rx_list_map, 1455 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1456 1457 return (0); 1458 } 1459 1460 static void 1461 ste_init_tx_list(struct ste_softc *sc) 1462 { 1463 struct ste_chain_data *cd; 1464 struct ste_list_data *ld; 1465 int i; 1466 1467 cd = &sc->ste_cdata; 1468 ld = &sc->ste_ldata; 1469 bzero(ld->ste_tx_list, STE_TX_LIST_SZ); 1470 for (i = 0; i < STE_TX_LIST_CNT; i++) { 1471 cd->ste_tx_chain[i].ste_ptr = &ld->ste_tx_list[i]; 1472 cd->ste_tx_chain[i].ste_mbuf = NULL; 1473 if (i == (STE_TX_LIST_CNT - 1)) { 1474 cd->ste_tx_chain[i].ste_next = &cd->ste_tx_chain[0]; 1475 cd->ste_tx_chain[i].ste_phys = htole32(STE_ADDR_LO( 1476 ld->ste_tx_list_paddr + 1477 (sizeof(struct ste_desc) * 0))); 1478 } else { 1479 cd->ste_tx_chain[i].ste_next = &cd->ste_tx_chain[i + 1]; 1480 cd->ste_tx_chain[i].ste_phys = htole32(STE_ADDR_LO( 1481 ld->ste_tx_list_paddr + 1482 (sizeof(struct ste_desc) * (i + 1)))); 1483 } 1484 } 1485 1486 cd->ste_last_tx = NULL; 1487 cd->ste_tx_prod = 0; 1488 cd->ste_tx_cons = 0; 1489 cd->ste_tx_cnt = 0; 1490 1491 bus_dmamap_sync(sc->ste_cdata.ste_tx_list_tag, 1492 sc->ste_cdata.ste_tx_list_map, 1493 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1494 } 1495 1496 static void 1497 ste_init(void *xsc) 1498 { 1499 struct ste_softc *sc; 1500 1501 sc = xsc; 1502 STE_LOCK(sc); 1503 ste_init_locked(sc); 1504 STE_UNLOCK(sc); 1505 } 1506 1507 static void 1508 ste_init_locked(struct ste_softc *sc) 1509 { 1510 struct ifnet *ifp; 1511 struct mii_data *mii; 1512 uint8_t val; 1513 int i; 1514 1515 STE_LOCK_ASSERT(sc); 1516 ifp = sc->ste_ifp; 1517 mii = device_get_softc(sc->ste_miibus); 1518 1519 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1520 return; 1521 1522 ste_stop(sc); 1523 /* Reset the chip to a known state. */ 1524 ste_reset(sc); 1525 1526 /* Init our MAC address */ 1527 for (i = 0; i < ETHER_ADDR_LEN; i += 2) { 1528 CSR_WRITE_2(sc, STE_PAR0 + i, 1529 ((IF_LLADDR(sc->ste_ifp)[i] & 0xff) | 1530 IF_LLADDR(sc->ste_ifp)[i + 1] << 8)); 1531 } 1532 1533 /* Init RX list */ 1534 if (ste_init_rx_list(sc) != 0) { 1535 device_printf(sc->ste_dev, 1536 "initialization failed: no memory for RX buffers\n"); 1537 ste_stop(sc); 1538 return; 1539 } 1540 1541 /* Set RX polling interval */ 1542 CSR_WRITE_1(sc, STE_RX_DMAPOLL_PERIOD, 64); 1543 1544 /* Init TX descriptors */ 1545 ste_init_tx_list(sc); 1546 1547 /* Clear and disable WOL. */ 1548 val = CSR_READ_1(sc, STE_WAKE_EVENT); 1549 val &= ~(STE_WAKEEVENT_WAKEPKT_ENB | STE_WAKEEVENT_MAGICPKT_ENB | 1550 STE_WAKEEVENT_LINKEVT_ENB | STE_WAKEEVENT_WAKEONLAN_ENB); 1551 CSR_WRITE_1(sc, STE_WAKE_EVENT, val); 1552 1553 /* Set the TX freethresh value */ 1554 CSR_WRITE_1(sc, STE_TX_DMABURST_THRESH, STE_PACKET_SIZE >> 8); 1555 1556 /* Set the TX start threshold for best performance. */ 1557 CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh); 1558 1559 /* Set the TX reclaim threshold. */ 1560 CSR_WRITE_1(sc, STE_TX_RECLAIM_THRESH, (STE_PACKET_SIZE >> 4)); 1561 1562 /* Accept VLAN length packets */ 1563 CSR_WRITE_2(sc, STE_MAX_FRAMELEN, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN); 1564 1565 /* Set up the RX filter. */ 1566 ste_rxfilter(sc); 1567 1568 /* Load the address of the RX list. */ 1569 STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL); 1570 ste_wait(sc); 1571 CSR_WRITE_4(sc, STE_RX_DMALIST_PTR, 1572 STE_ADDR_LO(sc->ste_ldata.ste_rx_list_paddr)); 1573 STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL); 1574 STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL); 1575 1576 /* Set TX polling interval(defer until we TX first packet). */ 1577 CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 0); 1578 1579 /* Load address of the TX list */ 1580 STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL); 1581 ste_wait(sc); 1582 CSR_WRITE_4(sc, STE_TX_DMALIST_PTR, 0); 1583 STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL); 1584 STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL); 1585 ste_wait(sc); 1586 /* Select 3.2us timer. */ 1587 STE_CLRBIT4(sc, STE_DMACTL, STE_DMACTL_COUNTDOWN_SPEED | 1588 STE_DMACTL_COUNTDOWN_MODE); 1589 1590 /* Enable receiver and transmitter */ 1591 CSR_WRITE_2(sc, STE_MACCTL0, 0); 1592 CSR_WRITE_2(sc, STE_MACCTL1, 0); 1593 STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_ENABLE); 1594 STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_ENABLE); 1595 1596 /* Enable stats counters. */ 1597 STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_ENABLE); 1598 /* Clear stats counters. */ 1599 ste_stats_clear(sc); 1600 1601 CSR_WRITE_2(sc, STE_COUNTDOWN, 0); 1602 CSR_WRITE_2(sc, STE_ISR, 0xFFFF); 1603 #ifdef DEVICE_POLLING 1604 /* Disable interrupts if we are polling. */ 1605 if (ifp->if_capenable & IFCAP_POLLING) 1606 CSR_WRITE_2(sc, STE_IMR, 0); 1607 else 1608 #endif 1609 /* Enable interrupts. */ 1610 CSR_WRITE_2(sc, STE_IMR, STE_INTRS); 1611 1612 sc->ste_flags &= ~STE_FLAG_LINK; 1613 /* Switch to the current media. */ 1614 mii_mediachg(mii); 1615 1616 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1617 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1618 1619 callout_reset(&sc->ste_callout, hz, ste_tick, sc); 1620 } 1621 1622 static void 1623 ste_stop(struct ste_softc *sc) 1624 { 1625 struct ifnet *ifp; 1626 struct ste_chain_onefrag *cur_rx; 1627 struct ste_chain *cur_tx; 1628 uint32_t val; 1629 int i; 1630 1631 STE_LOCK_ASSERT(sc); 1632 ifp = sc->ste_ifp; 1633 1634 callout_stop(&sc->ste_callout); 1635 sc->ste_timer = 0; 1636 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING|IFF_DRV_OACTIVE); 1637 1638 CSR_WRITE_2(sc, STE_IMR, 0); 1639 CSR_WRITE_2(sc, STE_COUNTDOWN, 0); 1640 /* Stop pending DMA. */ 1641 val = CSR_READ_4(sc, STE_DMACTL); 1642 val |= STE_DMACTL_TXDMA_STALL | STE_DMACTL_RXDMA_STALL; 1643 CSR_WRITE_4(sc, STE_DMACTL, val); 1644 ste_wait(sc); 1645 /* Disable auto-polling. */ 1646 CSR_WRITE_1(sc, STE_RX_DMAPOLL_PERIOD, 0); 1647 CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 0); 1648 /* Nullify DMA address to stop any further DMA. */ 1649 CSR_WRITE_4(sc, STE_RX_DMALIST_PTR, 0); 1650 CSR_WRITE_4(sc, STE_TX_DMALIST_PTR, 0); 1651 /* Stop TX/RX MAC. */ 1652 val = CSR_READ_2(sc, STE_MACCTL1); 1653 val |= STE_MACCTL1_TX_DISABLE | STE_MACCTL1_RX_DISABLE | 1654 STE_MACCTL1_STATS_DISABLE; 1655 CSR_WRITE_2(sc, STE_MACCTL1, val); 1656 for (i = 0; i < STE_TIMEOUT; i++) { 1657 DELAY(10); 1658 if ((CSR_READ_2(sc, STE_MACCTL1) & (STE_MACCTL1_TX_DISABLE | 1659 STE_MACCTL1_RX_DISABLE | STE_MACCTL1_STATS_DISABLE)) == 0) 1660 break; 1661 } 1662 if (i == STE_TIMEOUT) 1663 device_printf(sc->ste_dev, "Stopping MAC timed out\n"); 1664 /* Acknowledge any pending interrupts. */ 1665 CSR_READ_2(sc, STE_ISR_ACK); 1666 ste_stats_update(sc); 1667 1668 for (i = 0; i < STE_RX_LIST_CNT; i++) { 1669 cur_rx = &sc->ste_cdata.ste_rx_chain[i]; 1670 if (cur_rx->ste_mbuf != NULL) { 1671 bus_dmamap_sync(sc->ste_cdata.ste_rx_tag, 1672 cur_rx->ste_map, BUS_DMASYNC_POSTREAD); 1673 bus_dmamap_unload(sc->ste_cdata.ste_rx_tag, 1674 cur_rx->ste_map); 1675 m_freem(cur_rx->ste_mbuf); 1676 cur_rx->ste_mbuf = NULL; 1677 } 1678 } 1679 1680 for (i = 0; i < STE_TX_LIST_CNT; i++) { 1681 cur_tx = &sc->ste_cdata.ste_tx_chain[i]; 1682 if (cur_tx->ste_mbuf != NULL) { 1683 bus_dmamap_sync(sc->ste_cdata.ste_tx_tag, 1684 cur_tx->ste_map, BUS_DMASYNC_POSTWRITE); 1685 bus_dmamap_unload(sc->ste_cdata.ste_tx_tag, 1686 cur_tx->ste_map); 1687 m_freem(cur_tx->ste_mbuf); 1688 cur_tx->ste_mbuf = NULL; 1689 } 1690 } 1691 } 1692 1693 static void 1694 ste_reset(struct ste_softc *sc) 1695 { 1696 uint32_t ctl; 1697 int i; 1698 1699 ctl = CSR_READ_4(sc, STE_ASICCTL); 1700 ctl |= STE_ASICCTL_GLOBAL_RESET | STE_ASICCTL_RX_RESET | 1701 STE_ASICCTL_TX_RESET | STE_ASICCTL_DMA_RESET | 1702 STE_ASICCTL_FIFO_RESET | STE_ASICCTL_NETWORK_RESET | 1703 STE_ASICCTL_AUTOINIT_RESET |STE_ASICCTL_HOST_RESET | 1704 STE_ASICCTL_EXTRESET_RESET; 1705 CSR_WRITE_4(sc, STE_ASICCTL, ctl); 1706 CSR_READ_4(sc, STE_ASICCTL); 1707 /* 1708 * Due to the need of accessing EEPROM controller can take 1709 * up to 1ms to complete the global reset. 1710 */ 1711 DELAY(1000); 1712 1713 for (i = 0; i < STE_TIMEOUT; i++) { 1714 if (!(CSR_READ_4(sc, STE_ASICCTL) & STE_ASICCTL_RESET_BUSY)) 1715 break; 1716 DELAY(10); 1717 } 1718 1719 if (i == STE_TIMEOUT) 1720 device_printf(sc->ste_dev, "global reset never completed\n"); 1721 } 1722 1723 static void 1724 ste_restart_tx(struct ste_softc *sc) 1725 { 1726 uint16_t mac; 1727 int i; 1728 1729 for (i = 0; i < STE_TIMEOUT; i++) { 1730 mac = CSR_READ_2(sc, STE_MACCTL1); 1731 mac |= STE_MACCTL1_TX_ENABLE; 1732 CSR_WRITE_2(sc, STE_MACCTL1, mac); 1733 mac = CSR_READ_2(sc, STE_MACCTL1); 1734 if ((mac & STE_MACCTL1_TX_ENABLED) != 0) 1735 break; 1736 DELAY(10); 1737 } 1738 1739 if (i == STE_TIMEOUT) 1740 device_printf(sc->ste_dev, "starting Tx failed"); 1741 } 1742 1743 static int 1744 ste_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 1745 { 1746 struct ste_softc *sc; 1747 struct ifreq *ifr; 1748 struct mii_data *mii; 1749 int error = 0, mask; 1750 1751 sc = ifp->if_softc; 1752 ifr = (struct ifreq *)data; 1753 1754 switch (command) { 1755 case SIOCSIFFLAGS: 1756 STE_LOCK(sc); 1757 if ((ifp->if_flags & IFF_UP) != 0) { 1758 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 && 1759 ((ifp->if_flags ^ sc->ste_if_flags) & 1760 (IFF_PROMISC | IFF_ALLMULTI)) != 0) 1761 ste_rxfilter(sc); 1762 else 1763 ste_init_locked(sc); 1764 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1765 ste_stop(sc); 1766 sc->ste_if_flags = ifp->if_flags; 1767 STE_UNLOCK(sc); 1768 break; 1769 case SIOCADDMULTI: 1770 case SIOCDELMULTI: 1771 STE_LOCK(sc); 1772 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1773 ste_rxfilter(sc); 1774 STE_UNLOCK(sc); 1775 break; 1776 case SIOCGIFMEDIA: 1777 case SIOCSIFMEDIA: 1778 mii = device_get_softc(sc->ste_miibus); 1779 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 1780 break; 1781 case SIOCSIFCAP: 1782 STE_LOCK(sc); 1783 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1784 #ifdef DEVICE_POLLING 1785 if ((mask & IFCAP_POLLING) != 0 && 1786 (IFCAP_POLLING & ifp->if_capabilities) != 0) { 1787 ifp->if_capenable ^= IFCAP_POLLING; 1788 if ((IFCAP_POLLING & ifp->if_capenable) != 0) { 1789 error = ether_poll_register(ste_poll, ifp); 1790 if (error != 0) { 1791 STE_UNLOCK(sc); 1792 break; 1793 } 1794 /* Disable interrupts. */ 1795 CSR_WRITE_2(sc, STE_IMR, 0); 1796 } else { 1797 error = ether_poll_deregister(ifp); 1798 /* Enable interrupts. */ 1799 CSR_WRITE_2(sc, STE_IMR, STE_INTRS); 1800 } 1801 } 1802 #endif /* DEVICE_POLLING */ 1803 if ((mask & IFCAP_WOL_MAGIC) != 0 && 1804 (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0) 1805 ifp->if_capenable ^= IFCAP_WOL_MAGIC; 1806 STE_UNLOCK(sc); 1807 break; 1808 default: 1809 error = ether_ioctl(ifp, command, data); 1810 break; 1811 } 1812 1813 return (error); 1814 } 1815 1816 static int 1817 ste_encap(struct ste_softc *sc, struct mbuf **m_head, struct ste_chain *txc) 1818 { 1819 struct ste_frag *frag; 1820 struct mbuf *m; 1821 struct ste_desc *desc; 1822 bus_dma_segment_t txsegs[STE_MAXFRAGS]; 1823 int error, i, nsegs; 1824 1825 STE_LOCK_ASSERT(sc); 1826 M_ASSERTPKTHDR((*m_head)); 1827 1828 error = bus_dmamap_load_mbuf_sg(sc->ste_cdata.ste_tx_tag, 1829 txc->ste_map, *m_head, txsegs, &nsegs, 0); 1830 if (error == EFBIG) { 1831 m = m_collapse(*m_head, M_DONTWAIT, STE_MAXFRAGS); 1832 if (m == NULL) { 1833 m_freem(*m_head); 1834 *m_head = NULL; 1835 return (ENOMEM); 1836 } 1837 *m_head = m; 1838 error = bus_dmamap_load_mbuf_sg(sc->ste_cdata.ste_tx_tag, 1839 txc->ste_map, *m_head, txsegs, &nsegs, 0); 1840 if (error != 0) { 1841 m_freem(*m_head); 1842 *m_head = NULL; 1843 return (error); 1844 } 1845 } else if (error != 0) 1846 return (error); 1847 if (nsegs == 0) { 1848 m_freem(*m_head); 1849 *m_head = NULL; 1850 return (EIO); 1851 } 1852 bus_dmamap_sync(sc->ste_cdata.ste_tx_tag, txc->ste_map, 1853 BUS_DMASYNC_PREWRITE); 1854 1855 desc = txc->ste_ptr; 1856 for (i = 0; i < nsegs; i++) { 1857 frag = &desc->ste_frags[i]; 1858 frag->ste_addr = htole32(STE_ADDR_LO(txsegs[i].ds_addr)); 1859 frag->ste_len = htole32(txsegs[i].ds_len); 1860 } 1861 desc->ste_frags[i - 1].ste_len |= htole32(STE_FRAG_LAST); 1862 /* 1863 * Because we use Tx polling we can't chain multiple 1864 * Tx descriptors here. Otherwise we race with controller. 1865 */ 1866 desc->ste_next = 0; 1867 if ((sc->ste_cdata.ste_tx_prod % STE_TX_INTR_FRAMES) == 0) 1868 desc->ste_ctl = htole32(STE_TXCTL_ALIGN_DIS | 1869 STE_TXCTL_DMAINTR); 1870 else 1871 desc->ste_ctl = htole32(STE_TXCTL_ALIGN_DIS); 1872 txc->ste_mbuf = *m_head; 1873 STE_INC(sc->ste_cdata.ste_tx_prod, STE_TX_LIST_CNT); 1874 sc->ste_cdata.ste_tx_cnt++; 1875 1876 return (0); 1877 } 1878 1879 static void 1880 ste_start(struct ifnet *ifp) 1881 { 1882 struct ste_softc *sc; 1883 1884 sc = ifp->if_softc; 1885 STE_LOCK(sc); 1886 ste_start_locked(ifp); 1887 STE_UNLOCK(sc); 1888 } 1889 1890 static void 1891 ste_start_locked(struct ifnet *ifp) 1892 { 1893 struct ste_softc *sc; 1894 struct ste_chain *cur_tx; 1895 struct mbuf *m_head = NULL; 1896 int enq; 1897 1898 sc = ifp->if_softc; 1899 STE_LOCK_ASSERT(sc); 1900 1901 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1902 IFF_DRV_RUNNING || (sc->ste_flags & STE_FLAG_LINK) == 0) 1903 return; 1904 1905 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) { 1906 if (sc->ste_cdata.ste_tx_cnt == STE_TX_LIST_CNT - 1) { 1907 /* 1908 * Controller may have cached copy of the last used 1909 * next ptr so we have to reserve one TFD to avoid 1910 * TFD overruns. 1911 */ 1912 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1913 break; 1914 } 1915 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 1916 if (m_head == NULL) 1917 break; 1918 cur_tx = &sc->ste_cdata.ste_tx_chain[sc->ste_cdata.ste_tx_prod]; 1919 if (ste_encap(sc, &m_head, cur_tx) != 0) { 1920 if (m_head == NULL) 1921 break; 1922 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 1923 break; 1924 } 1925 if (sc->ste_cdata.ste_last_tx == NULL) { 1926 bus_dmamap_sync(sc->ste_cdata.ste_tx_list_tag, 1927 sc->ste_cdata.ste_tx_list_map, 1928 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1929 STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL); 1930 ste_wait(sc); 1931 CSR_WRITE_4(sc, STE_TX_DMALIST_PTR, 1932 STE_ADDR_LO(sc->ste_ldata.ste_tx_list_paddr)); 1933 CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 64); 1934 STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL); 1935 ste_wait(sc); 1936 } else { 1937 sc->ste_cdata.ste_last_tx->ste_ptr->ste_next = 1938 sc->ste_cdata.ste_last_tx->ste_phys; 1939 bus_dmamap_sync(sc->ste_cdata.ste_tx_list_tag, 1940 sc->ste_cdata.ste_tx_list_map, 1941 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1942 } 1943 sc->ste_cdata.ste_last_tx = cur_tx; 1944 1945 enq++; 1946 /* 1947 * If there's a BPF listener, bounce a copy of this frame 1948 * to him. 1949 */ 1950 BPF_MTAP(ifp, m_head); 1951 } 1952 1953 if (enq > 0) 1954 sc->ste_timer = STE_TX_TIMEOUT; 1955 } 1956 1957 static void 1958 ste_watchdog(struct ste_softc *sc) 1959 { 1960 struct ifnet *ifp; 1961 1962 ifp = sc->ste_ifp; 1963 STE_LOCK_ASSERT(sc); 1964 1965 if (sc->ste_timer == 0 || --sc->ste_timer) 1966 return; 1967 1968 ifp->if_oerrors++; 1969 if_printf(ifp, "watchdog timeout\n"); 1970 1971 ste_txeof(sc); 1972 ste_txeoc(sc); 1973 ste_rxeof(sc, -1); 1974 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1975 ste_init_locked(sc); 1976 1977 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1978 ste_start_locked(ifp); 1979 } 1980 1981 static int 1982 ste_shutdown(device_t dev) 1983 { 1984 1985 return (ste_suspend(dev)); 1986 } 1987 1988 static int 1989 ste_suspend(device_t dev) 1990 { 1991 struct ste_softc *sc; 1992 1993 sc = device_get_softc(dev); 1994 1995 STE_LOCK(sc); 1996 ste_stop(sc); 1997 ste_setwol(sc); 1998 STE_UNLOCK(sc); 1999 2000 return (0); 2001 } 2002 2003 static int 2004 ste_resume(device_t dev) 2005 { 2006 struct ste_softc *sc; 2007 struct ifnet *ifp; 2008 int pmc; 2009 uint16_t pmstat; 2010 2011 sc = device_get_softc(dev); 2012 STE_LOCK(sc); 2013 if (pci_find_cap(sc->ste_dev, PCIY_PMG, &pmc) == 0) { 2014 /* Disable PME and clear PME status. */ 2015 pmstat = pci_read_config(sc->ste_dev, 2016 pmc + PCIR_POWER_STATUS, 2); 2017 if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) { 2018 pmstat &= ~PCIM_PSTAT_PMEENABLE; 2019 pci_write_config(sc->ste_dev, 2020 pmc + PCIR_POWER_STATUS, pmstat, 2); 2021 } 2022 } 2023 ifp = sc->ste_ifp; 2024 if ((ifp->if_flags & IFF_UP) != 0) { 2025 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2026 ste_init_locked(sc); 2027 } 2028 STE_UNLOCK(sc); 2029 2030 return (0); 2031 } 2032 2033 #define STE_SYSCTL_STAT_ADD32(c, h, n, p, d) \ 2034 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) 2035 #define STE_SYSCTL_STAT_ADD64(c, h, n, p, d) \ 2036 SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d) 2037 2038 static void 2039 ste_sysctl_node(struct ste_softc *sc) 2040 { 2041 struct sysctl_ctx_list *ctx; 2042 struct sysctl_oid_list *child, *parent; 2043 struct sysctl_oid *tree; 2044 struct ste_hw_stats *stats; 2045 2046 stats = &sc->ste_stats; 2047 ctx = device_get_sysctl_ctx(sc->ste_dev); 2048 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->ste_dev)); 2049 2050 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "int_rx_mod", 2051 CTLFLAG_RW, &sc->ste_int_rx_mod, 0, "ste RX interrupt moderation"); 2052 /* Pull in device tunables. */ 2053 sc->ste_int_rx_mod = STE_IM_RX_TIMER_DEFAULT; 2054 resource_int_value(device_get_name(sc->ste_dev), 2055 device_get_unit(sc->ste_dev), "int_rx_mod", &sc->ste_int_rx_mod); 2056 2057 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, 2058 NULL, "STE statistics"); 2059 parent = SYSCTL_CHILDREN(tree); 2060 2061 /* Rx statistics. */ 2062 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD, 2063 NULL, "Rx MAC statistics"); 2064 child = SYSCTL_CHILDREN(tree); 2065 STE_SYSCTL_STAT_ADD64(ctx, child, "good_octets", 2066 &stats->rx_bytes, "Good octets"); 2067 STE_SYSCTL_STAT_ADD32(ctx, child, "good_frames", 2068 &stats->rx_frames, "Good frames"); 2069 STE_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames", 2070 &stats->rx_bcast_frames, "Good broadcast frames"); 2071 STE_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames", 2072 &stats->rx_mcast_frames, "Good multicast frames"); 2073 STE_SYSCTL_STAT_ADD32(ctx, child, "lost_frames", 2074 &stats->rx_lost_frames, "Lost frames"); 2075 2076 /* Tx statistics. */ 2077 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD, 2078 NULL, "Tx MAC statistics"); 2079 child = SYSCTL_CHILDREN(tree); 2080 STE_SYSCTL_STAT_ADD64(ctx, child, "good_octets", 2081 &stats->tx_bytes, "Good octets"); 2082 STE_SYSCTL_STAT_ADD32(ctx, child, "good_frames", 2083 &stats->tx_frames, "Good frames"); 2084 STE_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames", 2085 &stats->tx_bcast_frames, "Good broadcast frames"); 2086 STE_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames", 2087 &stats->tx_mcast_frames, "Good multicast frames"); 2088 STE_SYSCTL_STAT_ADD32(ctx, child, "carrier_errs", 2089 &stats->tx_carrsense_errs, "Carrier sense errors"); 2090 STE_SYSCTL_STAT_ADD32(ctx, child, "single_colls", 2091 &stats->tx_single_colls, "Single collisions"); 2092 STE_SYSCTL_STAT_ADD32(ctx, child, "multi_colls", 2093 &stats->tx_multi_colls, "Multiple collisions"); 2094 STE_SYSCTL_STAT_ADD32(ctx, child, "late_colls", 2095 &stats->tx_late_colls, "Late collisions"); 2096 STE_SYSCTL_STAT_ADD32(ctx, child, "defers", 2097 &stats->tx_frames_defered, "Frames with deferrals"); 2098 STE_SYSCTL_STAT_ADD32(ctx, child, "excess_defers", 2099 &stats->tx_excess_defers, "Frames with excessive derferrals"); 2100 STE_SYSCTL_STAT_ADD32(ctx, child, "abort", 2101 &stats->tx_abort, "Aborted frames due to Excessive collisions"); 2102 } 2103 2104 #undef STE_SYSCTL_STAT_ADD32 2105 #undef STE_SYSCTL_STAT_ADD64 2106 2107 static void 2108 ste_setwol(struct ste_softc *sc) 2109 { 2110 struct ifnet *ifp; 2111 uint16_t pmstat; 2112 uint8_t val; 2113 int pmc; 2114 2115 STE_LOCK_ASSERT(sc); 2116 2117 if (pci_find_cap(sc->ste_dev, PCIY_PMG, &pmc) != 0) { 2118 /* Disable WOL. */ 2119 CSR_READ_1(sc, STE_WAKE_EVENT); 2120 CSR_WRITE_1(sc, STE_WAKE_EVENT, 0); 2121 return; 2122 } 2123 2124 ifp = sc->ste_ifp; 2125 val = CSR_READ_1(sc, STE_WAKE_EVENT); 2126 val &= ~(STE_WAKEEVENT_WAKEPKT_ENB | STE_WAKEEVENT_MAGICPKT_ENB | 2127 STE_WAKEEVENT_LINKEVT_ENB | STE_WAKEEVENT_WAKEONLAN_ENB); 2128 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) 2129 val |= STE_WAKEEVENT_MAGICPKT_ENB | STE_WAKEEVENT_WAKEONLAN_ENB; 2130 CSR_WRITE_1(sc, STE_WAKE_EVENT, val); 2131 /* Request PME. */ 2132 pmstat = pci_read_config(sc->ste_dev, pmc + PCIR_POWER_STATUS, 2); 2133 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 2134 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) 2135 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 2136 pci_write_config(sc->ste_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); 2137 } 2138