1 /*- 2 * Copyright (c) 1997, 1998, 1999 3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Bill Paul. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 #ifdef HAVE_KERNEL_OPTION_HEADERS 37 #include "opt_device_polling.h" 38 #endif 39 40 #include <sys/param.h> 41 #include <sys/systm.h> 42 #include <sys/bus.h> 43 #include <sys/endian.h> 44 #include <sys/kernel.h> 45 #include <sys/lock.h> 46 #include <sys/malloc.h> 47 #include <sys/mbuf.h> 48 #include <sys/module.h> 49 #include <sys/rman.h> 50 #include <sys/socket.h> 51 #include <sys/sockio.h> 52 #include <sys/sysctl.h> 53 54 #include <net/bpf.h> 55 #include <net/if.h> 56 #include <net/if_arp.h> 57 #include <net/ethernet.h> 58 #include <net/if_dl.h> 59 #include <net/if_media.h> 60 #include <net/if_types.h> 61 #include <net/if_vlan_var.h> 62 63 #include <machine/bus.h> 64 #include <machine/resource.h> 65 66 #include <dev/mii/mii.h> 67 #include <dev/mii/mii_bitbang.h> 68 #include <dev/mii/miivar.h> 69 70 #include <dev/pci/pcireg.h> 71 #include <dev/pci/pcivar.h> 72 73 #include <dev/ste/if_stereg.h> 74 75 /* "device miibus" required. See GENERIC if you get errors here. */ 76 #include "miibus_if.h" 77 78 MODULE_DEPEND(ste, pci, 1, 1, 1); 79 MODULE_DEPEND(ste, ether, 1, 1, 1); 80 MODULE_DEPEND(ste, miibus, 1, 1, 1); 81 82 /* Define to show Tx error status. */ 83 #define STE_SHOW_TXERRORS 84 85 /* 86 * Various supported device vendors/types and their names. 87 */ 88 static const struct ste_type const ste_devs[] = { 89 { ST_VENDORID, ST_DEVICEID_ST201_1, "Sundance ST201 10/100BaseTX" }, 90 { ST_VENDORID, ST_DEVICEID_ST201_2, "Sundance ST201 10/100BaseTX" }, 91 { DL_VENDORID, DL_DEVICEID_DL10050, "D-Link DL10050 10/100BaseTX" }, 92 { 0, 0, NULL } 93 }; 94 95 static int ste_attach(device_t); 96 static int ste_detach(device_t); 97 static int ste_probe(device_t); 98 static int ste_resume(device_t); 99 static int ste_shutdown(device_t); 100 static int ste_suspend(device_t); 101 102 static int ste_dma_alloc(struct ste_softc *); 103 static void ste_dma_free(struct ste_softc *); 104 static void ste_dmamap_cb(void *, bus_dma_segment_t *, int, int); 105 static int ste_eeprom_wait(struct ste_softc *); 106 static int ste_encap(struct ste_softc *, struct mbuf **, 107 struct ste_chain *); 108 static int ste_ifmedia_upd(struct ifnet *); 109 static void ste_ifmedia_sts(struct ifnet *, struct ifmediareq *); 110 static void ste_init(void *); 111 static void ste_init_locked(struct ste_softc *); 112 static int ste_init_rx_list(struct ste_softc *); 113 static void ste_init_tx_list(struct ste_softc *); 114 static void ste_intr(void *); 115 static int ste_ioctl(struct ifnet *, u_long, caddr_t); 116 static uint32_t ste_mii_bitbang_read(device_t); 117 static void ste_mii_bitbang_write(device_t, uint32_t); 118 static int ste_miibus_readreg(device_t, int, int); 119 static void ste_miibus_statchg(device_t); 120 static int ste_miibus_writereg(device_t, int, int, int); 121 static int ste_newbuf(struct ste_softc *, struct ste_chain_onefrag *); 122 static int ste_read_eeprom(struct ste_softc *, uint16_t *, int, int); 123 static void ste_reset(struct ste_softc *); 124 static void ste_restart_tx(struct ste_softc *); 125 static int ste_rxeof(struct ste_softc *, int); 126 static void ste_rxfilter(struct ste_softc *); 127 static void ste_setwol(struct ste_softc *); 128 static void ste_start(struct ifnet *); 129 static void ste_start_locked(struct ifnet *); 130 static void ste_stats_clear(struct ste_softc *); 131 static void ste_stats_update(struct ste_softc *); 132 static void ste_stop(struct ste_softc *); 133 static void ste_sysctl_node(struct ste_softc *); 134 static void ste_tick(void *); 135 static void ste_txeoc(struct ste_softc *); 136 static void ste_txeof(struct ste_softc *); 137 static void ste_wait(struct ste_softc *); 138 static void ste_watchdog(struct ste_softc *); 139 140 /* 141 * MII bit-bang glue 142 */ 143 static const struct mii_bitbang_ops ste_mii_bitbang_ops = { 144 ste_mii_bitbang_read, 145 ste_mii_bitbang_write, 146 { 147 STE_PHYCTL_MDATA, /* MII_BIT_MDO */ 148 STE_PHYCTL_MDATA, /* MII_BIT_MDI */ 149 STE_PHYCTL_MCLK, /* MII_BIT_MDC */ 150 STE_PHYCTL_MDIR, /* MII_BIT_DIR_HOST_PHY */ 151 0, /* MII_BIT_DIR_PHY_HOST */ 152 } 153 }; 154 155 static device_method_t ste_methods[] = { 156 /* Device interface */ 157 DEVMETHOD(device_probe, ste_probe), 158 DEVMETHOD(device_attach, ste_attach), 159 DEVMETHOD(device_detach, ste_detach), 160 DEVMETHOD(device_shutdown, ste_shutdown), 161 DEVMETHOD(device_suspend, ste_suspend), 162 DEVMETHOD(device_resume, ste_resume), 163 164 /* MII interface */ 165 DEVMETHOD(miibus_readreg, ste_miibus_readreg), 166 DEVMETHOD(miibus_writereg, ste_miibus_writereg), 167 DEVMETHOD(miibus_statchg, ste_miibus_statchg), 168 169 DEVMETHOD_END 170 }; 171 172 static driver_t ste_driver = { 173 "ste", 174 ste_methods, 175 sizeof(struct ste_softc) 176 }; 177 178 static devclass_t ste_devclass; 179 180 DRIVER_MODULE(ste, pci, ste_driver, ste_devclass, 0, 0); 181 DRIVER_MODULE(miibus, ste, miibus_driver, miibus_devclass, 0, 0); 182 183 #define STE_SETBIT4(sc, reg, x) \ 184 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x)) 185 186 #define STE_CLRBIT4(sc, reg, x) \ 187 CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x)) 188 189 #define STE_SETBIT2(sc, reg, x) \ 190 CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) | (x)) 191 192 #define STE_CLRBIT2(sc, reg, x) \ 193 CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) & ~(x)) 194 195 #define STE_SETBIT1(sc, reg, x) \ 196 CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) | (x)) 197 198 #define STE_CLRBIT1(sc, reg, x) \ 199 CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) & ~(x)) 200 201 /* 202 * Read the MII serial port for the MII bit-bang module. 203 */ 204 static uint32_t 205 ste_mii_bitbang_read(device_t dev) 206 { 207 struct ste_softc *sc; 208 uint32_t val; 209 210 sc = device_get_softc(dev); 211 212 val = CSR_READ_1(sc, STE_PHYCTL); 213 CSR_BARRIER(sc, STE_PHYCTL, 1, 214 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 215 216 return (val); 217 } 218 219 /* 220 * Write the MII serial port for the MII bit-bang module. 221 */ 222 static void 223 ste_mii_bitbang_write(device_t dev, uint32_t val) 224 { 225 struct ste_softc *sc; 226 227 sc = device_get_softc(dev); 228 229 CSR_WRITE_1(sc, STE_PHYCTL, val); 230 CSR_BARRIER(sc, STE_PHYCTL, 1, 231 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 232 } 233 234 static int 235 ste_miibus_readreg(device_t dev, int phy, int reg) 236 { 237 238 return (mii_bitbang_readreg(dev, &ste_mii_bitbang_ops, phy, reg)); 239 } 240 241 static int 242 ste_miibus_writereg(device_t dev, int phy, int reg, int data) 243 { 244 245 mii_bitbang_writereg(dev, &ste_mii_bitbang_ops, phy, reg, data); 246 247 return (0); 248 } 249 250 static void 251 ste_miibus_statchg(device_t dev) 252 { 253 struct ste_softc *sc; 254 struct mii_data *mii; 255 struct ifnet *ifp; 256 uint16_t cfg; 257 258 sc = device_get_softc(dev); 259 260 mii = device_get_softc(sc->ste_miibus); 261 ifp = sc->ste_ifp; 262 if (mii == NULL || ifp == NULL || 263 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 264 return; 265 266 sc->ste_flags &= ~STE_FLAG_LINK; 267 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 268 (IFM_ACTIVE | IFM_AVALID)) { 269 switch (IFM_SUBTYPE(mii->mii_media_active)) { 270 case IFM_10_T: 271 case IFM_100_TX: 272 case IFM_100_FX: 273 case IFM_100_T4: 274 sc->ste_flags |= STE_FLAG_LINK; 275 default: 276 break; 277 } 278 } 279 280 /* Program MACs with resolved speed/duplex/flow-control. */ 281 if ((sc->ste_flags & STE_FLAG_LINK) != 0) { 282 cfg = CSR_READ_2(sc, STE_MACCTL0); 283 cfg &= ~(STE_MACCTL0_FLOWCTL_ENABLE | STE_MACCTL0_FULLDUPLEX); 284 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 285 /* 286 * ST201 data sheet says driver should enable receiving 287 * MAC control frames bit of receive mode register to 288 * receive flow-control frames but the register has no 289 * such bits. In addition the controller has no ability 290 * to send pause frames so it should be handled in 291 * driver. Implementing pause timer handling in driver 292 * layer is not trivial, so don't enable flow-control 293 * here. 294 */ 295 cfg |= STE_MACCTL0_FULLDUPLEX; 296 } 297 CSR_WRITE_2(sc, STE_MACCTL0, cfg); 298 } 299 } 300 301 static int 302 ste_ifmedia_upd(struct ifnet *ifp) 303 { 304 struct ste_softc *sc; 305 struct mii_data *mii; 306 struct mii_softc *miisc; 307 int error; 308 309 sc = ifp->if_softc; 310 STE_LOCK(sc); 311 mii = device_get_softc(sc->ste_miibus); 312 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 313 PHY_RESET(miisc); 314 error = mii_mediachg(mii); 315 STE_UNLOCK(sc); 316 317 return (error); 318 } 319 320 static void 321 ste_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 322 { 323 struct ste_softc *sc; 324 struct mii_data *mii; 325 326 sc = ifp->if_softc; 327 mii = device_get_softc(sc->ste_miibus); 328 329 STE_LOCK(sc); 330 if ((ifp->if_flags & IFF_UP) == 0) { 331 STE_UNLOCK(sc); 332 return; 333 } 334 mii_pollstat(mii); 335 ifmr->ifm_active = mii->mii_media_active; 336 ifmr->ifm_status = mii->mii_media_status; 337 STE_UNLOCK(sc); 338 } 339 340 static void 341 ste_wait(struct ste_softc *sc) 342 { 343 int i; 344 345 for (i = 0; i < STE_TIMEOUT; i++) { 346 if (!(CSR_READ_4(sc, STE_DMACTL) & STE_DMACTL_DMA_HALTINPROG)) 347 break; 348 DELAY(1); 349 } 350 351 if (i == STE_TIMEOUT) 352 device_printf(sc->ste_dev, "command never completed!\n"); 353 } 354 355 /* 356 * The EEPROM is slow: give it time to come ready after issuing 357 * it a command. 358 */ 359 static int 360 ste_eeprom_wait(struct ste_softc *sc) 361 { 362 int i; 363 364 DELAY(1000); 365 366 for (i = 0; i < 100; i++) { 367 if (CSR_READ_2(sc, STE_EEPROM_CTL) & STE_EECTL_BUSY) 368 DELAY(1000); 369 else 370 break; 371 } 372 373 if (i == 100) { 374 device_printf(sc->ste_dev, "eeprom failed to come ready\n"); 375 return (1); 376 } 377 378 return (0); 379 } 380 381 /* 382 * Read a sequence of words from the EEPROM. Note that ethernet address 383 * data is stored in the EEPROM in network byte order. 384 */ 385 static int 386 ste_read_eeprom(struct ste_softc *sc, uint16_t *dest, int off, int cnt) 387 { 388 int err = 0, i; 389 390 if (ste_eeprom_wait(sc)) 391 return (1); 392 393 for (i = 0; i < cnt; i++) { 394 CSR_WRITE_2(sc, STE_EEPROM_CTL, STE_EEOPCODE_READ | (off + i)); 395 err = ste_eeprom_wait(sc); 396 if (err) 397 break; 398 *dest = le16toh(CSR_READ_2(sc, STE_EEPROM_DATA)); 399 dest++; 400 } 401 402 return (err ? 1 : 0); 403 } 404 405 static void 406 ste_rxfilter(struct ste_softc *sc) 407 { 408 struct ifnet *ifp; 409 struct ifmultiaddr *ifma; 410 uint32_t hashes[2] = { 0, 0 }; 411 uint8_t rxcfg; 412 int h; 413 414 STE_LOCK_ASSERT(sc); 415 416 ifp = sc->ste_ifp; 417 rxcfg = CSR_READ_1(sc, STE_RX_MODE); 418 rxcfg |= STE_RXMODE_UNICAST; 419 rxcfg &= ~(STE_RXMODE_ALLMULTI | STE_RXMODE_MULTIHASH | 420 STE_RXMODE_BROADCAST | STE_RXMODE_PROMISC); 421 if (ifp->if_flags & IFF_BROADCAST) 422 rxcfg |= STE_RXMODE_BROADCAST; 423 if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) { 424 if ((ifp->if_flags & IFF_ALLMULTI) != 0) 425 rxcfg |= STE_RXMODE_ALLMULTI; 426 if ((ifp->if_flags & IFF_PROMISC) != 0) 427 rxcfg |= STE_RXMODE_PROMISC; 428 goto chipit; 429 } 430 431 rxcfg |= STE_RXMODE_MULTIHASH; 432 /* Now program new ones. */ 433 if_maddr_rlock(ifp); 434 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 435 if (ifma->ifma_addr->sa_family != AF_LINK) 436 continue; 437 h = ether_crc32_be(LLADDR((struct sockaddr_dl *) 438 ifma->ifma_addr), ETHER_ADDR_LEN) & 0x3F; 439 if (h < 32) 440 hashes[0] |= (1 << h); 441 else 442 hashes[1] |= (1 << (h - 32)); 443 } 444 if_maddr_runlock(ifp); 445 446 chipit: 447 CSR_WRITE_2(sc, STE_MAR0, hashes[0] & 0xFFFF); 448 CSR_WRITE_2(sc, STE_MAR1, (hashes[0] >> 16) & 0xFFFF); 449 CSR_WRITE_2(sc, STE_MAR2, hashes[1] & 0xFFFF); 450 CSR_WRITE_2(sc, STE_MAR3, (hashes[1] >> 16) & 0xFFFF); 451 CSR_WRITE_1(sc, STE_RX_MODE, rxcfg); 452 CSR_READ_1(sc, STE_RX_MODE); 453 } 454 455 #ifdef DEVICE_POLLING 456 static poll_handler_t ste_poll, ste_poll_locked; 457 458 static int 459 ste_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 460 { 461 struct ste_softc *sc = ifp->if_softc; 462 int rx_npkts = 0; 463 464 STE_LOCK(sc); 465 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 466 rx_npkts = ste_poll_locked(ifp, cmd, count); 467 STE_UNLOCK(sc); 468 return (rx_npkts); 469 } 470 471 static int 472 ste_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count) 473 { 474 struct ste_softc *sc = ifp->if_softc; 475 int rx_npkts; 476 477 STE_LOCK_ASSERT(sc); 478 479 rx_npkts = ste_rxeof(sc, count); 480 ste_txeof(sc); 481 ste_txeoc(sc); 482 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 483 ste_start_locked(ifp); 484 485 if (cmd == POLL_AND_CHECK_STATUS) { 486 uint16_t status; 487 488 status = CSR_READ_2(sc, STE_ISR_ACK); 489 490 if (status & STE_ISR_STATS_OFLOW) 491 ste_stats_update(sc); 492 493 if (status & STE_ISR_HOSTERR) { 494 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 495 ste_init_locked(sc); 496 } 497 } 498 return (rx_npkts); 499 } 500 #endif /* DEVICE_POLLING */ 501 502 static void 503 ste_intr(void *xsc) 504 { 505 struct ste_softc *sc; 506 struct ifnet *ifp; 507 uint16_t intrs, status; 508 509 sc = xsc; 510 STE_LOCK(sc); 511 ifp = sc->ste_ifp; 512 513 #ifdef DEVICE_POLLING 514 if (ifp->if_capenable & IFCAP_POLLING) { 515 STE_UNLOCK(sc); 516 return; 517 } 518 #endif 519 /* Reading STE_ISR_ACK clears STE_IMR register. */ 520 status = CSR_READ_2(sc, STE_ISR_ACK); 521 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 522 STE_UNLOCK(sc); 523 return; 524 } 525 526 intrs = STE_INTRS; 527 if (status == 0xFFFF || (status & intrs) == 0) 528 goto done; 529 530 if (sc->ste_int_rx_act > 0) { 531 status &= ~STE_ISR_RX_DMADONE; 532 intrs &= ~STE_IMR_RX_DMADONE; 533 } 534 535 if ((status & (STE_ISR_SOFTINTR | STE_ISR_RX_DMADONE)) != 0) { 536 ste_rxeof(sc, -1); 537 /* 538 * The controller has no ability to Rx interrupt 539 * moderation feature. Receiving 64 bytes frames 540 * from wire generates too many interrupts which in 541 * turn make system useless to process other useful 542 * things. Fortunately ST201 supports single shot 543 * timer so use the timer to implement Rx interrupt 544 * moderation in driver. This adds more register 545 * access but it greatly reduces number of Rx 546 * interrupts under high network load. 547 */ 548 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 && 549 (sc->ste_int_rx_mod != 0)) { 550 if ((status & STE_ISR_RX_DMADONE) != 0) { 551 CSR_WRITE_2(sc, STE_COUNTDOWN, 552 STE_TIMER_USECS(sc->ste_int_rx_mod)); 553 intrs &= ~STE_IMR_RX_DMADONE; 554 sc->ste_int_rx_act = 1; 555 } else { 556 intrs |= STE_IMR_RX_DMADONE; 557 sc->ste_int_rx_act = 0; 558 } 559 } 560 } 561 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 562 if ((status & STE_ISR_TX_DMADONE) != 0) 563 ste_txeof(sc); 564 if ((status & STE_ISR_TX_DONE) != 0) 565 ste_txeoc(sc); 566 if ((status & STE_ISR_STATS_OFLOW) != 0) 567 ste_stats_update(sc); 568 if ((status & STE_ISR_HOSTERR) != 0) { 569 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 570 ste_init_locked(sc); 571 STE_UNLOCK(sc); 572 return; 573 } 574 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 575 ste_start_locked(ifp); 576 done: 577 /* Re-enable interrupts */ 578 CSR_WRITE_2(sc, STE_IMR, intrs); 579 } 580 STE_UNLOCK(sc); 581 } 582 583 /* 584 * A frame has been uploaded: pass the resulting mbuf chain up to 585 * the higher level protocols. 586 */ 587 static int 588 ste_rxeof(struct ste_softc *sc, int count) 589 { 590 struct mbuf *m; 591 struct ifnet *ifp; 592 struct ste_chain_onefrag *cur_rx; 593 uint32_t rxstat; 594 int total_len, rx_npkts; 595 596 ifp = sc->ste_ifp; 597 598 bus_dmamap_sync(sc->ste_cdata.ste_rx_list_tag, 599 sc->ste_cdata.ste_rx_list_map, 600 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 601 602 cur_rx = sc->ste_cdata.ste_rx_head; 603 for (rx_npkts = 0; rx_npkts < STE_RX_LIST_CNT; rx_npkts++, 604 cur_rx = cur_rx->ste_next) { 605 rxstat = le32toh(cur_rx->ste_ptr->ste_status); 606 if ((rxstat & STE_RXSTAT_DMADONE) == 0) 607 break; 608 #ifdef DEVICE_POLLING 609 if (ifp->if_capenable & IFCAP_POLLING) { 610 if (count == 0) 611 break; 612 count--; 613 } 614 #endif 615 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 616 break; 617 /* 618 * If an error occurs, update stats, clear the 619 * status word and leave the mbuf cluster in place: 620 * it should simply get re-used next time this descriptor 621 * comes up in the ring. 622 */ 623 if (rxstat & STE_RXSTAT_FRAME_ERR) { 624 ifp->if_ierrors++; 625 cur_rx->ste_ptr->ste_status = 0; 626 continue; 627 } 628 629 /* No errors; receive the packet. */ 630 m = cur_rx->ste_mbuf; 631 total_len = STE_RX_BYTES(rxstat); 632 633 /* 634 * Try to conjure up a new mbuf cluster. If that 635 * fails, it means we have an out of memory condition and 636 * should leave the buffer in place and continue. This will 637 * result in a lost packet, but there's little else we 638 * can do in this situation. 639 */ 640 if (ste_newbuf(sc, cur_rx) != 0) { 641 ifp->if_iqdrops++; 642 cur_rx->ste_ptr->ste_status = 0; 643 continue; 644 } 645 646 m->m_pkthdr.rcvif = ifp; 647 m->m_pkthdr.len = m->m_len = total_len; 648 649 ifp->if_ipackets++; 650 STE_UNLOCK(sc); 651 (*ifp->if_input)(ifp, m); 652 STE_LOCK(sc); 653 } 654 655 if (rx_npkts > 0) { 656 sc->ste_cdata.ste_rx_head = cur_rx; 657 bus_dmamap_sync(sc->ste_cdata.ste_rx_list_tag, 658 sc->ste_cdata.ste_rx_list_map, 659 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 660 } 661 662 return (rx_npkts); 663 } 664 665 static void 666 ste_txeoc(struct ste_softc *sc) 667 { 668 uint16_t txstat; 669 struct ifnet *ifp; 670 671 STE_LOCK_ASSERT(sc); 672 673 ifp = sc->ste_ifp; 674 675 /* 676 * STE_TX_STATUS register implements a queue of up to 31 677 * transmit status byte. Writing an arbitrary value to the 678 * register will advance the queue to the next transmit 679 * status byte. This means if driver does not read 680 * STE_TX_STATUS register after completing sending more 681 * than 31 frames the controller would be stalled so driver 682 * should re-wake the Tx MAC. This is the most severe 683 * limitation of ST201 based controller. 684 */ 685 for (;;) { 686 txstat = CSR_READ_2(sc, STE_TX_STATUS); 687 if ((txstat & STE_TXSTATUS_TXDONE) == 0) 688 break; 689 if ((txstat & (STE_TXSTATUS_UNDERRUN | 690 STE_TXSTATUS_EXCESSCOLLS | STE_TXSTATUS_RECLAIMERR | 691 STE_TXSTATUS_STATSOFLOW)) != 0) { 692 ifp->if_oerrors++; 693 #ifdef STE_SHOW_TXERRORS 694 device_printf(sc->ste_dev, "TX error : 0x%b\n", 695 txstat & 0xFF, STE_ERR_BITS); 696 #endif 697 if ((txstat & STE_TXSTATUS_UNDERRUN) != 0 && 698 sc->ste_tx_thresh < STE_PACKET_SIZE) { 699 sc->ste_tx_thresh += STE_MIN_FRAMELEN; 700 if (sc->ste_tx_thresh > STE_PACKET_SIZE) 701 sc->ste_tx_thresh = STE_PACKET_SIZE; 702 device_printf(sc->ste_dev, 703 "TX underrun, increasing TX" 704 " start threshold to %d bytes\n", 705 sc->ste_tx_thresh); 706 /* Make sure to disable active DMA cycles. */ 707 STE_SETBIT4(sc, STE_DMACTL, 708 STE_DMACTL_TXDMA_STALL); 709 ste_wait(sc); 710 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 711 ste_init_locked(sc); 712 break; 713 } 714 /* Restart Tx. */ 715 ste_restart_tx(sc); 716 } 717 /* 718 * Advance to next status and ACK TxComplete 719 * interrupt. ST201 data sheet was wrong here, to 720 * get next Tx status, we have to write both 721 * STE_TX_STATUS and STE_TX_FRAMEID register. 722 * Otherwise controller returns the same status 723 * as well as not acknowledge Tx completion 724 * interrupt. 725 */ 726 CSR_WRITE_2(sc, STE_TX_STATUS, txstat); 727 } 728 } 729 730 static void 731 ste_tick(void *arg) 732 { 733 struct ste_softc *sc; 734 struct mii_data *mii; 735 736 sc = (struct ste_softc *)arg; 737 738 STE_LOCK_ASSERT(sc); 739 740 mii = device_get_softc(sc->ste_miibus); 741 mii_tick(mii); 742 /* 743 * ukphy(4) does not seem to generate CB that reports 744 * resolved link state so if we know we lost a link, 745 * explicitly check the link state. 746 */ 747 if ((sc->ste_flags & STE_FLAG_LINK) == 0) 748 ste_miibus_statchg(sc->ste_dev); 749 /* 750 * Because we are not generating Tx completion 751 * interrupt for every frame, reclaim transmitted 752 * buffers here. 753 */ 754 ste_txeof(sc); 755 ste_txeoc(sc); 756 ste_stats_update(sc); 757 ste_watchdog(sc); 758 callout_reset(&sc->ste_callout, hz, ste_tick, sc); 759 } 760 761 static void 762 ste_txeof(struct ste_softc *sc) 763 { 764 struct ifnet *ifp; 765 struct ste_chain *cur_tx; 766 uint32_t txstat; 767 int idx; 768 769 STE_LOCK_ASSERT(sc); 770 771 ifp = sc->ste_ifp; 772 idx = sc->ste_cdata.ste_tx_cons; 773 if (idx == sc->ste_cdata.ste_tx_prod) 774 return; 775 776 bus_dmamap_sync(sc->ste_cdata.ste_tx_list_tag, 777 sc->ste_cdata.ste_tx_list_map, 778 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 779 780 while (idx != sc->ste_cdata.ste_tx_prod) { 781 cur_tx = &sc->ste_cdata.ste_tx_chain[idx]; 782 txstat = le32toh(cur_tx->ste_ptr->ste_ctl); 783 if ((txstat & STE_TXCTL_DMADONE) == 0) 784 break; 785 bus_dmamap_sync(sc->ste_cdata.ste_tx_tag, cur_tx->ste_map, 786 BUS_DMASYNC_POSTWRITE); 787 bus_dmamap_unload(sc->ste_cdata.ste_tx_tag, cur_tx->ste_map); 788 KASSERT(cur_tx->ste_mbuf != NULL, 789 ("%s: freeing NULL mbuf!\n", __func__)); 790 m_freem(cur_tx->ste_mbuf); 791 cur_tx->ste_mbuf = NULL; 792 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 793 ifp->if_opackets++; 794 sc->ste_cdata.ste_tx_cnt--; 795 STE_INC(idx, STE_TX_LIST_CNT); 796 } 797 798 sc->ste_cdata.ste_tx_cons = idx; 799 if (sc->ste_cdata.ste_tx_cnt == 0) 800 sc->ste_timer = 0; 801 } 802 803 static void 804 ste_stats_clear(struct ste_softc *sc) 805 { 806 807 STE_LOCK_ASSERT(sc); 808 809 /* Rx stats. */ 810 CSR_READ_2(sc, STE_STAT_RX_OCTETS_LO); 811 CSR_READ_2(sc, STE_STAT_RX_OCTETS_HI); 812 CSR_READ_2(sc, STE_STAT_RX_FRAMES); 813 CSR_READ_1(sc, STE_STAT_RX_BCAST); 814 CSR_READ_1(sc, STE_STAT_RX_MCAST); 815 CSR_READ_1(sc, STE_STAT_RX_LOST); 816 /* Tx stats. */ 817 CSR_READ_2(sc, STE_STAT_TX_OCTETS_LO); 818 CSR_READ_2(sc, STE_STAT_TX_OCTETS_HI); 819 CSR_READ_2(sc, STE_STAT_TX_FRAMES); 820 CSR_READ_1(sc, STE_STAT_TX_BCAST); 821 CSR_READ_1(sc, STE_STAT_TX_MCAST); 822 CSR_READ_1(sc, STE_STAT_CARRIER_ERR); 823 CSR_READ_1(sc, STE_STAT_SINGLE_COLLS); 824 CSR_READ_1(sc, STE_STAT_MULTI_COLLS); 825 CSR_READ_1(sc, STE_STAT_LATE_COLLS); 826 CSR_READ_1(sc, STE_STAT_TX_DEFER); 827 CSR_READ_1(sc, STE_STAT_TX_EXDEFER); 828 CSR_READ_1(sc, STE_STAT_TX_ABORT); 829 } 830 831 static void 832 ste_stats_update(struct ste_softc *sc) 833 { 834 struct ifnet *ifp; 835 struct ste_hw_stats *stats; 836 uint32_t val; 837 838 STE_LOCK_ASSERT(sc); 839 840 ifp = sc->ste_ifp; 841 stats = &sc->ste_stats; 842 /* Rx stats. */ 843 val = (uint32_t)CSR_READ_2(sc, STE_STAT_RX_OCTETS_LO) | 844 ((uint32_t)CSR_READ_2(sc, STE_STAT_RX_OCTETS_HI)) << 16; 845 val &= 0x000FFFFF; 846 stats->rx_bytes += val; 847 stats->rx_frames += CSR_READ_2(sc, STE_STAT_RX_FRAMES); 848 stats->rx_bcast_frames += CSR_READ_1(sc, STE_STAT_RX_BCAST); 849 stats->rx_mcast_frames += CSR_READ_1(sc, STE_STAT_RX_MCAST); 850 stats->rx_lost_frames += CSR_READ_1(sc, STE_STAT_RX_LOST); 851 /* Tx stats. */ 852 val = (uint32_t)CSR_READ_2(sc, STE_STAT_TX_OCTETS_LO) | 853 ((uint32_t)CSR_READ_2(sc, STE_STAT_TX_OCTETS_HI)) << 16; 854 val &= 0x000FFFFF; 855 stats->tx_bytes += val; 856 stats->tx_frames += CSR_READ_2(sc, STE_STAT_TX_FRAMES); 857 stats->tx_bcast_frames += CSR_READ_1(sc, STE_STAT_TX_BCAST); 858 stats->tx_mcast_frames += CSR_READ_1(sc, STE_STAT_TX_MCAST); 859 stats->tx_carrsense_errs += CSR_READ_1(sc, STE_STAT_CARRIER_ERR); 860 val = CSR_READ_1(sc, STE_STAT_SINGLE_COLLS); 861 stats->tx_single_colls += val; 862 ifp->if_collisions += val; 863 val = CSR_READ_1(sc, STE_STAT_MULTI_COLLS); 864 stats->tx_multi_colls += val; 865 ifp->if_collisions += val; 866 val += CSR_READ_1(sc, STE_STAT_LATE_COLLS); 867 stats->tx_late_colls += val; 868 ifp->if_collisions += val; 869 stats->tx_frames_defered += CSR_READ_1(sc, STE_STAT_TX_DEFER); 870 stats->tx_excess_defers += CSR_READ_1(sc, STE_STAT_TX_EXDEFER); 871 stats->tx_abort += CSR_READ_1(sc, STE_STAT_TX_ABORT); 872 } 873 874 /* 875 * Probe for a Sundance ST201 chip. Check the PCI vendor and device 876 * IDs against our list and return a device name if we find a match. 877 */ 878 static int 879 ste_probe(device_t dev) 880 { 881 const struct ste_type *t; 882 883 t = ste_devs; 884 885 while (t->ste_name != NULL) { 886 if ((pci_get_vendor(dev) == t->ste_vid) && 887 (pci_get_device(dev) == t->ste_did)) { 888 device_set_desc(dev, t->ste_name); 889 return (BUS_PROBE_DEFAULT); 890 } 891 t++; 892 } 893 894 return (ENXIO); 895 } 896 897 /* 898 * Attach the interface. Allocate softc structures, do ifmedia 899 * setup and ethernet/BPF attach. 900 */ 901 static int 902 ste_attach(device_t dev) 903 { 904 struct ste_softc *sc; 905 struct ifnet *ifp; 906 uint16_t eaddr[ETHER_ADDR_LEN / 2]; 907 int error = 0, phy, pmc, prefer_iomap, rid; 908 909 sc = device_get_softc(dev); 910 sc->ste_dev = dev; 911 912 /* 913 * Only use one PHY since this chip reports multiple 914 * Note on the DFE-550 the PHY is at 1 on the DFE-580 915 * it is at 0 & 1. It is rev 0x12. 916 */ 917 if (pci_get_vendor(dev) == DL_VENDORID && 918 pci_get_device(dev) == DL_DEVICEID_DL10050 && 919 pci_get_revid(dev) == 0x12 ) 920 sc->ste_flags |= STE_FLAG_ONE_PHY; 921 922 mtx_init(&sc->ste_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 923 MTX_DEF); 924 /* 925 * Map control/status registers. 926 */ 927 pci_enable_busmaster(dev); 928 929 /* 930 * Prefer memory space register mapping over IO space but use 931 * IO space for a device that is known to have issues on memory 932 * mapping. 933 */ 934 prefer_iomap = 0; 935 if (pci_get_device(dev) == ST_DEVICEID_ST201_1) 936 prefer_iomap = 1; 937 else 938 resource_int_value(device_get_name(sc->ste_dev), 939 device_get_unit(sc->ste_dev), "prefer_iomap", 940 &prefer_iomap); 941 if (prefer_iomap == 0) { 942 sc->ste_res_id = PCIR_BAR(1); 943 sc->ste_res_type = SYS_RES_MEMORY; 944 sc->ste_res = bus_alloc_resource_any(dev, sc->ste_res_type, 945 &sc->ste_res_id, RF_ACTIVE); 946 } 947 if (prefer_iomap || sc->ste_res == NULL) { 948 sc->ste_res_id = PCIR_BAR(0); 949 sc->ste_res_type = SYS_RES_IOPORT; 950 sc->ste_res = bus_alloc_resource_any(dev, sc->ste_res_type, 951 &sc->ste_res_id, RF_ACTIVE); 952 } 953 if (sc->ste_res == NULL) { 954 device_printf(dev, "couldn't map ports/memory\n"); 955 error = ENXIO; 956 goto fail; 957 } 958 959 /* Allocate interrupt */ 960 rid = 0; 961 sc->ste_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 962 RF_SHAREABLE | RF_ACTIVE); 963 964 if (sc->ste_irq == NULL) { 965 device_printf(dev, "couldn't map interrupt\n"); 966 error = ENXIO; 967 goto fail; 968 } 969 970 callout_init_mtx(&sc->ste_callout, &sc->ste_mtx, 0); 971 972 /* Reset the adapter. */ 973 ste_reset(sc); 974 975 /* 976 * Get station address from the EEPROM. 977 */ 978 if (ste_read_eeprom(sc, eaddr, STE_EEADDR_NODE0, ETHER_ADDR_LEN / 2)) { 979 device_printf(dev, "failed to read station address\n"); 980 error = ENXIO; 981 goto fail; 982 } 983 ste_sysctl_node(sc); 984 985 if ((error = ste_dma_alloc(sc)) != 0) 986 goto fail; 987 988 ifp = sc->ste_ifp = if_alloc(IFT_ETHER); 989 if (ifp == NULL) { 990 device_printf(dev, "can not if_alloc()\n"); 991 error = ENOSPC; 992 goto fail; 993 } 994 995 /* Do MII setup. */ 996 phy = MII_PHY_ANY; 997 if ((sc->ste_flags & STE_FLAG_ONE_PHY) != 0) 998 phy = 0; 999 error = mii_attach(dev, &sc->ste_miibus, ifp, ste_ifmedia_upd, 1000 ste_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0); 1001 if (error != 0) { 1002 device_printf(dev, "attaching PHYs failed\n"); 1003 goto fail; 1004 } 1005 1006 ifp->if_softc = sc; 1007 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1008 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1009 ifp->if_ioctl = ste_ioctl; 1010 ifp->if_start = ste_start; 1011 ifp->if_init = ste_init; 1012 IFQ_SET_MAXLEN(&ifp->if_snd, STE_TX_LIST_CNT - 1); 1013 ifp->if_snd.ifq_drv_maxlen = STE_TX_LIST_CNT - 1; 1014 IFQ_SET_READY(&ifp->if_snd); 1015 1016 sc->ste_tx_thresh = STE_TXSTART_THRESH; 1017 1018 /* 1019 * Call MI attach routine. 1020 */ 1021 ether_ifattach(ifp, (uint8_t *)eaddr); 1022 1023 /* 1024 * Tell the upper layer(s) we support long frames. 1025 */ 1026 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 1027 ifp->if_capabilities |= IFCAP_VLAN_MTU; 1028 if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0) 1029 ifp->if_capabilities |= IFCAP_WOL_MAGIC; 1030 ifp->if_capenable = ifp->if_capabilities; 1031 #ifdef DEVICE_POLLING 1032 ifp->if_capabilities |= IFCAP_POLLING; 1033 #endif 1034 1035 /* Hook interrupt last to avoid having to lock softc */ 1036 error = bus_setup_intr(dev, sc->ste_irq, INTR_TYPE_NET | INTR_MPSAFE, 1037 NULL, ste_intr, sc, &sc->ste_intrhand); 1038 1039 if (error) { 1040 device_printf(dev, "couldn't set up irq\n"); 1041 ether_ifdetach(ifp); 1042 goto fail; 1043 } 1044 1045 fail: 1046 if (error) 1047 ste_detach(dev); 1048 1049 return (error); 1050 } 1051 1052 /* 1053 * Shutdown hardware and free up resources. This can be called any 1054 * time after the mutex has been initialized. It is called in both 1055 * the error case in attach and the normal detach case so it needs 1056 * to be careful about only freeing resources that have actually been 1057 * allocated. 1058 */ 1059 static int 1060 ste_detach(device_t dev) 1061 { 1062 struct ste_softc *sc; 1063 struct ifnet *ifp; 1064 1065 sc = device_get_softc(dev); 1066 KASSERT(mtx_initialized(&sc->ste_mtx), ("ste mutex not initialized")); 1067 ifp = sc->ste_ifp; 1068 1069 #ifdef DEVICE_POLLING 1070 if (ifp->if_capenable & IFCAP_POLLING) 1071 ether_poll_deregister(ifp); 1072 #endif 1073 1074 /* These should only be active if attach succeeded */ 1075 if (device_is_attached(dev)) { 1076 ether_ifdetach(ifp); 1077 STE_LOCK(sc); 1078 ste_stop(sc); 1079 STE_UNLOCK(sc); 1080 callout_drain(&sc->ste_callout); 1081 } 1082 if (sc->ste_miibus) 1083 device_delete_child(dev, sc->ste_miibus); 1084 bus_generic_detach(dev); 1085 1086 if (sc->ste_intrhand) 1087 bus_teardown_intr(dev, sc->ste_irq, sc->ste_intrhand); 1088 if (sc->ste_irq) 1089 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ste_irq); 1090 if (sc->ste_res) 1091 bus_release_resource(dev, sc->ste_res_type, sc->ste_res_id, 1092 sc->ste_res); 1093 1094 if (ifp) 1095 if_free(ifp); 1096 1097 ste_dma_free(sc); 1098 mtx_destroy(&sc->ste_mtx); 1099 1100 return (0); 1101 } 1102 1103 struct ste_dmamap_arg { 1104 bus_addr_t ste_busaddr; 1105 }; 1106 1107 static void 1108 ste_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 1109 { 1110 struct ste_dmamap_arg *ctx; 1111 1112 if (error != 0) 1113 return; 1114 1115 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1116 1117 ctx = (struct ste_dmamap_arg *)arg; 1118 ctx->ste_busaddr = segs[0].ds_addr; 1119 } 1120 1121 static int 1122 ste_dma_alloc(struct ste_softc *sc) 1123 { 1124 struct ste_chain *txc; 1125 struct ste_chain_onefrag *rxc; 1126 struct ste_dmamap_arg ctx; 1127 int error, i; 1128 1129 /* Create parent DMA tag. */ 1130 error = bus_dma_tag_create( 1131 bus_get_dma_tag(sc->ste_dev), /* parent */ 1132 1, 0, /* alignment, boundary */ 1133 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1134 BUS_SPACE_MAXADDR, /* highaddr */ 1135 NULL, NULL, /* filter, filterarg */ 1136 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 1137 0, /* nsegments */ 1138 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 1139 0, /* flags */ 1140 NULL, NULL, /* lockfunc, lockarg */ 1141 &sc->ste_cdata.ste_parent_tag); 1142 if (error != 0) { 1143 device_printf(sc->ste_dev, 1144 "could not create parent DMA tag.\n"); 1145 goto fail; 1146 } 1147 1148 /* Create DMA tag for Tx descriptor list. */ 1149 error = bus_dma_tag_create( 1150 sc->ste_cdata.ste_parent_tag, /* parent */ 1151 STE_DESC_ALIGN, 0, /* alignment, boundary */ 1152 BUS_SPACE_MAXADDR, /* lowaddr */ 1153 BUS_SPACE_MAXADDR, /* highaddr */ 1154 NULL, NULL, /* filter, filterarg */ 1155 STE_TX_LIST_SZ, /* maxsize */ 1156 1, /* nsegments */ 1157 STE_TX_LIST_SZ, /* maxsegsize */ 1158 0, /* flags */ 1159 NULL, NULL, /* lockfunc, lockarg */ 1160 &sc->ste_cdata.ste_tx_list_tag); 1161 if (error != 0) { 1162 device_printf(sc->ste_dev, 1163 "could not create Tx list DMA tag.\n"); 1164 goto fail; 1165 } 1166 1167 /* Create DMA tag for Rx descriptor list. */ 1168 error = bus_dma_tag_create( 1169 sc->ste_cdata.ste_parent_tag, /* parent */ 1170 STE_DESC_ALIGN, 0, /* alignment, boundary */ 1171 BUS_SPACE_MAXADDR, /* lowaddr */ 1172 BUS_SPACE_MAXADDR, /* highaddr */ 1173 NULL, NULL, /* filter, filterarg */ 1174 STE_RX_LIST_SZ, /* maxsize */ 1175 1, /* nsegments */ 1176 STE_RX_LIST_SZ, /* maxsegsize */ 1177 0, /* flags */ 1178 NULL, NULL, /* lockfunc, lockarg */ 1179 &sc->ste_cdata.ste_rx_list_tag); 1180 if (error != 0) { 1181 device_printf(sc->ste_dev, 1182 "could not create Rx list DMA tag.\n"); 1183 goto fail; 1184 } 1185 1186 /* Create DMA tag for Tx buffers. */ 1187 error = bus_dma_tag_create( 1188 sc->ste_cdata.ste_parent_tag, /* parent */ 1189 1, 0, /* alignment, boundary */ 1190 BUS_SPACE_MAXADDR, /* lowaddr */ 1191 BUS_SPACE_MAXADDR, /* highaddr */ 1192 NULL, NULL, /* filter, filterarg */ 1193 MCLBYTES * STE_MAXFRAGS, /* maxsize */ 1194 STE_MAXFRAGS, /* nsegments */ 1195 MCLBYTES, /* maxsegsize */ 1196 0, /* flags */ 1197 NULL, NULL, /* lockfunc, lockarg */ 1198 &sc->ste_cdata.ste_tx_tag); 1199 if (error != 0) { 1200 device_printf(sc->ste_dev, "could not create Tx DMA tag.\n"); 1201 goto fail; 1202 } 1203 1204 /* Create DMA tag for Rx buffers. */ 1205 error = bus_dma_tag_create( 1206 sc->ste_cdata.ste_parent_tag, /* parent */ 1207 1, 0, /* alignment, boundary */ 1208 BUS_SPACE_MAXADDR, /* lowaddr */ 1209 BUS_SPACE_MAXADDR, /* highaddr */ 1210 NULL, NULL, /* filter, filterarg */ 1211 MCLBYTES, /* maxsize */ 1212 1, /* nsegments */ 1213 MCLBYTES, /* maxsegsize */ 1214 0, /* flags */ 1215 NULL, NULL, /* lockfunc, lockarg */ 1216 &sc->ste_cdata.ste_rx_tag); 1217 if (error != 0) { 1218 device_printf(sc->ste_dev, "could not create Rx DMA tag.\n"); 1219 goto fail; 1220 } 1221 1222 /* Allocate DMA'able memory and load the DMA map for Tx list. */ 1223 error = bus_dmamem_alloc(sc->ste_cdata.ste_tx_list_tag, 1224 (void **)&sc->ste_ldata.ste_tx_list, 1225 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 1226 &sc->ste_cdata.ste_tx_list_map); 1227 if (error != 0) { 1228 device_printf(sc->ste_dev, 1229 "could not allocate DMA'able memory for Tx list.\n"); 1230 goto fail; 1231 } 1232 ctx.ste_busaddr = 0; 1233 error = bus_dmamap_load(sc->ste_cdata.ste_tx_list_tag, 1234 sc->ste_cdata.ste_tx_list_map, sc->ste_ldata.ste_tx_list, 1235 STE_TX_LIST_SZ, ste_dmamap_cb, &ctx, 0); 1236 if (error != 0 || ctx.ste_busaddr == 0) { 1237 device_printf(sc->ste_dev, 1238 "could not load DMA'able memory for Tx list.\n"); 1239 goto fail; 1240 } 1241 sc->ste_ldata.ste_tx_list_paddr = ctx.ste_busaddr; 1242 1243 /* Allocate DMA'able memory and load the DMA map for Rx list. */ 1244 error = bus_dmamem_alloc(sc->ste_cdata.ste_rx_list_tag, 1245 (void **)&sc->ste_ldata.ste_rx_list, 1246 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 1247 &sc->ste_cdata.ste_rx_list_map); 1248 if (error != 0) { 1249 device_printf(sc->ste_dev, 1250 "could not allocate DMA'able memory for Rx list.\n"); 1251 goto fail; 1252 } 1253 ctx.ste_busaddr = 0; 1254 error = bus_dmamap_load(sc->ste_cdata.ste_rx_list_tag, 1255 sc->ste_cdata.ste_rx_list_map, sc->ste_ldata.ste_rx_list, 1256 STE_RX_LIST_SZ, ste_dmamap_cb, &ctx, 0); 1257 if (error != 0 || ctx.ste_busaddr == 0) { 1258 device_printf(sc->ste_dev, 1259 "could not load DMA'able memory for Rx list.\n"); 1260 goto fail; 1261 } 1262 sc->ste_ldata.ste_rx_list_paddr = ctx.ste_busaddr; 1263 1264 /* Create DMA maps for Tx buffers. */ 1265 for (i = 0; i < STE_TX_LIST_CNT; i++) { 1266 txc = &sc->ste_cdata.ste_tx_chain[i]; 1267 txc->ste_ptr = NULL; 1268 txc->ste_mbuf = NULL; 1269 txc->ste_next = NULL; 1270 txc->ste_phys = 0; 1271 txc->ste_map = NULL; 1272 error = bus_dmamap_create(sc->ste_cdata.ste_tx_tag, 0, 1273 &txc->ste_map); 1274 if (error != 0) { 1275 device_printf(sc->ste_dev, 1276 "could not create Tx dmamap.\n"); 1277 goto fail; 1278 } 1279 } 1280 /* Create DMA maps for Rx buffers. */ 1281 if ((error = bus_dmamap_create(sc->ste_cdata.ste_rx_tag, 0, 1282 &sc->ste_cdata.ste_rx_sparemap)) != 0) { 1283 device_printf(sc->ste_dev, 1284 "could not create spare Rx dmamap.\n"); 1285 goto fail; 1286 } 1287 for (i = 0; i < STE_RX_LIST_CNT; i++) { 1288 rxc = &sc->ste_cdata.ste_rx_chain[i]; 1289 rxc->ste_ptr = NULL; 1290 rxc->ste_mbuf = NULL; 1291 rxc->ste_next = NULL; 1292 rxc->ste_map = NULL; 1293 error = bus_dmamap_create(sc->ste_cdata.ste_rx_tag, 0, 1294 &rxc->ste_map); 1295 if (error != 0) { 1296 device_printf(sc->ste_dev, 1297 "could not create Rx dmamap.\n"); 1298 goto fail; 1299 } 1300 } 1301 1302 fail: 1303 return (error); 1304 } 1305 1306 static void 1307 ste_dma_free(struct ste_softc *sc) 1308 { 1309 struct ste_chain *txc; 1310 struct ste_chain_onefrag *rxc; 1311 int i; 1312 1313 /* Tx buffers. */ 1314 if (sc->ste_cdata.ste_tx_tag != NULL) { 1315 for (i = 0; i < STE_TX_LIST_CNT; i++) { 1316 txc = &sc->ste_cdata.ste_tx_chain[i]; 1317 if (txc->ste_map != NULL) { 1318 bus_dmamap_destroy(sc->ste_cdata.ste_tx_tag, 1319 txc->ste_map); 1320 txc->ste_map = NULL; 1321 } 1322 } 1323 bus_dma_tag_destroy(sc->ste_cdata.ste_tx_tag); 1324 sc->ste_cdata.ste_tx_tag = NULL; 1325 } 1326 /* Rx buffers. */ 1327 if (sc->ste_cdata.ste_rx_tag != NULL) { 1328 for (i = 0; i < STE_RX_LIST_CNT; i++) { 1329 rxc = &sc->ste_cdata.ste_rx_chain[i]; 1330 if (rxc->ste_map != NULL) { 1331 bus_dmamap_destroy(sc->ste_cdata.ste_rx_tag, 1332 rxc->ste_map); 1333 rxc->ste_map = NULL; 1334 } 1335 } 1336 if (sc->ste_cdata.ste_rx_sparemap != NULL) { 1337 bus_dmamap_destroy(sc->ste_cdata.ste_rx_tag, 1338 sc->ste_cdata.ste_rx_sparemap); 1339 sc->ste_cdata.ste_rx_sparemap = NULL; 1340 } 1341 bus_dma_tag_destroy(sc->ste_cdata.ste_rx_tag); 1342 sc->ste_cdata.ste_rx_tag = NULL; 1343 } 1344 /* Tx descriptor list. */ 1345 if (sc->ste_cdata.ste_tx_list_tag != NULL) { 1346 if (sc->ste_cdata.ste_tx_list_map != NULL) 1347 bus_dmamap_unload(sc->ste_cdata.ste_tx_list_tag, 1348 sc->ste_cdata.ste_tx_list_map); 1349 if (sc->ste_cdata.ste_tx_list_map != NULL && 1350 sc->ste_ldata.ste_tx_list != NULL) 1351 bus_dmamem_free(sc->ste_cdata.ste_tx_list_tag, 1352 sc->ste_ldata.ste_tx_list, 1353 sc->ste_cdata.ste_tx_list_map); 1354 sc->ste_ldata.ste_tx_list = NULL; 1355 sc->ste_cdata.ste_tx_list_map = NULL; 1356 bus_dma_tag_destroy(sc->ste_cdata.ste_tx_list_tag); 1357 sc->ste_cdata.ste_tx_list_tag = NULL; 1358 } 1359 /* Rx descriptor list. */ 1360 if (sc->ste_cdata.ste_rx_list_tag != NULL) { 1361 if (sc->ste_cdata.ste_rx_list_map != NULL) 1362 bus_dmamap_unload(sc->ste_cdata.ste_rx_list_tag, 1363 sc->ste_cdata.ste_rx_list_map); 1364 if (sc->ste_cdata.ste_rx_list_map != NULL && 1365 sc->ste_ldata.ste_rx_list != NULL) 1366 bus_dmamem_free(sc->ste_cdata.ste_rx_list_tag, 1367 sc->ste_ldata.ste_rx_list, 1368 sc->ste_cdata.ste_rx_list_map); 1369 sc->ste_ldata.ste_rx_list = NULL; 1370 sc->ste_cdata.ste_rx_list_map = NULL; 1371 bus_dma_tag_destroy(sc->ste_cdata.ste_rx_list_tag); 1372 sc->ste_cdata.ste_rx_list_tag = NULL; 1373 } 1374 if (sc->ste_cdata.ste_parent_tag != NULL) { 1375 bus_dma_tag_destroy(sc->ste_cdata.ste_parent_tag); 1376 sc->ste_cdata.ste_parent_tag = NULL; 1377 } 1378 } 1379 1380 static int 1381 ste_newbuf(struct ste_softc *sc, struct ste_chain_onefrag *rxc) 1382 { 1383 struct mbuf *m; 1384 bus_dma_segment_t segs[1]; 1385 bus_dmamap_t map; 1386 int error, nsegs; 1387 1388 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1389 if (m == NULL) 1390 return (ENOBUFS); 1391 m->m_len = m->m_pkthdr.len = MCLBYTES; 1392 m_adj(m, ETHER_ALIGN); 1393 1394 if ((error = bus_dmamap_load_mbuf_sg(sc->ste_cdata.ste_rx_tag, 1395 sc->ste_cdata.ste_rx_sparemap, m, segs, &nsegs, 0)) != 0) { 1396 m_freem(m); 1397 return (error); 1398 } 1399 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1400 1401 if (rxc->ste_mbuf != NULL) { 1402 bus_dmamap_sync(sc->ste_cdata.ste_rx_tag, rxc->ste_map, 1403 BUS_DMASYNC_POSTREAD); 1404 bus_dmamap_unload(sc->ste_cdata.ste_rx_tag, rxc->ste_map); 1405 } 1406 map = rxc->ste_map; 1407 rxc->ste_map = sc->ste_cdata.ste_rx_sparemap; 1408 sc->ste_cdata.ste_rx_sparemap = map; 1409 bus_dmamap_sync(sc->ste_cdata.ste_rx_tag, rxc->ste_map, 1410 BUS_DMASYNC_PREREAD); 1411 rxc->ste_mbuf = m; 1412 rxc->ste_ptr->ste_status = 0; 1413 rxc->ste_ptr->ste_frag.ste_addr = htole32(segs[0].ds_addr); 1414 rxc->ste_ptr->ste_frag.ste_len = htole32(segs[0].ds_len | 1415 STE_FRAG_LAST); 1416 return (0); 1417 } 1418 1419 static int 1420 ste_init_rx_list(struct ste_softc *sc) 1421 { 1422 struct ste_chain_data *cd; 1423 struct ste_list_data *ld; 1424 int error, i; 1425 1426 sc->ste_int_rx_act = 0; 1427 cd = &sc->ste_cdata; 1428 ld = &sc->ste_ldata; 1429 bzero(ld->ste_rx_list, STE_RX_LIST_SZ); 1430 for (i = 0; i < STE_RX_LIST_CNT; i++) { 1431 cd->ste_rx_chain[i].ste_ptr = &ld->ste_rx_list[i]; 1432 error = ste_newbuf(sc, &cd->ste_rx_chain[i]); 1433 if (error != 0) 1434 return (error); 1435 if (i == (STE_RX_LIST_CNT - 1)) { 1436 cd->ste_rx_chain[i].ste_next = &cd->ste_rx_chain[0]; 1437 ld->ste_rx_list[i].ste_next = 1438 htole32(ld->ste_rx_list_paddr + 1439 (sizeof(struct ste_desc_onefrag) * 0)); 1440 } else { 1441 cd->ste_rx_chain[i].ste_next = &cd->ste_rx_chain[i + 1]; 1442 ld->ste_rx_list[i].ste_next = 1443 htole32(ld->ste_rx_list_paddr + 1444 (sizeof(struct ste_desc_onefrag) * (i + 1))); 1445 } 1446 } 1447 1448 cd->ste_rx_head = &cd->ste_rx_chain[0]; 1449 bus_dmamap_sync(sc->ste_cdata.ste_rx_list_tag, 1450 sc->ste_cdata.ste_rx_list_map, 1451 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1452 1453 return (0); 1454 } 1455 1456 static void 1457 ste_init_tx_list(struct ste_softc *sc) 1458 { 1459 struct ste_chain_data *cd; 1460 struct ste_list_data *ld; 1461 int i; 1462 1463 cd = &sc->ste_cdata; 1464 ld = &sc->ste_ldata; 1465 bzero(ld->ste_tx_list, STE_TX_LIST_SZ); 1466 for (i = 0; i < STE_TX_LIST_CNT; i++) { 1467 cd->ste_tx_chain[i].ste_ptr = &ld->ste_tx_list[i]; 1468 cd->ste_tx_chain[i].ste_mbuf = NULL; 1469 if (i == (STE_TX_LIST_CNT - 1)) { 1470 cd->ste_tx_chain[i].ste_next = &cd->ste_tx_chain[0]; 1471 cd->ste_tx_chain[i].ste_phys = htole32(STE_ADDR_LO( 1472 ld->ste_tx_list_paddr + 1473 (sizeof(struct ste_desc) * 0))); 1474 } else { 1475 cd->ste_tx_chain[i].ste_next = &cd->ste_tx_chain[i + 1]; 1476 cd->ste_tx_chain[i].ste_phys = htole32(STE_ADDR_LO( 1477 ld->ste_tx_list_paddr + 1478 (sizeof(struct ste_desc) * (i + 1)))); 1479 } 1480 } 1481 1482 cd->ste_last_tx = NULL; 1483 cd->ste_tx_prod = 0; 1484 cd->ste_tx_cons = 0; 1485 cd->ste_tx_cnt = 0; 1486 1487 bus_dmamap_sync(sc->ste_cdata.ste_tx_list_tag, 1488 sc->ste_cdata.ste_tx_list_map, 1489 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1490 } 1491 1492 static void 1493 ste_init(void *xsc) 1494 { 1495 struct ste_softc *sc; 1496 1497 sc = xsc; 1498 STE_LOCK(sc); 1499 ste_init_locked(sc); 1500 STE_UNLOCK(sc); 1501 } 1502 1503 static void 1504 ste_init_locked(struct ste_softc *sc) 1505 { 1506 struct ifnet *ifp; 1507 struct mii_data *mii; 1508 uint8_t val; 1509 int i; 1510 1511 STE_LOCK_ASSERT(sc); 1512 ifp = sc->ste_ifp; 1513 mii = device_get_softc(sc->ste_miibus); 1514 1515 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1516 return; 1517 1518 ste_stop(sc); 1519 /* Reset the chip to a known state. */ 1520 ste_reset(sc); 1521 1522 /* Init our MAC address */ 1523 for (i = 0; i < ETHER_ADDR_LEN; i += 2) { 1524 CSR_WRITE_2(sc, STE_PAR0 + i, 1525 ((IF_LLADDR(sc->ste_ifp)[i] & 0xff) | 1526 IF_LLADDR(sc->ste_ifp)[i + 1] << 8)); 1527 } 1528 1529 /* Init RX list */ 1530 if (ste_init_rx_list(sc) != 0) { 1531 device_printf(sc->ste_dev, 1532 "initialization failed: no memory for RX buffers\n"); 1533 ste_stop(sc); 1534 return; 1535 } 1536 1537 /* Set RX polling interval */ 1538 CSR_WRITE_1(sc, STE_RX_DMAPOLL_PERIOD, 64); 1539 1540 /* Init TX descriptors */ 1541 ste_init_tx_list(sc); 1542 1543 /* Clear and disable WOL. */ 1544 val = CSR_READ_1(sc, STE_WAKE_EVENT); 1545 val &= ~(STE_WAKEEVENT_WAKEPKT_ENB | STE_WAKEEVENT_MAGICPKT_ENB | 1546 STE_WAKEEVENT_LINKEVT_ENB | STE_WAKEEVENT_WAKEONLAN_ENB); 1547 CSR_WRITE_1(sc, STE_WAKE_EVENT, val); 1548 1549 /* Set the TX freethresh value */ 1550 CSR_WRITE_1(sc, STE_TX_DMABURST_THRESH, STE_PACKET_SIZE >> 8); 1551 1552 /* Set the TX start threshold for best performance. */ 1553 CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh); 1554 1555 /* Set the TX reclaim threshold. */ 1556 CSR_WRITE_1(sc, STE_TX_RECLAIM_THRESH, (STE_PACKET_SIZE >> 4)); 1557 1558 /* Accept VLAN length packets */ 1559 CSR_WRITE_2(sc, STE_MAX_FRAMELEN, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN); 1560 1561 /* Set up the RX filter. */ 1562 ste_rxfilter(sc); 1563 1564 /* Load the address of the RX list. */ 1565 STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL); 1566 ste_wait(sc); 1567 CSR_WRITE_4(sc, STE_RX_DMALIST_PTR, 1568 STE_ADDR_LO(sc->ste_ldata.ste_rx_list_paddr)); 1569 STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL); 1570 STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL); 1571 1572 /* Set TX polling interval(defer until we TX first packet). */ 1573 CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 0); 1574 1575 /* Load address of the TX list */ 1576 STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL); 1577 ste_wait(sc); 1578 CSR_WRITE_4(sc, STE_TX_DMALIST_PTR, 0); 1579 STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL); 1580 STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL); 1581 ste_wait(sc); 1582 /* Select 3.2us timer. */ 1583 STE_CLRBIT4(sc, STE_DMACTL, STE_DMACTL_COUNTDOWN_SPEED | 1584 STE_DMACTL_COUNTDOWN_MODE); 1585 1586 /* Enable receiver and transmitter */ 1587 CSR_WRITE_2(sc, STE_MACCTL0, 0); 1588 CSR_WRITE_2(sc, STE_MACCTL1, 0); 1589 STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_ENABLE); 1590 STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_ENABLE); 1591 1592 /* Enable stats counters. */ 1593 STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_ENABLE); 1594 /* Clear stats counters. */ 1595 ste_stats_clear(sc); 1596 1597 CSR_WRITE_2(sc, STE_COUNTDOWN, 0); 1598 CSR_WRITE_2(sc, STE_ISR, 0xFFFF); 1599 #ifdef DEVICE_POLLING 1600 /* Disable interrupts if we are polling. */ 1601 if (ifp->if_capenable & IFCAP_POLLING) 1602 CSR_WRITE_2(sc, STE_IMR, 0); 1603 else 1604 #endif 1605 /* Enable interrupts. */ 1606 CSR_WRITE_2(sc, STE_IMR, STE_INTRS); 1607 1608 sc->ste_flags &= ~STE_FLAG_LINK; 1609 /* Switch to the current media. */ 1610 mii_mediachg(mii); 1611 1612 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1613 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1614 1615 callout_reset(&sc->ste_callout, hz, ste_tick, sc); 1616 } 1617 1618 static void 1619 ste_stop(struct ste_softc *sc) 1620 { 1621 struct ifnet *ifp; 1622 struct ste_chain_onefrag *cur_rx; 1623 struct ste_chain *cur_tx; 1624 uint32_t val; 1625 int i; 1626 1627 STE_LOCK_ASSERT(sc); 1628 ifp = sc->ste_ifp; 1629 1630 callout_stop(&sc->ste_callout); 1631 sc->ste_timer = 0; 1632 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING|IFF_DRV_OACTIVE); 1633 1634 CSR_WRITE_2(sc, STE_IMR, 0); 1635 CSR_WRITE_2(sc, STE_COUNTDOWN, 0); 1636 /* Stop pending DMA. */ 1637 val = CSR_READ_4(sc, STE_DMACTL); 1638 val |= STE_DMACTL_TXDMA_STALL | STE_DMACTL_RXDMA_STALL; 1639 CSR_WRITE_4(sc, STE_DMACTL, val); 1640 ste_wait(sc); 1641 /* Disable auto-polling. */ 1642 CSR_WRITE_1(sc, STE_RX_DMAPOLL_PERIOD, 0); 1643 CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 0); 1644 /* Nullify DMA address to stop any further DMA. */ 1645 CSR_WRITE_4(sc, STE_RX_DMALIST_PTR, 0); 1646 CSR_WRITE_4(sc, STE_TX_DMALIST_PTR, 0); 1647 /* Stop TX/RX MAC. */ 1648 val = CSR_READ_2(sc, STE_MACCTL1); 1649 val |= STE_MACCTL1_TX_DISABLE | STE_MACCTL1_RX_DISABLE | 1650 STE_MACCTL1_STATS_DISABLE; 1651 CSR_WRITE_2(sc, STE_MACCTL1, val); 1652 for (i = 0; i < STE_TIMEOUT; i++) { 1653 DELAY(10); 1654 if ((CSR_READ_2(sc, STE_MACCTL1) & (STE_MACCTL1_TX_DISABLE | 1655 STE_MACCTL1_RX_DISABLE | STE_MACCTL1_STATS_DISABLE)) == 0) 1656 break; 1657 } 1658 if (i == STE_TIMEOUT) 1659 device_printf(sc->ste_dev, "Stopping MAC timed out\n"); 1660 /* Acknowledge any pending interrupts. */ 1661 CSR_READ_2(sc, STE_ISR_ACK); 1662 ste_stats_update(sc); 1663 1664 for (i = 0; i < STE_RX_LIST_CNT; i++) { 1665 cur_rx = &sc->ste_cdata.ste_rx_chain[i]; 1666 if (cur_rx->ste_mbuf != NULL) { 1667 bus_dmamap_sync(sc->ste_cdata.ste_rx_tag, 1668 cur_rx->ste_map, BUS_DMASYNC_POSTREAD); 1669 bus_dmamap_unload(sc->ste_cdata.ste_rx_tag, 1670 cur_rx->ste_map); 1671 m_freem(cur_rx->ste_mbuf); 1672 cur_rx->ste_mbuf = NULL; 1673 } 1674 } 1675 1676 for (i = 0; i < STE_TX_LIST_CNT; i++) { 1677 cur_tx = &sc->ste_cdata.ste_tx_chain[i]; 1678 if (cur_tx->ste_mbuf != NULL) { 1679 bus_dmamap_sync(sc->ste_cdata.ste_tx_tag, 1680 cur_tx->ste_map, BUS_DMASYNC_POSTWRITE); 1681 bus_dmamap_unload(sc->ste_cdata.ste_tx_tag, 1682 cur_tx->ste_map); 1683 m_freem(cur_tx->ste_mbuf); 1684 cur_tx->ste_mbuf = NULL; 1685 } 1686 } 1687 } 1688 1689 static void 1690 ste_reset(struct ste_softc *sc) 1691 { 1692 uint32_t ctl; 1693 int i; 1694 1695 ctl = CSR_READ_4(sc, STE_ASICCTL); 1696 ctl |= STE_ASICCTL_GLOBAL_RESET | STE_ASICCTL_RX_RESET | 1697 STE_ASICCTL_TX_RESET | STE_ASICCTL_DMA_RESET | 1698 STE_ASICCTL_FIFO_RESET | STE_ASICCTL_NETWORK_RESET | 1699 STE_ASICCTL_AUTOINIT_RESET |STE_ASICCTL_HOST_RESET | 1700 STE_ASICCTL_EXTRESET_RESET; 1701 CSR_WRITE_4(sc, STE_ASICCTL, ctl); 1702 CSR_READ_4(sc, STE_ASICCTL); 1703 /* 1704 * Due to the need of accessing EEPROM controller can take 1705 * up to 1ms to complete the global reset. 1706 */ 1707 DELAY(1000); 1708 1709 for (i = 0; i < STE_TIMEOUT; i++) { 1710 if (!(CSR_READ_4(sc, STE_ASICCTL) & STE_ASICCTL_RESET_BUSY)) 1711 break; 1712 DELAY(10); 1713 } 1714 1715 if (i == STE_TIMEOUT) 1716 device_printf(sc->ste_dev, "global reset never completed\n"); 1717 } 1718 1719 static void 1720 ste_restart_tx(struct ste_softc *sc) 1721 { 1722 uint16_t mac; 1723 int i; 1724 1725 for (i = 0; i < STE_TIMEOUT; i++) { 1726 mac = CSR_READ_2(sc, STE_MACCTL1); 1727 mac |= STE_MACCTL1_TX_ENABLE; 1728 CSR_WRITE_2(sc, STE_MACCTL1, mac); 1729 mac = CSR_READ_2(sc, STE_MACCTL1); 1730 if ((mac & STE_MACCTL1_TX_ENABLED) != 0) 1731 break; 1732 DELAY(10); 1733 } 1734 1735 if (i == STE_TIMEOUT) 1736 device_printf(sc->ste_dev, "starting Tx failed"); 1737 } 1738 1739 static int 1740 ste_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 1741 { 1742 struct ste_softc *sc; 1743 struct ifreq *ifr; 1744 struct mii_data *mii; 1745 int error = 0, mask; 1746 1747 sc = ifp->if_softc; 1748 ifr = (struct ifreq *)data; 1749 1750 switch (command) { 1751 case SIOCSIFFLAGS: 1752 STE_LOCK(sc); 1753 if ((ifp->if_flags & IFF_UP) != 0) { 1754 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 && 1755 ((ifp->if_flags ^ sc->ste_if_flags) & 1756 (IFF_PROMISC | IFF_ALLMULTI)) != 0) 1757 ste_rxfilter(sc); 1758 else 1759 ste_init_locked(sc); 1760 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1761 ste_stop(sc); 1762 sc->ste_if_flags = ifp->if_flags; 1763 STE_UNLOCK(sc); 1764 break; 1765 case SIOCADDMULTI: 1766 case SIOCDELMULTI: 1767 STE_LOCK(sc); 1768 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1769 ste_rxfilter(sc); 1770 STE_UNLOCK(sc); 1771 break; 1772 case SIOCGIFMEDIA: 1773 case SIOCSIFMEDIA: 1774 mii = device_get_softc(sc->ste_miibus); 1775 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 1776 break; 1777 case SIOCSIFCAP: 1778 STE_LOCK(sc); 1779 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1780 #ifdef DEVICE_POLLING 1781 if ((mask & IFCAP_POLLING) != 0 && 1782 (IFCAP_POLLING & ifp->if_capabilities) != 0) { 1783 ifp->if_capenable ^= IFCAP_POLLING; 1784 if ((IFCAP_POLLING & ifp->if_capenable) != 0) { 1785 error = ether_poll_register(ste_poll, ifp); 1786 if (error != 0) { 1787 STE_UNLOCK(sc); 1788 break; 1789 } 1790 /* Disable interrupts. */ 1791 CSR_WRITE_2(sc, STE_IMR, 0); 1792 } else { 1793 error = ether_poll_deregister(ifp); 1794 /* Enable interrupts. */ 1795 CSR_WRITE_2(sc, STE_IMR, STE_INTRS); 1796 } 1797 } 1798 #endif /* DEVICE_POLLING */ 1799 if ((mask & IFCAP_WOL_MAGIC) != 0 && 1800 (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0) 1801 ifp->if_capenable ^= IFCAP_WOL_MAGIC; 1802 STE_UNLOCK(sc); 1803 break; 1804 default: 1805 error = ether_ioctl(ifp, command, data); 1806 break; 1807 } 1808 1809 return (error); 1810 } 1811 1812 static int 1813 ste_encap(struct ste_softc *sc, struct mbuf **m_head, struct ste_chain *txc) 1814 { 1815 struct ste_frag *frag; 1816 struct mbuf *m; 1817 struct ste_desc *desc; 1818 bus_dma_segment_t txsegs[STE_MAXFRAGS]; 1819 int error, i, nsegs; 1820 1821 STE_LOCK_ASSERT(sc); 1822 M_ASSERTPKTHDR((*m_head)); 1823 1824 error = bus_dmamap_load_mbuf_sg(sc->ste_cdata.ste_tx_tag, 1825 txc->ste_map, *m_head, txsegs, &nsegs, 0); 1826 if (error == EFBIG) { 1827 m = m_collapse(*m_head, M_DONTWAIT, STE_MAXFRAGS); 1828 if (m == NULL) { 1829 m_freem(*m_head); 1830 *m_head = NULL; 1831 return (ENOMEM); 1832 } 1833 *m_head = m; 1834 error = bus_dmamap_load_mbuf_sg(sc->ste_cdata.ste_tx_tag, 1835 txc->ste_map, *m_head, txsegs, &nsegs, 0); 1836 if (error != 0) { 1837 m_freem(*m_head); 1838 *m_head = NULL; 1839 return (error); 1840 } 1841 } else if (error != 0) 1842 return (error); 1843 if (nsegs == 0) { 1844 m_freem(*m_head); 1845 *m_head = NULL; 1846 return (EIO); 1847 } 1848 bus_dmamap_sync(sc->ste_cdata.ste_tx_tag, txc->ste_map, 1849 BUS_DMASYNC_PREWRITE); 1850 1851 desc = txc->ste_ptr; 1852 for (i = 0; i < nsegs; i++) { 1853 frag = &desc->ste_frags[i]; 1854 frag->ste_addr = htole32(STE_ADDR_LO(txsegs[i].ds_addr)); 1855 frag->ste_len = htole32(txsegs[i].ds_len); 1856 } 1857 desc->ste_frags[i - 1].ste_len |= htole32(STE_FRAG_LAST); 1858 /* 1859 * Because we use Tx polling we can't chain multiple 1860 * Tx descriptors here. Otherwise we race with controller. 1861 */ 1862 desc->ste_next = 0; 1863 if ((sc->ste_cdata.ste_tx_prod % STE_TX_INTR_FRAMES) == 0) 1864 desc->ste_ctl = htole32(STE_TXCTL_ALIGN_DIS | 1865 STE_TXCTL_DMAINTR); 1866 else 1867 desc->ste_ctl = htole32(STE_TXCTL_ALIGN_DIS); 1868 txc->ste_mbuf = *m_head; 1869 STE_INC(sc->ste_cdata.ste_tx_prod, STE_TX_LIST_CNT); 1870 sc->ste_cdata.ste_tx_cnt++; 1871 1872 return (0); 1873 } 1874 1875 static void 1876 ste_start(struct ifnet *ifp) 1877 { 1878 struct ste_softc *sc; 1879 1880 sc = ifp->if_softc; 1881 STE_LOCK(sc); 1882 ste_start_locked(ifp); 1883 STE_UNLOCK(sc); 1884 } 1885 1886 static void 1887 ste_start_locked(struct ifnet *ifp) 1888 { 1889 struct ste_softc *sc; 1890 struct ste_chain *cur_tx; 1891 struct mbuf *m_head = NULL; 1892 int enq; 1893 1894 sc = ifp->if_softc; 1895 STE_LOCK_ASSERT(sc); 1896 1897 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1898 IFF_DRV_RUNNING || (sc->ste_flags & STE_FLAG_LINK) == 0) 1899 return; 1900 1901 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) { 1902 if (sc->ste_cdata.ste_tx_cnt == STE_TX_LIST_CNT - 1) { 1903 /* 1904 * Controller may have cached copy of the last used 1905 * next ptr so we have to reserve one TFD to avoid 1906 * TFD overruns. 1907 */ 1908 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1909 break; 1910 } 1911 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 1912 if (m_head == NULL) 1913 break; 1914 cur_tx = &sc->ste_cdata.ste_tx_chain[sc->ste_cdata.ste_tx_prod]; 1915 if (ste_encap(sc, &m_head, cur_tx) != 0) { 1916 if (m_head == NULL) 1917 break; 1918 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 1919 break; 1920 } 1921 if (sc->ste_cdata.ste_last_tx == NULL) { 1922 bus_dmamap_sync(sc->ste_cdata.ste_tx_list_tag, 1923 sc->ste_cdata.ste_tx_list_map, 1924 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1925 STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL); 1926 ste_wait(sc); 1927 CSR_WRITE_4(sc, STE_TX_DMALIST_PTR, 1928 STE_ADDR_LO(sc->ste_ldata.ste_tx_list_paddr)); 1929 CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 64); 1930 STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL); 1931 ste_wait(sc); 1932 } else { 1933 sc->ste_cdata.ste_last_tx->ste_ptr->ste_next = 1934 sc->ste_cdata.ste_last_tx->ste_phys; 1935 bus_dmamap_sync(sc->ste_cdata.ste_tx_list_tag, 1936 sc->ste_cdata.ste_tx_list_map, 1937 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1938 } 1939 sc->ste_cdata.ste_last_tx = cur_tx; 1940 1941 enq++; 1942 /* 1943 * If there's a BPF listener, bounce a copy of this frame 1944 * to him. 1945 */ 1946 BPF_MTAP(ifp, m_head); 1947 } 1948 1949 if (enq > 0) 1950 sc->ste_timer = STE_TX_TIMEOUT; 1951 } 1952 1953 static void 1954 ste_watchdog(struct ste_softc *sc) 1955 { 1956 struct ifnet *ifp; 1957 1958 ifp = sc->ste_ifp; 1959 STE_LOCK_ASSERT(sc); 1960 1961 if (sc->ste_timer == 0 || --sc->ste_timer) 1962 return; 1963 1964 ifp->if_oerrors++; 1965 if_printf(ifp, "watchdog timeout\n"); 1966 1967 ste_txeof(sc); 1968 ste_txeoc(sc); 1969 ste_rxeof(sc, -1); 1970 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1971 ste_init_locked(sc); 1972 1973 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1974 ste_start_locked(ifp); 1975 } 1976 1977 static int 1978 ste_shutdown(device_t dev) 1979 { 1980 1981 return (ste_suspend(dev)); 1982 } 1983 1984 static int 1985 ste_suspend(device_t dev) 1986 { 1987 struct ste_softc *sc; 1988 1989 sc = device_get_softc(dev); 1990 1991 STE_LOCK(sc); 1992 ste_stop(sc); 1993 ste_setwol(sc); 1994 STE_UNLOCK(sc); 1995 1996 return (0); 1997 } 1998 1999 static int 2000 ste_resume(device_t dev) 2001 { 2002 struct ste_softc *sc; 2003 struct ifnet *ifp; 2004 int pmc; 2005 uint16_t pmstat; 2006 2007 sc = device_get_softc(dev); 2008 STE_LOCK(sc); 2009 if (pci_find_cap(sc->ste_dev, PCIY_PMG, &pmc) == 0) { 2010 /* Disable PME and clear PME status. */ 2011 pmstat = pci_read_config(sc->ste_dev, 2012 pmc + PCIR_POWER_STATUS, 2); 2013 if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) { 2014 pmstat &= ~PCIM_PSTAT_PMEENABLE; 2015 pci_write_config(sc->ste_dev, 2016 pmc + PCIR_POWER_STATUS, pmstat, 2); 2017 } 2018 } 2019 ifp = sc->ste_ifp; 2020 if ((ifp->if_flags & IFF_UP) != 0) { 2021 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2022 ste_init_locked(sc); 2023 } 2024 STE_UNLOCK(sc); 2025 2026 return (0); 2027 } 2028 2029 #define STE_SYSCTL_STAT_ADD32(c, h, n, p, d) \ 2030 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) 2031 #define STE_SYSCTL_STAT_ADD64(c, h, n, p, d) \ 2032 SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d) 2033 2034 static void 2035 ste_sysctl_node(struct ste_softc *sc) 2036 { 2037 struct sysctl_ctx_list *ctx; 2038 struct sysctl_oid_list *child, *parent; 2039 struct sysctl_oid *tree; 2040 struct ste_hw_stats *stats; 2041 2042 stats = &sc->ste_stats; 2043 ctx = device_get_sysctl_ctx(sc->ste_dev); 2044 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->ste_dev)); 2045 2046 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "int_rx_mod", 2047 CTLFLAG_RW, &sc->ste_int_rx_mod, 0, "ste RX interrupt moderation"); 2048 /* Pull in device tunables. */ 2049 sc->ste_int_rx_mod = STE_IM_RX_TIMER_DEFAULT; 2050 resource_int_value(device_get_name(sc->ste_dev), 2051 device_get_unit(sc->ste_dev), "int_rx_mod", &sc->ste_int_rx_mod); 2052 2053 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, 2054 NULL, "STE statistics"); 2055 parent = SYSCTL_CHILDREN(tree); 2056 2057 /* Rx statistics. */ 2058 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD, 2059 NULL, "Rx MAC statistics"); 2060 child = SYSCTL_CHILDREN(tree); 2061 STE_SYSCTL_STAT_ADD64(ctx, child, "good_octets", 2062 &stats->rx_bytes, "Good octets"); 2063 STE_SYSCTL_STAT_ADD32(ctx, child, "good_frames", 2064 &stats->rx_frames, "Good frames"); 2065 STE_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames", 2066 &stats->rx_bcast_frames, "Good broadcast frames"); 2067 STE_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames", 2068 &stats->rx_mcast_frames, "Good multicast frames"); 2069 STE_SYSCTL_STAT_ADD32(ctx, child, "lost_frames", 2070 &stats->rx_lost_frames, "Lost frames"); 2071 2072 /* Tx statistics. */ 2073 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD, 2074 NULL, "Tx MAC statistics"); 2075 child = SYSCTL_CHILDREN(tree); 2076 STE_SYSCTL_STAT_ADD64(ctx, child, "good_octets", 2077 &stats->tx_bytes, "Good octets"); 2078 STE_SYSCTL_STAT_ADD32(ctx, child, "good_frames", 2079 &stats->tx_frames, "Good frames"); 2080 STE_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames", 2081 &stats->tx_bcast_frames, "Good broadcast frames"); 2082 STE_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames", 2083 &stats->tx_mcast_frames, "Good multicast frames"); 2084 STE_SYSCTL_STAT_ADD32(ctx, child, "carrier_errs", 2085 &stats->tx_carrsense_errs, "Carrier sense errors"); 2086 STE_SYSCTL_STAT_ADD32(ctx, child, "single_colls", 2087 &stats->tx_single_colls, "Single collisions"); 2088 STE_SYSCTL_STAT_ADD32(ctx, child, "multi_colls", 2089 &stats->tx_multi_colls, "Multiple collisions"); 2090 STE_SYSCTL_STAT_ADD32(ctx, child, "late_colls", 2091 &stats->tx_late_colls, "Late collisions"); 2092 STE_SYSCTL_STAT_ADD32(ctx, child, "defers", 2093 &stats->tx_frames_defered, "Frames with deferrals"); 2094 STE_SYSCTL_STAT_ADD32(ctx, child, "excess_defers", 2095 &stats->tx_excess_defers, "Frames with excessive derferrals"); 2096 STE_SYSCTL_STAT_ADD32(ctx, child, "abort", 2097 &stats->tx_abort, "Aborted frames due to Excessive collisions"); 2098 } 2099 2100 #undef STE_SYSCTL_STAT_ADD32 2101 #undef STE_SYSCTL_STAT_ADD64 2102 2103 static void 2104 ste_setwol(struct ste_softc *sc) 2105 { 2106 struct ifnet *ifp; 2107 uint16_t pmstat; 2108 uint8_t val; 2109 int pmc; 2110 2111 STE_LOCK_ASSERT(sc); 2112 2113 if (pci_find_cap(sc->ste_dev, PCIY_PMG, &pmc) != 0) { 2114 /* Disable WOL. */ 2115 CSR_READ_1(sc, STE_WAKE_EVENT); 2116 CSR_WRITE_1(sc, STE_WAKE_EVENT, 0); 2117 return; 2118 } 2119 2120 ifp = sc->ste_ifp; 2121 val = CSR_READ_1(sc, STE_WAKE_EVENT); 2122 val &= ~(STE_WAKEEVENT_WAKEPKT_ENB | STE_WAKEEVENT_MAGICPKT_ENB | 2123 STE_WAKEEVENT_LINKEVT_ENB | STE_WAKEEVENT_WAKEONLAN_ENB); 2124 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) 2125 val |= STE_WAKEEVENT_MAGICPKT_ENB | STE_WAKEEVENT_WAKEONLAN_ENB; 2126 CSR_WRITE_1(sc, STE_WAKE_EVENT, val); 2127 /* Request PME. */ 2128 pmstat = pci_read_config(sc->ste_dev, pmc + PCIR_POWER_STATUS, 2); 2129 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 2130 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) 2131 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 2132 pci_write_config(sc->ste_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); 2133 } 2134