/*-
 * Copyright (c) 1997, 1998, 1999
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/ste/if_stereg.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

MODULE_DEPEND(ste, pci, 1, 1, 1);
MODULE_DEPEND(ste, ether, 1, 1, 1);
MODULE_DEPEND(ste, miibus, 1, 1, 1);

/* Define to show Tx error status. */
#define	STE_SHOW_TXERRORS

/*
 * Various supported device vendors/types and their names.
 */
static struct ste_type ste_devs[] = {
	{ ST_VENDORID, ST_DEVICEID_ST201_1, "Sundance ST201 10/100BaseTX" },
	{ ST_VENDORID, ST_DEVICEID_ST201_2, "Sundance ST201 10/100BaseTX" },
	{ DL_VENDORID, DL_DEVICEID_DL10050, "D-Link DL10050 10/100BaseTX" },
	{ 0, 0, NULL }
};

static int	ste_attach(device_t);
static int	ste_detach(device_t);
static int	ste_probe(device_t);
static int	ste_resume(device_t);
static int	ste_shutdown(device_t);
static int	ste_suspend(device_t);

static int	ste_dma_alloc(struct ste_softc *);
static void	ste_dma_free(struct ste_softc *);
static void	ste_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int	ste_eeprom_wait(struct ste_softc *);
static int	ste_encap(struct ste_softc *, struct mbuf **,
		    struct ste_chain *);
static int	ste_ifmedia_upd(struct ifnet *);
static void	ste_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void	ste_init(void *);
static void	ste_init_locked(struct ste_softc *);
static int	ste_init_rx_list(struct ste_softc *);
static void	ste_init_tx_list(struct ste_softc *);
static void	ste_intr(void *);
static int	ste_ioctl(struct ifnet *, u_long, caddr_t);
static int	ste_mii_readreg(struct ste_softc *, struct ste_mii_frame *);
static void	ste_mii_send(struct ste_softc *, uint32_t, int);
static void	ste_mii_sync(struct ste_softc *);
static int	ste_mii_writereg(struct ste_softc *, struct ste_mii_frame *);
static int	ste_miibus_readreg(device_t, int, int);
static void	ste_miibus_statchg(device_t);
static int	ste_miibus_writereg(device_t, int, int, int);
static int	ste_newbuf(struct ste_softc *, struct ste_chain_onefrag *);
static int	ste_read_eeprom(struct ste_softc *, uint16_t *, int, int);
static void	ste_reset(struct ste_softc *);
static void	ste_restart_tx(struct ste_softc *);
static int	ste_rxeof(struct ste_softc *, int);
static void	ste_rxfilter(struct ste_softc *);
static void	ste_setwol(struct ste_softc *);
static void	ste_start(struct ifnet *);
static void	ste_start_locked(struct ifnet *);
static void	ste_stats_clear(struct ste_softc *);
static void	ste_stats_update(struct ste_softc *);
static void	ste_stop(struct ste_softc *);
static void	ste_sysctl_node(struct ste_softc *);
static void	ste_tick(void *);
static void	ste_txeoc(struct ste_softc *);
static void	ste_txeof(struct ste_softc *);
static void	ste_wait(struct ste_softc *);
static void	ste_watchdog(struct ste_softc *);

static device_method_t ste_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		ste_probe),
	DEVMETHOD(device_attach,	ste_attach),
	DEVMETHOD(device_detach,	ste_detach),
	DEVMETHOD(device_shutdown,	ste_shutdown),
	DEVMETHOD(device_suspend,	ste_suspend),
	DEVMETHOD(device_resume,	ste_resume),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	ste_miibus_readreg),
	DEVMETHOD(miibus_writereg,	ste_miibus_writereg),
	DEVMETHOD(miibus_statchg,	ste_miibus_statchg),

	{ 0, 0 }
};

static driver_t ste_driver = {
	"ste",
	ste_methods,
	sizeof(struct ste_softc)
};

static devclass_t ste_devclass;

DRIVER_MODULE(ste, pci, ste_driver, ste_devclass, 0, 0);
DRIVER_MODULE(miibus, ste, miibus_driver, miibus_devclass, 0, 0);
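
/*
 * Register access helpers.  CSR_READ_*()/CSR_WRITE_*() come from
 * if_stereg.h; the macros below wrap them into read-modify-write
 * operations on 8-, 16- and 32-bit CSRs.
 */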
#define	STE_SETBIT4(sc, reg, x)				\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))

#define	STE_CLRBIT4(sc, reg, x)				\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))

#define	STE_SETBIT2(sc, reg, x)				\
	CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) | (x))

#define	STE_CLRBIT2(sc, reg, x)				\
	CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) & ~(x))

#define	STE_SETBIT1(sc, reg, x)				\
	CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) | (x))

#define	STE_CLRBIT1(sc, reg, x)				\
	CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) & ~(x))

#define	MII_SET(x)	STE_SETBIT1(sc, STE_PHYCTL, x)
#define	MII_CLR(x)	STE_CLRBIT1(sc, STE_PHYCTL, x)

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
static void
ste_mii_sync(struct ste_softc *sc)
{
	int i;

	MII_SET(STE_PHYCTL_MDIR|STE_PHYCTL_MDATA);

	for (i = 0; i < 32; i++) {
		MII_SET(STE_PHYCTL_MCLK);
		DELAY(1);
		MII_CLR(STE_PHYCTL_MCLK);
		DELAY(1);
	}
}

/*
 * Clock a series of bits through the MII.
 */
static void
ste_mii_send(struct ste_softc *sc, uint32_t bits, int cnt)
{
	int i;

	MII_CLR(STE_PHYCTL_MCLK);

	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			MII_SET(STE_PHYCTL_MDATA);
		} else {
			MII_CLR(STE_PHYCTL_MDATA);
		}
		DELAY(1);
		MII_CLR(STE_PHYCTL_MCLK);
		DELAY(1);
		MII_SET(STE_PHYCTL_MCLK);
	}
}

/*
 * Read a PHY register through the MII.
 */
static int
ste_mii_readreg(struct ste_softc *sc, struct ste_mii_frame *frame)
{
	int i, ack;

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = STE_MII_STARTDELIM;
	frame->mii_opcode = STE_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	CSR_WRITE_2(sc, STE_PHYCTL, 0);
	/*
	 * Turn on data xmit.
	 */
	MII_SET(STE_PHYCTL_MDIR);

	ste_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	ste_mii_send(sc, frame->mii_stdelim, 2);
	ste_mii_send(sc, frame->mii_opcode, 2);
	ste_mii_send(sc, frame->mii_phyaddr, 5);
	ste_mii_send(sc, frame->mii_regaddr, 5);

	/* Turn off xmit. */
	MII_CLR(STE_PHYCTL_MDIR);

	/* Idle bit */
	MII_CLR((STE_PHYCTL_MCLK|STE_PHYCTL_MDATA));
	DELAY(1);
	MII_SET(STE_PHYCTL_MCLK);
	DELAY(1);

	/* Check for ack */
	MII_CLR(STE_PHYCTL_MCLK);
	DELAY(1);
	ack = CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA;
	MII_SET(STE_PHYCTL_MCLK);
	DELAY(1);

	/*
	 * Now try reading data bits.  If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for (i = 0; i < 16; i++) {
			MII_CLR(STE_PHYCTL_MCLK);
			DELAY(1);
			MII_SET(STE_PHYCTL_MCLK);
			DELAY(1);
		}
		goto fail;
	}

	for (i = 0x8000; i; i >>= 1) {
		MII_CLR(STE_PHYCTL_MCLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA)
				frame->mii_data |= i;
			DELAY(1);
		}
		MII_SET(STE_PHYCTL_MCLK);
		DELAY(1);
	}

fail:
	MII_CLR(STE_PHYCTL_MCLK);
	DELAY(1);
	MII_SET(STE_PHYCTL_MCLK);
	DELAY(1);

	if (ack)
		return (1);
	return (0);
}
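
/*
 * Note: the bit-bang routines above produce standard IEEE 802.3
 * clause 22 management frames: a 32-bit preamble (ste_mii_sync()),
 * then a 2-bit start delimiter, 2-bit opcode, 5-bit PHY address,
 * 5-bit register address, 2-bit turnaround and 16 data bits.
 */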

/*
 * Write to a PHY register through the MII.
 */
static int
ste_mii_writereg(struct ste_softc *sc, struct ste_mii_frame *frame)
{

	/*
	 * Set up frame for TX.
	 */
	frame->mii_stdelim = STE_MII_STARTDELIM;
	frame->mii_opcode = STE_MII_WRITEOP;
	frame->mii_turnaround = STE_MII_TURNAROUND;

	/*
	 * Turn on data output.
	 */
	MII_SET(STE_PHYCTL_MDIR);

	ste_mii_sync(sc);

	ste_mii_send(sc, frame->mii_stdelim, 2);
	ste_mii_send(sc, frame->mii_opcode, 2);
	ste_mii_send(sc, frame->mii_phyaddr, 5);
	ste_mii_send(sc, frame->mii_regaddr, 5);
	ste_mii_send(sc, frame->mii_turnaround, 2);
	ste_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	MII_SET(STE_PHYCTL_MCLK);
	DELAY(1);
	MII_CLR(STE_PHYCTL_MCLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	MII_CLR(STE_PHYCTL_MDIR);

	return (0);
}

static int
ste_miibus_readreg(device_t dev, int phy, int reg)
{
	struct ste_softc *sc;
	struct ste_mii_frame frame;

	sc = device_get_softc(dev);

	if ((sc->ste_flags & STE_FLAG_ONE_PHY) != 0 && phy != 0)
		return (0);

	bzero((char *)&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	ste_mii_readreg(sc, &frame);

	return (frame.mii_data);
}

static int
ste_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct ste_softc *sc;
	struct ste_mii_frame frame;

	sc = device_get_softc(dev);
	bzero((char *)&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;

	ste_mii_writereg(sc, &frame);

	return (0);
}

static void
ste_miibus_statchg(device_t dev)
{
	struct ste_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint16_t cfg;

	sc = device_get_softc(dev);

	mii = device_get_softc(sc->ste_miibus);
	ifp = sc->ste_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	sc->ste_flags &= ~STE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
		case IFM_100_FX:
		case IFM_100_T4:
			sc->ste_flags |= STE_FLAG_LINK;
		default:
			break;
		}
	}

	/* Program MACs with resolved speed/duplex/flow-control. */
	if ((sc->ste_flags & STE_FLAG_LINK) != 0) {
		cfg = CSR_READ_2(sc, STE_MACCTL0);
		cfg &= ~(STE_MACCTL0_FLOWCTL_ENABLE | STE_MACCTL0_FULLDUPLEX);
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
			/*
			 * The ST201 data sheet says the driver should set
			 * the "receive MAC control frames" bit in the
			 * receive mode register to receive flow-control
			 * frames, but the register has no such bit.  In
			 * addition, the controller cannot generate pause
			 * frames itself, so that would have to be done by
			 * the driver.  Implementing pause timer handling
			 * in the driver is not trivial, so don't enable
			 * flow-control here.
			 */
			cfg |= STE_MACCTL0_FULLDUPLEX;
		}
		CSR_WRITE_2(sc, STE_MACCTL0, cfg);
	}
}

static int
ste_ifmedia_upd(struct ifnet *ifp)
{
	struct ste_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = ifp->if_softc;
	STE_LOCK(sc);
	mii = device_get_softc(sc->ste_miibus);
	if (mii->mii_instance) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);
	STE_UNLOCK(sc);

	return (error);
}

static void
ste_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct ste_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->ste_miibus);

	STE_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) == 0) {
		STE_UNLOCK(sc);
		return;
	}
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	STE_UNLOCK(sc);
}

static void
ste_wait(struct ste_softc *sc)
{
	int i;

	for (i = 0; i < STE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, STE_DMACTL) & STE_DMACTL_DMA_HALTINPROG))
			break;
		DELAY(1);
	}

	if (i == STE_TIMEOUT)
		device_printf(sc->ste_dev, "command never completed!\n");
}

/*
 * The EEPROM is slow: give it time to come ready after issuing
 * it a command.
 */
static int
ste_eeprom_wait(struct ste_softc *sc)
{
	int i;

	DELAY(1000);

	for (i = 0; i < 100; i++) {
		if (CSR_READ_2(sc, STE_EEPROM_CTL) & STE_EECTL_BUSY)
			DELAY(1000);
		else
			break;
	}

	if (i == 100) {
		device_printf(sc->ste_dev, "eeprom failed to come ready\n");
		return (1);
	}

	return (0);
}

/*
 * Read a sequence of words from the EEPROM.  Note that ethernet address
 * data is stored in the EEPROM in network byte order.
 */
static int
ste_read_eeprom(struct ste_softc *sc, uint16_t *dest, int off, int cnt)
{
	int err = 0, i;

	if (ste_eeprom_wait(sc))
		return (1);

	for (i = 0; i < cnt; i++) {
		CSR_WRITE_2(sc, STE_EEPROM_CTL, STE_EEOPCODE_READ | (off + i));
		err = ste_eeprom_wait(sc);
		if (err)
			break;
		*dest = le16toh(CSR_READ_2(sc, STE_EEPROM_DATA));
		dest++;
	}

	return (err ? 1 : 0);
}

static void
ste_rxfilter(struct ste_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t hashes[2] = { 0, 0 };
	uint8_t rxcfg;
	int h;

	STE_LOCK_ASSERT(sc);

	ifp = sc->ste_ifp;
	rxcfg = CSR_READ_1(sc, STE_RX_MODE);
	rxcfg |= STE_RXMODE_UNICAST;
	rxcfg &= ~(STE_RXMODE_ALLMULTI | STE_RXMODE_MULTIHASH |
	    STE_RXMODE_BROADCAST | STE_RXMODE_PROMISC);
	if (ifp->if_flags & IFF_BROADCAST)
		rxcfg |= STE_RXMODE_BROADCAST;
	if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) {
		if ((ifp->if_flags & IFF_ALLMULTI) != 0)
			rxcfg |= STE_RXMODE_ALLMULTI;
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			rxcfg |= STE_RXMODE_PROMISC;
		goto chipit;
	}

	rxcfg |= STE_RXMODE_MULTIHASH;
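	/*
	 * The chip implements a 64-bin multicast hash: the low 6 bits
	 * of the big-endian CRC32 of each address select one bit in
	 * the STE_MAR0-STE_MAR3 filter registers programmed below.
	 */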
	/* Now program new ones. */
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) & 0x3F;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
	}
	if_maddr_runlock(ifp);

chipit:
	CSR_WRITE_2(sc, STE_MAR0, hashes[0] & 0xFFFF);
	CSR_WRITE_2(sc, STE_MAR1, (hashes[0] >> 16) & 0xFFFF);
	CSR_WRITE_2(sc, STE_MAR2, hashes[1] & 0xFFFF);
	CSR_WRITE_2(sc, STE_MAR3, (hashes[1] >> 16) & 0xFFFF);
	CSR_WRITE_1(sc, STE_RX_MODE, rxcfg);
	CSR_READ_1(sc, STE_RX_MODE);
}

#ifdef DEVICE_POLLING
static poll_handler_t ste_poll, ste_poll_locked;

static int
ste_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct ste_softc *sc = ifp->if_softc;
	int rx_npkts = 0;

	STE_LOCK(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		rx_npkts = ste_poll_locked(ifp, cmd, count);
	STE_UNLOCK(sc);
	return (rx_npkts);
}

static int
ste_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct ste_softc *sc = ifp->if_softc;
	int rx_npkts;

	STE_LOCK_ASSERT(sc);

	rx_npkts = ste_rxeof(sc, count);
	ste_txeof(sc);
	ste_txeoc(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		ste_start_locked(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) {
		uint16_t status;

		status = CSR_READ_2(sc, STE_ISR_ACK);

		if (status & STE_ISR_STATS_OFLOW)
			ste_stats_update(sc);

		if (status & STE_ISR_HOSTERR) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			ste_init_locked(sc);
		}
	}
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */

static void
ste_intr(void *xsc)
{
	struct ste_softc *sc;
	struct ifnet *ifp;
	uint16_t intrs, status;

	sc = xsc;
	STE_LOCK(sc);
	ifp = sc->ste_ifp;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING) {
		STE_UNLOCK(sc);
		return;
	}
#endif
	/* Reading STE_ISR_ACK clears STE_IMR register. */
	status = CSR_READ_2(sc, STE_ISR_ACK);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		STE_UNLOCK(sc);
		return;
	}

	intrs = STE_INTRS;
	if (status == 0xFFFF || (status & intrs) == 0)
		goto done;

	if (sc->ste_int_rx_act > 0) {
		status &= ~STE_ISR_RX_DMADONE;
		intrs &= ~STE_IMR_RX_DMADONE;
	}

	if ((status & (STE_ISR_SOFTINTR | STE_ISR_RX_DMADONE)) != 0) {
		ste_rxeof(sc, -1);
		/*
		 * The controller has no Rx interrupt moderation
		 * feature.  Receiving 64 byte frames from the wire
		 * generates too many interrupts, which in turn leaves
		 * the system no time for other useful work.
		 * Fortunately the ST201 supports a single-shot timer,
		 * so use that timer to implement Rx interrupt
		 * moderation in the driver.  This adds more register
		 * accesses but greatly reduces the number of Rx
		 * interrupts under high network load.
		 */
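		/*
		 * Concretely: on an Rx DMA-done interrupt the code
		 * below masks STE_IMR_RX_DMADONE and arms the one-shot
		 * countdown for ste_int_rx_mod microseconds; the
		 * interrupt raised when the timer expires drains the
		 * ring, and the Rx interrupt is unmasked again once an
		 * interval passes with no new Rx DMA completion.
		 */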
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
		    (sc->ste_int_rx_mod != 0)) {
			if ((status & STE_ISR_RX_DMADONE) != 0) {
				CSR_WRITE_2(sc, STE_COUNTDOWN,
				    STE_TIMER_USECS(sc->ste_int_rx_mod));
				intrs &= ~STE_IMR_RX_DMADONE;
				sc->ste_int_rx_act = 1;
			} else {
				intrs |= STE_IMR_RX_DMADONE;
				sc->ste_int_rx_act = 0;
			}
		}
	}
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		if ((status & STE_ISR_TX_DMADONE) != 0)
			ste_txeof(sc);
		if ((status & STE_ISR_TX_DONE) != 0)
			ste_txeoc(sc);
		if ((status & STE_ISR_STATS_OFLOW) != 0)
			ste_stats_update(sc);
		if ((status & STE_ISR_HOSTERR) != 0) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			ste_init_locked(sc);
			STE_UNLOCK(sc);
			return;
		}
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			ste_start_locked(ifp);
done:
		/* Re-enable interrupts */
		CSR_WRITE_2(sc, STE_IMR, intrs);
	}
	STE_UNLOCK(sc);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static int
ste_rxeof(struct ste_softc *sc, int count)
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct ste_chain_onefrag *cur_rx;
	uint32_t rxstat;
	int total_len, rx_npkts;

	ifp = sc->ste_ifp;

	bus_dmamap_sync(sc->ste_cdata.ste_rx_list_tag,
	    sc->ste_cdata.ste_rx_list_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	cur_rx = sc->ste_cdata.ste_rx_head;
	for (rx_npkts = 0; rx_npkts < STE_RX_LIST_CNT; rx_npkts++,
	    cur_rx = cur_rx->ste_next) {
		rxstat = le32toh(cur_rx->ste_ptr->ste_status);
		if ((rxstat & STE_RXSTAT_DMADONE) == 0)
			break;
#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING) {
			if (count == 0)
				break;
			count--;
		}
#endif
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & STE_RXSTAT_FRAME_ERR) {
			ifp->if_ierrors++;
			cur_rx->ste_ptr->ste_status = 0;
			continue;
		}

		/* No errors; receive the packet. */
		m = cur_rx->ste_mbuf;
		total_len = STE_RX_BYTES(rxstat);

		/*
		 * Try to conjure up a new mbuf cluster.  If that
		 * fails, it means we have an out of memory condition and
		 * should leave the buffer in place and continue.  This will
		 * result in a lost packet, but there's little else we
		 * can do in this situation.
		 */
		if (ste_newbuf(sc, cur_rx) != 0) {
			ifp->if_iqdrops++;
			cur_rx->ste_ptr->ste_status = 0;
			continue;
		}

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;

		ifp->if_ipackets++;
		STE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		STE_LOCK(sc);
	}

	if (rx_npkts > 0) {
		sc->ste_cdata.ste_rx_head = cur_rx;
		bus_dmamap_sync(sc->ste_cdata.ste_rx_list_tag,
		    sc->ste_cdata.ste_rx_list_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	return (rx_npkts);
}

static void
ste_txeoc(struct ste_softc *sc)
{
	uint16_t txstat;
	struct ifnet *ifp;

	STE_LOCK_ASSERT(sc);

	ifp = sc->ste_ifp;

	/*
	 * The STE_TX_STATUS register implements a queue of up to 31
	 * transmit status bytes.  Writing an arbitrary value to the
	 * register advances the queue to the next transmit status
	 * byte.  This means that if the driver does not read the
	 * STE_TX_STATUS register after sending more than 31 frames,
	 * the controller stalls and the driver has to re-wake the Tx
	 * MAC.  This is the most severe limitation of ST201 based
	 * controllers.
	 */
	for (;;) {
		txstat = CSR_READ_2(sc, STE_TX_STATUS);
		if ((txstat & STE_TXSTATUS_TXDONE) == 0)
			break;
		if ((txstat & (STE_TXSTATUS_UNDERRUN |
		    STE_TXSTATUS_EXCESSCOLLS | STE_TXSTATUS_RECLAIMERR |
		    STE_TXSTATUS_STATSOFLOW)) != 0) {
			ifp->if_oerrors++;
#ifdef STE_SHOW_TXERRORS
			device_printf(sc->ste_dev, "TX error : 0x%b\n",
			    txstat & 0xFF, STE_ERR_BITS);
#endif
			if ((txstat & STE_TXSTATUS_UNDERRUN) != 0 &&
			    sc->ste_tx_thresh < STE_PACKET_SIZE) {
				sc->ste_tx_thresh += STE_MIN_FRAMELEN;
				if (sc->ste_tx_thresh > STE_PACKET_SIZE)
					sc->ste_tx_thresh = STE_PACKET_SIZE;
				device_printf(sc->ste_dev,
				    "TX underrun, increasing TX"
				    " start threshold to %d bytes\n",
				    sc->ste_tx_thresh);
				/* Make sure to disable active DMA cycles. */
				STE_SETBIT4(sc, STE_DMACTL,
				    STE_DMACTL_TXDMA_STALL);
				ste_wait(sc);
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				ste_init_locked(sc);
				break;
			}
			/* Restart Tx. */
			ste_restart_tx(sc);
		}
		/*
		 * Advance to the next status and ACK the TxComplete
		 * interrupt.  The ST201 data sheet is wrong here: to
		 * get the next Tx status we have to write both the
		 * STE_TX_STATUS and STE_TX_FRAMEID registers.
		 * Otherwise the controller returns the same status
		 * and does not acknowledge the Tx completion
		 * interrupt.
		 */
		CSR_WRITE_2(sc, STE_TX_STATUS, txstat);
	}
}
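
/*
 * The "0x%b" conversion above is the kernel printf bit-field format:
 * STE_ERR_BITS supplies the names printed for each status bit that is
 * set, which makes the one-byte Tx status self-describing in logs.
 */
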
static void
ste_tick(void *arg)
{
	struct ste_softc *sc;
	struct mii_data *mii;

	sc = (struct ste_softc *)arg;

	STE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->ste_miibus);
	mii_tick(mii);
	/*
	 * ukphy(4) does not seem to generate a callback that reports
	 * the resolved link state, so if we know we lost the link,
	 * explicitly check the link state.
	 */
	if ((sc->ste_flags & STE_FLAG_LINK) == 0)
		ste_miibus_statchg(sc->ste_dev);
	/*
	 * Because we do not generate a Tx completion interrupt for
	 * every frame, reclaim transmitted buffers here.
	 */
	ste_txeof(sc);
	ste_txeoc(sc);
	ste_stats_update(sc);
	ste_watchdog(sc);
	callout_reset(&sc->ste_callout, hz, ste_tick, sc);
}

static void
ste_txeof(struct ste_softc *sc)
{
	struct ifnet *ifp;
	struct ste_chain *cur_tx;
	uint32_t txstat;
	int idx;

	STE_LOCK_ASSERT(sc);

	ifp = sc->ste_ifp;
	idx = sc->ste_cdata.ste_tx_cons;
	if (idx == sc->ste_cdata.ste_tx_prod)
		return;

	bus_dmamap_sync(sc->ste_cdata.ste_tx_list_tag,
	    sc->ste_cdata.ste_tx_list_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	while (idx != sc->ste_cdata.ste_tx_prod) {
		cur_tx = &sc->ste_cdata.ste_tx_chain[idx];
		txstat = le32toh(cur_tx->ste_ptr->ste_ctl);
		if ((txstat & STE_TXCTL_DMADONE) == 0)
			break;
		bus_dmamap_sync(sc->ste_cdata.ste_tx_tag, cur_tx->ste_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->ste_cdata.ste_tx_tag, cur_tx->ste_map);
		KASSERT(cur_tx->ste_mbuf != NULL,
		    ("%s: freeing NULL mbuf!\n", __func__));
		m_freem(cur_tx->ste_mbuf);
		cur_tx->ste_mbuf = NULL;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		ifp->if_opackets++;
		sc->ste_cdata.ste_tx_cnt--;
		STE_INC(idx, STE_TX_LIST_CNT);
	}

	sc->ste_cdata.ste_tx_cons = idx;
	if (sc->ste_cdata.ste_tx_cnt == 0)
		sc->ste_timer = 0;
}

static void
ste_stats_clear(struct ste_softc *sc)
{

	STE_LOCK_ASSERT(sc);

	/* Rx stats. */
	CSR_READ_2(sc, STE_STAT_RX_OCTETS_LO);
	CSR_READ_2(sc, STE_STAT_RX_OCTETS_HI);
	CSR_READ_2(sc, STE_STAT_RX_FRAMES);
	CSR_READ_1(sc, STE_STAT_RX_BCAST);
	CSR_READ_1(sc, STE_STAT_RX_MCAST);
	CSR_READ_1(sc, STE_STAT_RX_LOST);
	/* Tx stats. */
	CSR_READ_2(sc, STE_STAT_TX_OCTETS_LO);
	CSR_READ_2(sc, STE_STAT_TX_OCTETS_HI);
	CSR_READ_2(sc, STE_STAT_TX_FRAMES);
	CSR_READ_1(sc, STE_STAT_TX_BCAST);
	CSR_READ_1(sc, STE_STAT_TX_MCAST);
	CSR_READ_1(sc, STE_STAT_CARRIER_ERR);
	CSR_READ_1(sc, STE_STAT_SINGLE_COLLS);
	CSR_READ_1(sc, STE_STAT_MULTI_COLLS);
	CSR_READ_1(sc, STE_STAT_LATE_COLLS);
	CSR_READ_1(sc, STE_STAT_TX_DEFER);
	CSR_READ_1(sc, STE_STAT_TX_EXDEFER);
	CSR_READ_1(sc, STE_STAT_TX_ABORT);
}

static void
ste_stats_update(struct ste_softc *sc)
{
	struct ifnet *ifp;
	struct ste_hw_stats *stats;
	uint32_t val;

	STE_LOCK_ASSERT(sc);

	ifp = sc->ste_ifp;
	stats = &sc->ste_stats;
	/* Rx stats. */
	val = (uint32_t)CSR_READ_2(sc, STE_STAT_RX_OCTETS_LO) |
	    ((uint32_t)CSR_READ_2(sc, STE_STAT_RX_OCTETS_HI)) << 16;
	val &= 0x000FFFFF;
	stats->rx_bytes += val;
	stats->rx_frames += CSR_READ_2(sc, STE_STAT_RX_FRAMES);
	stats->rx_bcast_frames += CSR_READ_1(sc, STE_STAT_RX_BCAST);
	stats->rx_mcast_frames += CSR_READ_1(sc, STE_STAT_RX_MCAST);
	stats->rx_lost_frames += CSR_READ_1(sc, STE_STAT_RX_LOST);
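	/*
	 * The hardware statistics registers clear on read (which is
	 * why ste_stats_clear() above simply reads them all), and the
	 * octet counters are only 20 bits wide, hence the 0x000FFFFF
	 * mask applied to the combined LO/HI register values.
	 */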
	/* Tx stats. */
	val = (uint32_t)CSR_READ_2(sc, STE_STAT_TX_OCTETS_LO) |
	    ((uint32_t)CSR_READ_2(sc, STE_STAT_TX_OCTETS_HI)) << 16;
	val &= 0x000FFFFF;
	stats->tx_bytes += val;
	stats->tx_frames += CSR_READ_2(sc, STE_STAT_TX_FRAMES);
	stats->tx_bcast_frames += CSR_READ_1(sc, STE_STAT_TX_BCAST);
	stats->tx_mcast_frames += CSR_READ_1(sc, STE_STAT_TX_MCAST);
	stats->tx_carrsense_errs += CSR_READ_1(sc, STE_STAT_CARRIER_ERR);
	val = CSR_READ_1(sc, STE_STAT_SINGLE_COLLS);
	stats->tx_single_colls += val;
	ifp->if_collisions += val;
	val = CSR_READ_1(sc, STE_STAT_MULTI_COLLS);
	stats->tx_multi_colls += val;
	ifp->if_collisions += val;
	val = CSR_READ_1(sc, STE_STAT_LATE_COLLS);
	stats->tx_late_colls += val;
	ifp->if_collisions += val;
	stats->tx_frames_defered += CSR_READ_1(sc, STE_STAT_TX_DEFER);
	stats->tx_excess_defers += CSR_READ_1(sc, STE_STAT_TX_EXDEFER);
	stats->tx_abort += CSR_READ_1(sc, STE_STAT_TX_ABORT);
}

/*
 * Probe for a Sundance ST201 chip.  Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
ste_probe(device_t dev)
{
	struct ste_type *t;

	t = ste_devs;

	while (t->ste_name != NULL) {
		if ((pci_get_vendor(dev) == t->ste_vid) &&
		    (pci_get_device(dev) == t->ste_did)) {
			device_set_desc(dev, t->ste_name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return (ENXIO);
}

/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
ste_attach(device_t dev)
{
	struct ste_softc *sc;
	struct ifnet *ifp;
	uint16_t eaddr[ETHER_ADDR_LEN / 2];
	int error = 0, pmc, prefer_iomap, rid;

	sc = device_get_softc(dev);
	sc->ste_dev = dev;

	/*
	 * Only use one PHY, since this chip reports several.
	 * Note: on the DFE-550 the PHY is at address 1, on the
	 * DFE-580 it is at 0 and 1.  That board is rev 0x12.
	 */
	if (pci_get_vendor(dev) == DL_VENDORID &&
	    pci_get_device(dev) == DL_DEVICEID_DL10050 &&
	    pci_get_revid(dev) == 0x12)
		sc->ste_flags |= STE_FLAG_ONE_PHY;

	mtx_init(&sc->ste_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	/*
	 * Prefer memory space register mapping over I/O space, but
	 * use I/O space for devices that are known to have issues
	 * with memory mapping.
	 */
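	/*
	 * The choice can also be forced per device with a loader
	 * hint, e.g. hint.ste.0.prefer_iomap="1", which the
	 * resource_int_value() lookup below picks up.
	 */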
	prefer_iomap = 0;
	if (pci_get_device(dev) == ST_DEVICEID_ST201_1)
		prefer_iomap = 1;
	else
		resource_int_value(device_get_name(sc->ste_dev),
		    device_get_unit(sc->ste_dev), "prefer_iomap",
		    &prefer_iomap);
	if (prefer_iomap == 0) {
		sc->ste_res_id = PCIR_BAR(1);
		sc->ste_res_type = SYS_RES_MEMORY;
		sc->ste_res = bus_alloc_resource_any(dev, sc->ste_res_type,
		    &sc->ste_res_id, RF_ACTIVE);
	}
	if (prefer_iomap || sc->ste_res == NULL) {
		sc->ste_res_id = PCIR_BAR(0);
		sc->ste_res_type = SYS_RES_IOPORT;
		sc->ste_res = bus_alloc_resource_any(dev, sc->ste_res_type,
		    &sc->ste_res_id, RF_ACTIVE);
	}
	if (sc->ste_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate interrupt */
	rid = 0;
	sc->ste_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->ste_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	callout_init_mtx(&sc->ste_callout, &sc->ste_mtx, 0);

	/* Reset the adapter. */
	ste_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	if (ste_read_eeprom(sc, eaddr, STE_EEADDR_NODE0, ETHER_ADDR_LEN / 2)) {
		device_printf(dev, "failed to read station address\n");
		error = ENXIO;
		goto fail;
	}
	ste_sysctl_node(sc);

	if ((error = ste_dma_alloc(sc)) != 0)
		goto fail;

	ifp = sc->ste_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}

	/* Do MII setup. */
	if (mii_phy_probe(dev, &sc->ste_miibus,
	    ste_ifmedia_upd, ste_ifmedia_sts)) {
		device_printf(dev, "MII without any phy!\n");
		error = ENXIO;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ste_ioctl;
	ifp->if_start = ste_start;
	ifp->if_init = ste_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, STE_TX_LIST_CNT - 1);
	ifp->if_snd.ifq_drv_maxlen = STE_TX_LIST_CNT - 1;
	IFQ_SET_READY(&ifp->if_snd);

	sc->ste_tx_thresh = STE_TXSTART_THRESH;

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, (uint8_t *)eaddr);

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
		ifp->if_capabilities |= IFCAP_WOL_MAGIC;
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->ste_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, ste_intr, sc, &sc->ste_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		ste_detach(dev);

	return (error);
}

/*
 * Shutdown hardware and free up resources.  This can be called any
 * time after the mutex has been initialized.  It is called in both
 * the error case in attach and the normal detach case, so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
ste_detach(device_t dev)
{
	struct ste_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->ste_mtx), ("ste mutex not initialized"));
	ifp = sc->ste_ifp;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		ether_ifdetach(ifp);
		STE_LOCK(sc);
		ste_stop(sc);
		STE_UNLOCK(sc);
		callout_drain(&sc->ste_callout);
	}
	if (sc->ste_miibus)
		device_delete_child(dev, sc->ste_miibus);
	bus_generic_detach(dev);

	if (sc->ste_intrhand)
		bus_teardown_intr(dev, sc->ste_irq, sc->ste_intrhand);
	if (sc->ste_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ste_irq);
	if (sc->ste_res)
		bus_release_resource(dev, sc->ste_res_type, sc->ste_res_id,
		    sc->ste_res);

	if (ifp)
		if_free(ifp);

	ste_dma_free(sc);
	mtx_destroy(&sc->ste_mtx);

	return (0);
}

struct ste_dmamap_arg {
	bus_addr_t	ste_busaddr;
};

static void
ste_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct ste_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	ctx = (struct ste_dmamap_arg *)arg;
	ctx->ste_busaddr = segs[0].ds_addr;
}

static int
ste_dma_alloc(struct ste_softc *sc)
{
	struct ste_chain *txc;
	struct ste_chain_onefrag *rxc;
	struct ste_dmamap_arg ctx;
	int error, i;

	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->ste_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->ste_cdata.ste_parent_tag);
	if (error != 0) {
		device_printf(sc->ste_dev,
		    "could not create parent DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Tx descriptor list. */
	error = bus_dma_tag_create(
	    sc->ste_cdata.ste_parent_tag, /* parent */
	    STE_DESC_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    STE_TX_LIST_SZ,		/* maxsize */
	    1,				/* nsegments */
	    STE_TX_LIST_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->ste_cdata.ste_tx_list_tag);
	if (error != 0) {
		device_printf(sc->ste_dev,
		    "could not create Tx list DMA tag.\n");
		goto fail;
	}
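
	/*
	 * All of the tags created here descend from ste_parent_tag,
	 * which restricts DMA to 32-bit bus addresses; the descriptor
	 * list tags additionally require STE_DESC_ALIGN alignment and
	 * a single contiguous segment.
	 */
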
	/* Create DMA tag for Rx descriptor list. */
	error = bus_dma_tag_create(
	    sc->ste_cdata.ste_parent_tag, /* parent */
	    STE_DESC_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    STE_RX_LIST_SZ,		/* maxsize */
	    1,				/* nsegments */
	    STE_RX_LIST_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->ste_cdata.ste_rx_list_tag);
	if (error != 0) {
		device_printf(sc->ste_dev,
		    "could not create Rx list DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Tx buffers. */
	error = bus_dma_tag_create(
	    sc->ste_cdata.ste_parent_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * STE_MAXFRAGS,	/* maxsize */
	    STE_MAXFRAGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->ste_cdata.ste_tx_tag);
	if (error != 0) {
		device_printf(sc->ste_dev, "could not create Tx DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Rx buffers. */
	error = bus_dma_tag_create(
	    sc->ste_cdata.ste_parent_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->ste_cdata.ste_rx_tag);
	if (error != 0) {
		device_printf(sc->ste_dev, "could not create Rx DMA tag.\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx list. */
	error = bus_dmamem_alloc(sc->ste_cdata.ste_tx_list_tag,
	    (void **)&sc->ste_ldata.ste_tx_list,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->ste_cdata.ste_tx_list_map);
	if (error != 0) {
		device_printf(sc->ste_dev,
		    "could not allocate DMA'able memory for Tx list.\n");
		goto fail;
	}
	ctx.ste_busaddr = 0;
	error = bus_dmamap_load(sc->ste_cdata.ste_tx_list_tag,
	    sc->ste_cdata.ste_tx_list_map, sc->ste_ldata.ste_tx_list,
	    STE_TX_LIST_SZ, ste_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.ste_busaddr == 0) {
		device_printf(sc->ste_dev,
		    "could not load DMA'able memory for Tx list.\n");
		goto fail;
	}
	sc->ste_ldata.ste_tx_list_paddr = ctx.ste_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx list. */
	error = bus_dmamem_alloc(sc->ste_cdata.ste_rx_list_tag,
	    (void **)&sc->ste_ldata.ste_rx_list,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->ste_cdata.ste_rx_list_map);
	if (error != 0) {
		device_printf(sc->ste_dev,
		    "could not allocate DMA'able memory for Rx list.\n");
		goto fail;
	}
	ctx.ste_busaddr = 0;
	error = bus_dmamap_load(sc->ste_cdata.ste_rx_list_tag,
	    sc->ste_cdata.ste_rx_list_map, sc->ste_ldata.ste_rx_list,
	    STE_RX_LIST_SZ, ste_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.ste_busaddr == 0) {
		device_printf(sc->ste_dev,
		    "could not load DMA'able memory for Rx list.\n");
		goto fail;
	}
	sc->ste_ldata.ste_rx_list_paddr = ctx.ste_busaddr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < STE_TX_LIST_CNT; i++) {
		txc = &sc->ste_cdata.ste_tx_chain[i];
		txc->ste_ptr = NULL;
		txc->ste_mbuf = NULL;
		txc->ste_next = NULL;
		txc->ste_phys = 0;
		txc->ste_map = NULL;
		error = bus_dmamap_create(sc->ste_cdata.ste_tx_tag, 0,
		    &txc->ste_map);
		if (error != 0) {
			device_printf(sc->ste_dev,
			    "could not create Tx dmamap.\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->ste_cdata.ste_rx_tag, 0,
	    &sc->ste_cdata.ste_rx_sparemap)) != 0) {
		device_printf(sc->ste_dev,
		    "could not create spare Rx dmamap.\n");
		goto fail;
	}
	for (i = 0; i < STE_RX_LIST_CNT; i++) {
		rxc = &sc->ste_cdata.ste_rx_chain[i];
		rxc->ste_ptr = NULL;
		rxc->ste_mbuf = NULL;
		rxc->ste_next = NULL;
		rxc->ste_map = NULL;
		error = bus_dmamap_create(sc->ste_cdata.ste_rx_tag, 0,
		    &rxc->ste_map);
		if (error != 0) {
			device_printf(sc->ste_dev,
			    "could not create Rx dmamap.\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
ste_dma_free(struct ste_softc *sc)
{
	struct ste_chain *txc;
	struct ste_chain_onefrag *rxc;
	int i;

	/* Tx buffers. */
	if (sc->ste_cdata.ste_tx_tag != NULL) {
		for (i = 0; i < STE_TX_LIST_CNT; i++) {
			txc = &sc->ste_cdata.ste_tx_chain[i];
			if (txc->ste_map != NULL) {
				bus_dmamap_destroy(sc->ste_cdata.ste_tx_tag,
				    txc->ste_map);
				txc->ste_map = NULL;
			}
		}
		bus_dma_tag_destroy(sc->ste_cdata.ste_tx_tag);
		sc->ste_cdata.ste_tx_tag = NULL;
	}
	/* Rx buffers. */
	if (sc->ste_cdata.ste_rx_tag != NULL) {
		for (i = 0; i < STE_RX_LIST_CNT; i++) {
			rxc = &sc->ste_cdata.ste_rx_chain[i];
			if (rxc->ste_map != NULL) {
				bus_dmamap_destroy(sc->ste_cdata.ste_rx_tag,
				    rxc->ste_map);
				rxc->ste_map = NULL;
			}
		}
		if (sc->ste_cdata.ste_rx_sparemap != NULL) {
			bus_dmamap_destroy(sc->ste_cdata.ste_rx_tag,
			    sc->ste_cdata.ste_rx_sparemap);
			sc->ste_cdata.ste_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->ste_cdata.ste_rx_tag);
		sc->ste_cdata.ste_rx_tag = NULL;
	}
	/* Tx descriptor list. */
	if (sc->ste_cdata.ste_tx_list_tag != NULL) {
		if (sc->ste_cdata.ste_tx_list_map != NULL)
			bus_dmamap_unload(sc->ste_cdata.ste_tx_list_tag,
			    sc->ste_cdata.ste_tx_list_map);
		if (sc->ste_cdata.ste_tx_list_map != NULL &&
		    sc->ste_ldata.ste_tx_list != NULL)
			bus_dmamem_free(sc->ste_cdata.ste_tx_list_tag,
			    sc->ste_ldata.ste_tx_list,
			    sc->ste_cdata.ste_tx_list_map);
		sc->ste_ldata.ste_tx_list = NULL;
		sc->ste_cdata.ste_tx_list_map = NULL;
		bus_dma_tag_destroy(sc->ste_cdata.ste_tx_list_tag);
		sc->ste_cdata.ste_tx_list_tag = NULL;
	}
	/* Rx descriptor list. */
	if (sc->ste_cdata.ste_rx_list_tag != NULL) {
		if (sc->ste_cdata.ste_rx_list_map != NULL)
			bus_dmamap_unload(sc->ste_cdata.ste_rx_list_tag,
			    sc->ste_cdata.ste_rx_list_map);
		if (sc->ste_cdata.ste_rx_list_map != NULL &&
		    sc->ste_ldata.ste_rx_list != NULL)
			bus_dmamem_free(sc->ste_cdata.ste_rx_list_tag,
			    sc->ste_ldata.ste_rx_list,
			    sc->ste_cdata.ste_rx_list_map);
		sc->ste_ldata.ste_rx_list = NULL;
		sc->ste_cdata.ste_rx_list_map = NULL;
		bus_dma_tag_destroy(sc->ste_cdata.ste_rx_list_tag);
		sc->ste_cdata.ste_rx_list_tag = NULL;
	}
	if (sc->ste_cdata.ste_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->ste_cdata.ste_parent_tag);
		sc->ste_cdata.ste_parent_tag = NULL;
	}
}

static int
ste_newbuf(struct ste_softc *sc, struct ste_chain_onefrag *rxc)
{
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int error, nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, ETHER_ALIGN);

	if ((error = bus_dmamap_load_mbuf_sg(sc->ste_cdata.ste_rx_tag,
	    sc->ste_cdata.ste_rx_sparemap, m, segs, &nsegs, 0)) != 0) {
		m_freem(m);
		return (error);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	if (rxc->ste_mbuf != NULL) {
		bus_dmamap_sync(sc->ste_cdata.ste_rx_tag, rxc->ste_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->ste_cdata.ste_rx_tag, rxc->ste_map);
	}
	map = rxc->ste_map;
	rxc->ste_map = sc->ste_cdata.ste_rx_sparemap;
	sc->ste_cdata.ste_rx_sparemap = map;
	bus_dmamap_sync(sc->ste_cdata.ste_rx_tag, rxc->ste_map,
	    BUS_DMASYNC_PREREAD);
	rxc->ste_mbuf = m;
	rxc->ste_ptr->ste_status = 0;
	rxc->ste_ptr->ste_frag.ste_addr = htole32(segs[0].ds_addr);
	rxc->ste_ptr->ste_frag.ste_len = htole32(segs[0].ds_len |
	    STE_FRAG_LAST);
	return (0);
}

static int
ste_init_rx_list(struct ste_softc *sc)
{
	struct ste_chain_data *cd;
	struct ste_list_data *ld;
	int error, i;

	sc->ste_int_rx_act = 0;
	cd = &sc->ste_cdata;
	ld = &sc->ste_ldata;
	bzero(ld->ste_rx_list, STE_RX_LIST_SZ);
	for (i = 0; i < STE_RX_LIST_CNT; i++) {
		cd->ste_rx_chain[i].ste_ptr = &ld->ste_rx_list[i];
		error = ste_newbuf(sc, &cd->ste_rx_chain[i]);
		if (error != 0)
			return (error);
		if (i == (STE_RX_LIST_CNT - 1)) {
			cd->ste_rx_chain[i].ste_next = &cd->ste_rx_chain[0];
			ld->ste_rx_list[i].ste_next =
			    htole32(ld->ste_rx_list_paddr +
			    (sizeof(struct ste_desc_onefrag) * 0));
		} else {
			cd->ste_rx_chain[i].ste_next = &cd->ste_rx_chain[i + 1];
			ld->ste_rx_list[i].ste_next =
			    htole32(ld->ste_rx_list_paddr +
			    (sizeof(struct ste_desc_onefrag) * (i + 1)));
		}
	}

	cd->ste_rx_head = &cd->ste_rx_chain[0];
	bus_dmamap_sync(sc->ste_cdata.ste_rx_list_tag,
	    sc->ste_cdata.ste_rx_list_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
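
/*
 * Like the Rx ring built above, the Tx ring below is a circular,
 * singly-linked descriptor chain.  Each ste_chain's ste_phys caches
 * the bus address of the following descriptor so that
 * ste_start_locked() can append a frame by patching it into the
 * previous descriptor's ste_next field.
 */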
static void
ste_init_tx_list(struct ste_softc *sc)
{
	struct ste_chain_data *cd;
	struct ste_list_data *ld;
	int i;

	cd = &sc->ste_cdata;
	ld = &sc->ste_ldata;
	bzero(ld->ste_tx_list, STE_TX_LIST_SZ);
	for (i = 0; i < STE_TX_LIST_CNT; i++) {
		cd->ste_tx_chain[i].ste_ptr = &ld->ste_tx_list[i];
		cd->ste_tx_chain[i].ste_mbuf = NULL;
		if (i == (STE_TX_LIST_CNT - 1)) {
			cd->ste_tx_chain[i].ste_next = &cd->ste_tx_chain[0];
			cd->ste_tx_chain[i].ste_phys = htole32(STE_ADDR_LO(
			    ld->ste_tx_list_paddr +
			    (sizeof(struct ste_desc) * 0)));
		} else {
			cd->ste_tx_chain[i].ste_next = &cd->ste_tx_chain[i + 1];
			cd->ste_tx_chain[i].ste_phys = htole32(STE_ADDR_LO(
			    ld->ste_tx_list_paddr +
			    (sizeof(struct ste_desc) * (i + 1))));
		}
	}

	cd->ste_last_tx = NULL;
	cd->ste_tx_prod = 0;
	cd->ste_tx_cons = 0;
	cd->ste_tx_cnt = 0;

	bus_dmamap_sync(sc->ste_cdata.ste_tx_list_tag,
	    sc->ste_cdata.ste_tx_list_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static void
ste_init(void *xsc)
{
	struct ste_softc *sc;

	sc = xsc;
	STE_LOCK(sc);
	ste_init_locked(sc);
	STE_UNLOCK(sc);
}

static void
ste_init_locked(struct ste_softc *sc)
{
	struct ifnet *ifp;
	struct mii_data *mii;
	uint8_t val;
	int i;

	STE_LOCK_ASSERT(sc);
	ifp = sc->ste_ifp;
	mii = device_get_softc(sc->ste_miibus);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	ste_stop(sc);
	/* Reset the chip to a known state. */
	ste_reset(sc);

	/* Init our MAC address */
	for (i = 0; i < ETHER_ADDR_LEN; i += 2) {
		CSR_WRITE_2(sc, STE_PAR0 + i,
		    ((IF_LLADDR(sc->ste_ifp)[i] & 0xff) |
		    IF_LLADDR(sc->ste_ifp)[i + 1] << 8));
	}

	/* Init RX list */
	if (ste_init_rx_list(sc) != 0) {
		device_printf(sc->ste_dev,
		    "initialization failed: no memory for RX buffers\n");
		ste_stop(sc);
		return;
	}

	/* Set RX polling interval */
	CSR_WRITE_1(sc, STE_RX_DMAPOLL_PERIOD, 64);

	/* Init TX descriptors */
	ste_init_tx_list(sc);

	/* Clear and disable WOL. */
	val = CSR_READ_1(sc, STE_WAKE_EVENT);
	val &= ~(STE_WAKEEVENT_WAKEPKT_ENB | STE_WAKEEVENT_MAGICPKT_ENB |
	    STE_WAKEEVENT_LINKEVT_ENB | STE_WAKEEVENT_WAKEONLAN_ENB);
	CSR_WRITE_1(sc, STE_WAKE_EVENT, val);

	/* Set the TX freethresh value */
	CSR_WRITE_1(sc, STE_TX_DMABURST_THRESH, STE_PACKET_SIZE >> 8);

	/* Set the TX start threshold for best performance. */
	CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh);

	/* Set the TX reclaim threshold. */
	CSR_WRITE_1(sc, STE_TX_RECLAIM_THRESH, (STE_PACKET_SIZE >> 4));

	/* Accept VLAN length packets */
	CSR_WRITE_2(sc, STE_MAX_FRAMELEN, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);

	/* Set up the RX filter. */
	ste_rxfilter(sc);

	/* Load the address of the RX list. */
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL);
	ste_wait(sc);
	CSR_WRITE_4(sc, STE_RX_DMALIST_PTR,
	    STE_ADDR_LO(sc->ste_ldata.ste_rx_list_paddr));
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);

	/* Set TX polling interval (defer until we TX the first packet). */
	CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 0);

	/* Load address of the TX list */
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
	ste_wait(sc);
	CSR_WRITE_4(sc, STE_TX_DMALIST_PTR, 0);
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
	ste_wait(sc);
	/* Select 3.2us timer. */
	STE_CLRBIT4(sc, STE_DMACTL, STE_DMACTL_COUNTDOWN_SPEED |
	    STE_DMACTL_COUNTDOWN_MODE);

	/* Enable receiver and transmitter */
	CSR_WRITE_2(sc, STE_MACCTL0, 0);
	CSR_WRITE_2(sc, STE_MACCTL1, 0);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_ENABLE);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_ENABLE);

	/* Enable stats counters. */
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_ENABLE);
	/* Clear stats counters. */
	ste_stats_clear(sc);

	CSR_WRITE_2(sc, STE_COUNTDOWN, 0);
	CSR_WRITE_2(sc, STE_ISR, 0xFFFF);
#ifdef DEVICE_POLLING
	/* Disable interrupts if we are polling. */
	if (ifp->if_capenable & IFCAP_POLLING)
		CSR_WRITE_2(sc, STE_IMR, 0);
	else
#endif
	/* Enable interrupts. */
	CSR_WRITE_2(sc, STE_IMR, STE_INTRS);

	sc->ste_flags &= ~STE_FLAG_LINK;
	/* Switch to the current media. */
	mii_mediachg(mii);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&sc->ste_callout, hz, ste_tick, sc);
}

static void
ste_stop(struct ste_softc *sc)
{
	struct ifnet *ifp;
	struct ste_chain_onefrag *cur_rx;
	struct ste_chain *cur_tx;
	uint32_t val;
	int i;

	STE_LOCK_ASSERT(sc);
	ifp = sc->ste_ifp;

	callout_stop(&sc->ste_callout);
	sc->ste_timer = 0;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING|IFF_DRV_OACTIVE);

	CSR_WRITE_2(sc, STE_IMR, 0);
	CSR_WRITE_2(sc, STE_COUNTDOWN, 0);
	/* Stop pending DMA. */
	val = CSR_READ_4(sc, STE_DMACTL);
	val |= STE_DMACTL_TXDMA_STALL | STE_DMACTL_RXDMA_STALL;
	CSR_WRITE_4(sc, STE_DMACTL, val);
	ste_wait(sc);
	/* Disable auto-polling. */
	CSR_WRITE_1(sc, STE_RX_DMAPOLL_PERIOD, 0);
	CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 0);
	/* Nullify DMA address to stop any further DMA. */
	CSR_WRITE_4(sc, STE_RX_DMALIST_PTR, 0);
	CSR_WRITE_4(sc, STE_TX_DMALIST_PTR, 0);
	/* Stop TX/RX MAC. */
	val = CSR_READ_2(sc, STE_MACCTL1);
	val |= STE_MACCTL1_TX_DISABLE | STE_MACCTL1_RX_DISABLE |
	    STE_MACCTL1_STATS_DISABLE;
	CSR_WRITE_2(sc, STE_MACCTL1, val);
	for (i = 0; i < STE_TIMEOUT; i++) {
		DELAY(10);
		if ((CSR_READ_2(sc, STE_MACCTL1) & (STE_MACCTL1_TX_DISABLE |
		    STE_MACCTL1_RX_DISABLE | STE_MACCTL1_STATS_DISABLE)) == 0)
			break;
	}
	if (i == STE_TIMEOUT)
		device_printf(sc->ste_dev, "Stopping MAC timed out\n");
	/* Acknowledge any pending interrupts. */
	CSR_READ_2(sc, STE_ISR_ACK);
	ste_stats_update(sc);

	for (i = 0; i < STE_RX_LIST_CNT; i++) {
		cur_rx = &sc->ste_cdata.ste_rx_chain[i];
		if (cur_rx->ste_mbuf != NULL) {
			bus_dmamap_sync(sc->ste_cdata.ste_rx_tag,
			    cur_rx->ste_map, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->ste_cdata.ste_rx_tag,
			    cur_rx->ste_map);
			m_freem(cur_rx->ste_mbuf);
			cur_rx->ste_mbuf = NULL;
		}
	}

	for (i = 0; i < STE_TX_LIST_CNT; i++) {
		cur_tx = &sc->ste_cdata.ste_tx_chain[i];
		if (cur_tx->ste_mbuf != NULL) {
			bus_dmamap_sync(sc->ste_cdata.ste_tx_tag,
			    cur_tx->ste_map, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->ste_cdata.ste_tx_tag,
			    cur_tx->ste_map);
			m_freem(cur_tx->ste_mbuf);
			cur_tx->ste_mbuf = NULL;
		}
	}
}

static void
ste_reset(struct ste_softc *sc)
{
	uint32_t ctl;
	int i;

	ctl = CSR_READ_4(sc, STE_ASICCTL);
	ctl |= STE_ASICCTL_GLOBAL_RESET | STE_ASICCTL_RX_RESET |
	    STE_ASICCTL_TX_RESET | STE_ASICCTL_DMA_RESET |
	    STE_ASICCTL_FIFO_RESET | STE_ASICCTL_NETWORK_RESET |
	    STE_ASICCTL_AUTOINIT_RESET | STE_ASICCTL_HOST_RESET |
	    STE_ASICCTL_EXTRESET_RESET;
	CSR_WRITE_4(sc, STE_ASICCTL, ctl);
	CSR_READ_4(sc, STE_ASICCTL);
	/*
	 * Because it needs to access the EEPROM, the controller can
	 * take up to 1ms to complete the global reset.
	 */
	DELAY(1000);

	for (i = 0; i < STE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, STE_ASICCTL) & STE_ASICCTL_RESET_BUSY))
			break;
		DELAY(10);
	}

	if (i == STE_TIMEOUT)
		device_printf(sc->ste_dev, "global reset never completed\n");
}

static void
ste_restart_tx(struct ste_softc *sc)
{
	uint16_t mac;
	int i;

	for (i = 0; i < STE_TIMEOUT; i++) {
		mac = CSR_READ_2(sc, STE_MACCTL1);
		mac |= STE_MACCTL1_TX_ENABLE;
		CSR_WRITE_2(sc, STE_MACCTL1, mac);
		mac = CSR_READ_2(sc, STE_MACCTL1);
		if ((mac & STE_MACCTL1_TX_ENABLED) != 0)
			break;
		DELAY(10);
	}

	if (i == STE_TIMEOUT)
		device_printf(sc->ste_dev, "starting Tx failed\n");
}

static int
ste_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ste_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error = 0, mask;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;

	switch (command) {
	case SIOCSIFFLAGS:
		STE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
			    ((ifp->if_flags ^ sc->ste_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
				ste_rxfilter(sc);
			else
				ste_init_locked(sc);
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			ste_stop(sc);
		sc->ste_if_flags = ifp->if_flags;
		STE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		STE_LOCK(sc);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			ste_rxfilter(sc);
		STE_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->ste_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		STE_LOCK(sc);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if ((mask & IFCAP_POLLING) != 0 &&
		    (IFCAP_POLLING & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_POLLING;
			if ((IFCAP_POLLING & ifp->if_capenable) != 0) {
				error = ether_poll_register(ste_poll, ifp);
				if (error != 0) {
					STE_UNLOCK(sc);
					break;
				}
				/* Disable interrupts. */
				CSR_WRITE_2(sc, STE_IMR, 0);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupts. */
				CSR_WRITE_2(sc, STE_IMR, STE_INTRS);
			}
		}
#endif /* DEVICE_POLLING */
		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		STE_UNLOCK(sc);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

static int
ste_encap(struct ste_softc *sc, struct mbuf **m_head, struct ste_chain *txc)
{
	struct ste_frag *frag;
	struct mbuf *m;
	struct ste_desc *desc;
	bus_dma_segment_t txsegs[STE_MAXFRAGS];
	int error, i, nsegs;

	STE_LOCK_ASSERT(sc);
	M_ASSERTPKTHDR((*m_head));

	error = bus_dmamap_load_mbuf_sg(sc->ste_cdata.ste_tx_tag,
	    txc->ste_map, *m_head, txsegs, &nsegs, 0);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_DONTWAIT, STE_MAXFRAGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->ste_cdata.ste_tx_tag,
		    txc->ste_map, *m_head, txsegs, &nsegs, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}
	bus_dmamap_sync(sc->ste_cdata.ste_tx_tag, txc->ste_map,
	    BUS_DMASYNC_PREWRITE);

	desc = txc->ste_ptr;
	for (i = 0; i < nsegs; i++) {
		frag = &desc->ste_frags[i];
		frag->ste_addr = htole32(STE_ADDR_LO(txsegs[i].ds_addr));
		frag->ste_len = htole32(txsegs[i].ds_len);
	}
	desc->ste_frags[i - 1].ste_len |= htole32(STE_FRAG_LAST);
	/*
	 * Because we use Tx polling we can't chain multiple
	 * Tx descriptors here; otherwise we would race with the
	 * controller.
	 */
	desc->ste_next = 0;
	if ((sc->ste_cdata.ste_tx_prod % STE_TX_INTR_FRAMES) == 0)
		desc->ste_ctl = htole32(STE_TXCTL_ALIGN_DIS |
		    STE_TXCTL_DMAINTR);
	else
		desc->ste_ctl = htole32(STE_TXCTL_ALIGN_DIS);
	txc->ste_mbuf = *m_head;
	STE_INC(sc->ste_cdata.ste_tx_prod, STE_TX_LIST_CNT);
	sc->ste_cdata.ste_tx_cnt++;

	return (0);
}
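
/*
 * Note that ste_encap() requests a Tx DMA completion interrupt only
 * on every STE_TX_INTR_FRAMES-th frame; the frames in between are
 * reclaimed from ste_tick() via ste_txeof(), trading completion
 * latency for fewer interrupts.
 */
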
static void
ste_start_locked(struct ifnet *ifp)
{
	struct ste_softc *sc;
	struct ste_chain *cur_tx;
	struct mbuf *m_head = NULL;
	int enq;

	sc = ifp->if_softc;
	STE_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->ste_flags & STE_FLAG_LINK) == 0)
		return;

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
		if (sc->ste_cdata.ste_tx_cnt == STE_TX_LIST_CNT - 1) {
			/*
			 * The controller may have a cached copy of the
			 * last used next pointer, so we have to reserve
			 * one TFD to avoid TFD overruns.
			 */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		cur_tx = &sc->ste_cdata.ste_tx_chain[sc->ste_cdata.ste_tx_prod];
		if (ste_encap(sc, &m_head, cur_tx) != 0) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			break;
		}
		if (sc->ste_cdata.ste_last_tx == NULL) {
			bus_dmamap_sync(sc->ste_cdata.ste_tx_list_tag,
			    sc->ste_cdata.ste_tx_list_map,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
			ste_wait(sc);
			CSR_WRITE_4(sc, STE_TX_DMALIST_PTR,
			    STE_ADDR_LO(sc->ste_ldata.ste_tx_list_paddr));
			CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 64);
			STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
			ste_wait(sc);
		} else {
			sc->ste_cdata.ste_last_tx->ste_ptr->ste_next =
			    sc->ste_cdata.ste_last_tx->ste_phys;
			bus_dmamap_sync(sc->ste_cdata.ste_tx_list_tag,
			    sc->ste_cdata.ste_tx_list_map,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		}
		sc->ste_cdata.ste_last_tx = cur_tx;

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to it.
		 */
		BPF_MTAP(ifp, m_head);
	}

	if (enq > 0)
		sc->ste_timer = STE_TX_TIMEOUT;
}

static void
ste_watchdog(struct ste_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->ste_ifp;
	STE_LOCK_ASSERT(sc);

	if (sc->ste_timer == 0 || --sc->ste_timer)
		return;

	ifp->if_oerrors++;
	if_printf(ifp, "watchdog timeout\n");

	ste_txeof(sc);
	ste_txeoc(sc);
	ste_rxeof(sc, -1);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	ste_init_locked(sc);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		ste_start_locked(ifp);
}

static int
ste_shutdown(device_t dev)
{

	return (ste_suspend(dev));
}

static int
ste_suspend(device_t dev)
{
	struct ste_softc *sc;

	sc = device_get_softc(dev);

	STE_LOCK(sc);
	ste_stop(sc);
	ste_setwol(sc);
	STE_UNLOCK(sc);

	return (0);
}
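/*
 * Device resume method: clear any PME status that may be pending from
 * the suspend path and reinitialize the interface if it was up.
 */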
static int
ste_resume(device_t dev)
{
	struct ste_softc *sc;
	struct ifnet *ifp;
	int pmc;
	uint16_t pmstat;

	sc = device_get_softc(dev);
	STE_LOCK(sc);
	if (pci_find_extcap(sc->ste_dev, PCIY_PMG, &pmc) == 0) {
		/* Disable PME and clear PME status. */
		pmstat = pci_read_config(sc->ste_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
			pmstat &= ~PCIM_PSTAT_PMEENABLE;
			pci_write_config(sc->ste_dev,
			    pmc + PCIR_POWER_STATUS, pmstat, 2);
		}
	}
	ifp = sc->ste_ifp;
	if ((ifp->if_flags & IFF_UP) != 0) {
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		ste_init_locked(sc);
	}
	STE_UNLOCK(sc);

	return (0);
}

#define STE_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
#define STE_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
	SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
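/*
 * Attach the per-device sysctl tree: the Rx interrupt moderation
 * timer plus a "stats" node mirroring the hardware MAC counters.
 * The moderation timer is also pulled in as a device hint, so it can
 * be set either way, e.g. (illustrative values):
 *
 *	hint.ste.0.int_rx_mod="64"	# /boot/device.hints
 *	sysctl dev.ste.0.int_rx_mod=64	# at run time
 */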
static void
ste_sysctl_node(struct ste_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *parent;
	struct sysctl_oid *tree;
	struct ste_hw_stats *stats;

	stats = &sc->ste_stats;
	ctx = device_get_sysctl_ctx(sc->ste_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->ste_dev));

	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "int_rx_mod",
	    CTLFLAG_RW, &sc->ste_int_rx_mod, 0, "ste RX interrupt moderation");
	/* Pull in device tunables. */
	sc->ste_int_rx_mod = STE_IM_RX_TIMER_DEFAULT;
	resource_int_value(device_get_name(sc->ste_dev),
	    device_get_unit(sc->ste_dev), "int_rx_mod", &sc->ste_int_rx_mod);

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "STE statistics");
	parent = SYSCTL_CHILDREN(tree);

	/* Rx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
	    NULL, "Rx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	STE_SYSCTL_STAT_ADD64(ctx, child, "good_octets",
	    &stats->rx_bytes, "Good octets");
	STE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->rx_frames, "Good frames");
	STE_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
	    &stats->rx_bcast_frames, "Good broadcast frames");
	STE_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
	    &stats->rx_mcast_frames, "Good multicast frames");
	STE_SYSCTL_STAT_ADD32(ctx, child, "lost_frames",
	    &stats->rx_lost_frames, "Lost frames");

	/* Tx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
	    NULL, "Tx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	STE_SYSCTL_STAT_ADD64(ctx, child, "good_octets",
	    &stats->tx_bytes, "Good octets");
	STE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->tx_frames, "Good frames");
	STE_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
	    &stats->tx_bcast_frames, "Good broadcast frames");
	STE_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
	    &stats->tx_mcast_frames, "Good multicast frames");
	STE_SYSCTL_STAT_ADD32(ctx, child, "carrier_errs",
	    &stats->tx_carrsense_errs, "Carrier sense errors");
	STE_SYSCTL_STAT_ADD32(ctx, child, "single_colls",
	    &stats->tx_single_colls, "Single collisions");
	STE_SYSCTL_STAT_ADD32(ctx, child, "multi_colls",
	    &stats->tx_multi_colls, "Multiple collisions");
	STE_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
	    &stats->tx_late_colls, "Late collisions");
	STE_SYSCTL_STAT_ADD32(ctx, child, "defers",
	    &stats->tx_frames_defered, "Frames with deferrals");
	STE_SYSCTL_STAT_ADD32(ctx, child, "excess_defers",
	    &stats->tx_excess_defers, "Frames with excessive deferrals");
	STE_SYSCTL_STAT_ADD32(ctx, child, "abort",
	    &stats->tx_abort, "Aborted frames due to excessive collisions");
}

#undef STE_SYSCTL_STAT_ADD32
#undef STE_SYSCTL_STAT_ADD64

static void
ste_setwol(struct ste_softc *sc)
{
	struct ifnet *ifp;
	uint16_t pmstat;
	uint8_t val;
	int pmc;

	STE_LOCK_ASSERT(sc);

	if (pci_find_extcap(sc->ste_dev, PCIY_PMG, &pmc) != 0) {
		/* No PM capability; disable WOL. */
		CSR_READ_1(sc, STE_WAKE_EVENT);
		CSR_WRITE_1(sc, STE_WAKE_EVENT, 0);
		return;
	}

	ifp = sc->ste_ifp;
	val = CSR_READ_1(sc, STE_WAKE_EVENT);
	val &= ~(STE_WAKEEVENT_WAKEPKT_ENB | STE_WAKEEVENT_MAGICPKT_ENB |
	    STE_WAKEEVENT_LINKEVT_ENB | STE_WAKEEVENT_WAKEONLAN_ENB);
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
		val |= STE_WAKEEVENT_MAGICPKT_ENB | STE_WAKEEVENT_WAKEONLAN_ENB;
	CSR_WRITE_1(sc, STE_WAKE_EVENT, val);
	/* Request PME. */
	pmstat = pci_read_config(sc->ste_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->ste_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
}
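/*
 * Note: magic-packet wakeup is toggled from userland through the
 * interface capabilities (e.g. "ifconfig ste0 wolmagic"), which lands
 * in the SIOCSIFCAP case of ste_ioctl() above; ste_setwol() then arms
 * the wake event and PME bits on the suspend/shutdown path.
 */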