/*-
 * Copyright (c) 1997, 1998, 1999
 *	Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/ste/if_stereg.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

MODULE_DEPEND(ste, pci, 1, 1, 1);
MODULE_DEPEND(ste, ether, 1, 1, 1);
MODULE_DEPEND(ste, miibus, 1, 1, 1);

/* Define to show Tx error status. */
#define	STE_SHOW_TXERRORS

/*
 * Various supported device vendors/types and their names.
 */
static struct ste_type ste_devs[] = {
	{ ST_VENDORID, ST_DEVICEID_ST201_1, "Sundance ST201 10/100BaseTX" },
	{ ST_VENDORID, ST_DEVICEID_ST201_2, "Sundance ST201 10/100BaseTX" },
	{ DL_VENDORID, DL_DEVICEID_DL10050, "D-Link DL10050 10/100BaseTX" },
	{ 0, 0, NULL }
};

static int	ste_attach(device_t);
static int	ste_detach(device_t);
static int	ste_probe(device_t);
static int	ste_resume(device_t);
static int	ste_shutdown(device_t);
static int	ste_suspend(device_t);

static int	ste_dma_alloc(struct ste_softc *);
static void	ste_dma_free(struct ste_softc *);
static void	ste_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int	ste_eeprom_wait(struct ste_softc *);
static int	ste_encap(struct ste_softc *, struct mbuf **,
		    struct ste_chain *);
static int	ste_ifmedia_upd(struct ifnet *);
static void	ste_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void	ste_init(void *);
static void	ste_init_locked(struct ste_softc *);
static int	ste_init_rx_list(struct ste_softc *);
static void	ste_init_tx_list(struct ste_softc *);
static void	ste_intr(void *);
static int	ste_ioctl(struct ifnet *, u_long, caddr_t);
static int	ste_mii_readreg(struct ste_softc *, struct ste_mii_frame *);
static void	ste_mii_send(struct ste_softc *, uint32_t, int);
static void	ste_mii_sync(struct ste_softc *);
static int	ste_mii_writereg(struct ste_softc *, struct ste_mii_frame *);
static int	ste_miibus_readreg(device_t, int, int);
static void	ste_miibus_statchg(device_t);
static int	ste_miibus_writereg(device_t, int, int, int);
static int	ste_newbuf(struct ste_softc *, struct ste_chain_onefrag *);
static int	ste_read_eeprom(struct ste_softc *, uint16_t *, int, int);
static void	ste_reset(struct ste_softc *);
static void	ste_restart_tx(struct ste_softc *);
static int	ste_rxeof(struct ste_softc *, int);
static void	ste_rxfilter(struct ste_softc *);
static void	ste_setwol(struct ste_softc *);
static void	ste_start(struct ifnet *);
static void	ste_start_locked(struct ifnet *);
static void	ste_stats_clear(struct ste_softc *);
static void	ste_stats_update(struct ste_softc *);
static void	ste_stop(struct ste_softc *);
static void	ste_sysctl_node(struct ste_softc *);
static void	ste_tick(void *);
static void	ste_txeoc(struct ste_softc *);
static void	ste_txeof(struct ste_softc *);
static void	ste_wait(struct ste_softc *);
static void	ste_watchdog(struct ste_softc *);

static device_method_t ste_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		ste_probe),
	DEVMETHOD(device_attach,	ste_attach),
	DEVMETHOD(device_detach,	ste_detach),
	DEVMETHOD(device_shutdown,	ste_shutdown),
	DEVMETHOD(device_suspend,	ste_suspend),
	DEVMETHOD(device_resume,	ste_resume),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	ste_miibus_readreg),
	DEVMETHOD(miibus_writereg,	ste_miibus_writereg),
	DEVMETHOD(miibus_statchg,	ste_miibus_statchg),

	{ 0, 0 }
};

static driver_t ste_driver = {
	"ste",
	ste_methods,
	sizeof(struct ste_softc)
};

static devclass_t ste_devclass;

DRIVER_MODULE(ste, pci, ste_driver, ste_devclass, 0, 0);
DRIVER_MODULE(miibus, ste, miibus_driver, miibus_devclass, 0, 0);
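
/*
 * Read-modify-write helpers for 8/16/32-bit CSR accesses; each one
 * reads the register, sets or clears the given bits and writes the
 * result back.
 */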
#define	STE_SETBIT4(sc, reg, x)				\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))

#define	STE_CLRBIT4(sc, reg, x)				\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))

#define	STE_SETBIT2(sc, reg, x)				\
	CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) | (x))

#define	STE_CLRBIT2(sc, reg, x)				\
	CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) & ~(x))

#define	STE_SETBIT1(sc, reg, x)				\
	CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) | (x))

#define	STE_CLRBIT1(sc, reg, x)				\
	CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) & ~(x))

#define	MII_SET(x)	STE_SETBIT1(sc, STE_PHYCTL, x)
#define	MII_CLR(x)	STE_CLRBIT1(sc, STE_PHYCTL, x)

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
static void
ste_mii_sync(struct ste_softc *sc)
{
	int i;

	MII_SET(STE_PHYCTL_MDIR|STE_PHYCTL_MDATA);

	for (i = 0; i < 32; i++) {
		MII_SET(STE_PHYCTL_MCLK);
		DELAY(1);
		MII_CLR(STE_PHYCTL_MCLK);
		DELAY(1);
	}
}

/*
 * Clock a series of bits through the MII.
 */
static void
ste_mii_send(struct ste_softc *sc, uint32_t bits, int cnt)
{
	int i;

	MII_CLR(STE_PHYCTL_MCLK);

	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			MII_SET(STE_PHYCTL_MDATA);
		} else {
			MII_CLR(STE_PHYCTL_MDATA);
		}
		DELAY(1);
		MII_CLR(STE_PHYCTL_MCLK);
		DELAY(1);
		MII_SET(STE_PHYCTL_MCLK);
	}
}

/*
 * Read a PHY register through the MII.
 */
static int
ste_mii_readreg(struct ste_softc *sc, struct ste_mii_frame *frame)
{
	int i, ack;

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = STE_MII_STARTDELIM;
	frame->mii_opcode = STE_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	CSR_WRITE_2(sc, STE_PHYCTL, 0);
	/*
	 * Turn on data xmit.
	 */
	MII_SET(STE_PHYCTL_MDIR);

	ste_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	ste_mii_send(sc, frame->mii_stdelim, 2);
	ste_mii_send(sc, frame->mii_opcode, 2);
	ste_mii_send(sc, frame->mii_phyaddr, 5);
	ste_mii_send(sc, frame->mii_regaddr, 5);

	/* Turn off xmit. */
	MII_CLR(STE_PHYCTL_MDIR);

	/* Idle bit */
	MII_CLR((STE_PHYCTL_MCLK|STE_PHYCTL_MDATA));
	DELAY(1);
	MII_SET(STE_PHYCTL_MCLK);
	DELAY(1);

	/* Check for ack */
	MII_CLR(STE_PHYCTL_MCLK);
	DELAY(1);
	ack = CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA;
	MII_SET(STE_PHYCTL_MCLK);
	DELAY(1);

	/*
	 * Now try reading data bits.  If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for (i = 0; i < 16; i++) {
			MII_CLR(STE_PHYCTL_MCLK);
			DELAY(1);
			MII_SET(STE_PHYCTL_MCLK);
			DELAY(1);
		}
		goto fail;
	}

	for (i = 0x8000; i; i >>= 1) {
		MII_CLR(STE_PHYCTL_MCLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA)
				frame->mii_data |= i;
			DELAY(1);
		}
		MII_SET(STE_PHYCTL_MCLK);
		DELAY(1);
	}

fail:
	MII_CLR(STE_PHYCTL_MCLK);
	DELAY(1);
	MII_SET(STE_PHYCTL_MCLK);
	DELAY(1);

	if (ack)
		return (1);
	return (0);
}

/*
 * Write to a PHY register through the MII.
 */
static int
ste_mii_writereg(struct ste_softc *sc, struct ste_mii_frame *frame)
{

	/*
	 * Set up frame for TX.
331 */ 332 333 frame->mii_stdelim = STE_MII_STARTDELIM; 334 frame->mii_opcode = STE_MII_WRITEOP; 335 frame->mii_turnaround = STE_MII_TURNAROUND; 336 337 /* 338 * Turn on data output. 339 */ 340 MII_SET(STE_PHYCTL_MDIR); 341 342 ste_mii_sync(sc); 343 344 ste_mii_send(sc, frame->mii_stdelim, 2); 345 ste_mii_send(sc, frame->mii_opcode, 2); 346 ste_mii_send(sc, frame->mii_phyaddr, 5); 347 ste_mii_send(sc, frame->mii_regaddr, 5); 348 ste_mii_send(sc, frame->mii_turnaround, 2); 349 ste_mii_send(sc, frame->mii_data, 16); 350 351 /* Idle bit. */ 352 MII_SET(STE_PHYCTL_MCLK); 353 DELAY(1); 354 MII_CLR(STE_PHYCTL_MCLK); 355 DELAY(1); 356 357 /* 358 * Turn off xmit. 359 */ 360 MII_CLR(STE_PHYCTL_MDIR); 361 362 return (0); 363 } 364 365 static int 366 ste_miibus_readreg(device_t dev, int phy, int reg) 367 { 368 struct ste_softc *sc; 369 struct ste_mii_frame frame; 370 371 sc = device_get_softc(dev); 372 bzero((char *)&frame, sizeof(frame)); 373 374 frame.mii_phyaddr = phy; 375 frame.mii_regaddr = reg; 376 ste_mii_readreg(sc, &frame); 377 378 return (frame.mii_data); 379 } 380 381 static int 382 ste_miibus_writereg(device_t dev, int phy, int reg, int data) 383 { 384 struct ste_softc *sc; 385 struct ste_mii_frame frame; 386 387 sc = device_get_softc(dev); 388 bzero((char *)&frame, sizeof(frame)); 389 390 frame.mii_phyaddr = phy; 391 frame.mii_regaddr = reg; 392 frame.mii_data = data; 393 394 ste_mii_writereg(sc, &frame); 395 396 return (0); 397 } 398 399 static void 400 ste_miibus_statchg(device_t dev) 401 { 402 struct ste_softc *sc; 403 struct mii_data *mii; 404 struct ifnet *ifp; 405 uint16_t cfg; 406 407 sc = device_get_softc(dev); 408 409 mii = device_get_softc(sc->ste_miibus); 410 ifp = sc->ste_ifp; 411 if (mii == NULL || ifp == NULL || 412 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 413 return; 414 415 sc->ste_flags &= ~STE_FLAG_LINK; 416 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 417 (IFM_ACTIVE | IFM_AVALID)) { 418 switch (IFM_SUBTYPE(mii->mii_media_active)) { 419 case IFM_10_T: 420 case IFM_100_TX: 421 case IFM_100_FX: 422 case IFM_100_T4: 423 sc->ste_flags |= STE_FLAG_LINK; 424 default: 425 break; 426 } 427 } 428 429 /* Program MACs with resolved speed/duplex/flow-control. */ 430 if ((sc->ste_flags & STE_FLAG_LINK) != 0) { 431 cfg = CSR_READ_2(sc, STE_MACCTL0); 432 cfg &= ~(STE_MACCTL0_FLOWCTL_ENABLE | STE_MACCTL0_FULLDUPLEX); 433 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 434 /* 435 * ST201 data sheet says driver should enable receiving 436 * MAC control frames bit of receive mode register to 437 * receive flow-control frames but the register has no 438 * such bits. In addition the controller has no ability 439 * to send pause frames so it should be handled in 440 * driver. Implementing pause timer handling in driver 441 * layer is not trivial, so don't enable flow-control 442 * here. 
443 */ 444 cfg |= STE_MACCTL0_FULLDUPLEX; 445 } 446 CSR_WRITE_2(sc, STE_MACCTL0, cfg); 447 } 448 } 449 450 static int 451 ste_ifmedia_upd(struct ifnet *ifp) 452 { 453 struct ste_softc *sc; 454 struct mii_data *mii; 455 struct mii_softc *miisc; 456 int error; 457 458 sc = ifp->if_softc; 459 STE_LOCK(sc); 460 mii = device_get_softc(sc->ste_miibus); 461 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 462 PHY_RESET(miisc); 463 error = mii_mediachg(mii); 464 STE_UNLOCK(sc); 465 466 return (error); 467 } 468 469 static void 470 ste_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 471 { 472 struct ste_softc *sc; 473 struct mii_data *mii; 474 475 sc = ifp->if_softc; 476 mii = device_get_softc(sc->ste_miibus); 477 478 STE_LOCK(sc); 479 if ((ifp->if_flags & IFF_UP) == 0) { 480 STE_UNLOCK(sc); 481 return; 482 } 483 mii_pollstat(mii); 484 ifmr->ifm_active = mii->mii_media_active; 485 ifmr->ifm_status = mii->mii_media_status; 486 STE_UNLOCK(sc); 487 } 488 489 static void 490 ste_wait(struct ste_softc *sc) 491 { 492 int i; 493 494 for (i = 0; i < STE_TIMEOUT; i++) { 495 if (!(CSR_READ_4(sc, STE_DMACTL) & STE_DMACTL_DMA_HALTINPROG)) 496 break; 497 DELAY(1); 498 } 499 500 if (i == STE_TIMEOUT) 501 device_printf(sc->ste_dev, "command never completed!\n"); 502 } 503 504 /* 505 * The EEPROM is slow: give it time to come ready after issuing 506 * it a command. 507 */ 508 static int 509 ste_eeprom_wait(struct ste_softc *sc) 510 { 511 int i; 512 513 DELAY(1000); 514 515 for (i = 0; i < 100; i++) { 516 if (CSR_READ_2(sc, STE_EEPROM_CTL) & STE_EECTL_BUSY) 517 DELAY(1000); 518 else 519 break; 520 } 521 522 if (i == 100) { 523 device_printf(sc->ste_dev, "eeprom failed to come ready\n"); 524 return (1); 525 } 526 527 return (0); 528 } 529 530 /* 531 * Read a sequence of words from the EEPROM. Note that ethernet address 532 * data is stored in the EEPROM in network byte order. 533 */ 534 static int 535 ste_read_eeprom(struct ste_softc *sc, uint16_t *dest, int off, int cnt) 536 { 537 int err = 0, i; 538 539 if (ste_eeprom_wait(sc)) 540 return (1); 541 542 for (i = 0; i < cnt; i++) { 543 CSR_WRITE_2(sc, STE_EEPROM_CTL, STE_EEOPCODE_READ | (off + i)); 544 err = ste_eeprom_wait(sc); 545 if (err) 546 break; 547 *dest = le16toh(CSR_READ_2(sc, STE_EEPROM_DATA)); 548 dest++; 549 } 550 551 return (err ? 1 : 0); 552 } 553 554 static void 555 ste_rxfilter(struct ste_softc *sc) 556 { 557 struct ifnet *ifp; 558 struct ifmultiaddr *ifma; 559 uint32_t hashes[2] = { 0, 0 }; 560 uint8_t rxcfg; 561 int h; 562 563 STE_LOCK_ASSERT(sc); 564 565 ifp = sc->ste_ifp; 566 rxcfg = CSR_READ_1(sc, STE_RX_MODE); 567 rxcfg |= STE_RXMODE_UNICAST; 568 rxcfg &= ~(STE_RXMODE_ALLMULTI | STE_RXMODE_MULTIHASH | 569 STE_RXMODE_BROADCAST | STE_RXMODE_PROMISC); 570 if (ifp->if_flags & IFF_BROADCAST) 571 rxcfg |= STE_RXMODE_BROADCAST; 572 if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) { 573 if ((ifp->if_flags & IFF_ALLMULTI) != 0) 574 rxcfg |= STE_RXMODE_ALLMULTI; 575 if ((ifp->if_flags & IFF_PROMISC) != 0) 576 rxcfg |= STE_RXMODE_PROMISC; 577 goto chipit; 578 } 579 580 rxcfg |= STE_RXMODE_MULTIHASH; 581 /* Now program new ones. 
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) & 0x3F;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
	}
	if_maddr_runlock(ifp);

chipit:
	CSR_WRITE_2(sc, STE_MAR0, hashes[0] & 0xFFFF);
	CSR_WRITE_2(sc, STE_MAR1, (hashes[0] >> 16) & 0xFFFF);
	CSR_WRITE_2(sc, STE_MAR2, hashes[1] & 0xFFFF);
	CSR_WRITE_2(sc, STE_MAR3, (hashes[1] >> 16) & 0xFFFF);
	CSR_WRITE_1(sc, STE_RX_MODE, rxcfg);
	CSR_READ_1(sc, STE_RX_MODE);
}

#ifdef DEVICE_POLLING
static poll_handler_t ste_poll, ste_poll_locked;

static int
ste_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct ste_softc *sc = ifp->if_softc;
	int rx_npkts = 0;

	STE_LOCK(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		rx_npkts = ste_poll_locked(ifp, cmd, count);
	STE_UNLOCK(sc);
	return (rx_npkts);
}

static int
ste_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct ste_softc *sc = ifp->if_softc;
	int rx_npkts;

	STE_LOCK_ASSERT(sc);

	rx_npkts = ste_rxeof(sc, count);
	ste_txeof(sc);
	ste_txeoc(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		ste_start_locked(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) {
		uint16_t status;

		status = CSR_READ_2(sc, STE_ISR_ACK);

		if (status & STE_ISR_STATS_OFLOW)
			ste_stats_update(sc);

		if (status & STE_ISR_HOSTERR) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			ste_init_locked(sc);
		}
	}
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */

static void
ste_intr(void *xsc)
{
	struct ste_softc *sc;
	struct ifnet *ifp;
	uint16_t intrs, status;

	sc = xsc;
	STE_LOCK(sc);
	ifp = sc->ste_ifp;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING) {
		STE_UNLOCK(sc);
		return;
	}
#endif
	/* Reading STE_ISR_ACK clears STE_IMR register. */
	status = CSR_READ_2(sc, STE_ISR_ACK);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		STE_UNLOCK(sc);
		return;
	}

	intrs = STE_INTRS;
	if (status == 0xFFFF || (status & intrs) == 0)
		goto done;

	if (sc->ste_int_rx_act > 0) {
		status &= ~STE_ISR_RX_DMADONE;
		intrs &= ~STE_IMR_RX_DMADONE;
	}

	if ((status & (STE_ISR_SOFTINTR | STE_ISR_RX_DMADONE)) != 0) {
		ste_rxeof(sc, -1);
		/*
		 * The controller has no Rx interrupt moderation
		 * feature.  Receiving 64 byte frames from the wire
		 * generates too many interrupts, which in turn leaves
		 * the system too busy to process other useful things.
		 * Fortunately the ST201 supports a single-shot timer,
		 * so use that timer to implement Rx interrupt
		 * moderation in the driver.  This adds more register
		 * accesses, but it greatly reduces the number of Rx
		 * interrupts under high network load.
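		 * The countdown register is armed below with
		 * STE_TIMER_USECS(ste_int_rx_mod) and the Rx DMA-done
		 * interrupt stays masked (ste_int_rx_act != 0) until
		 * that timer expires.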
696 */ 697 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 && 698 (sc->ste_int_rx_mod != 0)) { 699 if ((status & STE_ISR_RX_DMADONE) != 0) { 700 CSR_WRITE_2(sc, STE_COUNTDOWN, 701 STE_TIMER_USECS(sc->ste_int_rx_mod)); 702 intrs &= ~STE_IMR_RX_DMADONE; 703 sc->ste_int_rx_act = 1; 704 } else { 705 intrs |= STE_IMR_RX_DMADONE; 706 sc->ste_int_rx_act = 0; 707 } 708 } 709 } 710 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 711 if ((status & STE_ISR_TX_DMADONE) != 0) 712 ste_txeof(sc); 713 if ((status & STE_ISR_TX_DONE) != 0) 714 ste_txeoc(sc); 715 if ((status & STE_ISR_STATS_OFLOW) != 0) 716 ste_stats_update(sc); 717 if ((status & STE_ISR_HOSTERR) != 0) { 718 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 719 ste_init_locked(sc); 720 STE_UNLOCK(sc); 721 return; 722 } 723 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 724 ste_start_locked(ifp); 725 done: 726 /* Re-enable interrupts */ 727 CSR_WRITE_2(sc, STE_IMR, intrs); 728 } 729 STE_UNLOCK(sc); 730 } 731 732 /* 733 * A frame has been uploaded: pass the resulting mbuf chain up to 734 * the higher level protocols. 735 */ 736 static int 737 ste_rxeof(struct ste_softc *sc, int count) 738 { 739 struct mbuf *m; 740 struct ifnet *ifp; 741 struct ste_chain_onefrag *cur_rx; 742 uint32_t rxstat; 743 int total_len, rx_npkts; 744 745 ifp = sc->ste_ifp; 746 747 bus_dmamap_sync(sc->ste_cdata.ste_rx_list_tag, 748 sc->ste_cdata.ste_rx_list_map, 749 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 750 751 cur_rx = sc->ste_cdata.ste_rx_head; 752 for (rx_npkts = 0; rx_npkts < STE_RX_LIST_CNT; rx_npkts++, 753 cur_rx = cur_rx->ste_next) { 754 rxstat = le32toh(cur_rx->ste_ptr->ste_status); 755 if ((rxstat & STE_RXSTAT_DMADONE) == 0) 756 break; 757 #ifdef DEVICE_POLLING 758 if (ifp->if_capenable & IFCAP_POLLING) { 759 if (count == 0) 760 break; 761 count--; 762 } 763 #endif 764 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 765 break; 766 /* 767 * If an error occurs, update stats, clear the 768 * status word and leave the mbuf cluster in place: 769 * it should simply get re-used next time this descriptor 770 * comes up in the ring. 771 */ 772 if (rxstat & STE_RXSTAT_FRAME_ERR) { 773 ifp->if_ierrors++; 774 cur_rx->ste_ptr->ste_status = 0; 775 continue; 776 } 777 778 /* No errors; receive the packet. */ 779 m = cur_rx->ste_mbuf; 780 total_len = STE_RX_BYTES(rxstat); 781 782 /* 783 * Try to conjure up a new mbuf cluster. If that 784 * fails, it means we have an out of memory condition and 785 * should leave the buffer in place and continue. This will 786 * result in a lost packet, but there's little else we 787 * can do in this situation. 788 */ 789 if (ste_newbuf(sc, cur_rx) != 0) { 790 ifp->if_iqdrops++; 791 cur_rx->ste_ptr->ste_status = 0; 792 continue; 793 } 794 795 m->m_pkthdr.rcvif = ifp; 796 m->m_pkthdr.len = m->m_len = total_len; 797 798 ifp->if_ipackets++; 799 STE_UNLOCK(sc); 800 (*ifp->if_input)(ifp, m); 801 STE_LOCK(sc); 802 } 803 804 if (rx_npkts > 0) { 805 sc->ste_cdata.ste_rx_head = cur_rx; 806 bus_dmamap_sync(sc->ste_cdata.ste_rx_list_tag, 807 sc->ste_cdata.ste_rx_list_map, 808 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 809 } 810 811 return (rx_npkts); 812 } 813 814 static void 815 ste_txeoc(struct ste_softc *sc) 816 { 817 uint16_t txstat; 818 struct ifnet *ifp; 819 820 STE_LOCK_ASSERT(sc); 821 822 ifp = sc->ste_ifp; 823 824 /* 825 * STE_TX_STATUS register implements a queue of up to 31 826 * transmit status byte. Writing an arbitrary value to the 827 * register will advance the queue to the next transmit 828 * status byte. 
	 * the STE_TX_STATUS register after sending more than 31
	 * frames, the controller will stall, so the driver should
	 * re-wake the Tx MAC.  This is the most severe limitation
	 * of the ST201 based controllers.
	 */
	for (;;) {
		txstat = CSR_READ_2(sc, STE_TX_STATUS);
		if ((txstat & STE_TXSTATUS_TXDONE) == 0)
			break;
		if ((txstat & (STE_TXSTATUS_UNDERRUN |
		    STE_TXSTATUS_EXCESSCOLLS | STE_TXSTATUS_RECLAIMERR |
		    STE_TXSTATUS_STATSOFLOW)) != 0) {
			ifp->if_oerrors++;
#ifdef	STE_SHOW_TXERRORS
			device_printf(sc->ste_dev, "TX error : 0x%b\n",
			    txstat & 0xFF, STE_ERR_BITS);
#endif
			if ((txstat & STE_TXSTATUS_UNDERRUN) != 0 &&
			    sc->ste_tx_thresh < STE_PACKET_SIZE) {
				sc->ste_tx_thresh += STE_MIN_FRAMELEN;
				if (sc->ste_tx_thresh > STE_PACKET_SIZE)
					sc->ste_tx_thresh = STE_PACKET_SIZE;
				device_printf(sc->ste_dev,
				    "TX underrun, increasing TX"
				    " start threshold to %d bytes\n",
				    sc->ste_tx_thresh);
				/* Make sure to disable active DMA cycles. */
				STE_SETBIT4(sc, STE_DMACTL,
				    STE_DMACTL_TXDMA_STALL);
				ste_wait(sc);
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				ste_init_locked(sc);
				break;
			}
			/* Restart Tx. */
			ste_restart_tx(sc);
		}
		/*
		 * Advance to the next status and ACK the TxComplete
		 * interrupt.  The ST201 data sheet is wrong here: to
		 * get the next Tx status we have to write both the
		 * STE_TX_STATUS and STE_TX_FRAMEID registers.
		 * Otherwise the controller returns the same status
		 * and does not acknowledge the Tx completion
		 * interrupt.
		 */
		CSR_WRITE_2(sc, STE_TX_STATUS, txstat);
	}
}

static void
ste_tick(void *arg)
{
	struct ste_softc *sc;
	struct mii_data *mii;

	sc = (struct ste_softc *)arg;

	STE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->ste_miibus);
	mii_tick(mii);
	/*
	 * ukphy(4) does not seem to generate a callback that reports
	 * the resolved link state, so if we know we lost the link,
	 * explicitly check the link state.
	 */
	if ((sc->ste_flags & STE_FLAG_LINK) == 0)
		ste_miibus_statchg(sc->ste_dev);
	/*
	 * Because we are not generating a Tx completion
	 * interrupt for every frame, reclaim transmitted
	 * buffers here.
902 */ 903 ste_txeof(sc); 904 ste_txeoc(sc); 905 ste_stats_update(sc); 906 ste_watchdog(sc); 907 callout_reset(&sc->ste_callout, hz, ste_tick, sc); 908 } 909 910 static void 911 ste_txeof(struct ste_softc *sc) 912 { 913 struct ifnet *ifp; 914 struct ste_chain *cur_tx; 915 uint32_t txstat; 916 int idx; 917 918 STE_LOCK_ASSERT(sc); 919 920 ifp = sc->ste_ifp; 921 idx = sc->ste_cdata.ste_tx_cons; 922 if (idx == sc->ste_cdata.ste_tx_prod) 923 return; 924 925 bus_dmamap_sync(sc->ste_cdata.ste_tx_list_tag, 926 sc->ste_cdata.ste_tx_list_map, 927 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 928 929 while (idx != sc->ste_cdata.ste_tx_prod) { 930 cur_tx = &sc->ste_cdata.ste_tx_chain[idx]; 931 txstat = le32toh(cur_tx->ste_ptr->ste_ctl); 932 if ((txstat & STE_TXCTL_DMADONE) == 0) 933 break; 934 bus_dmamap_sync(sc->ste_cdata.ste_tx_tag, cur_tx->ste_map, 935 BUS_DMASYNC_POSTWRITE); 936 bus_dmamap_unload(sc->ste_cdata.ste_tx_tag, cur_tx->ste_map); 937 KASSERT(cur_tx->ste_mbuf != NULL, 938 ("%s: freeing NULL mbuf!\n", __func__)); 939 m_freem(cur_tx->ste_mbuf); 940 cur_tx->ste_mbuf = NULL; 941 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 942 ifp->if_opackets++; 943 sc->ste_cdata.ste_tx_cnt--; 944 STE_INC(idx, STE_TX_LIST_CNT); 945 } 946 947 sc->ste_cdata.ste_tx_cons = idx; 948 if (sc->ste_cdata.ste_tx_cnt == 0) 949 sc->ste_timer = 0; 950 } 951 952 static void 953 ste_stats_clear(struct ste_softc *sc) 954 { 955 956 STE_LOCK_ASSERT(sc); 957 958 /* Rx stats. */ 959 CSR_READ_2(sc, STE_STAT_RX_OCTETS_LO); 960 CSR_READ_2(sc, STE_STAT_RX_OCTETS_HI); 961 CSR_READ_2(sc, STE_STAT_RX_FRAMES); 962 CSR_READ_1(sc, STE_STAT_RX_BCAST); 963 CSR_READ_1(sc, STE_STAT_RX_MCAST); 964 CSR_READ_1(sc, STE_STAT_RX_LOST); 965 /* Tx stats. */ 966 CSR_READ_2(sc, STE_STAT_TX_OCTETS_LO); 967 CSR_READ_2(sc, STE_STAT_TX_OCTETS_HI); 968 CSR_READ_2(sc, STE_STAT_TX_FRAMES); 969 CSR_READ_1(sc, STE_STAT_TX_BCAST); 970 CSR_READ_1(sc, STE_STAT_TX_MCAST); 971 CSR_READ_1(sc, STE_STAT_CARRIER_ERR); 972 CSR_READ_1(sc, STE_STAT_SINGLE_COLLS); 973 CSR_READ_1(sc, STE_STAT_MULTI_COLLS); 974 CSR_READ_1(sc, STE_STAT_LATE_COLLS); 975 CSR_READ_1(sc, STE_STAT_TX_DEFER); 976 CSR_READ_1(sc, STE_STAT_TX_EXDEFER); 977 CSR_READ_1(sc, STE_STAT_TX_ABORT); 978 } 979 980 static void 981 ste_stats_update(struct ste_softc *sc) 982 { 983 struct ifnet *ifp; 984 struct ste_hw_stats *stats; 985 uint32_t val; 986 987 STE_LOCK_ASSERT(sc); 988 989 ifp = sc->ste_ifp; 990 stats = &sc->ste_stats; 991 /* Rx stats. */ 992 val = (uint32_t)CSR_READ_2(sc, STE_STAT_RX_OCTETS_LO) | 993 ((uint32_t)CSR_READ_2(sc, STE_STAT_RX_OCTETS_HI)) << 16; 994 val &= 0x000FFFFF; 995 stats->rx_bytes += val; 996 stats->rx_frames += CSR_READ_2(sc, STE_STAT_RX_FRAMES); 997 stats->rx_bcast_frames += CSR_READ_1(sc, STE_STAT_RX_BCAST); 998 stats->rx_mcast_frames += CSR_READ_1(sc, STE_STAT_RX_MCAST); 999 stats->rx_lost_frames += CSR_READ_1(sc, STE_STAT_RX_LOST); 1000 /* Tx stats. 
	val = (uint32_t)CSR_READ_2(sc, STE_STAT_TX_OCTETS_LO) |
	    ((uint32_t)CSR_READ_2(sc, STE_STAT_TX_OCTETS_HI)) << 16;
	val &= 0x000FFFFF;
	stats->tx_bytes += val;
	stats->tx_frames += CSR_READ_2(sc, STE_STAT_TX_FRAMES);
	stats->tx_bcast_frames += CSR_READ_1(sc, STE_STAT_TX_BCAST);
	stats->tx_mcast_frames += CSR_READ_1(sc, STE_STAT_TX_MCAST);
	stats->tx_carrsense_errs += CSR_READ_1(sc, STE_STAT_CARRIER_ERR);
	val = CSR_READ_1(sc, STE_STAT_SINGLE_COLLS);
	stats->tx_single_colls += val;
	ifp->if_collisions += val;
	val = CSR_READ_1(sc, STE_STAT_MULTI_COLLS);
	stats->tx_multi_colls += val;
	ifp->if_collisions += val;
	val = CSR_READ_1(sc, STE_STAT_LATE_COLLS);
	stats->tx_late_colls += val;
	ifp->if_collisions += val;
	stats->tx_frames_defered += CSR_READ_1(sc, STE_STAT_TX_DEFER);
	stats->tx_excess_defers += CSR_READ_1(sc, STE_STAT_TX_EXDEFER);
	stats->tx_abort += CSR_READ_1(sc, STE_STAT_TX_ABORT);
}

/*
 * Probe for a Sundance ST201 chip.  Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
ste_probe(device_t dev)
{
	struct ste_type *t;

	t = ste_devs;

	while (t->ste_name != NULL) {
		if ((pci_get_vendor(dev) == t->ste_vid) &&
		    (pci_get_device(dev) == t->ste_did)) {
			device_set_desc(dev, t->ste_name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return (ENXIO);
}

/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
ste_attach(device_t dev)
{
	struct ste_softc *sc;
	struct ifnet *ifp;
	uint16_t eaddr[ETHER_ADDR_LEN / 2];
	int error = 0, phy, pmc, prefer_iomap, rid;

	sc = device_get_softc(dev);
	sc->ste_dev = dev;

	/*
	 * Only use one PHY even though this chip reports multiple.
	 * Note: on the DFE-550 the PHY is at 1, while on the DFE-580
	 * it is at 0 & 1.  It is rev 0x12.
	 */
	if (pci_get_vendor(dev) == DL_VENDORID &&
	    pci_get_device(dev) == DL_DEVICEID_DL10050 &&
	    pci_get_revid(dev) == 0x12)
		sc->ste_flags |= STE_FLAG_ONE_PHY;

	mtx_init(&sc->ste_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	/*
	 * Prefer memory space register mapping over IO space, but use
	 * IO space for a device that is known to have issues on memory
	 * mapping.
	 */
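	/*
	 * The preference can also be overridden per device with a
	 * loader hint, e.g. hint.ste.0.prefer_iomap="1", which is
	 * what the resource_int_value() lookup below picks up.
	 */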
	prefer_iomap = 0;
	if (pci_get_device(dev) == ST_DEVICEID_ST201_1)
		prefer_iomap = 1;
	else
		resource_int_value(device_get_name(sc->ste_dev),
		    device_get_unit(sc->ste_dev), "prefer_iomap",
		    &prefer_iomap);
	if (prefer_iomap == 0) {
		sc->ste_res_id = PCIR_BAR(1);
		sc->ste_res_type = SYS_RES_MEMORY;
		sc->ste_res = bus_alloc_resource_any(dev, sc->ste_res_type,
		    &sc->ste_res_id, RF_ACTIVE);
	}
	if (prefer_iomap || sc->ste_res == NULL) {
		sc->ste_res_id = PCIR_BAR(0);
		sc->ste_res_type = SYS_RES_IOPORT;
		sc->ste_res = bus_alloc_resource_any(dev, sc->ste_res_type,
		    &sc->ste_res_id, RF_ACTIVE);
	}
	if (sc->ste_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate interrupt */
	rid = 0;
	sc->ste_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->ste_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	callout_init_mtx(&sc->ste_callout, &sc->ste_mtx, 0);

	/* Reset the adapter. */
	ste_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	if (ste_read_eeprom(sc, eaddr, STE_EEADDR_NODE0, ETHER_ADDR_LEN / 2)) {
		device_printf(dev, "failed to read station address\n");
		error = ENXIO;
		goto fail;
	}
	ste_sysctl_node(sc);

	if ((error = ste_dma_alloc(sc)) != 0)
		goto fail;

	ifp = sc->ste_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}

	/* Do MII setup. */
	phy = MII_PHY_ANY;
	if ((sc->ste_flags & STE_FLAG_ONE_PHY) != 0)
		phy = 0;
	error = mii_attach(dev, &sc->ste_miibus, ifp, ste_ifmedia_upd,
	    ste_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ste_ioctl;
	ifp->if_start = ste_start;
	ifp->if_init = ste_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, STE_TX_LIST_CNT - 1);
	ifp->if_snd.ifq_drv_maxlen = STE_TX_LIST_CNT - 1;
	IFQ_SET_READY(&ifp->if_snd);

	sc->ste_tx_thresh = STE_TXSTART_THRESH;

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, (uint8_t *)eaddr);

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0)
		ifp->if_capabilities |= IFCAP_WOL_MAGIC;
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->ste_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, ste_intr, sc, &sc->ste_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		ste_detach(dev);

	return (error);
}

/*
 * Shutdown hardware and free up resources.  This can be called any
 * time after the mutex has been initialized.
 * It is called in both the error case in attach and the normal detach
 * case, so it needs to be careful about only freeing resources that
 * have actually been allocated.
 */
static int
ste_detach(device_t dev)
{
	struct ste_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->ste_mtx), ("ste mutex not initialized"));
	ifp = sc->ste_ifp;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		ether_ifdetach(ifp);
		STE_LOCK(sc);
		ste_stop(sc);
		STE_UNLOCK(sc);
		callout_drain(&sc->ste_callout);
	}
	if (sc->ste_miibus)
		device_delete_child(dev, sc->ste_miibus);
	bus_generic_detach(dev);

	if (sc->ste_intrhand)
		bus_teardown_intr(dev, sc->ste_irq, sc->ste_intrhand);
	if (sc->ste_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ste_irq);
	if (sc->ste_res)
		bus_release_resource(dev, sc->ste_res_type, sc->ste_res_id,
		    sc->ste_res);

	if (ifp)
		if_free(ifp);

	ste_dma_free(sc);
	mtx_destroy(&sc->ste_mtx);

	return (0);
}

struct ste_dmamap_arg {
	bus_addr_t	ste_busaddr;
};

static void
ste_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct ste_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	ctx = (struct ste_dmamap_arg *)arg;
	ctx->ste_busaddr = segs[0].ds_addr;
}

static int
ste_dma_alloc(struct ste_softc *sc)
{
	struct ste_chain *txc;
	struct ste_chain_onefrag *rxc;
	struct ste_dmamap_arg ctx;
	int error, i;

	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->ste_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->ste_cdata.ste_parent_tag);
	if (error != 0) {
		device_printf(sc->ste_dev,
		    "could not create parent DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Tx descriptor list. */
	error = bus_dma_tag_create(
	    sc->ste_cdata.ste_parent_tag, /* parent */
	    STE_DESC_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    STE_TX_LIST_SZ,		/* maxsize */
	    1,				/* nsegments */
	    STE_TX_LIST_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->ste_cdata.ste_tx_list_tag);
	if (error != 0) {
		device_printf(sc->ste_dev,
		    "could not create Tx list DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Rx descriptor list. */
	error = bus_dma_tag_create(
	    sc->ste_cdata.ste_parent_tag, /* parent */
	    STE_DESC_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    STE_RX_LIST_SZ,		/* maxsize */
	    1,				/* nsegments */
	    STE_RX_LIST_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->ste_cdata.ste_rx_list_tag);
	if (error != 0) {
		device_printf(sc->ste_dev,
		    "could not create Rx list DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Tx buffers. */
	error = bus_dma_tag_create(
	    sc->ste_cdata.ste_parent_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * STE_MAXFRAGS,	/* maxsize */
	    STE_MAXFRAGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->ste_cdata.ste_tx_tag);
	if (error != 0) {
		device_printf(sc->ste_dev, "could not create Tx DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for Rx buffers. */
	error = bus_dma_tag_create(
	    sc->ste_cdata.ste_parent_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->ste_cdata.ste_rx_tag);
	if (error != 0) {
		device_printf(sc->ste_dev, "could not create Rx DMA tag.\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx list. */
	error = bus_dmamem_alloc(sc->ste_cdata.ste_tx_list_tag,
	    (void **)&sc->ste_ldata.ste_tx_list,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->ste_cdata.ste_tx_list_map);
	if (error != 0) {
		device_printf(sc->ste_dev,
		    "could not allocate DMA'able memory for Tx list.\n");
		goto fail;
	}
	ctx.ste_busaddr = 0;
	error = bus_dmamap_load(sc->ste_cdata.ste_tx_list_tag,
	    sc->ste_cdata.ste_tx_list_map, sc->ste_ldata.ste_tx_list,
	    STE_TX_LIST_SZ, ste_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.ste_busaddr == 0) {
		device_printf(sc->ste_dev,
		    "could not load DMA'able memory for Tx list.\n");
		goto fail;
	}
	sc->ste_ldata.ste_tx_list_paddr = ctx.ste_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx list. */
	error = bus_dmamem_alloc(sc->ste_cdata.ste_rx_list_tag,
	    (void **)&sc->ste_ldata.ste_rx_list,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->ste_cdata.ste_rx_list_map);
	if (error != 0) {
		device_printf(sc->ste_dev,
		    "could not allocate DMA'able memory for Rx list.\n");
		goto fail;
	}
	ctx.ste_busaddr = 0;
	error = bus_dmamap_load(sc->ste_cdata.ste_rx_list_tag,
	    sc->ste_cdata.ste_rx_list_map, sc->ste_ldata.ste_rx_list,
	    STE_RX_LIST_SZ, ste_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.ste_busaddr == 0) {
		device_printf(sc->ste_dev,
		    "could not load DMA'able memory for Rx list.\n");
		goto fail;
	}
	sc->ste_ldata.ste_rx_list_paddr = ctx.ste_busaddr;

	/* Create DMA maps for Tx buffers. */
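	/*
	 * Pre-create one dmamap per descriptor here so that
	 * ste_encap() and ste_newbuf() never have to allocate maps
	 * in the data path.
	 */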
	for (i = 0; i < STE_TX_LIST_CNT; i++) {
		txc = &sc->ste_cdata.ste_tx_chain[i];
		txc->ste_ptr = NULL;
		txc->ste_mbuf = NULL;
		txc->ste_next = NULL;
		txc->ste_phys = 0;
		txc->ste_map = NULL;
		error = bus_dmamap_create(sc->ste_cdata.ste_tx_tag, 0,
		    &txc->ste_map);
		if (error != 0) {
			device_printf(sc->ste_dev,
			    "could not create Tx dmamap.\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->ste_cdata.ste_rx_tag, 0,
	    &sc->ste_cdata.ste_rx_sparemap)) != 0) {
		device_printf(sc->ste_dev,
		    "could not create spare Rx dmamap.\n");
		goto fail;
	}
	for (i = 0; i < STE_RX_LIST_CNT; i++) {
		rxc = &sc->ste_cdata.ste_rx_chain[i];
		rxc->ste_ptr = NULL;
		rxc->ste_mbuf = NULL;
		rxc->ste_next = NULL;
		rxc->ste_map = NULL;
		error = bus_dmamap_create(sc->ste_cdata.ste_rx_tag, 0,
		    &rxc->ste_map);
		if (error != 0) {
			device_printf(sc->ste_dev,
			    "could not create Rx dmamap.\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
ste_dma_free(struct ste_softc *sc)
{
	struct ste_chain *txc;
	struct ste_chain_onefrag *rxc;
	int i;

	/* Tx buffers. */
	if (sc->ste_cdata.ste_tx_tag != NULL) {
		for (i = 0; i < STE_TX_LIST_CNT; i++) {
			txc = &sc->ste_cdata.ste_tx_chain[i];
			if (txc->ste_map != NULL) {
				bus_dmamap_destroy(sc->ste_cdata.ste_tx_tag,
				    txc->ste_map);
				txc->ste_map = NULL;
			}
		}
		bus_dma_tag_destroy(sc->ste_cdata.ste_tx_tag);
		sc->ste_cdata.ste_tx_tag = NULL;
	}
	/* Rx buffers. */
	if (sc->ste_cdata.ste_rx_tag != NULL) {
		for (i = 0; i < STE_RX_LIST_CNT; i++) {
			rxc = &sc->ste_cdata.ste_rx_chain[i];
			if (rxc->ste_map != NULL) {
				bus_dmamap_destroy(sc->ste_cdata.ste_rx_tag,
				    rxc->ste_map);
				rxc->ste_map = NULL;
			}
		}
		if (sc->ste_cdata.ste_rx_sparemap != NULL) {
			bus_dmamap_destroy(sc->ste_cdata.ste_rx_tag,
			    sc->ste_cdata.ste_rx_sparemap);
			sc->ste_cdata.ste_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->ste_cdata.ste_rx_tag);
		sc->ste_cdata.ste_rx_tag = NULL;
	}
	/* Tx descriptor list. */
	if (sc->ste_cdata.ste_tx_list_tag != NULL) {
		if (sc->ste_cdata.ste_tx_list_map != NULL)
			bus_dmamap_unload(sc->ste_cdata.ste_tx_list_tag,
			    sc->ste_cdata.ste_tx_list_map);
		if (sc->ste_cdata.ste_tx_list_map != NULL &&
		    sc->ste_ldata.ste_tx_list != NULL)
			bus_dmamem_free(sc->ste_cdata.ste_tx_list_tag,
			    sc->ste_ldata.ste_tx_list,
			    sc->ste_cdata.ste_tx_list_map);
		sc->ste_ldata.ste_tx_list = NULL;
		sc->ste_cdata.ste_tx_list_map = NULL;
		bus_dma_tag_destroy(sc->ste_cdata.ste_tx_list_tag);
		sc->ste_cdata.ste_tx_list_tag = NULL;
	}
	/* Rx descriptor list. */
	if (sc->ste_cdata.ste_rx_list_tag != NULL) {
		if (sc->ste_cdata.ste_rx_list_map != NULL)
			bus_dmamap_unload(sc->ste_cdata.ste_rx_list_tag,
			    sc->ste_cdata.ste_rx_list_map);
		if (sc->ste_cdata.ste_rx_list_map != NULL &&
		    sc->ste_ldata.ste_rx_list != NULL)
			bus_dmamem_free(sc->ste_cdata.ste_rx_list_tag,
			    sc->ste_ldata.ste_rx_list,
			    sc->ste_cdata.ste_rx_list_map);
		sc->ste_ldata.ste_rx_list = NULL;
		sc->ste_cdata.ste_rx_list_map = NULL;
		bus_dma_tag_destroy(sc->ste_cdata.ste_rx_list_tag);
		sc->ste_cdata.ste_rx_list_tag = NULL;
	}
	if (sc->ste_cdata.ste_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->ste_cdata.ste_parent_tag);
		sc->ste_cdata.ste_parent_tag = NULL;
	}
}

static int
ste_newbuf(struct ste_softc *sc, struct ste_chain_onefrag *rxc)
{
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int error, nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, ETHER_ALIGN);

	if ((error = bus_dmamap_load_mbuf_sg(sc->ste_cdata.ste_rx_tag,
	    sc->ste_cdata.ste_rx_sparemap, m, segs, &nsegs, 0)) != 0) {
		m_freem(m);
		return (error);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	if (rxc->ste_mbuf != NULL) {
		bus_dmamap_sync(sc->ste_cdata.ste_rx_tag, rxc->ste_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->ste_cdata.ste_rx_tag, rxc->ste_map);
	}
	map = rxc->ste_map;
	rxc->ste_map = sc->ste_cdata.ste_rx_sparemap;
	sc->ste_cdata.ste_rx_sparemap = map;
	bus_dmamap_sync(sc->ste_cdata.ste_rx_tag, rxc->ste_map,
	    BUS_DMASYNC_PREREAD);
	rxc->ste_mbuf = m;
	rxc->ste_ptr->ste_status = 0;
	rxc->ste_ptr->ste_frag.ste_addr = htole32(segs[0].ds_addr);
	rxc->ste_ptr->ste_frag.ste_len = htole32(segs[0].ds_len |
	    STE_FRAG_LAST);
	return (0);
}

static int
ste_init_rx_list(struct ste_softc *sc)
{
	struct ste_chain_data *cd;
	struct ste_list_data *ld;
	int error, i;

	sc->ste_int_rx_act = 0;
	cd = &sc->ste_cdata;
	ld = &sc->ste_ldata;
	bzero(ld->ste_rx_list, STE_RX_LIST_SZ);
	for (i = 0; i < STE_RX_LIST_CNT; i++) {
		cd->ste_rx_chain[i].ste_ptr = &ld->ste_rx_list[i];
		error = ste_newbuf(sc, &cd->ste_rx_chain[i]);
		if (error != 0)
			return (error);
		if (i == (STE_RX_LIST_CNT - 1)) {
			cd->ste_rx_chain[i].ste_next = &cd->ste_rx_chain[0];
			ld->ste_rx_list[i].ste_next =
			    htole32(ld->ste_rx_list_paddr +
			    (sizeof(struct ste_desc_onefrag) * 0));
		} else {
			cd->ste_rx_chain[i].ste_next = &cd->ste_rx_chain[i + 1];
			ld->ste_rx_list[i].ste_next =
			    htole32(ld->ste_rx_list_paddr +
			    (sizeof(struct ste_desc_onefrag) * (i + 1)));
		}
	}

	cd->ste_rx_head = &cd->ste_rx_chain[0];
	bus_dmamap_sync(sc->ste_cdata.ste_rx_list_tag,
	    sc->ste_cdata.ste_rx_list_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

static void
ste_init_tx_list(struct ste_softc *sc)
{
	struct ste_chain_data *cd;
	struct ste_list_data *ld;
	int i;

	cd = &sc->ste_cdata;
	ld = &sc->ste_ldata;
	bzero(ld->ste_tx_list, STE_TX_LIST_SZ);
	for (i = 0; i < STE_TX_LIST_CNT; i++) {
		cd->ste_tx_chain[i].ste_ptr = &ld->ste_tx_list[i];
		cd->ste_tx_chain[i].ste_mbuf = NULL;
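		/*
		 * Link the descriptors into a ring; the last entry
		 * points back to the first.
		 */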
		if (i == (STE_TX_LIST_CNT - 1)) {
			cd->ste_tx_chain[i].ste_next = &cd->ste_tx_chain[0];
			cd->ste_tx_chain[i].ste_phys = htole32(STE_ADDR_LO(
			    ld->ste_tx_list_paddr +
			    (sizeof(struct ste_desc) * 0)));
		} else {
			cd->ste_tx_chain[i].ste_next = &cd->ste_tx_chain[i + 1];
			cd->ste_tx_chain[i].ste_phys = htole32(STE_ADDR_LO(
			    ld->ste_tx_list_paddr +
			    (sizeof(struct ste_desc) * (i + 1))));
		}
	}

	cd->ste_last_tx = NULL;
	cd->ste_tx_prod = 0;
	cd->ste_tx_cons = 0;
	cd->ste_tx_cnt = 0;

	bus_dmamap_sync(sc->ste_cdata.ste_tx_list_tag,
	    sc->ste_cdata.ste_tx_list_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static void
ste_init(void *xsc)
{
	struct ste_softc *sc;

	sc = xsc;
	STE_LOCK(sc);
	ste_init_locked(sc);
	STE_UNLOCK(sc);
}

static void
ste_init_locked(struct ste_softc *sc)
{
	struct ifnet *ifp;
	struct mii_data *mii;
	uint8_t val;
	int i;

	STE_LOCK_ASSERT(sc);
	ifp = sc->ste_ifp;
	mii = device_get_softc(sc->ste_miibus);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	ste_stop(sc);
	/* Reset the chip to a known state. */
	ste_reset(sc);

	/* Init our MAC address */
	for (i = 0; i < ETHER_ADDR_LEN; i += 2) {
		CSR_WRITE_2(sc, STE_PAR0 + i,
		    ((IF_LLADDR(sc->ste_ifp)[i] & 0xff) |
		    IF_LLADDR(sc->ste_ifp)[i + 1] << 8));
	}

	/* Init RX list */
	if (ste_init_rx_list(sc) != 0) {
		device_printf(sc->ste_dev,
		    "initialization failed: no memory for RX buffers\n");
		ste_stop(sc);
		return;
	}

	/* Set RX polling interval */
	CSR_WRITE_1(sc, STE_RX_DMAPOLL_PERIOD, 64);

	/* Init TX descriptors */
	ste_init_tx_list(sc);

	/* Clear and disable WOL. */
	val = CSR_READ_1(sc, STE_WAKE_EVENT);
	val &= ~(STE_WAKEEVENT_WAKEPKT_ENB | STE_WAKEEVENT_MAGICPKT_ENB |
	    STE_WAKEEVENT_LINKEVT_ENB | STE_WAKEEVENT_WAKEONLAN_ENB);
	CSR_WRITE_1(sc, STE_WAKE_EVENT, val);

	/* Set the TX freethresh value */
	CSR_WRITE_1(sc, STE_TX_DMABURST_THRESH, STE_PACKET_SIZE >> 8);

	/* Set the TX start threshold for best performance. */
	CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh);

	/* Set the TX reclaim threshold. */
	CSR_WRITE_1(sc, STE_TX_RECLAIM_THRESH, (STE_PACKET_SIZE >> 4));

	/* Accept VLAN length packets */
	CSR_WRITE_2(sc, STE_MAX_FRAMELEN, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN);

	/* Set up the RX filter. */
	ste_rxfilter(sc);

	/* Load the address of the RX list. */
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL);
	ste_wait(sc);
	CSR_WRITE_4(sc, STE_RX_DMALIST_PTR,
	    STE_ADDR_LO(sc->ste_ldata.ste_rx_list_paddr));
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);

	/* Set TX polling interval (defer until we TX first packet). */
	CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 0);

	/* Load address of the TX list */
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
	ste_wait(sc);
	CSR_WRITE_4(sc, STE_TX_DMALIST_PTR, 0);
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
	STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
	ste_wait(sc);
	/* Select 3.2us timer. */
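	/*
	 * Clearing both countdown bits selects the 3.2us tick; this
	 * is the time base for the STE_COUNTDOWN one-shot timer that
	 * ste_intr() arms for Rx interrupt moderation.
	 */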
	STE_CLRBIT4(sc, STE_DMACTL, STE_DMACTL_COUNTDOWN_SPEED |
	    STE_DMACTL_COUNTDOWN_MODE);

	/* Enable receiver and transmitter */
	CSR_WRITE_2(sc, STE_MACCTL0, 0);
	CSR_WRITE_2(sc, STE_MACCTL1, 0);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_ENABLE);
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_ENABLE);

	/* Enable stats counters. */
	STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_ENABLE);
	/* Clear stats counters. */
	ste_stats_clear(sc);

	CSR_WRITE_2(sc, STE_COUNTDOWN, 0);
	CSR_WRITE_2(sc, STE_ISR, 0xFFFF);
#ifdef DEVICE_POLLING
	/* Disable interrupts if we are polling. */
	if (ifp->if_capenable & IFCAP_POLLING)
		CSR_WRITE_2(sc, STE_IMR, 0);
	else
#endif
	/* Enable interrupts. */
	CSR_WRITE_2(sc, STE_IMR, STE_INTRS);

	sc->ste_flags &= ~STE_FLAG_LINK;
	/* Switch to the current media. */
	mii_mediachg(mii);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&sc->ste_callout, hz, ste_tick, sc);
}

static void
ste_stop(struct ste_softc *sc)
{
	struct ifnet *ifp;
	struct ste_chain_onefrag *cur_rx;
	struct ste_chain *cur_tx;
	uint32_t val;
	int i;

	STE_LOCK_ASSERT(sc);
	ifp = sc->ste_ifp;

	callout_stop(&sc->ste_callout);
	sc->ste_timer = 0;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING|IFF_DRV_OACTIVE);

	CSR_WRITE_2(sc, STE_IMR, 0);
	CSR_WRITE_2(sc, STE_COUNTDOWN, 0);
	/* Stop pending DMA. */
	val = CSR_READ_4(sc, STE_DMACTL);
	val |= STE_DMACTL_TXDMA_STALL | STE_DMACTL_RXDMA_STALL;
	CSR_WRITE_4(sc, STE_DMACTL, val);
	ste_wait(sc);
	/* Disable auto-polling. */
	CSR_WRITE_1(sc, STE_RX_DMAPOLL_PERIOD, 0);
	CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 0);
	/* Nullify DMA address to stop any further DMA. */
	CSR_WRITE_4(sc, STE_RX_DMALIST_PTR, 0);
	CSR_WRITE_4(sc, STE_TX_DMALIST_PTR, 0);
	/* Stop TX/RX MAC. */
	val = CSR_READ_2(sc, STE_MACCTL1);
	val |= STE_MACCTL1_TX_DISABLE | STE_MACCTL1_RX_DISABLE |
	    STE_MACCTL1_STATS_DISABLE;
	CSR_WRITE_2(sc, STE_MACCTL1, val);
	for (i = 0; i < STE_TIMEOUT; i++) {
		DELAY(10);
		if ((CSR_READ_2(sc, STE_MACCTL1) & (STE_MACCTL1_TX_DISABLE |
		    STE_MACCTL1_RX_DISABLE | STE_MACCTL1_STATS_DISABLE)) == 0)
			break;
	}
	if (i == STE_TIMEOUT)
		device_printf(sc->ste_dev, "Stopping MAC timed out\n");
	/* Acknowledge any pending interrupts. */
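	/*
	 * Note that reading STE_ISR_ACK also has the side effect of
	 * clearing STE_IMR; interrupts were already masked above.
	 */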
	CSR_READ_2(sc, STE_ISR_ACK);
	ste_stats_update(sc);

	for (i = 0; i < STE_RX_LIST_CNT; i++) {
		cur_rx = &sc->ste_cdata.ste_rx_chain[i];
		if (cur_rx->ste_mbuf != NULL) {
			bus_dmamap_sync(sc->ste_cdata.ste_rx_tag,
			    cur_rx->ste_map, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->ste_cdata.ste_rx_tag,
			    cur_rx->ste_map);
			m_freem(cur_rx->ste_mbuf);
			cur_rx->ste_mbuf = NULL;
		}
	}

	for (i = 0; i < STE_TX_LIST_CNT; i++) {
		cur_tx = &sc->ste_cdata.ste_tx_chain[i];
		if (cur_tx->ste_mbuf != NULL) {
			bus_dmamap_sync(sc->ste_cdata.ste_tx_tag,
			    cur_tx->ste_map, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->ste_cdata.ste_tx_tag,
			    cur_tx->ste_map);
			m_freem(cur_tx->ste_mbuf);
			cur_tx->ste_mbuf = NULL;
		}
	}
}

static void
ste_reset(struct ste_softc *sc)
{
	uint32_t ctl;
	int i;

	ctl = CSR_READ_4(sc, STE_ASICCTL);
	ctl |= STE_ASICCTL_GLOBAL_RESET | STE_ASICCTL_RX_RESET |
	    STE_ASICCTL_TX_RESET | STE_ASICCTL_DMA_RESET |
	    STE_ASICCTL_FIFO_RESET | STE_ASICCTL_NETWORK_RESET |
	    STE_ASICCTL_AUTOINIT_RESET | STE_ASICCTL_HOST_RESET |
	    STE_ASICCTL_EXTRESET_RESET;
	CSR_WRITE_4(sc, STE_ASICCTL, ctl);
	CSR_READ_4(sc, STE_ASICCTL);
	/*
	 * Due to the need to access the EEPROM, the controller can
	 * take up to 1ms to complete a global reset.
	 */
	DELAY(1000);

	for (i = 0; i < STE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, STE_ASICCTL) & STE_ASICCTL_RESET_BUSY))
			break;
		DELAY(10);
	}

	if (i == STE_TIMEOUT)
		device_printf(sc->ste_dev, "global reset never completed\n");
}

static void
ste_restart_tx(struct ste_softc *sc)
{
	uint16_t mac;
	int i;

	for (i = 0; i < STE_TIMEOUT; i++) {
		mac = CSR_READ_2(sc, STE_MACCTL1);
		mac |= STE_MACCTL1_TX_ENABLE;
		CSR_WRITE_2(sc, STE_MACCTL1, mac);
		mac = CSR_READ_2(sc, STE_MACCTL1);
		if ((mac & STE_MACCTL1_TX_ENABLED) != 0)
			break;
		DELAY(10);
	}

	if (i == STE_TIMEOUT)
		device_printf(sc->ste_dev, "starting Tx failed\n");
}

static int
ste_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ste_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error = 0, mask;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;

	switch (command) {
	case SIOCSIFFLAGS:
		STE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
			    ((ifp->if_flags ^ sc->ste_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
				ste_rxfilter(sc);
			else
				ste_init_locked(sc);
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			ste_stop(sc);
		sc->ste_if_flags = ifp->if_flags;
		STE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		STE_LOCK(sc);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			ste_rxfilter(sc);
		STE_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->ste_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		STE_LOCK(sc);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if ((mask & IFCAP_POLLING) != 0 &&
		    (IFCAP_POLLING & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_POLLING;
			if ((IFCAP_POLLING & ifp->if_capenable) != 0) {
				error = ether_poll_register(ste_poll, ifp);
				if (error != 0) {
					STE_UNLOCK(sc);
					break;
				}
				/* Disable interrupts. */
				CSR_WRITE_2(sc, STE_IMR, 0);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupts. */
				CSR_WRITE_2(sc, STE_IMR, STE_INTRS);
			}
		}
#endif /* DEVICE_POLLING */
		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		STE_UNLOCK(sc);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

static int
ste_encap(struct ste_softc *sc, struct mbuf **m_head, struct ste_chain *txc)
{
	struct ste_frag *frag;
	struct mbuf *m;
	struct ste_desc *desc;
	bus_dma_segment_t txsegs[STE_MAXFRAGS];
	int error, i, nsegs;

	STE_LOCK_ASSERT(sc);
	M_ASSERTPKTHDR((*m_head));

	error = bus_dmamap_load_mbuf_sg(sc->ste_cdata.ste_tx_tag,
	    txc->ste_map, *m_head, txsegs, &nsegs, 0);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_DONTWAIT, STE_MAXFRAGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->ste_cdata.ste_tx_tag,
		    txc->ste_map, *m_head, txsegs, &nsegs, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}
	bus_dmamap_sync(sc->ste_cdata.ste_tx_tag, txc->ste_map,
	    BUS_DMASYNC_PREWRITE);

	desc = txc->ste_ptr;
	for (i = 0; i < nsegs; i++) {
		frag = &desc->ste_frags[i];
		frag->ste_addr = htole32(STE_ADDR_LO(txsegs[i].ds_addr));
		frag->ste_len = htole32(txsegs[i].ds_len);
	}
	desc->ste_frags[i - 1].ste_len |= htole32(STE_FRAG_LAST);
	/*
	 * Because we use Tx polling we can't chain multiple
	 * Tx descriptors here.  Otherwise we would race with the
	 * controller.
	 */
	desc->ste_next = 0;
	if ((sc->ste_cdata.ste_tx_prod % STE_TX_INTR_FRAMES) == 0)
		desc->ste_ctl = htole32(STE_TXCTL_ALIGN_DIS |
		    STE_TXCTL_DMAINTR);
	else
		desc->ste_ctl = htole32(STE_TXCTL_ALIGN_DIS);
	txc->ste_mbuf = *m_head;
	STE_INC(sc->ste_cdata.ste_tx_prod, STE_TX_LIST_CNT);
	sc->ste_cdata.ste_tx_cnt++;

	return (0);
}

static void
ste_start(struct ifnet *ifp)
{
	struct ste_softc *sc;

	sc = ifp->if_softc;
	STE_LOCK(sc);
	ste_start_locked(ifp);
	STE_UNLOCK(sc);
}

static void
ste_start_locked(struct ifnet *ifp)
{
	struct ste_softc *sc;
	struct ste_chain *cur_tx;
	struct mbuf *m_head = NULL;
	int enq;

	sc = ifp->if_softc;
	STE_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->ste_flags & STE_FLAG_LINK) == 0)
		return;

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
		if (sc->ste_cdata.ste_tx_cnt == STE_TX_LIST_CNT - 1) {
			/*
			 * The controller may have a cached copy of the
			 * last used next pointer, so we have to reserve
			 * one TFD to avoid TFD overruns.
			 */
static void
ste_start(struct ifnet *ifp)
{
	struct ste_softc *sc;

	sc = ifp->if_softc;
	STE_LOCK(sc);
	ste_start_locked(ifp);
	STE_UNLOCK(sc);
}

static void
ste_start_locked(struct ifnet *ifp)
{
	struct ste_softc *sc;
	struct ste_chain *cur_tx;
	struct mbuf *m_head = NULL;
	int enq;

	sc = ifp->if_softc;
	STE_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->ste_flags & STE_FLAG_LINK) == 0)
		return;

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) {
		if (sc->ste_cdata.ste_tx_cnt == STE_TX_LIST_CNT - 1) {
			/*
			 * The controller may have a cached copy of the
			 * last used next pointer, so we have to reserve
			 * one TFD to avoid TFD overruns.
			 */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		cur_tx = &sc->ste_cdata.ste_tx_chain[sc->ste_cdata.ste_tx_prod];
		if (ste_encap(sc, &m_head, cur_tx) != 0) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			break;
		}
		if (sc->ste_cdata.ste_last_tx == NULL) {
			/* First frame: hand the list head to the chip. */
			bus_dmamap_sync(sc->ste_cdata.ste_tx_list_tag,
			    sc->ste_cdata.ste_tx_list_map,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL);
			ste_wait(sc);
			CSR_WRITE_4(sc, STE_TX_DMALIST_PTR,
			    STE_ADDR_LO(sc->ste_ldata.ste_tx_list_paddr));
			CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 64);
			STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL);
			ste_wait(sc);
		} else {
			/* Chain the new frame after the last queued one. */
			sc->ste_cdata.ste_last_tx->ste_ptr->ste_next =
			    sc->ste_cdata.ste_last_tx->ste_phys;
			bus_dmamap_sync(sc->ste_cdata.ste_tx_list_tag,
			    sc->ste_cdata.ste_tx_list_map,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		}
		sc->ste_cdata.ste_last_tx = cur_tx;

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this
		 * frame to it.
		 */
		BPF_MTAP(ifp, m_head);
	}

	if (enq > 0)
		sc->ste_timer = STE_TX_TIMEOUT;
}

static void
ste_watchdog(struct ste_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->ste_ifp;
	STE_LOCK_ASSERT(sc);

	/* Return if the timer is not armed or still counting down. */
	if (sc->ste_timer == 0 || --sc->ste_timer)
		return;

	ifp->if_oerrors++;
	if_printf(ifp, "watchdog timeout\n");

	ste_txeof(sc);
	ste_txeoc(sc);
	ste_rxeof(sc, -1);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	ste_init_locked(sc);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		ste_start_locked(ifp);
}

static int
ste_shutdown(device_t dev)
{

	return (ste_suspend(dev));
}

static int
ste_suspend(device_t dev)
{
	struct ste_softc *sc;

	sc = device_get_softc(dev);

	STE_LOCK(sc);
	ste_stop(sc);
	ste_setwol(sc);
	STE_UNLOCK(sc);

	return (0);
}
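
/*
 * Note that ste_shutdown() and ste_suspend() intentionally share one
 * path: both quiesce the chip via ste_stop() and then let ste_setwol()
 * arm (or explicitly disarm) wakeup events, so the powered-down chip
 * can still wake the system on a Magic Packet when WOL is enabled.
 */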
static int
ste_resume(device_t dev)
{
	struct ste_softc *sc;
	struct ifnet *ifp;
	int pmc;
	uint16_t pmstat;

	sc = device_get_softc(dev);
	STE_LOCK(sc);
	if (pci_find_cap(sc->ste_dev, PCIY_PMG, &pmc) == 0) {
		/* Disable PME and clear PME status. */
		pmstat = pci_read_config(sc->ste_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
			pmstat &= ~PCIM_PSTAT_PMEENABLE;
			pci_write_config(sc->ste_dev,
			    pmc + PCIR_POWER_STATUS, pmstat, 2);
		}
	}
	ifp = sc->ste_ifp;
	if ((ifp->if_flags & IFF_UP) != 0) {
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		ste_init_locked(sc);
	}
	STE_UNLOCK(sc);

	return (0);
}

#define	STE_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
#define	STE_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
	SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
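
/*
 * ste_sysctl_node() below exports the MAC statistics and the Rx
 * interrupt moderation knob.  Illustrative usage (assuming unit 0):
 *
 *	sysctl dev.ste.0.stats           # dump the Rx/Tx MAC counters
 *	sysctl dev.ste.0.int_rx_mod=64   # change Rx interrupt moderation
 *
 * The initial int_rx_mod value may also come from a loader(8) device
 * hint, hint.ste.0.int_rx_mod, which resource_int_value() reads at
 * attach time.
 */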
static void
ste_sysctl_node(struct ste_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *parent;
	struct sysctl_oid *tree;
	struct ste_hw_stats *stats;

	stats = &sc->ste_stats;
	ctx = device_get_sysctl_ctx(sc->ste_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->ste_dev));

	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "int_rx_mod",
	    CTLFLAG_RW, &sc->ste_int_rx_mod, 0, "ste RX interrupt moderation");
	/* Pull in device tunables. */
	sc->ste_int_rx_mod = STE_IM_RX_TIMER_DEFAULT;
	resource_int_value(device_get_name(sc->ste_dev),
	    device_get_unit(sc->ste_dev), "int_rx_mod", &sc->ste_int_rx_mod);

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "STE statistics");
	parent = SYSCTL_CHILDREN(tree);

	/* Rx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
	    NULL, "Rx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	STE_SYSCTL_STAT_ADD64(ctx, child, "good_octets",
	    &stats->rx_bytes, "Good octets");
	STE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->rx_frames, "Good frames");
	STE_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
	    &stats->rx_bcast_frames, "Good broadcast frames");
	STE_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
	    &stats->rx_mcast_frames, "Good multicast frames");
	STE_SYSCTL_STAT_ADD32(ctx, child, "lost_frames",
	    &stats->rx_lost_frames, "Lost frames");

	/* Tx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
	    NULL, "Tx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	STE_SYSCTL_STAT_ADD64(ctx, child, "good_octets",
	    &stats->tx_bytes, "Good octets");
	STE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->tx_frames, "Good frames");
	STE_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
	    &stats->tx_bcast_frames, "Good broadcast frames");
	STE_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
	    &stats->tx_mcast_frames, "Good multicast frames");
	STE_SYSCTL_STAT_ADD32(ctx, child, "carrier_errs",
	    &stats->tx_carrsense_errs, "Carrier sense errors");
	STE_SYSCTL_STAT_ADD32(ctx, child, "single_colls",
	    &stats->tx_single_colls, "Single collisions");
	STE_SYSCTL_STAT_ADD32(ctx, child, "multi_colls",
	    &stats->tx_multi_colls, "Multiple collisions");
	STE_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
	    &stats->tx_late_colls, "Late collisions");
	STE_SYSCTL_STAT_ADD32(ctx, child, "defers",
	    &stats->tx_frames_defered, "Frames with deferrals");
	STE_SYSCTL_STAT_ADD32(ctx, child, "excess_defers",
	    &stats->tx_excess_defers, "Frames with excessive deferrals");
	STE_SYSCTL_STAT_ADD32(ctx, child, "abort",
	    &stats->tx_abort, "Aborted frames due to excessive collisions");
}

#undef STE_SYSCTL_STAT_ADD32
#undef STE_SYSCTL_STAT_ADD64

static void
ste_setwol(struct ste_softc *sc)
{
	struct ifnet *ifp;
	uint16_t pmstat;
	uint8_t val;
	int pmc;

	STE_LOCK_ASSERT(sc);

	if (pci_find_cap(sc->ste_dev, PCIY_PMG, &pmc) != 0) {
		/* No PM capability; just make sure WOL is disabled. */
		CSR_READ_1(sc, STE_WAKE_EVENT);
		CSR_WRITE_1(sc, STE_WAKE_EVENT, 0);
		return;
	}

	ifp = sc->ste_ifp;
	val = CSR_READ_1(sc, STE_WAKE_EVENT);
	val &= ~(STE_WAKEEVENT_WAKEPKT_ENB | STE_WAKEEVENT_MAGICPKT_ENB |
	    STE_WAKEEVENT_LINKEVT_ENB | STE_WAKEEVENT_WAKEONLAN_ENB);
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
		val |= STE_WAKEEVENT_MAGICPKT_ENB | STE_WAKEEVENT_WAKEONLAN_ENB;
	CSR_WRITE_1(sc, STE_WAKE_EVENT, val);
	/* Request PME. */
	pmstat = pci_read_config(sc->ste_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->ste_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
}
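
/*
 * Illustrative usage note: WOL is driven by the interface capability
 * flags, e.g. `ifconfig ste0 wolmagic` (assuming unit 0) enables
 * IFCAP_WOL_MAGIC, after which suspend/shutdown passes through
 * ste_setwol(), programming STE_WAKE_EVENT for Magic Packet wakeup
 * and asserting PME_ENABLE in the PCI power management registers.
 */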