1 /*- 2 * Copyright (c) 1997, 1998 3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Bill Paul. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 /* 37 * RealTek 8129/8139 PCI NIC driver 38 * 39 * Supports several extremely cheap PCI 10/100 adapters based on 40 * the RealTek chipset. Datasheets can be obtained from 41 * www.realtek.com.tw. 42 * 43 * Written by Bill Paul <wpaul@ctr.columbia.edu> 44 * Electrical Engineering Department 45 * Columbia University, New York City 46 */ 47 /* 48 * The RealTek 8139 PCI NIC redefines the meaning of 'low end.' This is 49 * probably the worst PCI ethernet controller ever made, with the possible 50 * exception of the FEAST chip made by SMC. The 8139 supports bus-master 51 * DMA, but it has a terrible interface that nullifies any performance 52 * gains that bus-master DMA usually offers. 53 * 54 * For transmission, the chip offers a series of four TX descriptor 55 * registers. Each transmit frame must be in a contiguous buffer, aligned 56 * on a longword (32-bit) boundary. This means we almost always have to 57 * do mbuf copies in order to transmit a frame, except in the unlikely 58 * case where a) the packet fits into a single mbuf, and b) the packet 59 * is 32-bit aligned within the mbuf's data area. The presence of only 60 * four descriptor registers means that we can never have more than four 61 * packets queued for transmission at any one time. 62 * 63 * Reception is not much better. The driver has to allocate a single large 64 * buffer area (up to 64K in size) into which the chip will DMA received 65 * frames. Because we don't know where within this region received packets 66 * will begin or end, we have no choice but to copy data from the buffer 67 * area into mbufs in order to pass the packets up to the higher protocol 68 * levels. 
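 *
 * In practice the TX constraints above boil down to one test, which is
 * roughly what rl_encap() does much further down. A minimal sketch (the
 * helper name is made up for illustration and is not part of the driver):
 *
 *	// Must the frame be copied/defragmented before the chip can see it?
 *	static int
 *	rl_tx_needs_copy(struct mbuf *m, int padlen)
 *	{
 *		if (m->m_next != NULL)			// more than one buffer
 *			return (1);
 *		if ((mtod(m, uintptr_t) & 3) != 0)	// not longword aligned
 *			return (1);
 *		if (padlen > 0 && M_TRAILINGSPACE(m) < padlen)
 *			return (1);			// no room to pad a runt
 *		return (0);
 *	}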
69 * 70 * It's impossible given this rotten design to really achieve decent 71 * performance at 100Mbps, unless you happen to have a 400Mhz PII or 72 * some equally overmuscled CPU to drive it. 73 * 74 * On the bright side, the 8139 does have a built-in PHY, although 75 * rather than using an MDIO serial interface like most other NICs, the 76 * PHY registers are directly accessible through the 8139's register 77 * space. The 8139 supports autonegotiation, as well as a 64-bit multicast 78 * filter. 79 * 80 * The 8129 chip is an older version of the 8139 that uses an external PHY 81 * chip. The 8129 has a serial MDIO interface for accessing the MII where 82 * the 8139 lets you directly access the on-board PHY registers. We need 83 * to select which interface to use depending on the chip type. 84 */ 85 86 #ifdef HAVE_KERNEL_OPTION_HEADERS 87 #include "opt_device_polling.h" 88 #endif 89 90 #include <sys/param.h> 91 #include <sys/endian.h> 92 #include <sys/systm.h> 93 #include <sys/sockio.h> 94 #include <sys/mbuf.h> 95 #include <sys/malloc.h> 96 #include <sys/kernel.h> 97 #include <sys/module.h> 98 #include <sys/socket.h> 99 #include <sys/sysctl.h> 100 101 #include <net/if.h> 102 #include <net/if_var.h> 103 #include <net/if_arp.h> 104 #include <net/ethernet.h> 105 #include <net/if_dl.h> 106 #include <net/if_media.h> 107 #include <net/if_types.h> 108 109 #include <net/bpf.h> 110 111 #include <machine/bus.h> 112 #include <machine/resource.h> 113 #include <sys/bus.h> 114 #include <sys/rman.h> 115 116 #include <dev/mii/mii.h> 117 #include <dev/mii/mii_bitbang.h> 118 #include <dev/mii/miivar.h> 119 120 #include <dev/pci/pcireg.h> 121 #include <dev/pci/pcivar.h> 122 123 MODULE_DEPEND(rl, pci, 1, 1, 1); 124 MODULE_DEPEND(rl, ether, 1, 1, 1); 125 MODULE_DEPEND(rl, miibus, 1, 1, 1); 126 127 /* "device miibus" required. See GENERIC if you get errors here. */ 128 #include "miibus_if.h" 129 130 #include <dev/rl/if_rlreg.h> 131 132 /* 133 * Various supported device vendors/types and their names. 134 */ 135 static const struct rl_type rl_devs[] = { 136 { RT_VENDORID, RT_DEVICEID_8129, RL_8129, 137 "RealTek 8129 10/100BaseTX" }, 138 { RT_VENDORID, RT_DEVICEID_8139, RL_8139, 139 "RealTek 8139 10/100BaseTX" }, 140 { RT_VENDORID, RT_DEVICEID_8139D, RL_8139, 141 "RealTek 8139 10/100BaseTX" }, 142 { RT_VENDORID, RT_DEVICEID_8138, RL_8139, 143 "RealTek 8139 10/100BaseTX CardBus" }, 144 { RT_VENDORID, RT_DEVICEID_8100, RL_8139, 145 "RealTek 8100 10/100BaseTX" }, 146 { ACCTON_VENDORID, ACCTON_DEVICEID_5030, RL_8139, 147 "Accton MPX 5030/5038 10/100BaseTX" }, 148 { DELTA_VENDORID, DELTA_DEVICEID_8139, RL_8139, 149 "Delta Electronics 8139 10/100BaseTX" }, 150 { ADDTRON_VENDORID, ADDTRON_DEVICEID_8139, RL_8139, 151 "Addtron Technology 8139 10/100BaseTX" }, 152 { DLINK_VENDORID, DLINK_DEVICEID_520TX_REVC1, RL_8139, 153 "D-Link DFE-520TX (rev. 
C1) 10/100BaseTX" }, 154 { DLINK_VENDORID, DLINK_DEVICEID_530TXPLUS, RL_8139, 155 "D-Link DFE-530TX+ 10/100BaseTX" }, 156 { DLINK_VENDORID, DLINK_DEVICEID_690TXD, RL_8139, 157 "D-Link DFE-690TXD 10/100BaseTX" }, 158 { NORTEL_VENDORID, ACCTON_DEVICEID_5030, RL_8139, 159 "Nortel Networks 10/100BaseTX" }, 160 { COREGA_VENDORID, COREGA_DEVICEID_FETHERCBTXD, RL_8139, 161 "Corega FEther CB-TXD" }, 162 { COREGA_VENDORID, COREGA_DEVICEID_FETHERIICBTXD, RL_8139, 163 "Corega FEtherII CB-TXD" }, 164 { PEPPERCON_VENDORID, PEPPERCON_DEVICEID_ROLF, RL_8139, 165 "Peppercon AG ROL-F" }, 166 { PLANEX_VENDORID, PLANEX_DEVICEID_FNW3603TX, RL_8139, 167 "Planex FNW-3603-TX" }, 168 { PLANEX_VENDORID, PLANEX_DEVICEID_FNW3800TX, RL_8139, 169 "Planex FNW-3800-TX" }, 170 { CP_VENDORID, RT_DEVICEID_8139, RL_8139, 171 "Compaq HNE-300" }, 172 { LEVEL1_VENDORID, LEVEL1_DEVICEID_FPC0106TX, RL_8139, 173 "LevelOne FPC-0106TX" }, 174 { EDIMAX_VENDORID, EDIMAX_DEVICEID_EP4103DL, RL_8139, 175 "Edimax EP-4103DL CardBus" } 176 }; 177 178 static int rl_attach(device_t); 179 static int rl_detach(device_t); 180 static void rl_dmamap_cb(void *, bus_dma_segment_t *, int, int); 181 static int rl_dma_alloc(struct rl_softc *); 182 static void rl_dma_free(struct rl_softc *); 183 static void rl_eeprom_putbyte(struct rl_softc *, int); 184 static void rl_eeprom_getword(struct rl_softc *, int, uint16_t *); 185 static int rl_encap(struct rl_softc *, struct mbuf **); 186 static int rl_list_tx_init(struct rl_softc *); 187 static int rl_list_rx_init(struct rl_softc *); 188 static int rl_ifmedia_upd(struct ifnet *); 189 static void rl_ifmedia_sts(struct ifnet *, struct ifmediareq *); 190 static int rl_ioctl(struct ifnet *, u_long, caddr_t); 191 static void rl_intr(void *); 192 static void rl_init(void *); 193 static void rl_init_locked(struct rl_softc *sc); 194 static int rl_miibus_readreg(device_t, int, int); 195 static void rl_miibus_statchg(device_t); 196 static int rl_miibus_writereg(device_t, int, int, int); 197 #ifdef DEVICE_POLLING 198 static int rl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count); 199 static int rl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count); 200 #endif 201 static int rl_probe(device_t); 202 static void rl_read_eeprom(struct rl_softc *, uint8_t *, int, int, int); 203 static void rl_reset(struct rl_softc *); 204 static int rl_resume(device_t); 205 static int rl_rxeof(struct rl_softc *); 206 static void rl_rxfilter(struct rl_softc *); 207 static int rl_shutdown(device_t); 208 static void rl_start(struct ifnet *); 209 static void rl_start_locked(struct ifnet *); 210 static void rl_stop(struct rl_softc *); 211 static int rl_suspend(device_t); 212 static void rl_tick(void *); 213 static void rl_txeof(struct rl_softc *); 214 static void rl_watchdog(struct rl_softc *); 215 static void rl_setwol(struct rl_softc *); 216 static void rl_clrwol(struct rl_softc *); 217 218 /* 219 * MII bit-bang glue 220 */ 221 static uint32_t rl_mii_bitbang_read(device_t); 222 static void rl_mii_bitbang_write(device_t, uint32_t); 223 224 static const struct mii_bitbang_ops rl_mii_bitbang_ops = { 225 rl_mii_bitbang_read, 226 rl_mii_bitbang_write, 227 { 228 RL_MII_DATAOUT, /* MII_BIT_MDO */ 229 RL_MII_DATAIN, /* MII_BIT_MDI */ 230 RL_MII_CLK, /* MII_BIT_MDC */ 231 RL_MII_DIR, /* MII_BIT_DIR_HOST_PHY */ 232 0, /* MII_BIT_DIR_PHY_HOST */ 233 } 234 }; 235 236 static device_method_t rl_methods[] = { 237 /* Device interface */ 238 DEVMETHOD(device_probe, rl_probe), 239 DEVMETHOD(device_attach, rl_attach), 240 DEVMETHOD(device_detach, 
rl_detach), 241 DEVMETHOD(device_suspend, rl_suspend), 242 DEVMETHOD(device_resume, rl_resume), 243 DEVMETHOD(device_shutdown, rl_shutdown), 244 245 /* MII interface */ 246 DEVMETHOD(miibus_readreg, rl_miibus_readreg), 247 DEVMETHOD(miibus_writereg, rl_miibus_writereg), 248 DEVMETHOD(miibus_statchg, rl_miibus_statchg), 249 250 DEVMETHOD_END 251 }; 252 253 static driver_t rl_driver = { 254 "rl", 255 rl_methods, 256 sizeof(struct rl_softc) 257 }; 258 259 static devclass_t rl_devclass; 260 261 DRIVER_MODULE(rl, pci, rl_driver, rl_devclass, 0, 0); 262 DRIVER_MODULE(rl, cardbus, rl_driver, rl_devclass, 0, 0); 263 DRIVER_MODULE(miibus, rl, miibus_driver, miibus_devclass, 0, 0); 264 265 #define EE_SET(x) \ 266 CSR_WRITE_1(sc, RL_EECMD, \ 267 CSR_READ_1(sc, RL_EECMD) | x) 268 269 #define EE_CLR(x) \ 270 CSR_WRITE_1(sc, RL_EECMD, \ 271 CSR_READ_1(sc, RL_EECMD) & ~x) 272 273 /* 274 * Send a read command and address to the EEPROM, check for ACK. 275 */ 276 static void 277 rl_eeprom_putbyte(struct rl_softc *sc, int addr) 278 { 279 int d, i; 280 281 d = addr | sc->rl_eecmd_read; 282 283 /* 284 * Feed in each bit and strobe the clock. 285 */ 286 for (i = 0x400; i; i >>= 1) { 287 if (d & i) { 288 EE_SET(RL_EE_DATAIN); 289 } else { 290 EE_CLR(RL_EE_DATAIN); 291 } 292 DELAY(100); 293 EE_SET(RL_EE_CLK); 294 DELAY(150); 295 EE_CLR(RL_EE_CLK); 296 DELAY(100); 297 } 298 } 299 300 /* 301 * Read a word of data stored in the EEPROM at address 'addr.' 302 */ 303 static void 304 rl_eeprom_getword(struct rl_softc *sc, int addr, uint16_t *dest) 305 { 306 int i; 307 uint16_t word = 0; 308 309 /* Enter EEPROM access mode. */ 310 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL); 311 312 /* 313 * Send address of word we want to read. 314 */ 315 rl_eeprom_putbyte(sc, addr); 316 317 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL); 318 319 /* 320 * Start reading bits from EEPROM. 321 */ 322 for (i = 0x8000; i; i >>= 1) { 323 EE_SET(RL_EE_CLK); 324 DELAY(100); 325 if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT) 326 word |= i; 327 EE_CLR(RL_EE_CLK); 328 DELAY(100); 329 } 330 331 /* Turn off EEPROM access mode. */ 332 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 333 334 *dest = word; 335 } 336 337 /* 338 * Read a sequence of words from the EEPROM. 339 */ 340 static void 341 rl_read_eeprom(struct rl_softc *sc, uint8_t *dest, int off, int cnt, int swap) 342 { 343 int i; 344 uint16_t word = 0, *ptr; 345 346 for (i = 0; i < cnt; i++) { 347 rl_eeprom_getword(sc, off + i, &word); 348 ptr = (uint16_t *)(dest + (i * 2)); 349 if (swap) 350 *ptr = ntohs(word); 351 else 352 *ptr = word; 353 } 354 } 355 356 /* 357 * Read the MII serial port for the MII bit-bang module. 358 */ 359 static uint32_t 360 rl_mii_bitbang_read(device_t dev) 361 { 362 struct rl_softc *sc; 363 uint32_t val; 364 365 sc = device_get_softc(dev); 366 367 val = CSR_READ_1(sc, RL_MII); 368 CSR_BARRIER(sc, RL_MII, 1, 369 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 370 371 return (val); 372 } 373 374 /* 375 * Write the MII serial port for the MII bit-bang module. 
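 *
 * These two accessors only expose the RL_MII register; the actual MDIO
 * framing (preamble, opcode, PHY/register address, turnaround, data) is
 * done by the shared code in sys/dev/mii/mii_bitbang.c using the bit
 * definitions in rl_mii_bitbang_ops above. A simplified sketch of the
 * idea for one host-driven bit phase (helper name is illustrative, this
 * is not the real implementation):
 *
 *	// Clock out the low 'nbits' of 'data', MSB first, host driving MDIO.
 *	static void
 *	rl_mdio_sendbits(device_t dev, uint32_t data, int nbits)
 *	{
 *		uint32_t v;
 *		int i;
 *
 *		for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
 *			v = RL_MII_DIR;			// host -> PHY direction
 *			if (data & i)
 *				v |= RL_MII_DATAOUT;
 *			rl_mii_bitbang_write(dev, v);
 *			DELAY(1);
 *			rl_mii_bitbang_write(dev, v | RL_MII_CLK);
 *			DELAY(1);			// data sampled on this edge
 *		}
 *	}
 *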
376 */ 377 static void 378 rl_mii_bitbang_write(device_t dev, uint32_t val) 379 { 380 struct rl_softc *sc; 381 382 sc = device_get_softc(dev); 383 384 CSR_WRITE_1(sc, RL_MII, val); 385 CSR_BARRIER(sc, RL_MII, 1, 386 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 387 } 388 389 static int 390 rl_miibus_readreg(device_t dev, int phy, int reg) 391 { 392 struct rl_softc *sc; 393 uint16_t rl8139_reg; 394 395 sc = device_get_softc(dev); 396 397 if (sc->rl_type == RL_8139) { 398 switch (reg) { 399 case MII_BMCR: 400 rl8139_reg = RL_BMCR; 401 break; 402 case MII_BMSR: 403 rl8139_reg = RL_BMSR; 404 break; 405 case MII_ANAR: 406 rl8139_reg = RL_ANAR; 407 break; 408 case MII_ANER: 409 rl8139_reg = RL_ANER; 410 break; 411 case MII_ANLPAR: 412 rl8139_reg = RL_LPAR; 413 break; 414 case MII_PHYIDR1: 415 case MII_PHYIDR2: 416 return (0); 417 /* 418 * Allow the rlphy driver to read the media status 419 * register. If we have a link partner which does not 420 * support NWAY, this is the register which will tell 421 * us the results of parallel detection. 422 */ 423 case RL_MEDIASTAT: 424 return (CSR_READ_1(sc, RL_MEDIASTAT)); 425 default: 426 device_printf(sc->rl_dev, "bad phy register\n"); 427 return (0); 428 } 429 return (CSR_READ_2(sc, rl8139_reg)); 430 } 431 432 return (mii_bitbang_readreg(dev, &rl_mii_bitbang_ops, phy, reg)); 433 } 434 435 static int 436 rl_miibus_writereg(device_t dev, int phy, int reg, int data) 437 { 438 struct rl_softc *sc; 439 uint16_t rl8139_reg; 440 441 sc = device_get_softc(dev); 442 443 if (sc->rl_type == RL_8139) { 444 switch (reg) { 445 case MII_BMCR: 446 rl8139_reg = RL_BMCR; 447 break; 448 case MII_BMSR: 449 rl8139_reg = RL_BMSR; 450 break; 451 case MII_ANAR: 452 rl8139_reg = RL_ANAR; 453 break; 454 case MII_ANER: 455 rl8139_reg = RL_ANER; 456 break; 457 case MII_ANLPAR: 458 rl8139_reg = RL_LPAR; 459 break; 460 case MII_PHYIDR1: 461 case MII_PHYIDR2: 462 return (0); 463 break; 464 default: 465 device_printf(sc->rl_dev, "bad phy register\n"); 466 return (0); 467 } 468 CSR_WRITE_2(sc, rl8139_reg, data); 469 return (0); 470 } 471 472 mii_bitbang_writereg(dev, &rl_mii_bitbang_ops, phy, reg, data); 473 474 return (0); 475 } 476 477 static void 478 rl_miibus_statchg(device_t dev) 479 { 480 struct rl_softc *sc; 481 struct ifnet *ifp; 482 struct mii_data *mii; 483 484 sc = device_get_softc(dev); 485 mii = device_get_softc(sc->rl_miibus); 486 ifp = sc->rl_ifp; 487 if (mii == NULL || ifp == NULL || 488 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 489 return; 490 491 sc->rl_flags &= ~RL_FLAG_LINK; 492 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 493 (IFM_ACTIVE | IFM_AVALID)) { 494 switch (IFM_SUBTYPE(mii->mii_media_active)) { 495 case IFM_10_T: 496 case IFM_100_TX: 497 sc->rl_flags |= RL_FLAG_LINK; 498 break; 499 default: 500 break; 501 } 502 } 503 /* 504 * RealTek controllers do not provide any interface to 505 * Tx/Rx MACs for resolved speed, duplex and flow-control 506 * parameters. 507 */ 508 } 509 510 /* 511 * Program the 64-bit multicast hash filter. 512 */ 513 static void 514 rl_rxfilter(struct rl_softc *sc) 515 { 516 struct ifnet *ifp = sc->rl_ifp; 517 int h = 0; 518 uint32_t hashes[2] = { 0, 0 }; 519 struct ifmultiaddr *ifma; 520 uint32_t rxfilt; 521 522 RL_LOCK_ASSERT(sc); 523 524 rxfilt = CSR_READ_4(sc, RL_RXCFG); 525 rxfilt &= ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_BROAD | 526 RL_RXCFG_RX_MULTI); 527 /* Always accept frames destined for this host. */ 528 rxfilt |= RL_RXCFG_RX_INDIV; 529 /* Set capture broadcast bit to capture broadcast frames. 
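 *
 * The multicast filter programmed below is the usual 64-bit hash: the
 * top six bits of the big-endian CRC-32 of each multicast address pick
 * one bit, with bits 0-31 stored in MAR0 (hashes[0]) and bits 32-63 in
 * MAR4 (hashes[1]). For example, a hash value of 37 would set bit 5 of
 * hashes[1].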
*/ 530 if (ifp->if_flags & IFF_BROADCAST) 531 rxfilt |= RL_RXCFG_RX_BROAD; 532 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 533 rxfilt |= RL_RXCFG_RX_MULTI; 534 if (ifp->if_flags & IFF_PROMISC) 535 rxfilt |= RL_RXCFG_RX_ALLPHYS; 536 hashes[0] = 0xFFFFFFFF; 537 hashes[1] = 0xFFFFFFFF; 538 } else { 539 /* Now program new ones. */ 540 if_maddr_rlock(ifp); 541 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 542 if (ifma->ifma_addr->sa_family != AF_LINK) 543 continue; 544 h = ether_crc32_be(LLADDR((struct sockaddr_dl *) 545 ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; 546 if (h < 32) 547 hashes[0] |= (1 << h); 548 else 549 hashes[1] |= (1 << (h - 32)); 550 } 551 if_maddr_runlock(ifp); 552 if (hashes[0] != 0 || hashes[1] != 0) 553 rxfilt |= RL_RXCFG_RX_MULTI; 554 } 555 556 CSR_WRITE_4(sc, RL_MAR0, hashes[0]); 557 CSR_WRITE_4(sc, RL_MAR4, hashes[1]); 558 CSR_WRITE_4(sc, RL_RXCFG, rxfilt); 559 } 560 561 static void 562 rl_reset(struct rl_softc *sc) 563 { 564 int i; 565 566 RL_LOCK_ASSERT(sc); 567 568 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET); 569 570 for (i = 0; i < RL_TIMEOUT; i++) { 571 DELAY(10); 572 if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET)) 573 break; 574 } 575 if (i == RL_TIMEOUT) 576 device_printf(sc->rl_dev, "reset never completed!\n"); 577 } 578 579 /* 580 * Probe for a RealTek 8129/8139 chip. Check the PCI vendor and device 581 * IDs against our list and return a device name if we find a match. 582 */ 583 static int 584 rl_probe(device_t dev) 585 { 586 const struct rl_type *t; 587 uint16_t devid, revid, vendor; 588 int i; 589 590 vendor = pci_get_vendor(dev); 591 devid = pci_get_device(dev); 592 revid = pci_get_revid(dev); 593 594 if (vendor == RT_VENDORID && devid == RT_DEVICEID_8139) { 595 if (revid == 0x20) { 596 /* 8139C+, let re(4) take care of this device. */ 597 return (ENXIO); 598 } 599 } 600 t = rl_devs; 601 for (i = 0; i < nitems(rl_devs); i++, t++) { 602 if (vendor == t->rl_vid && devid == t->rl_did) { 603 device_set_desc(dev, t->rl_name); 604 return (BUS_PROBE_DEFAULT); 605 } 606 } 607 608 return (ENXIO); 609 } 610 611 struct rl_dmamap_arg { 612 bus_addr_t rl_busaddr; 613 }; 614 615 static void 616 rl_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 617 { 618 struct rl_dmamap_arg *ctx; 619 620 if (error != 0) 621 return; 622 623 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 624 625 ctx = (struct rl_dmamap_arg *)arg; 626 ctx->rl_busaddr = segs[0].ds_addr; 627 } 628 629 /* 630 * Attach the interface. Allocate softc structures, do ifmedia 631 * setup and ethernet/BPF attach. 
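 *
 * rl_dmamap_cb() above is the usual busdma callback for capturing the
 * bus address of a single-segment load; a minimal usage sketch (tag,
 * map, vaddr and size stand in for whichever resource is being loaded,
 * mirroring what rl_dma_alloc() does later):
 *
 *	struct rl_dmamap_arg ctx;
 *
 *	ctx.rl_busaddr = 0;
 *	error = bus_dmamap_load(tag, map, vaddr, size,
 *	    rl_dmamap_cb, &ctx, BUS_DMA_NOWAIT);
 *	if (error == 0 && ctx.rl_busaddr != 0)
 *		paddr = ctx.rl_busaddr;		// one contiguous segment
 *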
632 */ 633 static int 634 rl_attach(device_t dev) 635 { 636 uint8_t eaddr[ETHER_ADDR_LEN]; 637 uint16_t as[3]; 638 struct ifnet *ifp; 639 struct rl_softc *sc; 640 const struct rl_type *t; 641 struct sysctl_ctx_list *ctx; 642 struct sysctl_oid_list *children; 643 int error = 0, hwrev, i, phy, pmc, rid; 644 int prefer_iomap, unit; 645 uint16_t rl_did = 0; 646 char tn[32]; 647 648 sc = device_get_softc(dev); 649 unit = device_get_unit(dev); 650 sc->rl_dev = dev; 651 652 sc->rl_twister_enable = 0; 653 snprintf(tn, sizeof(tn), "dev.rl.%d.twister_enable", unit); 654 TUNABLE_INT_FETCH(tn, &sc->rl_twister_enable); 655 ctx = device_get_sysctl_ctx(sc->rl_dev); 656 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->rl_dev)); 657 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "twister_enable", CTLFLAG_RD, 658 &sc->rl_twister_enable, 0, ""); 659 660 mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 661 MTX_DEF); 662 callout_init_mtx(&sc->rl_stat_callout, &sc->rl_mtx, 0); 663 664 pci_enable_busmaster(dev); 665 666 667 /* 668 * Map control/status registers. 669 * Default to using PIO access for this driver. On SMP systems, 670 * there appear to be problems with memory mapped mode: it looks 671 * like doing too many memory mapped access back to back in rapid 672 * succession can hang the bus. I'm inclined to blame this on 673 * crummy design/construction on the part of RealTek. Memory 674 * mapped mode does appear to work on uniprocessor systems though. 675 */ 676 prefer_iomap = 1; 677 snprintf(tn, sizeof(tn), "dev.rl.%d.prefer_iomap", unit); 678 TUNABLE_INT_FETCH(tn, &prefer_iomap); 679 if (prefer_iomap) { 680 sc->rl_res_id = PCIR_BAR(0); 681 sc->rl_res_type = SYS_RES_IOPORT; 682 sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type, 683 &sc->rl_res_id, RF_ACTIVE); 684 } 685 if (prefer_iomap == 0 || sc->rl_res == NULL) { 686 sc->rl_res_id = PCIR_BAR(1); 687 sc->rl_res_type = SYS_RES_MEMORY; 688 sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type, 689 &sc->rl_res_id, RF_ACTIVE); 690 } 691 if (sc->rl_res == NULL) { 692 device_printf(dev, "couldn't map ports/memory\n"); 693 error = ENXIO; 694 goto fail; 695 } 696 697 #ifdef notdef 698 /* 699 * Detect the Realtek 8139B. For some reason, this chip is very 700 * unstable when left to autoselect the media 701 * The best workaround is to set the device to the required 702 * media type or to set it to the 10 Meg speed. 703 */ 704 if ((rman_get_end(sc->rl_res) - rman_get_start(sc->rl_res)) == 0xFF) 705 device_printf(dev, 706 "Realtek 8139B detected. Warning, this may be unstable in autoselect mode\n"); 707 #endif 708 709 sc->rl_btag = rman_get_bustag(sc->rl_res); 710 sc->rl_bhandle = rman_get_bushandle(sc->rl_res); 711 712 /* Allocate interrupt */ 713 rid = 0; 714 sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 715 RF_SHAREABLE | RF_ACTIVE); 716 717 if (sc->rl_irq[0] == NULL) { 718 device_printf(dev, "couldn't map interrupt\n"); 719 error = ENXIO; 720 goto fail; 721 } 722 723 sc->rl_cfg0 = RL_8139_CFG0; 724 sc->rl_cfg1 = RL_8139_CFG1; 725 sc->rl_cfg2 = 0; 726 sc->rl_cfg3 = RL_8139_CFG3; 727 sc->rl_cfg4 = RL_8139_CFG4; 728 sc->rl_cfg5 = RL_8139_CFG5; 729 730 /* 731 * Reset the adapter. Only take the lock here as it's needed in 732 * order to call rl_reset(). 
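 *
 * (On the dev.rl.N.* tunables fetched earlier in this function: they are
 * kernel environment variables, so they are normally set from
 * loader.conf before the driver attaches, e.g., with illustrative
 * values:
 *
 *	dev.rl.0.prefer_iomap="0"	# use memory-mapped registers
 *	dev.rl.0.twister_enable="1"	# enable twister cable calibration
 * )
 *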
733 */ 734 RL_LOCK(sc); 735 rl_reset(sc); 736 RL_UNLOCK(sc); 737 738 sc->rl_eecmd_read = RL_EECMD_READ_6BIT; 739 rl_read_eeprom(sc, (uint8_t *)&rl_did, 0, 1, 0); 740 if (rl_did != 0x8129) 741 sc->rl_eecmd_read = RL_EECMD_READ_8BIT; 742 743 /* 744 * Get station address from the EEPROM. 745 */ 746 rl_read_eeprom(sc, (uint8_t *)as, RL_EE_EADDR, 3, 0); 747 for (i = 0; i < 3; i++) { 748 eaddr[(i * 2) + 0] = as[i] & 0xff; 749 eaddr[(i * 2) + 1] = as[i] >> 8; 750 } 751 752 /* 753 * Now read the exact device type from the EEPROM to find 754 * out if it's an 8129 or 8139. 755 */ 756 rl_read_eeprom(sc, (uint8_t *)&rl_did, RL_EE_PCI_DID, 1, 0); 757 758 t = rl_devs; 759 sc->rl_type = 0; 760 while(t->rl_name != NULL) { 761 if (rl_did == t->rl_did) { 762 sc->rl_type = t->rl_basetype; 763 break; 764 } 765 t++; 766 } 767 768 if (sc->rl_type == 0) { 769 device_printf(dev, "unknown device ID: %x assuming 8139\n", 770 rl_did); 771 sc->rl_type = RL_8139; 772 /* 773 * Read RL_IDR register to get ethernet address as accessing 774 * EEPROM may not extract correct address. 775 */ 776 for (i = 0; i < ETHER_ADDR_LEN; i++) 777 eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i); 778 } 779 780 if ((error = rl_dma_alloc(sc)) != 0) 781 goto fail; 782 783 ifp = sc->rl_ifp = if_alloc(IFT_ETHER); 784 if (ifp == NULL) { 785 device_printf(dev, "can not if_alloc()\n"); 786 error = ENOSPC; 787 goto fail; 788 } 789 790 #define RL_PHYAD_INTERNAL 0 791 792 /* Do MII setup */ 793 phy = MII_PHY_ANY; 794 if (sc->rl_type == RL_8139) 795 phy = RL_PHYAD_INTERNAL; 796 error = mii_attach(dev, &sc->rl_miibus, ifp, rl_ifmedia_upd, 797 rl_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0); 798 if (error != 0) { 799 device_printf(dev, "attaching PHYs failed\n"); 800 goto fail; 801 } 802 803 ifp->if_softc = sc; 804 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 805 ifp->if_mtu = ETHERMTU; 806 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 807 ifp->if_ioctl = rl_ioctl; 808 ifp->if_start = rl_start; 809 ifp->if_init = rl_init; 810 ifp->if_capabilities = IFCAP_VLAN_MTU; 811 /* Check WOL for RTL8139B or newer controllers. */ 812 if (sc->rl_type == RL_8139 && 813 pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) == 0) { 814 hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV; 815 switch (hwrev) { 816 case RL_HWREV_8139B: 817 case RL_HWREV_8130: 818 case RL_HWREV_8139C: 819 case RL_HWREV_8139D: 820 case RL_HWREV_8101: 821 case RL_HWREV_8100: 822 ifp->if_capabilities |= IFCAP_WOL; 823 /* Disable WOL. */ 824 rl_clrwol(sc); 825 break; 826 default: 827 break; 828 } 829 } 830 ifp->if_capenable = ifp->if_capabilities; 831 ifp->if_capenable &= ~(IFCAP_WOL_UCAST | IFCAP_WOL_MCAST); 832 #ifdef DEVICE_POLLING 833 ifp->if_capabilities |= IFCAP_POLLING; 834 #endif 835 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); 836 ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; 837 IFQ_SET_READY(&ifp->if_snd); 838 839 /* 840 * Call MI attach routine. 841 */ 842 ether_ifattach(ifp, eaddr); 843 844 /* Hook interrupt last to avoid having to lock softc */ 845 error = bus_setup_intr(dev, sc->rl_irq[0], INTR_TYPE_NET | INTR_MPSAFE, 846 NULL, rl_intr, sc, &sc->rl_intrhand[0]); 847 if (error) { 848 device_printf(sc->rl_dev, "couldn't set up irq\n"); 849 ether_ifdetach(ifp); 850 } 851 852 fail: 853 if (error) 854 rl_detach(dev); 855 856 return (error); 857 } 858 859 /* 860 * Shutdown hardware and free up resources. This can be called any 861 * time after the mutex has been initialized. 
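 *
 * (Aside on the station address read back in rl_attach() above: the
 * EEPROM stores it as three little-endian 16-bit words, so, to make up
 * an example, words of 0xE000, 0x124B and 0x8D67 would unpack to the
 * address 00:e0:4b:12:67:8d.)
 *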
It is called in both 862 * the error case in attach and the normal detach case so it needs 863 * to be careful about only freeing resources that have actually been 864 * allocated. 865 */ 866 static int 867 rl_detach(device_t dev) 868 { 869 struct rl_softc *sc; 870 struct ifnet *ifp; 871 872 sc = device_get_softc(dev); 873 ifp = sc->rl_ifp; 874 875 KASSERT(mtx_initialized(&sc->rl_mtx), ("rl mutex not initialized")); 876 877 #ifdef DEVICE_POLLING 878 if (ifp->if_capenable & IFCAP_POLLING) 879 ether_poll_deregister(ifp); 880 #endif 881 /* These should only be active if attach succeeded */ 882 if (device_is_attached(dev)) { 883 RL_LOCK(sc); 884 rl_stop(sc); 885 RL_UNLOCK(sc); 886 callout_drain(&sc->rl_stat_callout); 887 ether_ifdetach(ifp); 888 } 889 #if 0 890 sc->suspended = 1; 891 #endif 892 if (sc->rl_miibus) 893 device_delete_child(dev, sc->rl_miibus); 894 bus_generic_detach(dev); 895 896 if (sc->rl_intrhand[0]) 897 bus_teardown_intr(dev, sc->rl_irq[0], sc->rl_intrhand[0]); 898 if (sc->rl_irq[0]) 899 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->rl_irq[0]); 900 if (sc->rl_res) 901 bus_release_resource(dev, sc->rl_res_type, sc->rl_res_id, 902 sc->rl_res); 903 904 if (ifp) 905 if_free(ifp); 906 907 rl_dma_free(sc); 908 909 mtx_destroy(&sc->rl_mtx); 910 911 return (0); 912 } 913 914 static int 915 rl_dma_alloc(struct rl_softc *sc) 916 { 917 struct rl_dmamap_arg ctx; 918 int error, i; 919 920 /* 921 * Allocate the parent bus DMA tag appropriate for PCI. 922 */ 923 error = bus_dma_tag_create(bus_get_dma_tag(sc->rl_dev), /* parent */ 924 1, 0, /* alignment, boundary */ 925 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 926 BUS_SPACE_MAXADDR, /* highaddr */ 927 NULL, NULL, /* filter, filterarg */ 928 BUS_SPACE_MAXSIZE_32BIT, 0, /* maxsize, nsegments */ 929 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 930 0, /* flags */ 931 NULL, NULL, /* lockfunc, lockarg */ 932 &sc->rl_parent_tag); 933 if (error) { 934 device_printf(sc->rl_dev, 935 "failed to create parent DMA tag.\n"); 936 goto fail; 937 } 938 /* Create DMA tag for Rx memory block. */ 939 error = bus_dma_tag_create(sc->rl_parent_tag, /* parent */ 940 RL_RX_8139_BUF_ALIGN, 0, /* alignment, boundary */ 941 BUS_SPACE_MAXADDR, /* lowaddr */ 942 BUS_SPACE_MAXADDR, /* highaddr */ 943 NULL, NULL, /* filter, filterarg */ 944 RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ, 1, /* maxsize,nsegments */ 945 RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ, /* maxsegsize */ 946 0, /* flags */ 947 NULL, NULL, /* lockfunc, lockarg */ 948 &sc->rl_cdata.rl_rx_tag); 949 if (error) { 950 device_printf(sc->rl_dev, 951 "failed to create Rx memory block DMA tag.\n"); 952 goto fail; 953 } 954 /* Create DMA tag for Tx buffer. */ 955 error = bus_dma_tag_create(sc->rl_parent_tag, /* parent */ 956 RL_TX_8139_BUF_ALIGN, 0, /* alignment, boundary */ 957 BUS_SPACE_MAXADDR, /* lowaddr */ 958 BUS_SPACE_MAXADDR, /* highaddr */ 959 NULL, NULL, /* filter, filterarg */ 960 MCLBYTES, 1, /* maxsize, nsegments */ 961 MCLBYTES, /* maxsegsize */ 962 0, /* flags */ 963 NULL, NULL, /* lockfunc, lockarg */ 964 &sc->rl_cdata.rl_tx_tag); 965 if (error) { 966 device_printf(sc->rl_dev, "failed to create Tx DMA tag.\n"); 967 goto fail; 968 } 969 970 /* 971 * Allocate DMA'able memory and load DMA map for Rx memory block. 
972 */ 973 error = bus_dmamem_alloc(sc->rl_cdata.rl_rx_tag, 974 (void **)&sc->rl_cdata.rl_rx_buf, BUS_DMA_WAITOK | 975 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->rl_cdata.rl_rx_dmamap); 976 if (error != 0) { 977 device_printf(sc->rl_dev, 978 "failed to allocate Rx DMA memory block.\n"); 979 goto fail; 980 } 981 ctx.rl_busaddr = 0; 982 error = bus_dmamap_load(sc->rl_cdata.rl_rx_tag, 983 sc->rl_cdata.rl_rx_dmamap, sc->rl_cdata.rl_rx_buf, 984 RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ, rl_dmamap_cb, &ctx, 985 BUS_DMA_NOWAIT); 986 if (error != 0 || ctx.rl_busaddr == 0) { 987 device_printf(sc->rl_dev, 988 "could not load Rx DMA memory block.\n"); 989 goto fail; 990 } 991 sc->rl_cdata.rl_rx_buf_paddr = ctx.rl_busaddr; 992 993 /* Create DMA maps for Tx buffers. */ 994 for (i = 0; i < RL_TX_LIST_CNT; i++) { 995 sc->rl_cdata.rl_tx_chain[i] = NULL; 996 sc->rl_cdata.rl_tx_dmamap[i] = NULL; 997 error = bus_dmamap_create(sc->rl_cdata.rl_tx_tag, 0, 998 &sc->rl_cdata.rl_tx_dmamap[i]); 999 if (error != 0) { 1000 device_printf(sc->rl_dev, 1001 "could not create Tx dmamap.\n"); 1002 goto fail; 1003 } 1004 } 1005 1006 /* Leave a few bytes before the start of the RX ring buffer. */ 1007 sc->rl_cdata.rl_rx_buf_ptr = sc->rl_cdata.rl_rx_buf; 1008 sc->rl_cdata.rl_rx_buf += RL_RX_8139_BUF_RESERVE; 1009 1010 fail: 1011 return (error); 1012 } 1013 1014 static void 1015 rl_dma_free(struct rl_softc *sc) 1016 { 1017 int i; 1018 1019 /* Rx memory block. */ 1020 if (sc->rl_cdata.rl_rx_tag != NULL) { 1021 if (sc->rl_cdata.rl_rx_buf_paddr != 0) 1022 bus_dmamap_unload(sc->rl_cdata.rl_rx_tag, 1023 sc->rl_cdata.rl_rx_dmamap); 1024 if (sc->rl_cdata.rl_rx_buf_ptr != NULL) 1025 bus_dmamem_free(sc->rl_cdata.rl_rx_tag, 1026 sc->rl_cdata.rl_rx_buf_ptr, 1027 sc->rl_cdata.rl_rx_dmamap); 1028 sc->rl_cdata.rl_rx_buf_ptr = NULL; 1029 sc->rl_cdata.rl_rx_buf = NULL; 1030 sc->rl_cdata.rl_rx_buf_paddr = 0; 1031 bus_dma_tag_destroy(sc->rl_cdata.rl_rx_tag); 1032 sc->rl_cdata.rl_tx_tag = NULL; 1033 } 1034 1035 /* Tx buffers. */ 1036 if (sc->rl_cdata.rl_tx_tag != NULL) { 1037 for (i = 0; i < RL_TX_LIST_CNT; i++) { 1038 if (sc->rl_cdata.rl_tx_dmamap[i] != NULL) { 1039 bus_dmamap_destroy( 1040 sc->rl_cdata.rl_tx_tag, 1041 sc->rl_cdata.rl_tx_dmamap[i]); 1042 sc->rl_cdata.rl_tx_dmamap[i] = NULL; 1043 } 1044 } 1045 bus_dma_tag_destroy(sc->rl_cdata.rl_tx_tag); 1046 sc->rl_cdata.rl_tx_tag = NULL; 1047 } 1048 1049 if (sc->rl_parent_tag != NULL) { 1050 bus_dma_tag_destroy(sc->rl_parent_tag); 1051 sc->rl_parent_tag = NULL; 1052 } 1053 } 1054 1055 /* 1056 * Initialize the transmit descriptors. 1057 */ 1058 static int 1059 rl_list_tx_init(struct rl_softc *sc) 1060 { 1061 struct rl_chain_data *cd; 1062 int i; 1063 1064 RL_LOCK_ASSERT(sc); 1065 1066 cd = &sc->rl_cdata; 1067 for (i = 0; i < RL_TX_LIST_CNT; i++) { 1068 cd->rl_tx_chain[i] = NULL; 1069 CSR_WRITE_4(sc, 1070 RL_TXADDR0 + (i * sizeof(uint32_t)), 0x0000000); 1071 } 1072 1073 sc->rl_cdata.cur_tx = 0; 1074 sc->rl_cdata.last_tx = 0; 1075 1076 return (0); 1077 } 1078 1079 static int 1080 rl_list_rx_init(struct rl_softc *sc) 1081 { 1082 1083 RL_LOCK_ASSERT(sc); 1084 1085 bzero(sc->rl_cdata.rl_rx_buf_ptr, 1086 RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ); 1087 bus_dmamap_sync(sc->rl_cdata.rl_tx_tag, sc->rl_cdata.rl_rx_dmamap, 1088 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1089 1090 return (0); 1091 } 1092 1093 /* 1094 * A frame has been uploaded: pass the resulting mbuf chain up to 1095 * the higher level protocols. 
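 *
 * Concretely, the scheme documented below amounts to the following
 * per-frame record in the RX buffer (a hypothetical structure shown for
 * illustration only; the chip merely defines this byte layout and the
 * driver never declares it):
 *
 *	struct rl_rx_ring_entry {
 *		uint32_t rl_rxstat;	// bits 31:16: length incl. CRC
 *					// bits 15:0:  RX status bits
 *		uint8_t	 rl_frame[];	// frame data, CRC appended
 *	};
 *	// The next record starts at the next 32-bit boundary and the
 *	// buffer wraps at RL_RXBUFLEN.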
1096 * 1097 * You know there's something wrong with a PCI bus-master chip design 1098 * when you have to use m_devget(). 1099 * 1100 * The receive operation is badly documented in the datasheet, so I'll 1101 * attempt to document it here. The driver provides a buffer area and 1102 * places its base address in the RX buffer start address register. 1103 * The chip then begins copying frames into the RX buffer. Each frame 1104 * is preceded by a 32-bit RX status word which specifies the length 1105 * of the frame and certain other status bits. Each frame (starting with 1106 * the status word) is also 32-bit aligned. The frame length is in the 1107 * first 16 bits of the status word; the lower 15 bits correspond with 1108 * the 'rx status register' mentioned in the datasheet. 1109 * 1110 * Note: to make the Alpha happy, the frame payload needs to be aligned 1111 * on a 32-bit boundary. To achieve this, we pass RL_ETHER_ALIGN (2 bytes) 1112 * as the offset argument to m_devget(). 1113 */ 1114 static int 1115 rl_rxeof(struct rl_softc *sc) 1116 { 1117 struct mbuf *m; 1118 struct ifnet *ifp = sc->rl_ifp; 1119 uint8_t *rxbufpos; 1120 int total_len = 0; 1121 int wrap = 0; 1122 int rx_npkts = 0; 1123 uint32_t rxstat; 1124 uint16_t cur_rx; 1125 uint16_t limit; 1126 uint16_t max_bytes, rx_bytes = 0; 1127 1128 RL_LOCK_ASSERT(sc); 1129 1130 bus_dmamap_sync(sc->rl_cdata.rl_rx_tag, sc->rl_cdata.rl_rx_dmamap, 1131 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1132 1133 cur_rx = (CSR_READ_2(sc, RL_CURRXADDR) + 16) % RL_RXBUFLEN; 1134 1135 /* Do not try to read past this point. */ 1136 limit = CSR_READ_2(sc, RL_CURRXBUF) % RL_RXBUFLEN; 1137 1138 if (limit < cur_rx) 1139 max_bytes = (RL_RXBUFLEN - cur_rx) + limit; 1140 else 1141 max_bytes = limit - cur_rx; 1142 1143 while((CSR_READ_1(sc, RL_COMMAND) & RL_CMD_EMPTY_RXBUF) == 0) { 1144 #ifdef DEVICE_POLLING 1145 if (ifp->if_capenable & IFCAP_POLLING) { 1146 if (sc->rxcycles <= 0) 1147 break; 1148 sc->rxcycles--; 1149 } 1150 #endif 1151 rxbufpos = sc->rl_cdata.rl_rx_buf + cur_rx; 1152 rxstat = le32toh(*(uint32_t *)rxbufpos); 1153 1154 /* 1155 * Here's a totally undocumented fact for you. When the 1156 * RealTek chip is in the process of copying a packet into 1157 * RAM for you, the length will be 0xfff0. If you spot a 1158 * packet header with this value, you need to stop. The 1159 * datasheet makes absolutely no mention of this and 1160 * RealTek should be shot for this. 1161 */ 1162 total_len = rxstat >> 16; 1163 if (total_len == RL_RXSTAT_UNFINISHED) 1164 break; 1165 1166 if (!(rxstat & RL_RXSTAT_RXOK) || 1167 total_len < ETHER_MIN_LEN || 1168 total_len > ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) { 1169 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 1170 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1171 rl_init_locked(sc); 1172 return (rx_npkts); 1173 } 1174 1175 /* No errors; receive the packet. */ 1176 rx_bytes += total_len + 4; 1177 1178 /* 1179 * XXX The RealTek chip includes the CRC with every 1180 * received frame, and there's no way to turn this 1181 * behavior off (at least, I can't find anything in 1182 * the manual that explains how to do it) so we have 1183 * to trim off the CRC manually. 1184 */ 1185 total_len -= ETHER_CRC_LEN; 1186 1187 /* 1188 * Avoid trying to read more bytes than we know 1189 * the chip has prepared for us. 
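 *
 * A worked example, taking RL_RXBUFLEN to be 64KB for illustration: if
 * cur_rx sits at offset 65000 and the chip's write pointer (limit) has
 * already wrapped around to offset 1000, then limit < cur_rx and
 * max_bytes = (65536 - 65000) + 1000 = 1536, so at most the 1536 bytes
 * the chip has actually produced will be consumed.
 *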
1190 */ 1191 if (rx_bytes > max_bytes) 1192 break; 1193 1194 rxbufpos = sc->rl_cdata.rl_rx_buf + 1195 ((cur_rx + sizeof(uint32_t)) % RL_RXBUFLEN); 1196 if (rxbufpos == (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN)) 1197 rxbufpos = sc->rl_cdata.rl_rx_buf; 1198 1199 wrap = (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN) - rxbufpos; 1200 if (total_len > wrap) { 1201 m = m_devget(rxbufpos, total_len, RL_ETHER_ALIGN, ifp, 1202 NULL); 1203 if (m != NULL) 1204 m_copyback(m, wrap, total_len - wrap, 1205 sc->rl_cdata.rl_rx_buf); 1206 cur_rx = (total_len - wrap + ETHER_CRC_LEN); 1207 } else { 1208 m = m_devget(rxbufpos, total_len, RL_ETHER_ALIGN, ifp, 1209 NULL); 1210 cur_rx += total_len + 4 + ETHER_CRC_LEN; 1211 } 1212 1213 /* Round up to 32-bit boundary. */ 1214 cur_rx = (cur_rx + 3) & ~3; 1215 CSR_WRITE_2(sc, RL_CURRXADDR, cur_rx - 16); 1216 1217 if (m == NULL) { 1218 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 1219 continue; 1220 } 1221 1222 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 1223 RL_UNLOCK(sc); 1224 (*ifp->if_input)(ifp, m); 1225 RL_LOCK(sc); 1226 rx_npkts++; 1227 } 1228 1229 /* No need to sync Rx memory block as we didn't modify it. */ 1230 return (rx_npkts); 1231 } 1232 1233 /* 1234 * A frame was downloaded to the chip. It's safe for us to clean up 1235 * the list buffers. 1236 */ 1237 static void 1238 rl_txeof(struct rl_softc *sc) 1239 { 1240 struct ifnet *ifp = sc->rl_ifp; 1241 uint32_t txstat; 1242 1243 RL_LOCK_ASSERT(sc); 1244 1245 /* 1246 * Go through our tx list and free mbufs for those 1247 * frames that have been uploaded. 1248 */ 1249 do { 1250 if (RL_LAST_TXMBUF(sc) == NULL) 1251 break; 1252 txstat = CSR_READ_4(sc, RL_LAST_TXSTAT(sc)); 1253 if (!(txstat & (RL_TXSTAT_TX_OK| 1254 RL_TXSTAT_TX_UNDERRUN|RL_TXSTAT_TXABRT))) 1255 break; 1256 1257 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (txstat & RL_TXSTAT_COLLCNT) >> 24); 1258 1259 bus_dmamap_sync(sc->rl_cdata.rl_tx_tag, RL_LAST_DMAMAP(sc), 1260 BUS_DMASYNC_POSTWRITE); 1261 bus_dmamap_unload(sc->rl_cdata.rl_tx_tag, RL_LAST_DMAMAP(sc)); 1262 m_freem(RL_LAST_TXMBUF(sc)); 1263 RL_LAST_TXMBUF(sc) = NULL; 1264 /* 1265 * If there was a transmit underrun, bump the TX threshold. 1266 * Make sure not to overflow the 63 * 32byte we can address 1267 * with the 6 available bit. 1268 */ 1269 if ((txstat & RL_TXSTAT_TX_UNDERRUN) && 1270 (sc->rl_txthresh < 2016)) 1271 sc->rl_txthresh += 32; 1272 if (txstat & RL_TXSTAT_TX_OK) 1273 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); 1274 else { 1275 int oldthresh; 1276 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1277 if ((txstat & RL_TXSTAT_TXABRT) || 1278 (txstat & RL_TXSTAT_OUTOFWIN)) 1279 CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG); 1280 oldthresh = sc->rl_txthresh; 1281 /* error recovery */ 1282 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1283 rl_init_locked(sc); 1284 /* restore original threshold */ 1285 sc->rl_txthresh = oldthresh; 1286 return; 1287 } 1288 RL_INC(sc->rl_cdata.last_tx); 1289 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1290 } while (sc->rl_cdata.last_tx != sc->rl_cdata.cur_tx); 1291 1292 if (RL_LAST_TXMBUF(sc) == NULL) 1293 sc->rl_watchdog_timer = 0; 1294 } 1295 1296 static void 1297 rl_twister_update(struct rl_softc *sc) 1298 { 1299 uint16_t linktest; 1300 /* 1301 * Table provided by RealTek (Kinston <shangh@realtek.com.tw>) for 1302 * Linux driver. Values undocumented otherwise. 
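 *
 * For reference, the state machine below (stepped from rl_tick() at
 * roughly ten times a second until it reaches DONE) proceeds as follows:
 *
 *	CHK_LINK:   link up -> FIND_ROW; otherwise write safe defaults
 *	            and give up (-> DONE)
 *	FIND_ROW:   pick the tuning row from the echo time -> SET_PARAM
 *	SET_PARAM:  write param[row][col] for col 0..3; then row 3 ->
 *	            RECHK_LONG, any other row -> DONE
 *	RECHK_LONG: still reads as row 3 -> DONE; otherwise -> RETUNE
 *	RETUNE:     reset and drop down one row -> SET_PARAM
 *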
1303 */ 1304 static const uint32_t param[4][4] = { 1305 {0xcb39de43, 0xcb39ce43, 0xfb38de03, 0xcb38de43}, 1306 {0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83}, 1307 {0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83}, 1308 {0xbb39de43, 0xbb39ce43, 0xbb39ce83, 0xbb39ce83} 1309 }; 1310 1311 /* 1312 * Tune the so-called twister registers of the RTL8139. These 1313 * are used to compensate for impedance mismatches. The 1314 * method for tuning these registers is undocumented and the 1315 * following procedure is collected from public sources. 1316 */ 1317 switch (sc->rl_twister) 1318 { 1319 case CHK_LINK: 1320 /* 1321 * If we have a sufficient link, then we can proceed in 1322 * the state machine to the next stage. If not, then 1323 * disable further tuning after writing sane defaults. 1324 */ 1325 if (CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_LINK_OK) { 1326 CSR_WRITE_2(sc, RL_CSCFG, RL_CSCFG_LINK_DOWN_OFF_CMD); 1327 sc->rl_twister = FIND_ROW; 1328 } else { 1329 CSR_WRITE_2(sc, RL_CSCFG, RL_CSCFG_LINK_DOWN_CMD); 1330 CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_CBL_TEST); 1331 CSR_WRITE_4(sc, RL_PARA78, RL_PARA78_DEF); 1332 CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_DEF); 1333 sc->rl_twister = DONE; 1334 } 1335 break; 1336 case FIND_ROW: 1337 /* 1338 * Read how long it took to see the echo to find the tuning 1339 * row to use. 1340 */ 1341 linktest = CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_STATUS; 1342 if (linktest == RL_CSCFG_ROW3) 1343 sc->rl_twist_row = 3; 1344 else if (linktest == RL_CSCFG_ROW2) 1345 sc->rl_twist_row = 2; 1346 else if (linktest == RL_CSCFG_ROW1) 1347 sc->rl_twist_row = 1; 1348 else 1349 sc->rl_twist_row = 0; 1350 sc->rl_twist_col = 0; 1351 sc->rl_twister = SET_PARAM; 1352 break; 1353 case SET_PARAM: 1354 if (sc->rl_twist_col == 0) 1355 CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_RESET); 1356 CSR_WRITE_4(sc, RL_PARA7C, 1357 param[sc->rl_twist_row][sc->rl_twist_col]); 1358 if (++sc->rl_twist_col == 4) { 1359 if (sc->rl_twist_row == 3) 1360 sc->rl_twister = RECHK_LONG; 1361 else 1362 sc->rl_twister = DONE; 1363 } 1364 break; 1365 case RECHK_LONG: 1366 /* 1367 * For long cables, we have to double check to make sure we 1368 * don't mistune. 1369 */ 1370 linktest = CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_STATUS; 1371 if (linktest == RL_CSCFG_ROW3) 1372 sc->rl_twister = DONE; 1373 else { 1374 CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_RETUNE); 1375 sc->rl_twister = RETUNE; 1376 } 1377 break; 1378 case RETUNE: 1379 /* Retune for a shorter cable (try column 2) */ 1380 CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_CBL_TEST); 1381 CSR_WRITE_4(sc, RL_PARA78, RL_PARA78_DEF); 1382 CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_DEF); 1383 CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_RESET); 1384 sc->rl_twist_row--; 1385 sc->rl_twist_col = 0; 1386 sc->rl_twister = SET_PARAM; 1387 break; 1388 1389 case DONE: 1390 break; 1391 } 1392 1393 } 1394 1395 static void 1396 rl_tick(void *xsc) 1397 { 1398 struct rl_softc *sc = xsc; 1399 struct mii_data *mii; 1400 int ticks; 1401 1402 RL_LOCK_ASSERT(sc); 1403 /* 1404 * If we're doing the twister cable calibration, then we need to defer 1405 * watchdog timeouts. This is a no-op in normal operations, but 1406 * can falsely trigger when the cable calibration takes a while and 1407 * there was traffic ready to go when rl was started. 1408 * 1409 * We don't defer mii_tick since that updates the mii status, which 1410 * helps the twister process, at least according to similar patches 1411 * for the Linux driver I found online while doing the fixes. Worst 1412 * case is a few extra mii reads during calibration. 
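 *
 * Net effect of the logic below: while the twister state machine is
 * still converging the callout reschedules itself every hz/10 ticks
 * (about 100ms) and the watchdog is held off; once it reaches DONE, or
 * if twister tuning is disabled, we fall back to the normal one-second
 * tick with the watchdog armed.
 *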
1413 */ 1414 mii = device_get_softc(sc->rl_miibus); 1415 mii_tick(mii); 1416 if ((sc->rl_flags & RL_FLAG_LINK) == 0) 1417 rl_miibus_statchg(sc->rl_dev); 1418 if (sc->rl_twister_enable) { 1419 if (sc->rl_twister == DONE) 1420 rl_watchdog(sc); 1421 else 1422 rl_twister_update(sc); 1423 if (sc->rl_twister == DONE) 1424 ticks = hz; 1425 else 1426 ticks = hz / 10; 1427 } else { 1428 rl_watchdog(sc); 1429 ticks = hz; 1430 } 1431 1432 callout_reset(&sc->rl_stat_callout, ticks, rl_tick, sc); 1433 } 1434 1435 #ifdef DEVICE_POLLING 1436 static int 1437 rl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 1438 { 1439 struct rl_softc *sc = ifp->if_softc; 1440 int rx_npkts = 0; 1441 1442 RL_LOCK(sc); 1443 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1444 rx_npkts = rl_poll_locked(ifp, cmd, count); 1445 RL_UNLOCK(sc); 1446 return (rx_npkts); 1447 } 1448 1449 static int 1450 rl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count) 1451 { 1452 struct rl_softc *sc = ifp->if_softc; 1453 int rx_npkts; 1454 1455 RL_LOCK_ASSERT(sc); 1456 1457 sc->rxcycles = count; 1458 rx_npkts = rl_rxeof(sc); 1459 rl_txeof(sc); 1460 1461 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1462 rl_start_locked(ifp); 1463 1464 if (cmd == POLL_AND_CHECK_STATUS) { 1465 uint16_t status; 1466 1467 /* We should also check the status register. */ 1468 status = CSR_READ_2(sc, RL_ISR); 1469 if (status == 0xffff) 1470 return (rx_npkts); 1471 if (status != 0) 1472 CSR_WRITE_2(sc, RL_ISR, status); 1473 1474 /* XXX We should check behaviour on receiver stalls. */ 1475 1476 if (status & RL_ISR_SYSTEM_ERR) { 1477 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1478 rl_init_locked(sc); 1479 } 1480 } 1481 return (rx_npkts); 1482 } 1483 #endif /* DEVICE_POLLING */ 1484 1485 static void 1486 rl_intr(void *arg) 1487 { 1488 struct rl_softc *sc = arg; 1489 struct ifnet *ifp = sc->rl_ifp; 1490 uint16_t status; 1491 int count; 1492 1493 RL_LOCK(sc); 1494 1495 if (sc->suspended) 1496 goto done_locked; 1497 1498 #ifdef DEVICE_POLLING 1499 if (ifp->if_capenable & IFCAP_POLLING) 1500 goto done_locked; 1501 #endif 1502 1503 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 1504 goto done_locked2; 1505 status = CSR_READ_2(sc, RL_ISR); 1506 if (status == 0xffff || (status & RL_INTRS) == 0) 1507 goto done_locked; 1508 /* 1509 * Ours, disable further interrupts. 1510 */ 1511 CSR_WRITE_2(sc, RL_IMR, 0); 1512 for (count = 16; count > 0; count--) { 1513 CSR_WRITE_2(sc, RL_ISR, status); 1514 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1515 if (status & (RL_ISR_RX_OK | RL_ISR_RX_ERR)) 1516 rl_rxeof(sc); 1517 if (status & (RL_ISR_TX_OK | RL_ISR_TX_ERR)) 1518 rl_txeof(sc); 1519 if (status & RL_ISR_SYSTEM_ERR) { 1520 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1521 rl_init_locked(sc); 1522 RL_UNLOCK(sc); 1523 return; 1524 } 1525 } 1526 status = CSR_READ_2(sc, RL_ISR); 1527 /* If the card has gone away, the read returns 0xffff. */ 1528 if (status == 0xffff || (status & RL_INTRS) == 0) 1529 break; 1530 } 1531 1532 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1533 rl_start_locked(ifp); 1534 1535 done_locked2: 1536 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1537 CSR_WRITE_2(sc, RL_IMR, RL_INTRS); 1538 done_locked: 1539 RL_UNLOCK(sc); 1540 } 1541 1542 /* 1543 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 1544 * pointers to the fragment pointers. 
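 *
 * In other words, a summary of the function below: the frame is copied
 * or defragmented into a single longword-aligned cluster if necessary,
 * zero-padded up to RL_MIN_FRAMELEN, loaded as exactly one DMA segment,
 * and that segment's bus address is written into the current TXADDR
 * register. The four TXADDR/TXSTAT register pairs are used round-robin:
 * cur_tx advances in rl_start_locked() and last_tx chases it in
 * rl_txeof().
 *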
1545 */ 1546 static int 1547 rl_encap(struct rl_softc *sc, struct mbuf **m_head) 1548 { 1549 struct mbuf *m; 1550 bus_dma_segment_t txsegs[1]; 1551 int error, nsegs, padlen; 1552 1553 RL_LOCK_ASSERT(sc); 1554 1555 m = *m_head; 1556 padlen = 0; 1557 /* 1558 * Hardware doesn't auto-pad, so we have to make sure 1559 * pad short frames out to the minimum frame length. 1560 */ 1561 if (m->m_pkthdr.len < RL_MIN_FRAMELEN) 1562 padlen = RL_MIN_FRAMELEN - m->m_pkthdr.len; 1563 /* 1564 * The RealTek is brain damaged and wants longword-aligned 1565 * TX buffers, plus we can only have one fragment buffer 1566 * per packet. We have to copy pretty much all the time. 1567 */ 1568 if (m->m_next != NULL || (mtod(m, uintptr_t) & 3) != 0 || 1569 (padlen > 0 && M_TRAILINGSPACE(m) < padlen)) { 1570 m = m_defrag(*m_head, M_NOWAIT); 1571 if (m == NULL) { 1572 m_freem(*m_head); 1573 *m_head = NULL; 1574 return (ENOMEM); 1575 } 1576 } 1577 *m_head = m; 1578 1579 if (padlen > 0) { 1580 /* 1581 * Make security-conscious people happy: zero out the 1582 * bytes in the pad area, since we don't know what 1583 * this mbuf cluster buffer's previous user might 1584 * have left in it. 1585 */ 1586 bzero(mtod(m, char *) + m->m_pkthdr.len, padlen); 1587 m->m_pkthdr.len += padlen; 1588 m->m_len = m->m_pkthdr.len; 1589 } 1590 1591 error = bus_dmamap_load_mbuf_sg(sc->rl_cdata.rl_tx_tag, 1592 RL_CUR_DMAMAP(sc), m, txsegs, &nsegs, 0); 1593 if (error != 0) 1594 return (error); 1595 if (nsegs == 0) { 1596 m_freem(*m_head); 1597 *m_head = NULL; 1598 return (EIO); 1599 } 1600 1601 RL_CUR_TXMBUF(sc) = m; 1602 bus_dmamap_sync(sc->rl_cdata.rl_tx_tag, RL_CUR_DMAMAP(sc), 1603 BUS_DMASYNC_PREWRITE); 1604 CSR_WRITE_4(sc, RL_CUR_TXADDR(sc), RL_ADDR_LO(txsegs[0].ds_addr)); 1605 1606 return (0); 1607 } 1608 1609 /* 1610 * Main transmit routine. 1611 */ 1612 static void 1613 rl_start(struct ifnet *ifp) 1614 { 1615 struct rl_softc *sc = ifp->if_softc; 1616 1617 RL_LOCK(sc); 1618 rl_start_locked(ifp); 1619 RL_UNLOCK(sc); 1620 } 1621 1622 static void 1623 rl_start_locked(struct ifnet *ifp) 1624 { 1625 struct rl_softc *sc = ifp->if_softc; 1626 struct mbuf *m_head = NULL; 1627 1628 RL_LOCK_ASSERT(sc); 1629 1630 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1631 IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0) 1632 return; 1633 1634 while (RL_CUR_TXMBUF(sc) == NULL) { 1635 1636 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 1637 1638 if (m_head == NULL) 1639 break; 1640 1641 if (rl_encap(sc, &m_head)) { 1642 if (m_head == NULL) 1643 break; 1644 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 1645 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1646 break; 1647 } 1648 1649 /* Pass a copy of this mbuf chain to the bpf subsystem. */ 1650 BPF_MTAP(ifp, RL_CUR_TXMBUF(sc)); 1651 1652 /* Transmit the frame. */ 1653 CSR_WRITE_4(sc, RL_CUR_TXSTAT(sc), 1654 RL_TXTHRESH(sc->rl_txthresh) | 1655 RL_CUR_TXMBUF(sc)->m_pkthdr.len); 1656 1657 RL_INC(sc->rl_cdata.cur_tx); 1658 1659 /* Set a timeout in case the chip goes out to lunch. */ 1660 sc->rl_watchdog_timer = 5; 1661 } 1662 1663 /* 1664 * We broke out of the loop because all our TX slots are 1665 * full. Mark the NIC as busy until it drains some of the 1666 * packets from the queue. 
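 *
 * (Note on the TX kick above: writing RL_CUR_TXSTAT with the threshold
 * and frame length is what actually starts the transmission. The
 * threshold is expressed in 32-byte units in a small field of that
 * register, which is why rl_txeof() bumps rl_txthresh in steps of 32
 * and caps it at 2016, i.e. 63 * 32 bytes.)
 *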
1667 */ 1668 if (RL_CUR_TXMBUF(sc) != NULL) 1669 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1670 } 1671 1672 static void 1673 rl_init(void *xsc) 1674 { 1675 struct rl_softc *sc = xsc; 1676 1677 RL_LOCK(sc); 1678 rl_init_locked(sc); 1679 RL_UNLOCK(sc); 1680 } 1681 1682 static void 1683 rl_init_locked(struct rl_softc *sc) 1684 { 1685 struct ifnet *ifp = sc->rl_ifp; 1686 struct mii_data *mii; 1687 uint32_t eaddr[2]; 1688 1689 RL_LOCK_ASSERT(sc); 1690 1691 mii = device_get_softc(sc->rl_miibus); 1692 1693 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1694 return; 1695 1696 /* 1697 * Cancel pending I/O and free all RX/TX buffers. 1698 */ 1699 rl_stop(sc); 1700 1701 rl_reset(sc); 1702 if (sc->rl_twister_enable) { 1703 /* 1704 * Reset twister register tuning state. The twister 1705 * registers and their tuning are undocumented, but 1706 * are necessary to cope with bad links. rl_twister = 1707 * DONE here will disable this entirely. 1708 */ 1709 sc->rl_twister = CHK_LINK; 1710 } 1711 1712 /* 1713 * Init our MAC address. Even though the chipset 1714 * documentation doesn't mention it, we need to enter "Config 1715 * register write enable" mode to modify the ID registers. 1716 */ 1717 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG); 1718 bzero(eaddr, sizeof(eaddr)); 1719 bcopy(IF_LLADDR(sc->rl_ifp), eaddr, ETHER_ADDR_LEN); 1720 CSR_WRITE_STREAM_4(sc, RL_IDR0, eaddr[0]); 1721 CSR_WRITE_STREAM_4(sc, RL_IDR4, eaddr[1]); 1722 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 1723 1724 /* Init the RX memory block pointer register. */ 1725 CSR_WRITE_4(sc, RL_RXADDR, sc->rl_cdata.rl_rx_buf_paddr + 1726 RL_RX_8139_BUF_RESERVE); 1727 /* Init TX descriptors. */ 1728 rl_list_tx_init(sc); 1729 /* Init Rx memory block. */ 1730 rl_list_rx_init(sc); 1731 1732 /* 1733 * Enable transmit and receive. 1734 */ 1735 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); 1736 1737 /* 1738 * Set the initial TX and RX configuration. 1739 */ 1740 CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG); 1741 CSR_WRITE_4(sc, RL_RXCFG, RL_RXCFG_CONFIG); 1742 1743 /* Set RX filter. */ 1744 rl_rxfilter(sc); 1745 1746 #ifdef DEVICE_POLLING 1747 /* Disable interrupts if we are polling. */ 1748 if (ifp->if_capenable & IFCAP_POLLING) 1749 CSR_WRITE_2(sc, RL_IMR, 0); 1750 else 1751 #endif 1752 /* Enable interrupts. */ 1753 CSR_WRITE_2(sc, RL_IMR, RL_INTRS); 1754 1755 /* Set initial TX threshold */ 1756 sc->rl_txthresh = RL_TX_THRESH_INIT; 1757 1758 /* Start RX/TX process. */ 1759 CSR_WRITE_4(sc, RL_MISSEDPKT, 0); 1760 1761 /* Enable receiver and transmitter. */ 1762 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); 1763 1764 sc->rl_flags &= ~RL_FLAG_LINK; 1765 mii_mediachg(mii); 1766 1767 CSR_WRITE_1(sc, sc->rl_cfg1, RL_CFG1_DRVLOAD|RL_CFG1_FULLDUPLEX); 1768 1769 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1770 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1771 1772 callout_reset(&sc->rl_stat_callout, hz, rl_tick, sc); 1773 } 1774 1775 /* 1776 * Set media options. 1777 */ 1778 static int 1779 rl_ifmedia_upd(struct ifnet *ifp) 1780 { 1781 struct rl_softc *sc = ifp->if_softc; 1782 struct mii_data *mii; 1783 1784 mii = device_get_softc(sc->rl_miibus); 1785 1786 RL_LOCK(sc); 1787 mii_mediachg(mii); 1788 RL_UNLOCK(sc); 1789 1790 return (0); 1791 } 1792 1793 /* 1794 * Report current media status. 
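 *
 * Together with rl_ifmedia_upd() above, this backs the standard ifmedia
 * ioctls used by ifconfig(8); for example (illustrative, stock ifconfig
 * syntax):
 *
 *	ifconfig rl0 media 100baseTX mediaopt full-duplex
 *	ifconfig rl0 media autoselect
 *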
1795 */ 1796 static void 1797 rl_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 1798 { 1799 struct rl_softc *sc = ifp->if_softc; 1800 struct mii_data *mii; 1801 1802 mii = device_get_softc(sc->rl_miibus); 1803 1804 RL_LOCK(sc); 1805 mii_pollstat(mii); 1806 ifmr->ifm_active = mii->mii_media_active; 1807 ifmr->ifm_status = mii->mii_media_status; 1808 RL_UNLOCK(sc); 1809 } 1810 1811 static int 1812 rl_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 1813 { 1814 struct ifreq *ifr = (struct ifreq *)data; 1815 struct mii_data *mii; 1816 struct rl_softc *sc = ifp->if_softc; 1817 int error = 0, mask; 1818 1819 switch (command) { 1820 case SIOCSIFFLAGS: 1821 RL_LOCK(sc); 1822 if (ifp->if_flags & IFF_UP) { 1823 if (ifp->if_drv_flags & IFF_DRV_RUNNING && 1824 ((ifp->if_flags ^ sc->rl_if_flags) & 1825 (IFF_PROMISC | IFF_ALLMULTI))) 1826 rl_rxfilter(sc); 1827 else 1828 rl_init_locked(sc); 1829 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1830 rl_stop(sc); 1831 sc->rl_if_flags = ifp->if_flags; 1832 RL_UNLOCK(sc); 1833 break; 1834 case SIOCADDMULTI: 1835 case SIOCDELMULTI: 1836 RL_LOCK(sc); 1837 rl_rxfilter(sc); 1838 RL_UNLOCK(sc); 1839 break; 1840 case SIOCGIFMEDIA: 1841 case SIOCSIFMEDIA: 1842 mii = device_get_softc(sc->rl_miibus); 1843 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 1844 break; 1845 case SIOCSIFCAP: 1846 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1847 #ifdef DEVICE_POLLING 1848 if (ifr->ifr_reqcap & IFCAP_POLLING && 1849 !(ifp->if_capenable & IFCAP_POLLING)) { 1850 error = ether_poll_register(rl_poll, ifp); 1851 if (error) 1852 return(error); 1853 RL_LOCK(sc); 1854 /* Disable interrupts */ 1855 CSR_WRITE_2(sc, RL_IMR, 0x0000); 1856 ifp->if_capenable |= IFCAP_POLLING; 1857 RL_UNLOCK(sc); 1858 return (error); 1859 1860 } 1861 if (!(ifr->ifr_reqcap & IFCAP_POLLING) && 1862 ifp->if_capenable & IFCAP_POLLING) { 1863 error = ether_poll_deregister(ifp); 1864 /* Enable interrupts. */ 1865 RL_LOCK(sc); 1866 CSR_WRITE_2(sc, RL_IMR, RL_INTRS); 1867 ifp->if_capenable &= ~IFCAP_POLLING; 1868 RL_UNLOCK(sc); 1869 return (error); 1870 } 1871 #endif /* DEVICE_POLLING */ 1872 if ((mask & IFCAP_WOL) != 0 && 1873 (ifp->if_capabilities & IFCAP_WOL) != 0) { 1874 if ((mask & IFCAP_WOL_UCAST) != 0) 1875 ifp->if_capenable ^= IFCAP_WOL_UCAST; 1876 if ((mask & IFCAP_WOL_MCAST) != 0) 1877 ifp->if_capenable ^= IFCAP_WOL_MCAST; 1878 if ((mask & IFCAP_WOL_MAGIC) != 0) 1879 ifp->if_capenable ^= IFCAP_WOL_MAGIC; 1880 } 1881 break; 1882 default: 1883 error = ether_ioctl(ifp, command, data); 1884 break; 1885 } 1886 1887 return (error); 1888 } 1889 1890 static void 1891 rl_watchdog(struct rl_softc *sc) 1892 { 1893 1894 RL_LOCK_ASSERT(sc); 1895 1896 if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer >0) 1897 return; 1898 1899 device_printf(sc->rl_dev, "watchdog timeout\n"); 1900 if_inc_counter(sc->rl_ifp, IFCOUNTER_OERRORS, 1); 1901 1902 rl_txeof(sc); 1903 rl_rxeof(sc); 1904 sc->rl_ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1905 rl_init_locked(sc); 1906 } 1907 1908 /* 1909 * Stop the adapter and free any mbufs allocated to the 1910 * RX and TX lists. 
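 *
 * (The SIOCSIFCAP handling in rl_ioctl() above is what ifconfig(8)'s
 * capability flags end up toggling, subject to the capabilities the
 * chip revision advertised at attach time; illustrative examples:
 *
 *	ifconfig rl0 polling		# switch to polled operation
 *	ifconfig rl0 wol_magic		# wake on magic packets
 * )
 *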
1911 */ 1912 static void 1913 rl_stop(struct rl_softc *sc) 1914 { 1915 int i; 1916 struct ifnet *ifp = sc->rl_ifp; 1917 1918 RL_LOCK_ASSERT(sc); 1919 1920 sc->rl_watchdog_timer = 0; 1921 callout_stop(&sc->rl_stat_callout); 1922 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 1923 sc->rl_flags &= ~RL_FLAG_LINK; 1924 1925 CSR_WRITE_1(sc, RL_COMMAND, 0x00); 1926 CSR_WRITE_2(sc, RL_IMR, 0x0000); 1927 for (i = 0; i < RL_TIMEOUT; i++) { 1928 DELAY(10); 1929 if ((CSR_READ_1(sc, RL_COMMAND) & 1930 (RL_CMD_RX_ENB | RL_CMD_TX_ENB)) == 0) 1931 break; 1932 } 1933 if (i == RL_TIMEOUT) 1934 device_printf(sc->rl_dev, "Unable to stop Tx/Rx MAC\n"); 1935 1936 /* 1937 * Free the TX list buffers. 1938 */ 1939 for (i = 0; i < RL_TX_LIST_CNT; i++) { 1940 if (sc->rl_cdata.rl_tx_chain[i] != NULL) { 1941 bus_dmamap_sync(sc->rl_cdata.rl_tx_tag, 1942 sc->rl_cdata.rl_tx_dmamap[i], 1943 BUS_DMASYNC_POSTWRITE); 1944 bus_dmamap_unload(sc->rl_cdata.rl_tx_tag, 1945 sc->rl_cdata.rl_tx_dmamap[i]); 1946 m_freem(sc->rl_cdata.rl_tx_chain[i]); 1947 sc->rl_cdata.rl_tx_chain[i] = NULL; 1948 CSR_WRITE_4(sc, RL_TXADDR0 + (i * sizeof(uint32_t)), 1949 0x0000000); 1950 } 1951 } 1952 } 1953 1954 /* 1955 * Device suspend routine. Stop the interface and save some PCI 1956 * settings in case the BIOS doesn't restore them properly on 1957 * resume. 1958 */ 1959 static int 1960 rl_suspend(device_t dev) 1961 { 1962 struct rl_softc *sc; 1963 1964 sc = device_get_softc(dev); 1965 1966 RL_LOCK(sc); 1967 rl_stop(sc); 1968 rl_setwol(sc); 1969 sc->suspended = 1; 1970 RL_UNLOCK(sc); 1971 1972 return (0); 1973 } 1974 1975 /* 1976 * Device resume routine. Restore some PCI settings in case the BIOS 1977 * doesn't, re-enable busmastering, and restart the interface if 1978 * appropriate. 1979 */ 1980 static int 1981 rl_resume(device_t dev) 1982 { 1983 struct rl_softc *sc; 1984 struct ifnet *ifp; 1985 int pmc; 1986 uint16_t pmstat; 1987 1988 sc = device_get_softc(dev); 1989 ifp = sc->rl_ifp; 1990 1991 RL_LOCK(sc); 1992 1993 if ((ifp->if_capabilities & IFCAP_WOL) != 0 && 1994 pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) == 0) { 1995 /* Disable PME and clear PME status. */ 1996 pmstat = pci_read_config(sc->rl_dev, 1997 pmc + PCIR_POWER_STATUS, 2); 1998 if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) { 1999 pmstat &= ~PCIM_PSTAT_PMEENABLE; 2000 pci_write_config(sc->rl_dev, 2001 pmc + PCIR_POWER_STATUS, pmstat, 2); 2002 } 2003 /* 2004 * Clear WOL matching such that normal Rx filtering 2005 * wouldn't interfere with WOL patterns. 2006 */ 2007 rl_clrwol(sc); 2008 } 2009 2010 /* reinitialize interface if necessary */ 2011 if (ifp->if_flags & IFF_UP) 2012 rl_init_locked(sc); 2013 2014 sc->suspended = 0; 2015 2016 RL_UNLOCK(sc); 2017 2018 return (0); 2019 } 2020 2021 /* 2022 * Stop all chip I/O so that the kernel's probe routines don't 2023 * get confused by errant DMAs when rebooting. 2024 */ 2025 static int 2026 rl_shutdown(device_t dev) 2027 { 2028 struct rl_softc *sc; 2029 2030 sc = device_get_softc(dev); 2031 2032 RL_LOCK(sc); 2033 rl_stop(sc); 2034 /* 2035 * Mark interface as down since otherwise we will panic if 2036 * interrupt comes in later on, which can happen in some 2037 * cases. 
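 *
 * (Both this routine and rl_suspend() park the chip via rl_setwol()
 * below, so a magic packet can still wake the machine if WOL was
 * enabled; rl_resume() clears any pending PME status and, like
 * rl_attach(), disarms the WOL matching again with rl_clrwol().)
 *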
2038 */ 2039 sc->rl_ifp->if_flags &= ~IFF_UP; 2040 rl_setwol(sc); 2041 RL_UNLOCK(sc); 2042 2043 return (0); 2044 } 2045 2046 static void 2047 rl_setwol(struct rl_softc *sc) 2048 { 2049 struct ifnet *ifp; 2050 int pmc; 2051 uint16_t pmstat; 2052 uint8_t v; 2053 2054 RL_LOCK_ASSERT(sc); 2055 2056 ifp = sc->rl_ifp; 2057 if ((ifp->if_capabilities & IFCAP_WOL) == 0) 2058 return; 2059 if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0) 2060 return; 2061 2062 /* Enable config register write. */ 2063 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); 2064 2065 /* Enable PME. */ 2066 v = CSR_READ_1(sc, sc->rl_cfg1); 2067 v &= ~RL_CFG1_PME; 2068 if ((ifp->if_capenable & IFCAP_WOL) != 0) 2069 v |= RL_CFG1_PME; 2070 CSR_WRITE_1(sc, sc->rl_cfg1, v); 2071 2072 v = CSR_READ_1(sc, sc->rl_cfg3); 2073 v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC); 2074 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) 2075 v |= RL_CFG3_WOL_MAGIC; 2076 CSR_WRITE_1(sc, sc->rl_cfg3, v); 2077 2078 v = CSR_READ_1(sc, sc->rl_cfg5); 2079 v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST); 2080 v &= ~RL_CFG5_WOL_LANWAKE; 2081 if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0) 2082 v |= RL_CFG5_WOL_UCAST; 2083 if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0) 2084 v |= RL_CFG5_WOL_MCAST | RL_CFG5_WOL_BCAST; 2085 if ((ifp->if_capenable & IFCAP_WOL) != 0) 2086 v |= RL_CFG5_WOL_LANWAKE; 2087 CSR_WRITE_1(sc, sc->rl_cfg5, v); 2088 2089 /* Config register write done. */ 2090 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 2091 2092 /* Request PME if WOL is requested. */ 2093 pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2); 2094 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 2095 if ((ifp->if_capenable & IFCAP_WOL) != 0) 2096 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 2097 pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); 2098 } 2099 2100 static void 2101 rl_clrwol(struct rl_softc *sc) 2102 { 2103 struct ifnet *ifp; 2104 uint8_t v; 2105 2106 ifp = sc->rl_ifp; 2107 if ((ifp->if_capabilities & IFCAP_WOL) == 0) 2108 return; 2109 2110 /* Enable config register write. */ 2111 CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); 2112 2113 v = CSR_READ_1(sc, sc->rl_cfg3); 2114 v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC); 2115 CSR_WRITE_1(sc, sc->rl_cfg3, v); 2116 2117 /* Config register write done. */ 2118 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 2119 2120 v = CSR_READ_1(sc, sc->rl_cfg5); 2121 v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST); 2122 v &= ~RL_CFG5_WOL_LANWAKE; 2123 CSR_WRITE_1(sc, sc->rl_cfg5, v); 2124 } 2125