/*-
 * Copyright (c) 1997, 1998
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * RealTek 8129/8139 PCI NIC driver
 *
 * Supports several extremely cheap PCI 10/100 adapters based on
 * the RealTek chipset. Datasheets can be obtained from
 * www.realtek.com.tw.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */
/*
 * The RealTek 8139 PCI NIC redefines the meaning of 'low end.' This is
 * probably the worst PCI ethernet controller ever made, with the possible
 * exception of the FEAST chip made by SMC. The 8139 supports bus-master
 * DMA, but it has a terrible interface that nullifies any performance
 * gains that bus-master DMA usually offers.
 *
 * For transmission, the chip offers a series of four TX descriptor
 * registers. Each transmit frame must be in a contiguous buffer, aligned
 * on a longword (32-bit) boundary. This means we almost always have to
 * do mbuf copies in order to transmit a frame, except in the unlikely
 * case where a) the packet fits into a single mbuf, and b) the packet
 * is 32-bit aligned within the mbuf's data area. The presence of only
 * four descriptor registers means that we can never have more than four
 * packets queued for transmission at any one time.
 *
 * Reception is not much better. The driver has to allocate a single large
 * buffer area (up to 64K in size) into which the chip will DMA received
 * frames. Because we don't know where within this region received packets
 * will begin or end, we have no choice but to copy data from the buffer
 * area into mbufs in order to pass the packets up to the higher protocol
 * levels.
 *
 * It's impossible given this rotten design to really achieve decent
 * performance at 100Mbps, unless you happen to have a 400Mhz PII or
 * some equally overmuscled CPU to drive it.
 *
 * On the bright side, the 8139 does have a built-in PHY, although
 * rather than using an MDIO serial interface like most other NICs, the
 * PHY registers are directly accessible through the 8139's register
 * space. The 8139 supports autonegotiation, as well as a 64-bit multicast
 * filter.
 *
 * The 8129 chip is an older version of the 8139 that uses an external PHY
 * chip. The 8129 has a serial MDIO interface for accessing the MII where
 * the 8139 lets you directly access the on-board PHY registers. We need
 * to select which interface to use depending on the chip type.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

MODULE_DEPEND(rl, pci, 1, 1, 1);
MODULE_DEPEND(rl, ether, 1, 1, 1);
MODULE_DEPEND(rl, miibus, 1, 1, 1);

/* "device miibus" required. See GENERIC if you get errors here. */
#include "miibus_if.h"

#include <dev/rl/if_rlreg.h>

/*
 * Various supported device vendors/types and their names.
 */
static const struct rl_type rl_devs[] = {
	{ RT_VENDORID, RT_DEVICEID_8129, RL_8129,
	    "RealTek 8129 10/100BaseTX" },
	{ RT_VENDORID, RT_DEVICEID_8139, RL_8139,
	    "RealTek 8139 10/100BaseTX" },
	{ RT_VENDORID, RT_DEVICEID_8139D, RL_8139,
	    "RealTek 8139 10/100BaseTX" },
	{ RT_VENDORID, RT_DEVICEID_8138, RL_8139,
	    "RealTek 8139 10/100BaseTX CardBus" },
	{ RT_VENDORID, RT_DEVICEID_8100, RL_8139,
	    "RealTek 8100 10/100BaseTX" },
	{ ACCTON_VENDORID, ACCTON_DEVICEID_5030, RL_8139,
	    "Accton MPX 5030/5038 10/100BaseTX" },
	{ DELTA_VENDORID, DELTA_DEVICEID_8139, RL_8139,
	    "Delta Electronics 8139 10/100BaseTX" },
	{ ADDTRON_VENDORID, ADDTRON_DEVICEID_8139, RL_8139,
	    "Addtron Technology 8139 10/100BaseTX" },
	{ DLINK_VENDORID, DLINK_DEVICEID_520TX_REVC1, RL_8139,
	    "D-Link DFE-520TX (rev. C1) 10/100BaseTX" },
	{ DLINK_VENDORID, DLINK_DEVICEID_530TXPLUS, RL_8139,
	    "D-Link DFE-530TX+ 10/100BaseTX" },
	{ DLINK_VENDORID, DLINK_DEVICEID_690TXD, RL_8139,
	    "D-Link DFE-690TXD 10/100BaseTX" },
	{ NORTEL_VENDORID, ACCTON_DEVICEID_5030, RL_8139,
	    "Nortel Networks 10/100BaseTX" },
	{ COREGA_VENDORID, COREGA_DEVICEID_FETHERCBTXD, RL_8139,
	    "Corega FEther CB-TXD" },
	{ COREGA_VENDORID, COREGA_DEVICEID_FETHERIICBTXD, RL_8139,
	    "Corega FEtherII CB-TXD" },
	{ PEPPERCON_VENDORID, PEPPERCON_DEVICEID_ROLF, RL_8139,
	    "Peppercon AG ROL-F" },
	{ PLANEX_VENDORID, PLANEX_DEVICEID_FNW3603TX, RL_8139,
	    "Planex FNW-3603-TX" },
	{ PLANEX_VENDORID, PLANEX_DEVICEID_FNW3800TX, RL_8139,
	    "Planex FNW-3800-TX" },
	{ CP_VENDORID, RT_DEVICEID_8139, RL_8139,
	    "Compaq HNE-300" },
	{ LEVEL1_VENDORID, LEVEL1_DEVICEID_FPC0106TX, RL_8139,
	    "LevelOne FPC-0106TX" },
	{ EDIMAX_VENDORID, EDIMAX_DEVICEID_EP4103DL, RL_8139,
	    "Edimax EP-4103DL CardBus" }
};

static int rl_attach(device_t);
static int rl_detach(device_t);
static void rl_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int rl_dma_alloc(struct rl_softc *);
static void rl_dma_free(struct rl_softc *);
static void rl_eeprom_putbyte(struct rl_softc *, int);
static void rl_eeprom_getword(struct rl_softc *, int, uint16_t *);
static int rl_encap(struct rl_softc *, struct mbuf **);
static int rl_list_tx_init(struct rl_softc *);
static int rl_list_rx_init(struct rl_softc *);
static int rl_ifmedia_upd(struct ifnet *);
static void rl_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int rl_ioctl(struct ifnet *, u_long, caddr_t);
static void rl_intr(void *);
static void rl_init(void *);
static void rl_init_locked(struct rl_softc *sc);
static int rl_miibus_readreg(device_t, int, int);
static void rl_miibus_statchg(device_t);
static int rl_miibus_writereg(device_t, int, int, int);
#ifdef DEVICE_POLLING
static int rl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count);
static int rl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count);
#endif
static int rl_probe(device_t);
static void rl_read_eeprom(struct rl_softc *, uint8_t *, int, int, int);
static void rl_reset(struct rl_softc *);
static int rl_resume(device_t);
static int rl_rxeof(struct rl_softc *);
static void rl_rxfilter(struct rl_softc *);
static int rl_shutdown(device_t);
static void rl_start(struct ifnet *);
static void rl_start_locked(struct ifnet *);
static void rl_stop(struct rl_softc *);
static int rl_suspend(device_t);
static void rl_tick(void *);
static void rl_txeof(struct rl_softc *);
static void rl_watchdog(struct rl_softc *);
static void rl_setwol(struct rl_softc *);
static void rl_clrwol(struct rl_softc *);

/*
 * MII bit-bang glue
 */
static uint32_t rl_mii_bitbang_read(device_t);
static void rl_mii_bitbang_write(device_t, uint32_t);

static const struct mii_bitbang_ops rl_mii_bitbang_ops = {
	rl_mii_bitbang_read,
	rl_mii_bitbang_write,
	{
		RL_MII_DATAOUT,	/* MII_BIT_MDO */
		RL_MII_DATAIN,	/* MII_BIT_MDI */
		RL_MII_CLK,	/* MII_BIT_MDC */
		RL_MII_DIR,	/* MII_BIT_DIR_HOST_PHY */
		0,		/* MII_BIT_DIR_PHY_HOST */
	}
};

static device_method_t rl_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		rl_probe),
	DEVMETHOD(device_attach,	rl_attach),
	DEVMETHOD(device_detach,	rl_detach),
	DEVMETHOD(device_suspend,	rl_suspend),
	DEVMETHOD(device_resume,	rl_resume),
	DEVMETHOD(device_shutdown,	rl_shutdown),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	rl_miibus_readreg),
	DEVMETHOD(miibus_writereg,	rl_miibus_writereg),
	DEVMETHOD(miibus_statchg,	rl_miibus_statchg),

	DEVMETHOD_END
};

static driver_t rl_driver = {
	"rl",
	rl_methods,
	sizeof(struct rl_softc)
};

DRIVER_MODULE(rl, pci, rl_driver, 0, 0);
MODULE_PNP_INFO("U16:vendor;U16:device", pci, rl, rl_devs,
    nitems(rl_devs) - 1);
DRIVER_MODULE(rl, cardbus, rl_driver, 0, 0);
DRIVER_MODULE(miibus, rl, miibus_driver, 0, 0);

#define EE_SET(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
	    CSR_READ_1(sc, RL_EECMD) | x)

#define EE_CLR(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
	    CSR_READ_1(sc, RL_EECMD) & ~x)

/*
 * Send a read command and address to the EEPROM, check for ACK.
 */
static void
rl_eeprom_putbyte(struct rl_softc *sc, int addr)
{
	int d, i;

	d = addr | sc->rl_eecmd_read;

	/*
	 * Feed in each bit and strobe the clock.
	 */
	for (i = 0x400; i; i >>= 1) {
		if (d & i) {
			EE_SET(RL_EE_DATAIN);
		} else {
			EE_CLR(RL_EE_DATAIN);
		}
		DELAY(100);
		EE_SET(RL_EE_CLK);
		DELAY(150);
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static void
rl_eeprom_getword(struct rl_softc *sc, int addr, uint16_t *dest)
{
	int i;
	uint16_t word = 0;

	/* Enter EEPROM access mode. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);

	/*
	 * Send address of word we want to read.
	 */
	rl_eeprom_putbyte(sc, addr);

	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);

	/*
	 * Start reading bits from EEPROM.
	 */
	for (i = 0x8000; i; i >>= 1) {
		EE_SET(RL_EE_CLK);
		DELAY(100);
		if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
			word |= i;
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}

	/* Turn off EEPROM access mode. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	*dest = word;
}

/*
 * Read a sequence of words from the EEPROM.
 */
static void
rl_read_eeprom(struct rl_softc *sc, uint8_t *dest, int off, int cnt, int swap)
{
	int i;
	uint16_t word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		rl_eeprom_getword(sc, off + i, &word);
		ptr = (uint16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}
}

/*
 * Read the MII serial port for the MII bit-bang module.
 */
static uint32_t
rl_mii_bitbang_read(device_t dev)
{
	struct rl_softc *sc;
	uint32_t val;

	sc = device_get_softc(dev);

	val = CSR_READ_1(sc, RL_MII);
	CSR_BARRIER(sc, RL_MII, 1,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);

	return (val);
}

/*
 * Write the MII serial port for the MII bit-bang module.
 */
static void
rl_mii_bitbang_write(device_t dev, uint32_t val)
{
	struct rl_softc *sc;

	sc = device_get_softc(dev);

	CSR_WRITE_1(sc, RL_MII, val);
	CSR_BARRIER(sc, RL_MII, 1,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}

static int
rl_miibus_readreg(device_t dev, int phy, int reg)
{
	struct rl_softc *sc;
	uint16_t rl8139_reg;

	sc = device_get_softc(dev);

	if (sc->rl_type == RL_8139) {
		switch (reg) {
		case MII_BMCR:
			rl8139_reg = RL_BMCR;
			break;
		case MII_BMSR:
			rl8139_reg = RL_BMSR;
			break;
		case MII_ANAR:
			rl8139_reg = RL_ANAR;
			break;
		case MII_ANER:
			rl8139_reg = RL_ANER;
			break;
		case MII_ANLPAR:
			rl8139_reg = RL_LPAR;
			break;
		case MII_PHYIDR1:
		case MII_PHYIDR2:
			return (0);
		/*
		 * Allow the rlphy driver to read the media status
		 * register. If we have a link partner which does not
		 * support NWAY, this is the register which will tell
		 * us the results of parallel detection.
		 */
		case RL_MEDIASTAT:
			return (CSR_READ_1(sc, RL_MEDIASTAT));
		default:
			device_printf(sc->rl_dev, "bad phy register\n");
			return (0);
		}
		return (CSR_READ_2(sc, rl8139_reg));
	}

	return (mii_bitbang_readreg(dev, &rl_mii_bitbang_ops, phy, reg));
}

static int
rl_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct rl_softc *sc;
	uint16_t rl8139_reg;

	sc = device_get_softc(dev);

	if (sc->rl_type == RL_8139) {
		switch (reg) {
		case MII_BMCR:
			rl8139_reg = RL_BMCR;
			break;
		case MII_BMSR:
			rl8139_reg = RL_BMSR;
			break;
		case MII_ANAR:
			rl8139_reg = RL_ANAR;
			break;
		case MII_ANER:
			rl8139_reg = RL_ANER;
			break;
		case MII_ANLPAR:
			rl8139_reg = RL_LPAR;
			break;
		case MII_PHYIDR1:
		case MII_PHYIDR2:
			return (0);
			break;
		default:
			device_printf(sc->rl_dev, "bad phy register\n");
			return (0);
		}
		CSR_WRITE_2(sc, rl8139_reg, data);
		return (0);
	}

	mii_bitbang_writereg(dev, &rl_mii_bitbang_ops, phy, reg, data);

	return (0);
}

static void
rl_miibus_statchg(device_t dev)
{
	struct rl_softc *sc;
	struct ifnet *ifp;
	struct mii_data *mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->rl_miibus);
	ifp = sc->rl_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	sc->rl_flags &= ~RL_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->rl_flags |= RL_FLAG_LINK;
			break;
		default:
			break;
		}
	}
	/*
	 * RealTek controllers do not provide any interface to
	 * Tx/Rx MACs for resolved speed, duplex and flow-control
	 * parameters.
	 */
}

static u_int
rl_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t *hashes = arg;
	int h;

	h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26;
	if (h < 32)
		hashes[0] |= (1 << h);
	else
		hashes[1] |= (1 << (h - 32));

	return (1);
}

/*
 * Program the 64-bit multicast hash filter.
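 *
 * As implemented below: rl_hash_maddr() runs each link-level multicast
 * address through ether_crc32_be() and uses the top six bits of the CRC
 * to select one of 64 filter bits; bits 0-31 end up in RL_MAR0 and bits
 * 32-63 in RL_MAR4. Promiscuous and allmulti modes simply set all bits.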
 */
static void
rl_rxfilter(struct rl_softc *sc)
{
	struct ifnet *ifp = sc->rl_ifp;
	uint32_t hashes[2] = { 0, 0 };
	uint32_t rxfilt;

	RL_LOCK_ASSERT(sc);

	rxfilt = CSR_READ_4(sc, RL_RXCFG);
	rxfilt &= ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_BROAD |
	    RL_RXCFG_RX_MULTI);
	/* Always accept frames destined for this host. */
	rxfilt |= RL_RXCFG_RX_INDIV;
	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST)
		rxfilt |= RL_RXCFG_RX_BROAD;
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		rxfilt |= RL_RXCFG_RX_MULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= RL_RXCFG_RX_ALLPHYS;
		hashes[0] = 0xFFFFFFFF;
		hashes[1] = 0xFFFFFFFF;
	} else {
		/* Now program new ones. */
		if_foreach_llmaddr(ifp, rl_hash_maddr, hashes);
		if (hashes[0] != 0 || hashes[1] != 0)
			rxfilt |= RL_RXCFG_RX_MULTI;
	}

	CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
	CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
	CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
}

static void
rl_reset(struct rl_softc *sc)
{
	int i;

	RL_LOCK_ASSERT(sc);

	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);

	for (i = 0; i < RL_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
			break;
	}
	if (i == RL_TIMEOUT)
		device_printf(sc->rl_dev, "reset never completed!\n");
}

/*
 * Probe for a RealTek 8129/8139 chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
rl_probe(device_t dev)
{
	const struct rl_type *t;
	uint16_t devid, revid, vendor;
	int i;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	revid = pci_get_revid(dev);

	if (vendor == RT_VENDORID && devid == RT_DEVICEID_8139) {
		if (revid == 0x20) {
			/* 8139C+, let re(4) take care of this device. */
			return (ENXIO);
		}
	}
	t = rl_devs;
	for (i = 0; i < nitems(rl_devs); i++, t++) {
		if (vendor == t->rl_vid && devid == t->rl_did) {
			device_set_desc(dev, t->rl_name);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

struct rl_dmamap_arg {
	bus_addr_t	rl_busaddr;
};

static void
rl_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct rl_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	ctx = (struct rl_dmamap_arg *)arg;
	ctx->rl_busaddr = segs[0].ds_addr;
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
rl_attach(device_t dev)
{
	uint8_t eaddr[ETHER_ADDR_LEN];
	uint16_t as[3];
	struct ifnet *ifp;
	struct rl_softc *sc;
	const struct rl_type *t;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;
	int error = 0, hwrev, i, phy, pmc, rid;
	int prefer_iomap, unit;
	uint16_t rl_did = 0;
	char tn[32];

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);
	sc->rl_dev = dev;

	sc->rl_twister_enable = 0;
	snprintf(tn, sizeof(tn), "dev.rl.%d.twister_enable", unit);
	TUNABLE_INT_FETCH(tn, &sc->rl_twister_enable);
	ctx = device_get_sysctl_ctx(sc->rl_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->rl_dev));
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "twister_enable", CTLFLAG_RD,
	    &sc->rl_twister_enable, 0, "");

	mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->rl_stat_callout, &sc->rl_mtx, 0);

	pci_enable_busmaster(dev);

	/*
	 * Map control/status registers.
	 * Default to using PIO access for this driver. On SMP systems,
	 * there appear to be problems with memory mapped mode: it looks
	 * like doing too many memory mapped access back to back in rapid
	 * succession can hang the bus. I'm inclined to blame this on
	 * crummy design/construction on the part of RealTek. Memory
	 * mapped mode does appear to work on uniprocessor systems though.
	 */
	prefer_iomap = 1;
	snprintf(tn, sizeof(tn), "dev.rl.%d.prefer_iomap", unit);
	TUNABLE_INT_FETCH(tn, &prefer_iomap);
	if (prefer_iomap) {
		sc->rl_res_id = PCIR_BAR(0);
		sc->rl_res_type = SYS_RES_IOPORT;
		sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
		    &sc->rl_res_id, RF_ACTIVE);
	}
	if (prefer_iomap == 0 || sc->rl_res == NULL) {
		sc->rl_res_id = PCIR_BAR(1);
		sc->rl_res_type = SYS_RES_MEMORY;
		sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
		    &sc->rl_res_id, RF_ACTIVE);
	}
	if (sc->rl_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

#ifdef notdef
	/*
	 * Detect the Realtek 8139B. For some reason, this chip is very
	 * unstable when left to autoselect the media.
	 * The best workaround is to set the device to the required
	 * media type or to set it to the 10 Meg speed.
	 */
	if ((rman_get_end(sc->rl_res) - rman_get_start(sc->rl_res)) == 0xFF)
		device_printf(dev,
		    "Realtek 8139B detected. Warning, this may be unstable in autoselect mode\n");
#endif

	sc->rl_btag = rman_get_bustag(sc->rl_res);
	sc->rl_bhandle = rman_get_bushandle(sc->rl_res);

	/* Allocate interrupt */
	rid = 0;
	sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->rl_irq[0] == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	sc->rl_cfg0 = RL_8139_CFG0;
	sc->rl_cfg1 = RL_8139_CFG1;
	sc->rl_cfg2 = 0;
	sc->rl_cfg3 = RL_8139_CFG3;
	sc->rl_cfg4 = RL_8139_CFG4;
	sc->rl_cfg5 = RL_8139_CFG5;

	/*
	 * Reset the adapter. Only take the lock here as it's needed in
	 * order to call rl_reset().
	 */
	RL_LOCK(sc);
	rl_reset(sc);
	RL_UNLOCK(sc);

	sc->rl_eecmd_read = RL_EECMD_READ_6BIT;
	rl_read_eeprom(sc, (uint8_t *)&rl_did, 0, 1, 0);
	if (rl_did != 0x8129)
		sc->rl_eecmd_read = RL_EECMD_READ_8BIT;

	/*
	 * Get station address from the EEPROM.
	 */
	rl_read_eeprom(sc, (uint8_t *)as, RL_EE_EADDR, 3, 0);
	for (i = 0; i < 3; i++) {
		eaddr[(i * 2) + 0] = as[i] & 0xff;
		eaddr[(i * 2) + 1] = as[i] >> 8;
	}

	/*
	 * Now read the exact device type from the EEPROM to find
	 * out if it's an 8129 or 8139.
	 */
	rl_read_eeprom(sc, (uint8_t *)&rl_did, RL_EE_PCI_DID, 1, 0);

	t = rl_devs;
	sc->rl_type = 0;
	while (t->rl_name != NULL) {
		if (rl_did == t->rl_did) {
			sc->rl_type = t->rl_basetype;
			break;
		}
		t++;
	}

	if (sc->rl_type == 0) {
		device_printf(dev, "unknown device ID: %x assuming 8139\n",
		    rl_did);
		sc->rl_type = RL_8139;
		/*
		 * Read RL_IDR register to get ethernet address as accessing
		 * EEPROM may not extract correct address.
		 */
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i);
	}

	if ((error = rl_dma_alloc(sc)) != 0)
		goto fail;

	ifp = sc->rl_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}

#define RL_PHYAD_INTERNAL	0

	/* Do MII setup */
	phy = MII_PHY_ANY;
	if (sc->rl_type == RL_8139)
		phy = RL_PHYAD_INTERNAL;
	error = mii_attach(dev, &sc->rl_miibus, ifp, rl_ifmedia_upd,
	    rl_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = rl_ioctl;
	ifp->if_start = rl_start;
	ifp->if_init = rl_init;
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	/* Check WOL for RTL8139B or newer controllers. */
	if (sc->rl_type == RL_8139 &&
	    pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) == 0) {
		hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV;
		switch (hwrev) {
		case RL_HWREV_8139B:
		case RL_HWREV_8130:
		case RL_HWREV_8139C:
		case RL_HWREV_8139D:
		case RL_HWREV_8101:
		case RL_HWREV_8100:
			ifp->if_capabilities |= IFCAP_WOL;
			/* Disable WOL. */
			rl_clrwol(sc);
			break;
		default:
			break;
		}
	}
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_capenable &= ~(IFCAP_WOL_UCAST | IFCAP_WOL_MCAST);
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->rl_irq[0], INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, rl_intr, sc, &sc->rl_intrhand[0]);
	if (error) {
		device_printf(sc->rl_dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
	}

fail:
	if (error)
		rl_detach(dev);

	return (error);
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
rl_detach(device_t dev)
{
	struct rl_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->rl_ifp;

	KASSERT(mtx_initialized(&sc->rl_mtx), ("rl mutex not initialized"));

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif
	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		RL_LOCK(sc);
		rl_stop(sc);
		RL_UNLOCK(sc);
		callout_drain(&sc->rl_stat_callout);
		ether_ifdetach(ifp);
	}
#if 0
	sc->suspended = 1;
#endif
	if (sc->rl_miibus)
		device_delete_child(dev, sc->rl_miibus);
	bus_generic_detach(dev);

	if (sc->rl_intrhand[0])
		bus_teardown_intr(dev, sc->rl_irq[0], sc->rl_intrhand[0]);
	if (sc->rl_irq[0])
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->rl_irq[0]);
	if (sc->rl_res)
		bus_release_resource(dev, sc->rl_res_type, sc->rl_res_id,
		    sc->rl_res);

	if (ifp)
		if_free(ifp);

	rl_dma_free(sc);

	mtx_destroy(&sc->rl_mtx);

	return (0);
}

static int
rl_dma_alloc(struct rl_softc *sc)
{
	struct rl_dmamap_arg ctx;
	int error, i;

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->rl_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT, 0,	/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rl_parent_tag);
	if (error) {
		device_printf(sc->rl_dev,
		    "failed to create parent DMA tag.\n");
		goto fail;
	}
	/* Create DMA tag for Rx memory block. */
	error = bus_dma_tag_create(sc->rl_parent_tag,	/* parent */
	    RL_RX_8139_BUF_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ, 1,	/* maxsize, nsegments */
	    RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rl_cdata.rl_rx_tag);
	if (error) {
		device_printf(sc->rl_dev,
		    "failed to create Rx memory block DMA tag.\n");
		goto fail;
	}
	/* Create DMA tag for Tx buffer. */
	error = bus_dma_tag_create(sc->rl_parent_tag,	/* parent */
	    RL_TX_8139_BUF_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, 1,		/* maxsize, nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rl_cdata.rl_tx_tag);
	if (error) {
		device_printf(sc->rl_dev, "failed to create Tx DMA tag.\n");
		goto fail;
	}

	/*
	 * Allocate DMA'able memory and load DMA map for Rx memory block.
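	 * The block is RL_RXBUFLEN bytes plus RL_RX_8139_BUF_GUARD_SZ of
	 * slack, loaded as a single contiguous segment; rl_init_locked()
	 * later programs its bus address (offset by RL_RX_8139_BUF_RESERVE)
	 * into RL_RXADDR. The guard space presumably gives the chip room to
	 * run past the nominal end of the buffer when a frame wraps.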
	 */
	error = bus_dmamem_alloc(sc->rl_cdata.rl_rx_tag,
	    (void **)&sc->rl_cdata.rl_rx_buf, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->rl_cdata.rl_rx_dmamap);
	if (error != 0) {
		device_printf(sc->rl_dev,
		    "failed to allocate Rx DMA memory block.\n");
		goto fail;
	}
	ctx.rl_busaddr = 0;
	error = bus_dmamap_load(sc->rl_cdata.rl_rx_tag,
	    sc->rl_cdata.rl_rx_dmamap, sc->rl_cdata.rl_rx_buf,
	    RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ, rl_dmamap_cb, &ctx,
	    BUS_DMA_NOWAIT);
	if (error != 0 || ctx.rl_busaddr == 0) {
		device_printf(sc->rl_dev,
		    "could not load Rx DMA memory block.\n");
		goto fail;
	}
	sc->rl_cdata.rl_rx_buf_paddr = ctx.rl_busaddr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < RL_TX_LIST_CNT; i++) {
		sc->rl_cdata.rl_tx_chain[i] = NULL;
		sc->rl_cdata.rl_tx_dmamap[i] = NULL;
		error = bus_dmamap_create(sc->rl_cdata.rl_tx_tag, 0,
		    &sc->rl_cdata.rl_tx_dmamap[i]);
		if (error != 0) {
			device_printf(sc->rl_dev,
			    "could not create Tx dmamap.\n");
			goto fail;
		}
	}

	/* Leave a few bytes before the start of the RX ring buffer. */
	sc->rl_cdata.rl_rx_buf_ptr = sc->rl_cdata.rl_rx_buf;
	sc->rl_cdata.rl_rx_buf += RL_RX_8139_BUF_RESERVE;

fail:
	return (error);
}

static void
rl_dma_free(struct rl_softc *sc)
{
	int i;

	/* Rx memory block. */
	if (sc->rl_cdata.rl_rx_tag != NULL) {
		if (sc->rl_cdata.rl_rx_buf_paddr != 0)
			bus_dmamap_unload(sc->rl_cdata.rl_rx_tag,
			    sc->rl_cdata.rl_rx_dmamap);
		if (sc->rl_cdata.rl_rx_buf_ptr != NULL)
			bus_dmamem_free(sc->rl_cdata.rl_rx_tag,
			    sc->rl_cdata.rl_rx_buf_ptr,
			    sc->rl_cdata.rl_rx_dmamap);
		sc->rl_cdata.rl_rx_buf_ptr = NULL;
		sc->rl_cdata.rl_rx_buf = NULL;
		sc->rl_cdata.rl_rx_buf_paddr = 0;
		bus_dma_tag_destroy(sc->rl_cdata.rl_rx_tag);
		sc->rl_cdata.rl_rx_tag = NULL;
	}

	/* Tx buffers. */
	if (sc->rl_cdata.rl_tx_tag != NULL) {
		for (i = 0; i < RL_TX_LIST_CNT; i++) {
			if (sc->rl_cdata.rl_tx_dmamap[i] != NULL) {
				bus_dmamap_destroy(
				    sc->rl_cdata.rl_tx_tag,
				    sc->rl_cdata.rl_tx_dmamap[i]);
				sc->rl_cdata.rl_tx_dmamap[i] = NULL;
			}
		}
		bus_dma_tag_destroy(sc->rl_cdata.rl_tx_tag);
		sc->rl_cdata.rl_tx_tag = NULL;
	}

	if (sc->rl_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->rl_parent_tag);
		sc->rl_parent_tag = NULL;
	}
}

/*
 * Initialize the transmit descriptors.
 */
static int
rl_list_tx_init(struct rl_softc *sc)
{
	struct rl_chain_data *cd;
	int i;

	RL_LOCK_ASSERT(sc);

	cd = &sc->rl_cdata;
	for (i = 0; i < RL_TX_LIST_CNT; i++) {
		cd->rl_tx_chain[i] = NULL;
		CSR_WRITE_4(sc,
		    RL_TXADDR0 + (i * sizeof(uint32_t)), 0x0000000);
	}

	sc->rl_cdata.cur_tx = 0;
	sc->rl_cdata.last_tx = 0;

	return (0);
}

static int
rl_list_rx_init(struct rl_softc *sc)
{

	RL_LOCK_ASSERT(sc);

	bzero(sc->rl_cdata.rl_rx_buf_ptr,
	    RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ);
	bus_dmamap_sync(sc->rl_cdata.rl_rx_tag, sc->rl_cdata.rl_rx_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
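 *
 * Note, for the arithmetic below: the chip's current read address
 * register (RL_CURRXADDR) lags the actual read offset by 16 bytes,
 * which is why the code adds 16 when reading it and subtracts 16 when
 * writing the updated offset back.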
 *
 * You know there's something wrong with a PCI bus-master chip design
 * when you have to use m_devget().
 *
 * The receive operation is badly documented in the datasheet, so I'll
 * attempt to document it here. The driver provides a buffer area and
 * places its base address in the RX buffer start address register.
 * The chip then begins copying frames into the RX buffer. Each frame
 * is preceded by a 32-bit RX status word which specifies the length
 * of the frame and certain other status bits. Each frame (starting with
 * the status word) is also 32-bit aligned. The frame length is in the
 * first 16 bits of the status word; the lower 15 bits correspond with
 * the 'rx status register' mentioned in the datasheet.
 *
 * Note: to make the Alpha happy, the frame payload needs to be aligned
 * on a 32-bit boundary. To achieve this, we pass RL_ETHER_ALIGN (2 bytes)
 * as the offset argument to m_devget().
 */
static int
rl_rxeof(struct rl_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp = sc->rl_ifp;
	uint8_t *rxbufpos;
	int total_len = 0;
	int wrap = 0;
	int rx_npkts = 0;
	uint32_t rxstat;
	uint16_t cur_rx;
	uint16_t limit;
	uint16_t max_bytes, rx_bytes = 0;

	RL_LOCK_ASSERT(sc);

	bus_dmamap_sync(sc->rl_cdata.rl_rx_tag, sc->rl_cdata.rl_rx_dmamap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	cur_rx = (CSR_READ_2(sc, RL_CURRXADDR) + 16) % RL_RXBUFLEN;

	/* Do not try to read past this point. */
	limit = CSR_READ_2(sc, RL_CURRXBUF) % RL_RXBUFLEN;

	if (limit < cur_rx)
		max_bytes = (RL_RXBUFLEN - cur_rx) + limit;
	else
		max_bytes = limit - cur_rx;

	while ((CSR_READ_1(sc, RL_COMMAND) & RL_CMD_EMPTY_RXBUF) == 0) {
#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif
		rxbufpos = sc->rl_cdata.rl_rx_buf + cur_rx;
		rxstat = le32toh(*(uint32_t *)rxbufpos);

		/*
		 * Here's a totally undocumented fact for you. When the
		 * RealTek chip is in the process of copying a packet into
		 * RAM for you, the length will be 0xfff0. If you spot a
		 * packet header with this value, you need to stop. The
		 * datasheet makes absolutely no mention of this and
		 * RealTek should be shot for this.
		 */
		total_len = rxstat >> 16;
		if (total_len == RL_RXSTAT_UNFINISHED)
			break;

		if (!(rxstat & RL_RXSTAT_RXOK) ||
		    total_len < ETHER_MIN_LEN ||
		    total_len > ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			rl_init_locked(sc);
			return (rx_npkts);
		}

		/* No errors; receive the packet. */
		rx_bytes += total_len + 4;

		/*
		 * XXX The RealTek chip includes the CRC with every
		 * received frame, and there's no way to turn this
		 * behavior off (at least, I can't find anything in
		 * the manual that explains how to do it) so we have
		 * to trim off the CRC manually.
		 */
		total_len -= ETHER_CRC_LEN;

		/*
		 * Avoid trying to read more bytes than we know
		 * the chip has prepared for us.
		 */
		if (rx_bytes > max_bytes)
			break;

		rxbufpos = sc->rl_cdata.rl_rx_buf +
		    ((cur_rx + sizeof(uint32_t)) % RL_RXBUFLEN);
		if (rxbufpos == (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN))
			rxbufpos = sc->rl_cdata.rl_rx_buf;

		wrap = (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN) - rxbufpos;
		if (total_len > wrap) {
			m = m_devget(rxbufpos, total_len, RL_ETHER_ALIGN, ifp,
			    NULL);
			if (m != NULL)
				m_copyback(m, wrap, total_len - wrap,
				    sc->rl_cdata.rl_rx_buf);
			cur_rx = (total_len - wrap + ETHER_CRC_LEN);
		} else {
			m = m_devget(rxbufpos, total_len, RL_ETHER_ALIGN, ifp,
			    NULL);
			cur_rx += total_len + 4 + ETHER_CRC_LEN;
		}

		/* Round up to 32-bit boundary. */
		cur_rx = (cur_rx + 3) & ~3;
		CSR_WRITE_2(sc, RL_CURRXADDR, cur_rx - 16);

		if (m == NULL) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			continue;
		}

		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		RL_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		RL_LOCK(sc);
		rx_npkts++;
	}

	/* No need to sync Rx memory block as we didn't modify it. */
	return (rx_npkts);
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
static void
rl_txeof(struct rl_softc *sc)
{
	struct ifnet *ifp = sc->rl_ifp;
	uint32_t txstat;

	RL_LOCK_ASSERT(sc);

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been uploaded.
	 */
	do {
		if (RL_LAST_TXMBUF(sc) == NULL)
			break;
		txstat = CSR_READ_4(sc, RL_LAST_TXSTAT(sc));
		if (!(txstat & (RL_TXSTAT_TX_OK|
		    RL_TXSTAT_TX_UNDERRUN|RL_TXSTAT_TXABRT)))
			break;

		if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
		    (txstat & RL_TXSTAT_COLLCNT) >> 24);

		bus_dmamap_sync(sc->rl_cdata.rl_tx_tag, RL_LAST_DMAMAP(sc),
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->rl_cdata.rl_tx_tag, RL_LAST_DMAMAP(sc));
		m_freem(RL_LAST_TXMBUF(sc));
		RL_LAST_TXMBUF(sc) = NULL;
		/*
		 * If there was a transmit underrun, bump the TX threshold.
		 * Make sure not to overflow the 63 * 32-byte units we can
		 * address with the 6 available bits.
		 */
		if ((txstat & RL_TXSTAT_TX_UNDERRUN) &&
		    (sc->rl_txthresh < 2016))
			sc->rl_txthresh += 32;
		if (txstat & RL_TXSTAT_TX_OK)
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		else {
			int oldthresh;
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			if ((txstat & RL_TXSTAT_TXABRT) ||
			    (txstat & RL_TXSTAT_OUTOFWIN))
				CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
			oldthresh = sc->rl_txthresh;
			/* error recovery */
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			rl_init_locked(sc);
			/* restore original threshold */
			sc->rl_txthresh = oldthresh;
			return;
		}
		RL_INC(sc->rl_cdata.last_tx);
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	} while (sc->rl_cdata.last_tx != sc->rl_cdata.cur_tx);

	if (RL_LAST_TXMBUF(sc) == NULL)
		sc->rl_watchdog_timer = 0;
}

static void
rl_twister_update(struct rl_softc *sc)
{
	uint16_t linktest;
	/*
	 * Table provided by RealTek (Kinston <shangh@realtek.com.tw>) for
	 * Linux driver. Values undocumented otherwise.
	 */
	static const uint32_t param[4][4] = {
		{0xcb39de43, 0xcb39ce43, 0xfb38de03, 0xcb38de43},
		{0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
		{0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
		{0xbb39de43, 0xbb39ce43, 0xbb39ce83, 0xbb39ce83}
	};

	/*
	 * Tune the so-called twister registers of the RTL8139. These
	 * are used to compensate for impedance mismatches. The
	 * method for tuning these registers is undocumented and the
	 * following procedure is collected from public sources.
	 */
	switch (sc->rl_twister) {
	case CHK_LINK:
		/*
		 * If we have a sufficient link, then we can proceed in
		 * the state machine to the next stage. If not, then
		 * disable further tuning after writing sane defaults.
		 */
		if (CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_LINK_OK) {
			CSR_WRITE_2(sc, RL_CSCFG, RL_CSCFG_LINK_DOWN_OFF_CMD);
			sc->rl_twister = FIND_ROW;
		} else {
			CSR_WRITE_2(sc, RL_CSCFG, RL_CSCFG_LINK_DOWN_CMD);
			CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_CBL_TEST);
			CSR_WRITE_4(sc, RL_PARA78, RL_PARA78_DEF);
			CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_DEF);
			sc->rl_twister = DONE;
		}
		break;
	case FIND_ROW:
		/*
		 * Read how long it took to see the echo to find the tuning
		 * row to use.
		 */
		linktest = CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_STATUS;
		if (linktest == RL_CSCFG_ROW3)
			sc->rl_twist_row = 3;
		else if (linktest == RL_CSCFG_ROW2)
			sc->rl_twist_row = 2;
		else if (linktest == RL_CSCFG_ROW1)
			sc->rl_twist_row = 1;
		else
			sc->rl_twist_row = 0;
		sc->rl_twist_col = 0;
		sc->rl_twister = SET_PARAM;
		break;
	case SET_PARAM:
		if (sc->rl_twist_col == 0)
			CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_RESET);
		CSR_WRITE_4(sc, RL_PARA7C,
		    param[sc->rl_twist_row][sc->rl_twist_col]);
		if (++sc->rl_twist_col == 4) {
			if (sc->rl_twist_row == 3)
				sc->rl_twister = RECHK_LONG;
			else
				sc->rl_twister = DONE;
		}
		break;
	case RECHK_LONG:
		/*
		 * For long cables, we have to double check to make sure we
		 * don't mistune.
		 */
		linktest = CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_STATUS;
		if (linktest == RL_CSCFG_ROW3)
			sc->rl_twister = DONE;
		else {
			CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_RETUNE);
			sc->rl_twister = RETUNE;
		}
		break;
	case RETUNE:
		/* Retune for a shorter cable (try column 2) */
		CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_CBL_TEST);
		CSR_WRITE_4(sc, RL_PARA78, RL_PARA78_DEF);
		CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_DEF);
		CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_RESET);
		sc->rl_twist_row--;
		sc->rl_twist_col = 0;
		sc->rl_twister = SET_PARAM;
		break;

	case DONE:
		break;
	}

}

static void
rl_tick(void *xsc)
{
	struct rl_softc *sc = xsc;
	struct mii_data *mii;
	int ticks;

	RL_LOCK_ASSERT(sc);
	/*
	 * If we're doing the twister cable calibration, then we need to defer
	 * watchdog timeouts. This is a no-op in normal operations, but
	 * can falsely trigger when the cable calibration takes a while and
	 * there was traffic ready to go when rl was started.
	 *
	 * We don't defer mii_tick since that updates the mii status, which
	 * helps the twister process, at least according to similar patches
	 * for the Linux driver I found online while doing the fixes. Worst
	 * case is a few extra mii reads during calibration.
	 */
	mii = device_get_softc(sc->rl_miibus);
	mii_tick(mii);
	if ((sc->rl_flags & RL_FLAG_LINK) == 0)
		rl_miibus_statchg(sc->rl_dev);
	if (sc->rl_twister_enable) {
		if (sc->rl_twister == DONE)
			rl_watchdog(sc);
		else
			rl_twister_update(sc);
		if (sc->rl_twister == DONE)
			ticks = hz;
		else
			ticks = hz / 10;
	} else {
		rl_watchdog(sc);
		ticks = hz;
	}

	callout_reset(&sc->rl_stat_callout, ticks, rl_tick, sc);
}

#ifdef DEVICE_POLLING
static int
rl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct rl_softc *sc = ifp->if_softc;
	int rx_npkts = 0;

	RL_LOCK(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		rx_npkts = rl_poll_locked(ifp, cmd, count);
	RL_UNLOCK(sc);
	return (rx_npkts);
}

static int
rl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct rl_softc *sc = ifp->if_softc;
	int rx_npkts;

	RL_LOCK_ASSERT(sc);

	sc->rxcycles = count;
	rx_npkts = rl_rxeof(sc);
	rl_txeof(sc);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		rl_start_locked(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) {
		uint16_t status;

		/* We should also check the status register. */
		status = CSR_READ_2(sc, RL_ISR);
		if (status == 0xffff)
			return (rx_npkts);
		if (status != 0)
			CSR_WRITE_2(sc, RL_ISR, status);

		/* XXX We should check behaviour on receiver stalls. */

		if (status & RL_ISR_SYSTEM_ERR) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			rl_init_locked(sc);
		}
	}
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */

static void
rl_intr(void *arg)
{
	struct rl_softc *sc = arg;
	struct ifnet *ifp = sc->rl_ifp;
	uint16_t status;
	int count;

	RL_LOCK(sc);

	if (sc->suspended)
		goto done_locked;

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		goto done_locked;
#endif

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		goto done_locked2;
	status = CSR_READ_2(sc, RL_ISR);
	if (status == 0xffff || (status & RL_INTRS) == 0)
		goto done_locked;
	/*
	 * Ours, disable further interrupts.
	 */
	CSR_WRITE_2(sc, RL_IMR, 0);
	for (count = 16; count > 0; count--) {
		CSR_WRITE_2(sc, RL_ISR, status);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			if (status & (RL_ISR_RX_OK | RL_ISR_RX_ERR))
				rl_rxeof(sc);
			if (status & (RL_ISR_TX_OK | RL_ISR_TX_ERR))
				rl_txeof(sc);
			if (status & RL_ISR_SYSTEM_ERR) {
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				rl_init_locked(sc);
				RL_UNLOCK(sc);
				return;
			}
		}
		status = CSR_READ_2(sc, RL_ISR);
		/* If the card has gone away, the read returns 0xffff. */
		if (status == 0xffff || (status & RL_INTRS) == 0)
			break;
	}

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		rl_start_locked(ifp);

done_locked2:
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		CSR_WRITE_2(sc, RL_IMR, RL_INTRS);
done_locked:
	RL_UNLOCK(sc);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
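 *
 * On the 8139 that boils down to: the frame must live in one contiguous,
 * longword-aligned buffer, so the code below defragments the chain with
 * m_defrag() when needed and zero-pads runts up to RL_MIN_FRAMELEN before
 * loading the single DMA segment whose address goes into the TX slot.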
 */
static int
rl_encap(struct rl_softc *sc, struct mbuf **m_head)
{
	struct mbuf *m;
	bus_dma_segment_t txsegs[1];
	int error, nsegs, padlen;

	RL_LOCK_ASSERT(sc);

	m = *m_head;
	padlen = 0;
	/*
	 * Hardware doesn't auto-pad, so we have to make sure
	 * pad short frames out to the minimum frame length.
	 */
	if (m->m_pkthdr.len < RL_MIN_FRAMELEN)
		padlen = RL_MIN_FRAMELEN - m->m_pkthdr.len;
	/*
	 * The RealTek is brain damaged and wants longword-aligned
	 * TX buffers, plus we can only have one fragment buffer
	 * per packet. We have to copy pretty much all the time.
	 */
	if (m->m_next != NULL || (mtod(m, uintptr_t) & 3) != 0 ||
	    (padlen > 0 && M_TRAILINGSPACE(m) < padlen)) {
		m = m_defrag(*m_head, M_NOWAIT);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
	}
	*m_head = m;

	if (padlen > 0) {
		/*
		 * Make security-conscious people happy: zero out the
		 * bytes in the pad area, since we don't know what
		 * this mbuf cluster buffer's previous user might
		 * have left in it.
		 */
		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
		m->m_pkthdr.len += padlen;
		m->m_len = m->m_pkthdr.len;
	}

	error = bus_dmamap_load_mbuf_sg(sc->rl_cdata.rl_tx_tag,
	    RL_CUR_DMAMAP(sc), m, txsegs, &nsegs, 0);
	if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	RL_CUR_TXMBUF(sc) = m;
	bus_dmamap_sync(sc->rl_cdata.rl_tx_tag, RL_CUR_DMAMAP(sc),
	    BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, RL_CUR_TXADDR(sc), RL_ADDR_LO(txsegs[0].ds_addr));

	return (0);
}

/*
 * Main transmit routine.
 */
static void
rl_start(struct ifnet *ifp)
{
	struct rl_softc *sc = ifp->if_softc;

	RL_LOCK(sc);
	rl_start_locked(ifp);
	RL_UNLOCK(sc);
}

static void
rl_start_locked(struct ifnet *ifp)
{
	struct rl_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;

	RL_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0)
		return;

	while (RL_CUR_TXMBUF(sc) == NULL) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);

		if (m_head == NULL)
			break;

		if (rl_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		/* Pass a copy of this mbuf chain to the bpf subsystem. */
		BPF_MTAP(ifp, RL_CUR_TXMBUF(sc));

		/* Transmit the frame. */
		CSR_WRITE_4(sc, RL_CUR_TXSTAT(sc),
		    RL_TXTHRESH(sc->rl_txthresh) |
		    RL_CUR_TXMBUF(sc)->m_pkthdr.len);

		RL_INC(sc->rl_cdata.cur_tx);

		/* Set a timeout in case the chip goes out to lunch. */
		sc->rl_watchdog_timer = 5;
	}

	/*
	 * We broke out of the loop because all our TX slots are
	 * full. Mark the NIC as busy until it drains some of the
	 * packets from the queue.
	 */
	if (RL_CUR_TXMBUF(sc) != NULL)
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
}

static void
rl_init(void *xsc)
{
	struct rl_softc *sc = xsc;

	RL_LOCK(sc);
	rl_init_locked(sc);
	RL_UNLOCK(sc);
}

static void
rl_init_locked(struct rl_softc *sc)
{
	struct ifnet *ifp = sc->rl_ifp;
	struct mii_data *mii;
	uint32_t eaddr[2];

	RL_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->rl_miibus);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	rl_stop(sc);

	rl_reset(sc);
	if (sc->rl_twister_enable) {
		/*
		 * Reset twister register tuning state. The twister
		 * registers and their tuning are undocumented, but
		 * are necessary to cope with bad links. rl_twister =
		 * DONE here will disable this entirely.
		 */
		sc->rl_twister = CHK_LINK;
	}

	/*
	 * Init our MAC address. Even though the chipset
	 * documentation doesn't mention it, we need to enter "Config
	 * register write enable" mode to modify the ID registers.
	 */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
	bzero(eaddr, sizeof(eaddr));
	bcopy(IF_LLADDR(sc->rl_ifp), eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_STREAM_4(sc, RL_IDR0, eaddr[0]);
	CSR_WRITE_STREAM_4(sc, RL_IDR4, eaddr[1]);
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	/* Init the RX memory block pointer register. */
	CSR_WRITE_4(sc, RL_RXADDR, sc->rl_cdata.rl_rx_buf_paddr +
	    RL_RX_8139_BUF_RESERVE);
	/* Init TX descriptors. */
	rl_list_tx_init(sc);
	/* Init Rx memory block. */
	rl_list_rx_init(sc);

	/*
	 * Enable transmit and receive.
	 */
	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);

	/*
	 * Set the initial TX and RX configuration.
	 */
	CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
	CSR_WRITE_4(sc, RL_RXCFG, RL_RXCFG_CONFIG);

	/* Set RX filter. */
	rl_rxfilter(sc);

#ifdef DEVICE_POLLING
	/* Disable interrupts if we are polling. */
	if (ifp->if_capenable & IFCAP_POLLING)
		CSR_WRITE_2(sc, RL_IMR, 0);
	else
#endif
	/* Enable interrupts. */
	CSR_WRITE_2(sc, RL_IMR, RL_INTRS);

	/* Set initial TX threshold */
	sc->rl_txthresh = RL_TX_THRESH_INIT;

	/* Start RX/TX process. */
	CSR_WRITE_4(sc, RL_MISSEDPKT, 0);

	/* Enable receiver and transmitter. */
	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);

	sc->rl_flags &= ~RL_FLAG_LINK;
	mii_mediachg(mii);

	CSR_WRITE_1(sc, sc->rl_cfg1, RL_CFG1_DRVLOAD|RL_CFG1_FULLDUPLEX);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	callout_reset(&sc->rl_stat_callout, hz, rl_tick, sc);
}

/*
 * Set media options.
 */
static int
rl_ifmedia_upd(struct ifnet *ifp)
{
	struct rl_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->rl_miibus);

	RL_LOCK(sc);
	mii_mediachg(mii);
	RL_UNLOCK(sc);

	return (0);
}

/*
 * Report current media status.
 */
static void
rl_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct rl_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->rl_miibus);

	RL_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	RL_UNLOCK(sc);
}

static int
rl_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	struct rl_softc *sc = ifp->if_softc;
	int error = 0, mask;

	switch (command) {
	case SIOCSIFFLAGS:
		RL_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    ((ifp->if_flags ^ sc->rl_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)))
				rl_rxfilter(sc);
			else
				rl_init_locked(sc);
		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			rl_stop(sc);
		sc->rl_if_flags = ifp->if_flags;
		RL_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		RL_LOCK(sc);
		rl_rxfilter(sc);
		RL_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->rl_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (ifr->ifr_reqcap & IFCAP_POLLING &&
		    !(ifp->if_capenable & IFCAP_POLLING)) {
			error = ether_poll_register(rl_poll, ifp);
			if (error)
				return (error);
			RL_LOCK(sc);
			/* Disable interrupts */
			CSR_WRITE_2(sc, RL_IMR, 0x0000);
			ifp->if_capenable |= IFCAP_POLLING;
			RL_UNLOCK(sc);
			return (error);

		}
		if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
		    ifp->if_capenable & IFCAP_POLLING) {
			error = ether_poll_deregister(ifp);
			/* Enable interrupts. */
			RL_LOCK(sc);
			CSR_WRITE_2(sc, RL_IMR, RL_INTRS);
			ifp->if_capenable &= ~IFCAP_POLLING;
			RL_UNLOCK(sc);
			return (error);
		}
#endif /* DEVICE_POLLING */
		if ((mask & IFCAP_WOL) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL) != 0) {
			if ((mask & IFCAP_WOL_UCAST) != 0)
				ifp->if_capenable ^= IFCAP_WOL_UCAST;
			if ((mask & IFCAP_WOL_MCAST) != 0)
				ifp->if_capenable ^= IFCAP_WOL_MCAST;
			if ((mask & IFCAP_WOL_MAGIC) != 0)
				ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		}
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

static void
rl_watchdog(struct rl_softc *sc)
{

	RL_LOCK_ASSERT(sc);

	if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer > 0)
		return;

	device_printf(sc->rl_dev, "watchdog timeout\n");
	if_inc_counter(sc->rl_ifp, IFCOUNTER_OERRORS, 1);

	rl_txeof(sc);
	rl_rxeof(sc);
	sc->rl_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	rl_init_locked(sc);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
rl_stop(struct rl_softc *sc)
{
	int i;
	struct ifnet *ifp = sc->rl_ifp;

	RL_LOCK_ASSERT(sc);

	sc->rl_watchdog_timer = 0;
	callout_stop(&sc->rl_stat_callout);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->rl_flags &= ~RL_FLAG_LINK;

	CSR_WRITE_1(sc, RL_COMMAND, 0x00);
	CSR_WRITE_2(sc, RL_IMR, 0x0000);
	for (i = 0; i < RL_TIMEOUT; i++) {
		DELAY(10);
		if ((CSR_READ_1(sc, RL_COMMAND) &
		    (RL_CMD_RX_ENB | RL_CMD_TX_ENB)) == 0)
			break;
	}
	if (i == RL_TIMEOUT)
		device_printf(sc->rl_dev, "Unable to stop Tx/Rx MAC\n");

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < RL_TX_LIST_CNT; i++) {
		if (sc->rl_cdata.rl_tx_chain[i] != NULL) {
			bus_dmamap_sync(sc->rl_cdata.rl_tx_tag,
			    sc->rl_cdata.rl_tx_dmamap[i],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->rl_cdata.rl_tx_tag,
			    sc->rl_cdata.rl_tx_dmamap[i]);
			m_freem(sc->rl_cdata.rl_tx_chain[i]);
			sc->rl_cdata.rl_tx_chain[i] = NULL;
			CSR_WRITE_4(sc, RL_TXADDR0 + (i * sizeof(uint32_t)),
			    0x0000000);
		}
	}
}

/*
 * Device suspend routine. Stop the interface and save some PCI
 * settings in case the BIOS doesn't restore them properly on
 * resume.
 */
static int
rl_suspend(device_t dev)
{
	struct rl_softc *sc;

	sc = device_get_softc(dev);

	RL_LOCK(sc);
	rl_stop(sc);
	rl_setwol(sc);
	sc->suspended = 1;
	RL_UNLOCK(sc);

	return (0);
}

/*
 * Device resume routine. Restore some PCI settings in case the BIOS
 * doesn't, re-enable busmastering, and restart the interface if
 * appropriate.
 */
static int
rl_resume(device_t dev)
{
	struct rl_softc *sc;
	struct ifnet *ifp;
	int pmc;
	uint16_t pmstat;

	sc = device_get_softc(dev);
	ifp = sc->rl_ifp;

	RL_LOCK(sc);

	if ((ifp->if_capabilities & IFCAP_WOL) != 0 &&
	    pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) == 0) {
		/* Disable PME and clear PME status. */
		pmstat = pci_read_config(sc->rl_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
			pmstat &= ~PCIM_PSTAT_PMEENABLE;
			pci_write_config(sc->rl_dev,
			    pmc + PCIR_POWER_STATUS, pmstat, 2);
		}
		/*
		 * Clear WOL matching such that normal Rx filtering
		 * wouldn't interfere with WOL patterns.
		 */
		rl_clrwol(sc);
	}

	/* reinitialize interface if necessary */
	if (ifp->if_flags & IFF_UP)
		rl_init_locked(sc);

	sc->suspended = 0;

	RL_UNLOCK(sc);

	return (0);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
rl_shutdown(device_t dev)
{
	struct rl_softc *sc;

	sc = device_get_softc(dev);

	RL_LOCK(sc);
	rl_stop(sc);
	/*
	 * Mark interface as down since otherwise we will panic if
	 * interrupt comes in later on, which can happen in some
	 * cases.
	 */
	sc->rl_ifp->if_flags &= ~IFF_UP;
	rl_setwol(sc);
	RL_UNLOCK(sc);

	return (0);
}

static void
rl_setwol(struct rl_softc *sc)
{
	struct ifnet *ifp;
	int pmc;
	uint16_t pmstat;
	uint8_t v;

	RL_LOCK_ASSERT(sc);

	ifp = sc->rl_ifp;
	if ((ifp->if_capabilities & IFCAP_WOL) == 0)
		return;
	if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
		return;

	/* Enable config register write. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);

	/* Enable PME. */
	v = CSR_READ_1(sc, sc->rl_cfg1);
	v &= ~RL_CFG1_PME;
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		v |= RL_CFG1_PME;
	CSR_WRITE_1(sc, sc->rl_cfg1, v);

	v = CSR_READ_1(sc, sc->rl_cfg3);
	v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
		v |= RL_CFG3_WOL_MAGIC;
	CSR_WRITE_1(sc, sc->rl_cfg3, v);

	v = CSR_READ_1(sc, sc->rl_cfg5);
	v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST);
	v &= ~RL_CFG5_WOL_LANWAKE;
	if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0)
		v |= RL_CFG5_WOL_UCAST;
	if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
		v |= RL_CFG5_WOL_MCAST | RL_CFG5_WOL_BCAST;
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		v |= RL_CFG5_WOL_LANWAKE;
	CSR_WRITE_1(sc, sc->rl_cfg5, v);

	/* Config register write done. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	/* Request PME if WOL is requested. */
	pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
}

static void
rl_clrwol(struct rl_softc *sc)
{
	struct ifnet *ifp;
	uint8_t v;

	ifp = sc->rl_ifp;
	if ((ifp->if_capabilities & IFCAP_WOL) == 0)
		return;

	/* Enable config register write. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);

	v = CSR_READ_1(sc, sc->rl_cfg3);
	v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
	CSR_WRITE_1(sc, sc->rl_cfg3, v);

	/* Config register write done. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	v = CSR_READ_1(sc, sc->rl_cfg5);
	v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST);
	v &= ~RL_CFG5_WOL_LANWAKE;
	CSR_WRITE_1(sc, sc->rl_cfg5, v);
}