/*-
 * Copyright (c) 1997, 1998-2003
 *	Bill Paul <wpaul@windriver.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * RealTek 8139C+/8169/8169S/8110S/8168/8111/8101E PCI NIC driver
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Networking Software Engineer
 * Wind River Systems
 */

/*
 * This driver is designed to support RealTek's next generation of
 * 10/100 and 10/100/1000 PCI ethernet controllers.  There are currently
 * seven devices in this family: the RTL8139C+, the RTL8169, the RTL8169S,
 * the RTL8110S, the RTL8168, the RTL8111 and the RTL8101E.
 *
 * The 8139C+ is a 10/100 ethernet chip.  It is backwards compatible
 * with the older 8139 family, however it also supports a special
 * C+ mode of operation that provides several new performance enhancing
 * features.  These include:
 *
 *	o Descriptor based DMA mechanism.  Each descriptor represents
 *	  a single packet fragment.  Data buffers may be aligned on
 *	  any byte boundary.
 *
 *	o 64-bit DMA
 *
 *	o TCP/IP checksum offload for both RX and TX
 *
 *	o High and normal priority transmit DMA rings
 *
 *	o VLAN tag insertion and extraction
 *
 *	o TCP large send (segmentation offload)
 *
 * Like the 8139, the 8139C+ also has a built-in 10/100 PHY.  The C+
 * programming API is fairly straightforward.  The RX filtering, EEPROM
 * access and PHY access is the same as it is on the older 8139 series
 * chips.
 *
 * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC.  It has almost the
 * same programming API and feature set as the 8139C+ with the following
 * differences and additions:
 *
 *	o 1000Mbps mode
 *
 *	o Jumbo frames
 *
 *	o GMII and TBI ports/registers for interfacing with copper
 *	  or fiber PHYs
 *
 *	o RX and TX DMA rings can have up to 1024 descriptors
 *	  (the 8139C+ allows a maximum of 64)
 *
 *	o Slight differences in register layout from the 8139C+
 *
 * The TX start and timer interrupt registers are at different locations
 * on the 8169 than they are on the 8139C+.  Also, the status word in the
 * RX descriptor has a slightly different bit layout.  The 8169 does not
 * have a built-in PHY.  Most reference boards use a Marvell 88E1000 'Alaska'
 * copper gigE PHY.
 *
 * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs
 * (the 'S' stands for 'single-chip').  These devices have the same
 * programming API as the older 8169, but also have some vendor-specific
 * registers for the on-board PHY.  The 8110S is a LAN-on-motherboard
 * part designed to be pin-compatible with the RealTek 8100 10/100 chip.
 *
 * This driver takes advantage of the RX and TX checksum offload and
 * VLAN tag insertion/extraction features.  It also implements TX
 * interrupt moderation using the timer interrupt registers, which
 * significantly reduces TX interrupt load.  There is also support
 * for jumbo frames, however the 8169/8169S/8110S cannot transmit
 * jumbo frames larger than 7440 bytes, so the maximum MTU possible
 * with this driver is 7422 bytes.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/taskqueue.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <pci/if_rlreg.h>

MODULE_DEPEND(re, pci, 1, 1, 1);
MODULE_DEPEND(re, ether, 1, 1, 1);
MODULE_DEPEND(re, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

/* Tunables. */
static int msi_disable = 0;
TUNABLE_INT("hw.re.msi_disable", &msi_disable);
static int prefer_iomap = 0;
TUNABLE_INT("hw.re.prefer_iomap", &prefer_iomap);

#define RE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
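
/*
 * Usage sketch (standard FreeBSD loader-tunable convention, not specific
 * to this file): both knobs above are read from the kernel environment at
 * module load time and can be set in /boot/loader.conf, e.g.:
 *
 *	hw.re.msi_disable="1"	# fall back to legacy INTx interrupts
 *	hw.re.prefer_iomap="1"	# use the I/O space BAR instead of memory
 */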

/*
 * Various supported device vendors/types and their names.
 */
static struct rl_type re_devs[] = {
	{ DLINK_VENDORID, DLINK_DEVICEID_528T, 0,
	    "D-Link DGE-528(T) Gigabit Ethernet Adapter" },
	{ RT_VENDORID, RT_DEVICEID_8139, 0,
	    "RealTek 8139C+ 10/100BaseTX" },
	{ RT_VENDORID, RT_DEVICEID_8101E, 0,
	    "RealTek 8101E/8102E/8102EL PCIe 10/100baseTX" },
	{ RT_VENDORID, RT_DEVICEID_8168, 0,
	    "RealTek 8168/8168B/8168C/8168CP/8168D/8111B/8111C/8111CP PCIe "
	    "Gigabit Ethernet" },
	{ RT_VENDORID, RT_DEVICEID_8169, 0,
	    "RealTek 8169/8169S/8169SB(L)/8110S/8110SB(L) Gigabit Ethernet" },
	{ RT_VENDORID, RT_DEVICEID_8169SC, 0,
	    "RealTek 8169SC/8110SC Single-chip Gigabit Ethernet" },
	{ COREGA_VENDORID, COREGA_DEVICEID_CGLAPCIGT, 0,
	    "Corega CG-LAPCIGT (RTL8169S) Gigabit Ethernet" },
	{ LINKSYS_VENDORID, LINKSYS_DEVICEID_EG1032, 0,
	    "Linksys EG1032 (RTL8169S) Gigabit Ethernet" },
	{ USR_VENDORID, USR_DEVICEID_997902, 0,
	    "US Robotics 997902 (RTL8169S) Gigabit Ethernet" }
};

static struct rl_hwrev re_hwrevs[] = {
	{ RL_HWREV_8139, RL_8139, "" },
	{ RL_HWREV_8139A, RL_8139, "A" },
	{ RL_HWREV_8139AG, RL_8139, "A-G" },
	{ RL_HWREV_8139B, RL_8139, "B" },
	{ RL_HWREV_8130, RL_8139, "8130" },
	{ RL_HWREV_8139C, RL_8139, "C" },
	{ RL_HWREV_8139D, RL_8139, "8139D/8100B/8100C" },
	{ RL_HWREV_8139CPLUS, RL_8139CPLUS, "C+" },
	{ RL_HWREV_8168_SPIN1, RL_8169, "8168" },
	{ RL_HWREV_8169, RL_8169, "8169" },
	{ RL_HWREV_8169S, RL_8169, "8169S" },
	{ RL_HWREV_8110S, RL_8169, "8110S" },
	{ RL_HWREV_8169_8110SB, RL_8169, "8169SB/8110SB" },
	{ RL_HWREV_8169_8110SC, RL_8169, "8169SC/8110SC" },
	{ RL_HWREV_8169_8110SBL, RL_8169, "8169SBL/8110SBL" },
	{ RL_HWREV_8169_8110SCE, RL_8169, "8169SC/8110SC" },
	{ RL_HWREV_8100, RL_8139, "8100" },
	{ RL_HWREV_8101, RL_8139, "8101" },
	{ RL_HWREV_8100E, RL_8169, "8100E" },
	{ RL_HWREV_8101E, RL_8169, "8101E" },
	{ RL_HWREV_8102E, RL_8169, "8102E" },
	{ RL_HWREV_8102EL, RL_8169, "8102EL" },
	{ RL_HWREV_8168_SPIN2, RL_8169, "8168" },
	{ RL_HWREV_8168_SPIN3, RL_8169, "8168" },
	{ RL_HWREV_8168C, RL_8169, "8168C/8111C" },
	{ RL_HWREV_8168C_SPIN2, RL_8169, "8168C/8111C" },
	{ RL_HWREV_8168CP, RL_8169, "8168CP/8111CP" },
	{ RL_HWREV_8168D, RL_8169, "8168D" },
	{ 0, 0, NULL }
};
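
/*
 * Descriptive note (derived from re_attach() below): the exact silicon is
 * identified by reading RL_TXCFG, masking out the hardware revision bits
 * and walking re_hwrevs[] until rl_rev matches; the rl_type field of the
 * matching entry then selects the 8139C+ or 8169-class code paths.
 */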

static int re_probe		(device_t);
static int re_attach		(device_t);
static int re_detach		(device_t);

static int re_encap		(struct rl_softc *, struct mbuf **);

static void re_dma_map_addr	(void *, bus_dma_segment_t *, int, int);
static int re_allocmem		(device_t, struct rl_softc *);
static __inline void re_discard_rxbuf
				(struct rl_softc *, int);
static int re_newbuf		(struct rl_softc *, int);
static int re_rx_list_init	(struct rl_softc *);
static int re_tx_list_init	(struct rl_softc *);
#ifdef RE_FIXUP_RX
static __inline void re_fixup_rx
				(struct mbuf *);
#endif
static int re_rxeof		(struct rl_softc *, int *);
static void re_txeof		(struct rl_softc *);
#ifdef DEVICE_POLLING
static int re_poll		(struct ifnet *, enum poll_cmd, int);
static int re_poll_locked	(struct ifnet *, enum poll_cmd, int);
#endif
static int re_intr		(void *);
static void re_tick		(void *);
static void re_tx_task		(void *, int);
static void re_int_task		(void *, int);
static void re_start		(struct ifnet *);
static int re_ioctl		(struct ifnet *, u_long, caddr_t);
static void re_init		(void *);
static void re_init_locked	(struct rl_softc *);
static void re_stop		(struct rl_softc *);
static void re_watchdog		(struct rl_softc *);
static int re_suspend		(device_t);
static int re_resume		(device_t);
static int re_shutdown		(device_t);
static int re_ifmedia_upd	(struct ifnet *);
static void re_ifmedia_sts	(struct ifnet *, struct ifmediareq *);

static void re_eeprom_putbyte	(struct rl_softc *, int);
static void re_eeprom_getword	(struct rl_softc *, int, u_int16_t *);
static void re_read_eeprom	(struct rl_softc *, caddr_t, int, int);
static int re_gmii_readreg	(device_t, int, int);
static int re_gmii_writereg	(device_t, int, int, int);

static int re_miibus_readreg	(device_t, int, int);
static int re_miibus_writereg	(device_t, int, int, int);
static void re_miibus_statchg	(device_t);

static void re_set_rxmode	(struct rl_softc *);
static void re_reset		(struct rl_softc *);
static void re_setwol		(struct rl_softc *);
static void re_clrwol		(struct rl_softc *);

#ifdef RE_DIAG
static int re_diag		(struct rl_softc *);
#endif

static device_method_t re_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		re_probe),
	DEVMETHOD(device_attach,	re_attach),
	DEVMETHOD(device_detach,	re_detach),
	DEVMETHOD(device_suspend,	re_suspend),
	DEVMETHOD(device_resume,	re_resume),
	DEVMETHOD(device_shutdown,	re_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	re_miibus_readreg),
	DEVMETHOD(miibus_writereg,	re_miibus_writereg),
	DEVMETHOD(miibus_statchg,	re_miibus_statchg),

	{ 0, 0 }
};

static driver_t re_driver = {
	"re",
	re_methods,
	sizeof(struct rl_softc)
};

static devclass_t re_devclass;

DRIVER_MODULE(re, pci, re_driver, re_devclass, 0, 0);
DRIVER_MODULE(miibus, re, miibus_driver, miibus_devclass, 0, 0);

#define EE_SET(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
		CSR_READ_1(sc, RL_EECMD) | x)

#define EE_CLR(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
		CSR_READ_1(sc, RL_EECMD) & ~x)

/*
 * Send a read command and address to the EEPROM, check for ACK.
 */
static void
re_eeprom_putbyte(struct rl_softc *sc, int addr)
{
	int d, i;

	d = addr | (RL_9346_READ << sc->rl_eewidth);

	/*
	 * Feed in each bit and strobe the clock.
	 */

	for (i = 1 << (sc->rl_eewidth + 3); i; i >>= 1) {
		if (d & i) {
			EE_SET(RL_EE_DATAIN);
		} else {
			EE_CLR(RL_EE_DATAIN);
		}
		DELAY(100);
		EE_SET(RL_EE_CLK);
		DELAY(150);
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static void
re_eeprom_getword(struct rl_softc *sc, int addr, u_int16_t *dest)
{
	int i;
	u_int16_t word = 0;

	/*
	 * Send address of word we want to read.
	 */
	re_eeprom_putbyte(sc, addr);

	/*
	 * Start reading bits from EEPROM.
	 */
	for (i = 0x8000; i; i >>= 1) {
		EE_SET(RL_EE_CLK);
		DELAY(100);
		if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
			word |= i;
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}

	*dest = word;
}
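
/*
 * A note on the bit-bang framing above (derived from the code): the read
 * opcode is shifted left by the EEPROM's address width, so the frame that
 * re_eeprom_putbyte() clocks out is the opcode bits followed by rl_eewidth
 * address bits, sent MSB first starting from bit (rl_eewidth + 3).
 * re_eeprom_getword() then clocks in the 16 data bits, also MSB first, by
 * sampling RL_EE_DATAOUT after each rising edge of RL_EE_CLK.
 */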

/*
 * Read a sequence of words from the EEPROM.
 */
static void
re_read_eeprom(struct rl_softc *sc, caddr_t dest, int off, int cnt)
{
	int i;
	u_int16_t word = 0, *ptr;

	CSR_SETBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);

	DELAY(100);

	for (i = 0; i < cnt; i++) {
		CSR_SETBIT_1(sc, RL_EECMD, RL_EE_SEL);
		re_eeprom_getword(sc, off + i, &word);
		CSR_CLRBIT_1(sc, RL_EECMD, RL_EE_SEL);
		ptr = (u_int16_t *)(dest + (i * 2));
		*ptr = word;
	}

	CSR_CLRBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);
}

static int
re_gmii_readreg(device_t dev, int phy, int reg)
{
	struct rl_softc *sc;
	u_int32_t rval;
	int i;

	if (phy != 1)
		return (0);

	sc = device_get_softc(dev);

	/* Let the rgephy driver read the GMEDIASTAT register */

	if (reg == RL_GMEDIASTAT) {
		rval = CSR_READ_1(sc, RL_GMEDIASTAT);
		return (rval);
	}

	CSR_WRITE_4(sc, RL_PHYAR, reg << 16);
	DELAY(1000);

	for (i = 0; i < RL_PHY_TIMEOUT; i++) {
		rval = CSR_READ_4(sc, RL_PHYAR);
		if (rval & RL_PHYAR_BUSY)
			break;
		DELAY(100);
	}

	if (i == RL_PHY_TIMEOUT) {
		device_printf(sc->rl_dev, "PHY read failed\n");
		return (0);
	}

	return (rval & RL_PHYAR_PHYDATA);
}

static int
re_gmii_writereg(device_t dev, int phy, int reg, int data)
{
	struct rl_softc *sc;
	u_int32_t rval;
	int i;

	sc = device_get_softc(dev);

	CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) |
	    (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY);
	DELAY(1000);

	for (i = 0; i < RL_PHY_TIMEOUT; i++) {
		rval = CSR_READ_4(sc, RL_PHYAR);
		if (!(rval & RL_PHYAR_BUSY))
			break;
		DELAY(100);
	}

	if (i == RL_PHY_TIMEOUT) {
		device_printf(sc->rl_dev, "PHY write failed\n");
		return (0);
	}

	return (0);
}
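
/*
 * Descriptive note on the two GMII accessors above (derived from the
 * code): RL_PHYAR packs the MII register number into bits 20:16 and the
 * data into the low 16 bits (RL_PHYAR_PHYDATA).  The RL_PHYAR_BUSY flag
 * has opposite polarity for the two directions: a read completes when the
 * chip sets BUSY (the data is then valid in the low bits), while a write
 * completes when the chip clears it.
 */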

static int
re_miibus_readreg(device_t dev, int phy, int reg)
{
	struct rl_softc *sc;
	u_int16_t rval = 0;
	u_int16_t re8139_reg = 0;

	sc = device_get_softc(dev);

	if (sc->rl_type == RL_8169) {
		rval = re_gmii_readreg(dev, phy, reg);
		return (rval);
	}

	/* Pretend the internal PHY is only at address 0 */
	if (phy) {
		return (0);
	}
	switch (reg) {
	case MII_BMCR:
		re8139_reg = RL_BMCR;
		break;
	case MII_BMSR:
		re8139_reg = RL_BMSR;
		break;
	case MII_ANAR:
		re8139_reg = RL_ANAR;
		break;
	case MII_ANER:
		re8139_reg = RL_ANER;
		break;
	case MII_ANLPAR:
		re8139_reg = RL_LPAR;
		break;
	case MII_PHYIDR1:
	case MII_PHYIDR2:
		return (0);
	/*
	 * Allow the rlphy driver to read the media status
	 * register.  If we have a link partner which does not
	 * support NWAY, this is the register which will tell
	 * us the results of parallel detection.
	 */
	case RL_MEDIASTAT:
		rval = CSR_READ_1(sc, RL_MEDIASTAT);
		return (rval);
	default:
		device_printf(sc->rl_dev, "bad phy register\n");
		return (0);
	}
	rval = CSR_READ_2(sc, re8139_reg);
	if (sc->rl_type == RL_8139CPLUS && re8139_reg == RL_BMCR) {
		/* 8139C+ has different bit layout. */
		rval &= ~(BMCR_LOOP | BMCR_ISO);
	}
	return (rval);
}

static int
re_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct rl_softc *sc;
	u_int16_t re8139_reg = 0;
	int rval = 0;

	sc = device_get_softc(dev);

	if (sc->rl_type == RL_8169) {
		rval = re_gmii_writereg(dev, phy, reg, data);
		return (rval);
	}

	/* Pretend the internal PHY is only at address 0 */
	if (phy)
		return (0);

	switch (reg) {
	case MII_BMCR:
		re8139_reg = RL_BMCR;
		if (sc->rl_type == RL_8139CPLUS) {
			/* 8139C+ has different bit layout. */
			data &= ~(BMCR_LOOP | BMCR_ISO);
		}
		break;
	case MII_BMSR:
		re8139_reg = RL_BMSR;
		break;
	case MII_ANAR:
		re8139_reg = RL_ANAR;
		break;
	case MII_ANER:
		re8139_reg = RL_ANER;
		break;
	case MII_ANLPAR:
		re8139_reg = RL_LPAR;
		break;
	case MII_PHYIDR1:
	case MII_PHYIDR2:
		return (0);
	default:
		device_printf(sc->rl_dev, "bad phy register\n");
		return (0);
	}
	CSR_WRITE_2(sc, re8139_reg, data);
	return (0);
}

static void
re_miibus_statchg(device_t dev)
{
	struct rl_softc *sc;
	struct ifnet *ifp;
	struct mii_data *mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->rl_miibus);
	ifp = sc->rl_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	sc->rl_flags &= ~RL_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->rl_flags |= RL_FLAG_LINK;
			break;
		case IFM_1000_T:
			if ((sc->rl_flags & RL_FLAG_FASTETHER) != 0)
				break;
			sc->rl_flags |= RL_FLAG_LINK;
			break;
		default:
			break;
		}
	}
	/*
	 * RealTek controllers do not provide an interface to the
	 * Tx/Rx MACs for resolved speed, duplex and flow-control
	 * parameters.
	 */
}

/*
 * Set the RX configuration and 64-bit multicast hash filter.
 */
static void
re_set_rxmode(struct rl_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t hashes[2] = { 0, 0 };
	uint32_t h, rxfilt;

	RL_LOCK_ASSERT(sc);

	ifp = sc->rl_ifp;

	rxfilt = RL_RXCFG_CONFIG | RL_RXCFG_RX_INDIV | RL_RXCFG_RX_BROAD;

	if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= RL_RXCFG_RX_ALLPHYS;
		/*
		 * Unlike other hardware, we have to explicitly set
		 * RL_RXCFG_RX_MULTI to receive multicast frames in
		 * promiscuous mode.
		 */
		rxfilt |= RL_RXCFG_RX_MULTI;
		hashes[0] = hashes[1] = 0xffffffff;
		goto done;
	}

	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
	}
	IF_ADDR_UNLOCK(ifp);
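
	/*
	 * Hash filter note (derived from the loop above): the top six bits
	 * of the big-endian CRC-32 of each multicast address select one bit
	 * in a 64-bit filter; bits 0-31 live in RL_MAR0 and bits 32-63 in
	 * RL_MAR4, both written below.
	 */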

	if (hashes[0] != 0 || hashes[1] != 0) {
		/*
		 * For some unfathomable reason, RealTek decided to
		 * reverse the order of the multicast hash registers
		 * in the PCI Express parts.  This means we have to
		 * write the hash pattern in reverse order for those
		 * devices.
		 */
		if ((sc->rl_flags & RL_FLAG_PCIE) != 0) {
			h = bswap32(hashes[0]);
			hashes[0] = bswap32(hashes[1]);
			hashes[1] = h;
		}
		rxfilt |= RL_RXCFG_RX_MULTI;
	}

done:
	CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
	CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
	CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
}

static void
re_reset(struct rl_softc *sc)
{
	int i;

	RL_LOCK_ASSERT(sc);

	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);

	for (i = 0; i < RL_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
			break;
	}
	if (i == RL_TIMEOUT)
		device_printf(sc->rl_dev, "reset never completed!\n");

	if ((sc->rl_flags & RL_FLAG_MACRESET) != 0)
		CSR_WRITE_1(sc, 0x82, 1);
	if (sc->rl_hwrev == RL_HWREV_8169S)
		re_gmii_writereg(sc->rl_dev, 1, 0x0b, 0);
}
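
/*
 * Reset note (descriptive): the reset bit is polled for up to RL_TIMEOUT
 * iterations of 10us each.  The post-reset writes to the undocumented
 * register 0x82 and to PHY register 0x0b are chip-specific fixups for the
 * MACRESET-flagged parts and the 8169S, presumably carried over from
 * RealTek reference code.
 */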

#ifdef RE_DIAG

/*
 * The following routine is designed to test for a defect on some
 * 32-bit 8169 cards.  Some of these NICs have the REQ64# and ACK64#
 * lines connected to the bus, however for a 32-bit only card, they
 * should be pulled high.  The result of this defect is that the
 * NIC will not work right if you plug it into a 64-bit slot: DMA
 * operations will be done with 64-bit transfers, which will fail
 * because the 64-bit data lines aren't connected.
 *
 * There's no way to work around this (short of taking a soldering
 * iron to the board), however we can detect it.  The method we use
 * here is to put the NIC into digital loopback mode, set the receiver
 * to promiscuous mode, and then try to send a frame.  We then compare
 * the frame data we sent to what was received.  If the data matches,
 * then the NIC is working correctly, otherwise we know the user has
 * a defective NIC which has been mistakenly plugged into a 64-bit PCI
 * slot.  In the latter case, there's no way the NIC can work correctly,
 * so we print out a message on the console and abort the device attach.
 */

static int
re_diag(struct rl_softc *sc)
{
	struct ifnet *ifp = sc->rl_ifp;
	struct mbuf *m0;
	struct ether_header *eh;
	struct rl_desc *cur_rx;
	u_int16_t status;
	u_int32_t rxstat;
	int total_len, i, error = 0, phyaddr;
	u_int8_t dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' };
	u_int8_t src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' };

	/* Allocate a single mbuf */
	MGETHDR(m0, M_DONTWAIT, MT_DATA);
	if (m0 == NULL)
		return (ENOBUFS);

	RL_LOCK(sc);

	/*
	 * Initialize the NIC in test mode.  This sets the chip up
	 * so that it can send and receive frames, but performs the
	 * following special functions:
	 * - Puts receiver in promiscuous mode
	 * - Enables digital loopback mode
	 * - Leaves interrupts turned off
	 */

	ifp->if_flags |= IFF_PROMISC;
	sc->rl_testmode = 1;
	re_init_locked(sc);
	sc->rl_flags |= RL_FLAG_LINK;
	if (sc->rl_type == RL_8169)
		phyaddr = 1;
	else
		phyaddr = 0;

	re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_RESET);
	for (i = 0; i < RL_TIMEOUT; i++) {
		status = re_miibus_readreg(sc->rl_dev, phyaddr, MII_BMCR);
		if (!(status & BMCR_RESET))
			break;
	}

	re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_LOOP);
	CSR_WRITE_2(sc, RL_ISR, RL_INTRS);

	DELAY(100000);

	/* Put some data in the mbuf */

	eh = mtod(m0, struct ether_header *);
	bcopy((char *)&dst, eh->ether_dhost, ETHER_ADDR_LEN);
	bcopy((char *)&src, eh->ether_shost, ETHER_ADDR_LEN);
	eh->ether_type = htons(ETHERTYPE_IP);
	m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN;

	/*
	 * Queue the packet, start transmission.
	 * Note: IF_HANDOFF() ultimately calls re_start() for us.
	 */

	CSR_WRITE_2(sc, RL_ISR, 0xFFFF);
	RL_UNLOCK(sc);
	/* XXX: re_diag must not be called when in ALTQ mode */
	IF_HANDOFF(&ifp->if_snd, m0, ifp);
	RL_LOCK(sc);
	m0 = NULL;

	/* Wait for it to propagate through the chip */

	DELAY(100000);
	for (i = 0; i < RL_TIMEOUT; i++) {
		status = CSR_READ_2(sc, RL_ISR);
		CSR_WRITE_2(sc, RL_ISR, status);
		if ((status & (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) ==
		    (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK))
			break;
		DELAY(10);
	}

	if (i == RL_TIMEOUT) {
		device_printf(sc->rl_dev,
		    "diagnostic failed, failed to receive packet in"
		    " loopback mode\n");
		error = EIO;
		goto done;
	}

	/*
	 * The packet should have been dumped into the first
	 * entry in the RX DMA ring.  Grab it from there.
	 */

	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
	    sc->rl_ldata.rl_rx_desc[0].rx_dmamap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag,
	    sc->rl_ldata.rl_rx_desc[0].rx_dmamap);

	m0 = sc->rl_ldata.rl_rx_desc[0].rx_m;
	sc->rl_ldata.rl_rx_desc[0].rx_m = NULL;
	eh = mtod(m0, struct ether_header *);

	cur_rx = &sc->rl_ldata.rl_rx_list[0];
	total_len = RL_RXBYTES(cur_rx);
	rxstat = le32toh(cur_rx->rl_cmdstat);

	if (total_len != ETHER_MIN_LEN) {
		device_printf(sc->rl_dev,
		    "diagnostic failed, received short packet\n");
		error = EIO;
		goto done;
	}

	/*
	 * Test that the received packet data matches what we sent.
	 */

	if (bcmp((char *)&eh->ether_dhost, (char *)&dst, ETHER_ADDR_LEN) ||
	    bcmp((char *)&eh->ether_shost, (char *)&src, ETHER_ADDR_LEN) ||
	    ntohs(eh->ether_type) != ETHERTYPE_IP) {
		device_printf(sc->rl_dev, "WARNING, DMA FAILURE!\n");
		device_printf(sc->rl_dev, "expected TX data: %6D/%6D/0x%x\n",
		    dst, ":", src, ":", ETHERTYPE_IP);
		device_printf(sc->rl_dev, "received RX data: %6D/%6D/0x%x\n",
		    eh->ether_dhost, ":", eh->ether_shost, ":",
		    ntohs(eh->ether_type));
		device_printf(sc->rl_dev, "You may have a defective 32-bit "
		    "NIC plugged into a 64-bit PCI slot.\n");
		device_printf(sc->rl_dev, "Please re-install the NIC in a "
		    "32-bit slot for proper operation.\n");
		device_printf(sc->rl_dev, "Read the re(4) man page for more "
		    "details.\n");
		error = EIO;
	}

done:
	/* Turn interface off, release resources */

	sc->rl_testmode = 0;
	sc->rl_flags &= ~RL_FLAG_LINK;
	ifp->if_flags &= ~IFF_PROMISC;
	re_stop(sc);
	if (m0 != NULL)
		m_freem(m0);

	RL_UNLOCK(sc);

	return (error);
}

#endif

/*
 * Probe for a RealTek 8139C+/8169/8110 chip.  Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
re_probe(device_t dev)
{
	struct rl_type *t;
	uint16_t devid, vendor;
	uint16_t revid, sdevid;
	int i;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	revid = pci_get_revid(dev);
	sdevid = pci_get_subdevice(dev);

	if (vendor == LINKSYS_VENDORID && devid == LINKSYS_DEVICEID_EG1032) {
		if (sdevid != LINKSYS_SUBDEVICE_EG1032_REV3) {
			/*
			 * Only attach to rev. 3 of the Linksys EG1032 adapter.
			 * Rev. 2 is supported by sk(4).
			 */
			return (ENXIO);
		}
	}

	if (vendor == RT_VENDORID && devid == RT_DEVICEID_8139) {
		if (revid != 0x20) {
			/* 8139, let rl(4) take care of this device. */
			return (ENXIO);
		}
	}

	t = re_devs;
	for (i = 0; i < sizeof(re_devs) / sizeof(re_devs[0]); i++, t++) {
		if (vendor == t->rl_vid && devid == t->rl_did) {
			device_set_desc(dev, t->rl_name);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

/*
 * Map a single buffer address.
 */

static void
re_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *addr;

	if (error)
		return;

	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
	addr = arg;
	*addr = segs->ds_addr;
}

static int
re_allocmem(device_t dev, struct rl_softc *sc)
{
	bus_size_t rx_list_size, tx_list_size;
	int error;
	int i;

	rx_list_size = sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc);
	tx_list_size = sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc);
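
	/*
	 * Sizing note (derived from this file's usage of struct rl_desc,
	 * which carries four 32-bit words: cmdstat, vlanctl and the two
	 * buffer address halves): each descriptor is 16 bytes, so each
	 * ring occupies rl_*_desc_cnt * 16 bytes of DMA'able memory.
	 */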

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 * In order to use DAC, the RL_CPLUSCMD_PCI_DAC bit of the
	 * RL_CPLUS_CMD register should be set.  However some RealTek
	 * chips are known to be buggy on DAC handling, therefore
	 * disable DAC by limiting DMA address space to 32bit.  PCIe
	 * variants of RealTek chips may not have the limitation but
	 * I took the safer path.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0,
	    NULL, NULL, &sc->rl_parent_tag);
	if (error) {
		device_printf(dev, "could not allocate parent DMA tag\n");
		return (error);
	}

	/*
	 * Allocate map for TX mbufs.
	 */
	error = bus_dma_tag_create(sc->rl_parent_tag, 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
	    NULL, MCLBYTES * RL_NTXSEGS, RL_NTXSEGS, 4096, 0,
	    NULL, NULL, &sc->rl_ldata.rl_tx_mtag);
	if (error) {
		device_printf(dev, "could not allocate TX DMA tag\n");
		return (error);
	}

	/*
	 * Allocate map for RX mbufs.
	 */

	error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t), 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->rl_ldata.rl_rx_mtag);
	if (error) {
		device_printf(dev, "could not allocate RX DMA tag\n");
		return (error);
	}

	/*
	 * Allocate map for TX descriptor list.
	 */
	error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
	    NULL, tx_list_size, 1, tx_list_size, 0,
	    NULL, NULL, &sc->rl_ldata.rl_tx_list_tag);
	if (error) {
		device_printf(dev, "could not allocate TX DMA ring tag\n");
		return (error);
	}

	/* Allocate DMA'able memory for the TX ring */

	error = bus_dmamem_alloc(sc->rl_ldata.rl_tx_list_tag,
	    (void **)&sc->rl_ldata.rl_tx_list,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->rl_ldata.rl_tx_list_map);
	if (error) {
		device_printf(dev, "could not allocate TX DMA ring\n");
		return (error);
	}

	/* Load the map for the TX ring. */

	sc->rl_ldata.rl_tx_list_addr = 0;
	error = bus_dmamap_load(sc->rl_ldata.rl_tx_list_tag,
	    sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list,
	    tx_list_size, re_dma_map_addr,
	    &sc->rl_ldata.rl_tx_list_addr, BUS_DMA_NOWAIT);
	if (error != 0 || sc->rl_ldata.rl_tx_list_addr == 0) {
		device_printf(dev, "could not load TX DMA ring\n");
		return (ENOMEM);
	}

	/* Create DMA maps for TX buffers */

	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
		error = bus_dmamap_create(sc->rl_ldata.rl_tx_mtag, 0,
		    &sc->rl_ldata.rl_tx_desc[i].tx_dmamap);
		if (error) {
			device_printf(dev, "could not create DMA map for TX\n");
			return (error);
		}
	}

	/*
	 * Allocate map for RX descriptor list.
	 */
	error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
	    NULL, rx_list_size, 1, rx_list_size, 0,
	    NULL, NULL, &sc->rl_ldata.rl_rx_list_tag);
	if (error) {
		device_printf(dev, "could not create RX DMA ring tag\n");
		return (error);
	}

	/* Allocate DMA'able memory for the RX ring */

	error = bus_dmamem_alloc(sc->rl_ldata.rl_rx_list_tag,
	    (void **)&sc->rl_ldata.rl_rx_list,
	    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
	    &sc->rl_ldata.rl_rx_list_map);
	if (error) {
		device_printf(dev, "could not allocate RX DMA ring\n");
		return (error);
	}

	/* Load the map for the RX ring. */

	sc->rl_ldata.rl_rx_list_addr = 0;
	error = bus_dmamap_load(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list,
	    rx_list_size, re_dma_map_addr,
	    &sc->rl_ldata.rl_rx_list_addr, BUS_DMA_NOWAIT);
	if (error != 0 || sc->rl_ldata.rl_rx_list_addr == 0) {
		device_printf(dev, "could not load RX DMA ring\n");
		return (ENOMEM);
	}

	/* Create DMA maps for RX buffers */

	error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0,
	    &sc->rl_ldata.rl_rx_sparemap);
	if (error) {
		device_printf(dev, "could not create spare DMA map for RX\n");
		return (error);
	}
	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
		error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0,
		    &sc->rl_ldata.rl_rx_desc[i].rx_dmamap);
		if (error) {
			device_printf(dev, "could not create DMA map for RX\n");
			return (error);
		}
	}

	return (0);
}

/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
re_attach(device_t dev)
{
	u_char eaddr[ETHER_ADDR_LEN];
	u_int16_t as[ETHER_ADDR_LEN / 2];
	struct rl_softc *sc;
	struct ifnet *ifp;
	struct rl_hwrev *hw_rev;
	int hwrev;
	u_int16_t devid, re_did = 0;
	int error = 0, rid, i;
	int msic, reg;
	uint8_t cfg;

	sc = device_get_softc(dev);
	sc->rl_dev = dev;

	mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->rl_stat_callout, &sc->rl_mtx, 0);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	devid = pci_get_device(dev);
	/*
	 * Prefer memory space register mapping over IO space.
	 * Because the RTL8169SC does not seem to work when memory
	 * mapping is used, always activate I/O mapping for it.
	 */
	if (devid == RT_DEVICEID_8169SC)
		prefer_iomap = 1;
	if (prefer_iomap == 0) {
		sc->rl_res_id = PCIR_BAR(1);
		sc->rl_res_type = SYS_RES_MEMORY;
		/* RTL8168/8101E seems to use different BARs. */
		if (devid == RT_DEVICEID_8168 || devid == RT_DEVICEID_8101E)
			sc->rl_res_id = PCIR_BAR(2);
	} else {
		sc->rl_res_id = PCIR_BAR(0);
		sc->rl_res_type = SYS_RES_IOPORT;
	}
	sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
	    &sc->rl_res_id, RF_ACTIVE);
	if (sc->rl_res == NULL && prefer_iomap == 0) {
		sc->rl_res_id = PCIR_BAR(0);
		sc->rl_res_type = SYS_RES_IOPORT;
		sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type,
		    &sc->rl_res_id, RF_ACTIVE);
	}
	if (sc->rl_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

	sc->rl_btag = rman_get_bustag(sc->rl_res);
	sc->rl_bhandle = rman_get_bushandle(sc->rl_res);

	msic = 0;
	if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
		sc->rl_flags |= RL_FLAG_PCIE;
		msic = pci_msi_count(dev);
		if (bootverbose)
			device_printf(dev, "MSI count : %d\n", msic);
	}
	if (msic > 0 && msi_disable == 0) {
		msic = 1;
		if (pci_alloc_msi(dev, &msic) == 0) {
			if (msic == RL_MSI_MESSAGES) {
				device_printf(dev, "Using %d MSI messages\n",
				    msic);
				sc->rl_flags |= RL_FLAG_MSI;
				/* Explicitly set MSI enable bit. */
				CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
				cfg = CSR_READ_1(sc, RL_CFG2);
				cfg |= RL_CFG2_MSI;
				CSR_WRITE_1(sc, RL_CFG2, cfg);
				CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
			} else
				pci_release_msi(dev);
		}
	}

	/* Allocate interrupt */
	if ((sc->rl_flags & RL_FLAG_MSI) == 0) {
		rid = 0;
		sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (sc->rl_irq[0] == NULL) {
			device_printf(dev, "couldn't allocate IRQ resources\n");
			error = ENXIO;
			goto fail;
		}
	} else {
		for (i = 0, rid = 1; i < RL_MSI_MESSAGES; i++, rid++) {
			sc->rl_irq[i] = bus_alloc_resource_any(dev,
			    SYS_RES_IRQ, &rid, RF_ACTIVE);
			if (sc->rl_irq[i] == NULL) {
				device_printf(dev,
				    "couldn't allocate IRQ resources for "
				    "message %d\n", rid);
				error = ENXIO;
				goto fail;
			}
		}
	}

	if ((sc->rl_flags & RL_FLAG_MSI) == 0) {
		CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
		cfg = CSR_READ_1(sc, RL_CFG2);
		if ((cfg & RL_CFG2_MSI) != 0) {
			device_printf(dev, "turning off MSI enable bit.\n");
			cfg &= ~RL_CFG2_MSI;
			CSR_WRITE_1(sc, RL_CFG2, cfg);
		}
		CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
	}

	/* Reset the adapter. */
	RL_LOCK(sc);
	re_reset(sc);
	RL_UNLOCK(sc);

	hw_rev = re_hwrevs;
	hwrev = CSR_READ_4(sc, RL_TXCFG);
	switch (hwrev & 0x70000000) {
	case 0x00000000:
	case 0x10000000:
		device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0xfc800000);
		hwrev &= (RL_TXCFG_HWREV | 0x80000000);
		break;
	default:
		device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0x7c800000);
		hwrev &= RL_TXCFG_HWREV;
		break;
	}
	device_printf(dev, "MAC rev. 0x%08x\n", hwrev & 0x00700000);
	while (hw_rev->rl_desc != NULL) {
		if (hw_rev->rl_rev == hwrev) {
			sc->rl_type = hw_rev->rl_type;
			sc->rl_hwrev = hw_rev->rl_rev;
			break;
		}
		hw_rev++;
	}
	if (hw_rev->rl_desc == NULL) {
		device_printf(dev, "Unknown H/W revision: 0x%08x\n", hwrev);
		error = ENXIO;
		goto fail;
	}

	switch (hw_rev->rl_rev) {
	case RL_HWREV_8139CPLUS:
		sc->rl_flags |= RL_FLAG_NOJUMBO | RL_FLAG_FASTETHER |
		    RL_FLAG_AUTOPAD;
		break;
	case RL_HWREV_8100E:
	case RL_HWREV_8101E:
		sc->rl_flags |= RL_FLAG_NOJUMBO | RL_FLAG_PHYWAKE |
		    RL_FLAG_FASTETHER;
		break;
	case RL_HWREV_8102E:
	case RL_HWREV_8102EL:
		sc->rl_flags |= RL_FLAG_NOJUMBO | RL_FLAG_PHYWAKE |
		    RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT |
		    RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD;
		break;
	case RL_HWREV_8168_SPIN1:
	case RL_HWREV_8168_SPIN2:
		sc->rl_flags |= RL_FLAG_WOLRXENB;
		/* FALLTHROUGH */
	case RL_HWREV_8168_SPIN3:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_MACSTAT;
		break;
	case RL_HWREV_8168C_SPIN2:
		sc->rl_flags |= RL_FLAG_MACSLEEP;
		/* FALLTHROUGH */
	case RL_HWREV_8168C:
		if ((hwrev & 0x00700000) == 0x00200000)
			sc->rl_flags |= RL_FLAG_MACSLEEP;
		/* FALLTHROUGH */
	case RL_HWREV_8168CP:
	case RL_HWREV_8168D:
		sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR |
		    RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP |
		    RL_FLAG_AUTOPAD;
		/*
		 * These controllers support jumbo frames but it seems
		 * that enabling them requires touching additional magic
		 * registers.  Depending on MAC revisions some
		 * controllers need to disable checksum offload.  So
		 * disable jumbo frames until I have a better idea of
		 * what is really required to support them.
		 * RTL8168C/CP : supports up to 6KB jumbo frame.
		 * RTL8111C/CP : supports up to 9KB jumbo frame.
		 */
		sc->rl_flags |= RL_FLAG_NOJUMBO;
		break;
	case RL_HWREV_8169_8110SB:
	case RL_HWREV_8169_8110SBL:
	case RL_HWREV_8169_8110SC:
	case RL_HWREV_8169_8110SCE:
		sc->rl_flags |= RL_FLAG_PHYWAKE;
		/* FALLTHROUGH */
	case RL_HWREV_8169:
	case RL_HWREV_8169S:
	case RL_HWREV_8110S:
		sc->rl_flags |= RL_FLAG_MACRESET;
		break;
	default:
		break;
	}

	/* Enable PME. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
	cfg = CSR_READ_1(sc, RL_CFG1);
	cfg |= RL_CFG1_PME;
	CSR_WRITE_1(sc, RL_CFG1, cfg);
	cfg = CSR_READ_1(sc, RL_CFG5);
	cfg &= RL_CFG5_PME_STS;
	CSR_WRITE_1(sc, RL_CFG5, cfg);
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	if ((sc->rl_flags & RL_FLAG_PAR) != 0) {
		/*
		 * XXX Should have a better way to extract station
		 * address from EEPROM.
		 */
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i);
	} else {
		sc->rl_eewidth = RL_9356_ADDR_LEN;
		re_read_eeprom(sc, (caddr_t)&re_did, 0, 1);
		if (re_did != 0x8129)
			sc->rl_eewidth = RL_9346_ADDR_LEN;

		/*
		 * Get station address from the EEPROM.
		 */
		re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3);
		for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
			as[i] = le16toh(as[i]);
		bcopy(as, eaddr, sizeof(eaddr));
	}

	if (sc->rl_type == RL_8169) {
		/* Set RX length mask and number of descriptors. */
		sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN;
		sc->rl_txstart = RL_GTXSTART;
		sc->rl_ldata.rl_tx_desc_cnt = RL_8169_TX_DESC_CNT;
		sc->rl_ldata.rl_rx_desc_cnt = RL_8169_RX_DESC_CNT;
	} else {
		/* Set RX length mask and number of descriptors. */
		sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN;
		sc->rl_txstart = RL_TXSTART;
		sc->rl_ldata.rl_tx_desc_cnt = RL_8139_TX_DESC_CNT;
		sc->rl_ldata.rl_rx_desc_cnt = RL_8139_RX_DESC_CNT;
	}

	error = re_allocmem(dev, sc);
	if (error)
		goto fail;

	ifp = sc->rl_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}

	/* Take controller out of deep sleep mode. */
	if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
		if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80)
			CSR_WRITE_1(sc, RL_GPIO,
			    CSR_READ_1(sc, RL_GPIO) | 0x01);
		else
			CSR_WRITE_1(sc, RL_GPIO,
			    CSR_READ_1(sc, RL_GPIO) & ~0x01);
	}

	/* Take PHY out of power down mode. */
	if ((sc->rl_flags & RL_FLAG_PHYWAKE) != 0) {
		re_gmii_writereg(dev, 1, 0x1f, 0);
		re_gmii_writereg(dev, 1, 0x0e, 0);
	}

	/* Do MII setup */
	if (mii_phy_probe(dev, &sc->rl_miibus,
	    re_ifmedia_upd, re_ifmedia_sts)) {
		device_printf(dev, "MII without any phy!\n");
		error = ENXIO;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = re_ioctl;
	ifp->if_start = re_start;
	ifp->if_hwassist = RE_CSUM_FEATURES;
	ifp->if_capabilities = IFCAP_HWCSUM;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_init = re_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, RL_IFQ_MAXLEN);
	ifp->if_snd.ifq_drv_maxlen = RL_IFQ_MAXLEN;
	IFQ_SET_READY(&ifp->if_snd);

	TASK_INIT(&sc->rl_txtask, 1, re_tx_task, ifp);
	TASK_INIT(&sc->rl_inttask, 0, re_int_task, sc);

	/*
	 * XXX
	 * Still have no idea how to make TSO work on 8168C, 8168CP,
	 * 8111C and 8111CP.
	 */
	if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) {
		ifp->if_hwassist |= CSUM_TSO;
		ifp->if_capabilities |= IFCAP_TSO4;
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* VLAN capability setup */
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
	if (ifp->if_capabilities & IFCAP_HWCSUM)
		ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
	/* Enable WOL if PM is supported. */
	if (pci_find_extcap(sc->rl_dev, PCIY_PMG, &reg) == 0)
		ifp->if_capabilities |= IFCAP_WOL;
	ifp->if_capenable = ifp->if_capabilities;
	/*
	 * Don't enable TSO by default.  Under certain
	 * circumstances the controller generated corrupted
	 * packets in TSO size.
	 */
	ifp->if_hwassist &= ~CSUM_TSO;
	ifp->if_capenable &= ~IFCAP_TSO4;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif
	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

#ifdef RE_DIAG
	/*
	 * Perform hardware diagnostic on the original RTL8169.
	 * Some 32-bit cards were incorrectly wired and would
	 * malfunction if plugged into a 64-bit slot.
	 */

	if (hwrev == RL_HWREV_8169) {
		error = re_diag(sc);
		if (error) {
			device_printf(dev,
			    "attach aborted due to hardware diag failure\n");
			ether_ifdetach(ifp);
			goto fail;
		}
	}
#endif

	/* Hook interrupt last to avoid having to lock softc */
	if ((sc->rl_flags & RL_FLAG_MSI) == 0)
		error = bus_setup_intr(dev, sc->rl_irq[0],
		    INTR_TYPE_NET | INTR_MPSAFE, re_intr, NULL, sc,
		    &sc->rl_intrhand[0]);
	else {
		for (i = 0; i < RL_MSI_MESSAGES; i++) {
			error = bus_setup_intr(dev, sc->rl_irq[i],
			    INTR_TYPE_NET | INTR_MPSAFE, re_intr, NULL, sc,
			    &sc->rl_intrhand[i]);
			if (error != 0)
				break;
		}
	}
	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
	}

fail:
	if (error)
		re_detach(dev);

	return (error);
}

/*
 * Shutdown hardware and free up resources.  This can be called any
 * time after the mutex has been initialized.
 * It is called in both the error case in attach and the normal detach
 * case so it needs to be careful about only freeing resources that have
 * actually been allocated.
 */
static int
re_detach(device_t dev)
{
	struct rl_softc *sc;
	struct ifnet *ifp;
	int i, rid;

	sc = device_get_softc(dev);
	ifp = sc->rl_ifp;
	KASSERT(mtx_initialized(&sc->rl_mtx), ("re mutex not initialized"));

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING)
			ether_poll_deregister(ifp);
#endif
		RL_LOCK(sc);
#if 0
		sc->suspended = 1;
#endif
		re_stop(sc);
		RL_UNLOCK(sc);
		callout_drain(&sc->rl_stat_callout);
		taskqueue_drain(taskqueue_fast, &sc->rl_inttask);
		taskqueue_drain(taskqueue_fast, &sc->rl_txtask);
		/*
		 * Force off the IFF_UP flag here, in case someone
		 * still had a BPF descriptor attached to this
		 * interface.  If they do, ether_ifdetach() will cause
		 * the BPF code to try and clear the promisc mode
		 * flag, which will bubble down to re_ioctl(),
		 * which will try to call re_init() again.  This will
		 * turn the NIC back on and restart the MII ticker,
		 * which will panic the system when the kernel tries
		 * to invoke the re_tick() function that isn't there
		 * anymore.
		 */
		ifp->if_flags &= ~IFF_UP;
		ether_ifdetach(ifp);
	}
	if (sc->rl_miibus)
		device_delete_child(dev, sc->rl_miibus);
	bus_generic_detach(dev);

	/*
	 * The rest is resource deallocation, so we should already be
	 * stopped here.
	 */

	for (i = 0; i < RL_MSI_MESSAGES; i++) {
		if (sc->rl_intrhand[i] != NULL) {
			bus_teardown_intr(dev, sc->rl_irq[i],
			    sc->rl_intrhand[i]);
			sc->rl_intrhand[i] = NULL;
		}
	}
	if (ifp != NULL)
		if_free(ifp);
	if ((sc->rl_flags & RL_FLAG_MSI) == 0) {
		if (sc->rl_irq[0] != NULL) {
			bus_release_resource(dev, SYS_RES_IRQ, 0,
			    sc->rl_irq[0]);
			sc->rl_irq[0] = NULL;
		}
	} else {
		for (i = 0, rid = 1; i < RL_MSI_MESSAGES; i++, rid++) {
			if (sc->rl_irq[i] != NULL) {
				bus_release_resource(dev, SYS_RES_IRQ, rid,
				    sc->rl_irq[i]);
				sc->rl_irq[i] = NULL;
			}
		}
		pci_release_msi(dev);
	}
	if (sc->rl_res)
		bus_release_resource(dev, sc->rl_res_type, sc->rl_res_id,
		    sc->rl_res);

	/* Unload and free the RX DMA ring memory and map */

	if (sc->rl_ldata.rl_rx_list_tag) {
		bus_dmamap_unload(sc->rl_ldata.rl_rx_list_tag,
		    sc->rl_ldata.rl_rx_list_map);
		bus_dmamem_free(sc->rl_ldata.rl_rx_list_tag,
		    sc->rl_ldata.rl_rx_list,
		    sc->rl_ldata.rl_rx_list_map);
		bus_dma_tag_destroy(sc->rl_ldata.rl_rx_list_tag);
	}

	/* Unload and free the TX DMA ring memory and map */

	if (sc->rl_ldata.rl_tx_list_tag) {
		bus_dmamap_unload(sc->rl_ldata.rl_tx_list_tag,
		    sc->rl_ldata.rl_tx_list_map);
		bus_dmamem_free(sc->rl_ldata.rl_tx_list_tag,
		    sc->rl_ldata.rl_tx_list,
		    sc->rl_ldata.rl_tx_list_map);
		bus_dma_tag_destroy(sc->rl_ldata.rl_tx_list_tag);
	}

	/* Destroy all the RX and TX buffer maps */

	if (sc->rl_ldata.rl_tx_mtag) {
		for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++)
			bus_dmamap_destroy(sc->rl_ldata.rl_tx_mtag,
			    sc->rl_ldata.rl_tx_desc[i].tx_dmamap);
		bus_dma_tag_destroy(sc->rl_ldata.rl_tx_mtag);
	}
	if (sc->rl_ldata.rl_rx_mtag) {
		for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++)
			bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag,
			    sc->rl_ldata.rl_rx_desc[i].rx_dmamap);
		if (sc->rl_ldata.rl_rx_sparemap)
			bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag,
			    sc->rl_ldata.rl_rx_sparemap);
		bus_dma_tag_destroy(sc->rl_ldata.rl_rx_mtag);
	}

	/* Unload and free the stats buffer and map */

	if (sc->rl_ldata.rl_stag) {
		/* Unload the stats map, not the RX list map. */
		bus_dmamap_unload(sc->rl_ldata.rl_stag,
		    sc->rl_ldata.rl_smap);
		bus_dmamem_free(sc->rl_ldata.rl_stag,
		    sc->rl_ldata.rl_stats,
		    sc->rl_ldata.rl_smap);
		bus_dma_tag_destroy(sc->rl_ldata.rl_stag);
	}

	if (sc->rl_parent_tag)
		bus_dma_tag_destroy(sc->rl_parent_tag);

	mtx_destroy(&sc->rl_mtx);

	return (0);
}

static __inline void
re_discard_rxbuf(struct rl_softc *sc, int idx)
{
	struct rl_desc *desc;
	struct rl_rxdesc *rxd;
	uint32_t cmdstat;

	rxd = &sc->rl_ldata.rl_rx_desc[idx];
	desc = &sc->rl_ldata.rl_rx_list[idx];
	desc->rl_vlanctl = 0;
	cmdstat = rxd->rx_size;
	if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
		cmdstat |= RL_RDESC_CMD_EOR;
	desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);
}
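
/*
 * Buffer replacement note (descriptive): re_newbuf() below loads the new
 * mbuf through rl_rx_sparemap first and only commits it, swapping the
 * spare map with the descriptor's map, once the DMA load has succeeded.
 * On failure the old mbuf stays in place and the caller falls back to
 * re_discard_rxbuf(), so the ring never loses a buffer to an allocation
 * error.
 */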

static int
re_newbuf(struct rl_softc *sc, int idx)
{
	struct mbuf *m;
	struct rl_rxdesc *rxd;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	struct rl_desc *desc;
	uint32_t cmdstat;
	int error, nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);

	m->m_len = m->m_pkthdr.len = MCLBYTES;
#ifdef RE_FIXUP_RX
	/*
	 * This is part of an evil trick to deal with non-x86 platforms.
	 * The RealTek chip requires RX buffers to be aligned on 64-bit
	 * boundaries, but that will hose non-x86 machines.  To get around
	 * this, we leave some empty space at the start of each buffer
	 * and for non-x86 hosts, we copy the buffer back six bytes
	 * to achieve word alignment.  This is slightly more efficient
	 * than allocating a new buffer, copying the contents, and
	 * discarding the old buffer.
	 */
	m_adj(m, RE_ETHER_ALIGN);
#endif
	error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_rx_mtag,
	    sc->rl_ldata.rl_rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc->rl_ldata.rl_rx_desc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap);
	}

	rxd->rx_m = m;
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->rl_ldata.rl_rx_sparemap;
	rxd->rx_size = segs[0].ds_len;
	sc->rl_ldata.rl_rx_sparemap = map;
	bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);

	desc = &sc->rl_ldata.rl_rx_list[idx];
	desc->rl_vlanctl = 0;
	desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr));
	desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr));
	cmdstat = segs[0].ds_len;
	if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1)
		cmdstat |= RL_RDESC_CMD_EOR;
	desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN);

	return (0);
}

#ifdef RE_FIXUP_RX
static __inline void
re_fixup_rx(struct mbuf *m)
{
	int i;
	uint16_t *src, *dst;

	src = mtod(m, uint16_t *);
	dst = src - (RE_ETHER_ALIGN - ETHER_ALIGN) / sizeof *src;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= RE_ETHER_ALIGN - ETHER_ALIGN;
}
#endif

static int
re_tx_list_init(struct rl_softc *sc)
{
	struct rl_desc *desc;
	int i;

	RL_LOCK_ASSERT(sc);

	bzero(sc->rl_ldata.rl_tx_list,
	    sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc));
	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++)
		sc->rl_ldata.rl_tx_desc[i].tx_m = NULL;
	/* Set EOR. */
	desc = &sc->rl_ldata.rl_tx_list[sc->rl_ldata.rl_tx_desc_cnt - 1];
	desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOR);

	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
	    sc->rl_ldata.rl_tx_list_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	sc->rl_ldata.rl_tx_prodidx = 0;
	sc->rl_ldata.rl_tx_considx = 0;
	sc->rl_ldata.rl_tx_free = sc->rl_ldata.rl_tx_desc_cnt;

	return (0);
}

static int
re_rx_list_init(struct rl_softc *sc)
{
	int error, i;

	bzero(sc->rl_ldata.rl_rx_list,
	    sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc));
	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
		sc->rl_ldata.rl_rx_desc[i].rx_m = NULL;
		if ((error = re_newbuf(sc, i)) != 0)
			return (error);
	}

	/* Flush the RX descriptors */

	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	sc->rl_ldata.rl_rx_prodidx = 0;
	sc->rl_head = sc->rl_tail = NULL;

	return (0);
}
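
/*
 * Ring bookkeeping note (descriptive): rl_tx_prodidx is where re_encap()
 * writes the next frame, rl_tx_considx is where re_txeof() resumes
 * reaping, and rl_tx_free counts descriptors still available to the
 * driver.  The two list inits above reset all three and hand every RX
 * descriptor to the chip by setting RL_RDESC_CMD_OWN.
 */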

/*
 * RX handler for C+ and 8169.  For the gigE chips, we support
 * the reception of jumbo frames that have been fragmented
 * across multiple 2K mbuf cluster buffers.
 */
static int
re_rxeof(struct rl_softc *sc, int *rx_npktsp)
{
	struct mbuf *m;
	struct ifnet *ifp;
	int i, total_len;
	struct rl_desc *cur_rx;
	u_int32_t rxstat, rxvlan;
	int maxpkt = 16, rx_npkts = 0;

	RL_LOCK_ASSERT(sc);

	ifp = sc->rl_ifp;

	/* Invalidate the descriptor memory */

	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (i = sc->rl_ldata.rl_rx_prodidx; maxpkt > 0;
	    i = RL_RX_DESC_NXT(sc, i)) {
		cur_rx = &sc->rl_ldata.rl_rx_list[i];
		rxstat = le32toh(cur_rx->rl_cmdstat);
		if ((rxstat & RL_RDESC_STAT_OWN) != 0)
			break;
		total_len = rxstat & sc->rl_rxlenmask;
		rxvlan = le32toh(cur_rx->rl_vlanctl);
		m = sc->rl_ldata.rl_rx_desc[i].rx_m;

		if (!(rxstat & RL_RDESC_STAT_EOF)) {
			if (re_newbuf(sc, i) != 0) {
				/*
				 * If this is part of a multi-fragment packet,
				 * discard all the pieces.
				 */
				if (sc->rl_head != NULL) {
					m_freem(sc->rl_head);
					sc->rl_head = sc->rl_tail = NULL;
				}
				re_discard_rxbuf(sc, i);
				continue;
			}
			m->m_len = RE_RX_DESC_BUFLEN;
			if (sc->rl_head == NULL)
				sc->rl_head = sc->rl_tail = m;
			else {
				m->m_flags &= ~M_PKTHDR;
				sc->rl_tail->m_next = m;
				sc->rl_tail = m;
			}
			continue;
		}

		/*
		 * NOTE: for the 8139C+, the frame length field
		 * is always 12 bits in size, but for the gigE chips,
		 * it is 13 bits (since the max RX frame length is 16K).
		 * Unfortunately, all 32 bits in the status word
		 * were already used, so to make room for the extra
		 * length bit, RealTek took out the 'frame alignment
		 * error' bit and shifted the other status bits
		 * over one slot.  The OWN, EOR, FS and LS bits are
		 * still in the same places.  We have already extracted
		 * the frame length and checked the OWN bit, so rather
		 * than using an alternate bit mapping, we shift the
		 * status bits one space to the right so we can evaluate
		 * them using the 8169 status as though it was in the
		 * same format as that of the 8139C+.
		 */
		if (sc->rl_type == RL_8169)
			rxstat >>= 1;

		/*
		 * if total_len > 2^13-1, both _RXERRSUM and _GIANT will be
		 * set, but if CRC is clear, it will still be a valid frame.
		 */
		if (rxstat & RL_RDESC_STAT_RXERRSUM && !(total_len > 8191 &&
		    (rxstat & RL_RDESC_STAT_ERRS) == RL_RDESC_STAT_GIANT)) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->rl_head != NULL) {
				m_freem(sc->rl_head);
				sc->rl_head = sc->rl_tail = NULL;
			}
			re_discard_rxbuf(sc, i);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */

		if (re_newbuf(sc, i) != 0) {
			ifp->if_iqdrops++;
			if (sc->rl_head != NULL) {
				m_freem(sc->rl_head);
				sc->rl_head = sc->rl_tail = NULL;
			}
			re_discard_rxbuf(sc, i);
			continue;
		}

		if (sc->rl_head != NULL) {
			m->m_len = total_len % RE_RX_DESC_BUFLEN;
			if (m->m_len == 0)
				m->m_len = RE_RX_DESC_BUFLEN;
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes is the CRC, which we don't
			 * care about anyway.
			 */
1910 */ 1911 if (m->m_len <= ETHER_CRC_LEN) { 1912 sc->rl_tail->m_len -= 1913 (ETHER_CRC_LEN - m->m_len); 1914 m_freem(m); 1915 } else { 1916 m->m_len -= ETHER_CRC_LEN; 1917 m->m_flags &= ~M_PKTHDR; 1918 sc->rl_tail->m_next = m; 1919 } 1920 m = sc->rl_head; 1921 sc->rl_head = sc->rl_tail = NULL; 1922 m->m_pkthdr.len = total_len - ETHER_CRC_LEN; 1923 } else 1924 m->m_pkthdr.len = m->m_len = 1925 (total_len - ETHER_CRC_LEN); 1926 1927 #ifdef RE_FIXUP_RX 1928 re_fixup_rx(m); 1929 #endif 1930 ifp->if_ipackets++; 1931 m->m_pkthdr.rcvif = ifp; 1932 1933 /* Do RX checksumming if enabled */ 1934 1935 if (ifp->if_capenable & IFCAP_RXCSUM) { 1936 if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) { 1937 /* Check IP header checksum */ 1938 if (rxstat & RL_RDESC_STAT_PROTOID) 1939 m->m_pkthdr.csum_flags |= 1940 CSUM_IP_CHECKED; 1941 if (!(rxstat & RL_RDESC_STAT_IPSUMBAD)) 1942 m->m_pkthdr.csum_flags |= 1943 CSUM_IP_VALID; 1944 1945 /* Check TCP/UDP checksum */ 1946 if ((RL_TCPPKT(rxstat) && 1947 !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) || 1948 (RL_UDPPKT(rxstat) && 1949 !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) { 1950 m->m_pkthdr.csum_flags |= 1951 CSUM_DATA_VALID|CSUM_PSEUDO_HDR; 1952 m->m_pkthdr.csum_data = 0xffff; 1953 } 1954 } else { 1955 /* 1956 * RTL8168C/RTL816CP/RTL8111C/RTL8111CP 1957 */ 1958 if ((rxstat & RL_RDESC_STAT_PROTOID) && 1959 (rxvlan & RL_RDESC_IPV4)) 1960 m->m_pkthdr.csum_flags |= 1961 CSUM_IP_CHECKED; 1962 if (!(rxstat & RL_RDESC_STAT_IPSUMBAD) && 1963 (rxvlan & RL_RDESC_IPV4)) 1964 m->m_pkthdr.csum_flags |= 1965 CSUM_IP_VALID; 1966 if (((rxstat & RL_RDESC_STAT_TCP) && 1967 !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) || 1968 ((rxstat & RL_RDESC_STAT_UDP) && 1969 !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) { 1970 m->m_pkthdr.csum_flags |= 1971 CSUM_DATA_VALID|CSUM_PSEUDO_HDR; 1972 m->m_pkthdr.csum_data = 0xffff; 1973 } 1974 } 1975 } 1976 maxpkt--; 1977 if (rxvlan & RL_RDESC_VLANCTL_TAG) { 1978 m->m_pkthdr.ether_vtag = 1979 bswap16((rxvlan & RL_RDESC_VLANCTL_DATA)); 1980 m->m_flags |= M_VLANTAG; 1981 } 1982 RL_UNLOCK(sc); 1983 (*ifp->if_input)(ifp, m); 1984 RL_LOCK(sc); 1985 rx_npkts++; 1986 } 1987 1988 /* Flush the RX DMA ring */ 1989 1990 bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, 1991 sc->rl_ldata.rl_rx_list_map, 1992 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 1993 1994 sc->rl_ldata.rl_rx_prodidx = i; 1995 1996 if (rx_npktsp != NULL) 1997 *rx_npktsp = rx_npkts; 1998 if (maxpkt) 1999 return(EAGAIN); 2000 2001 return(0); 2002 } 2003 2004 static void 2005 re_txeof(struct rl_softc *sc) 2006 { 2007 struct ifnet *ifp; 2008 struct rl_txdesc *txd; 2009 u_int32_t txstat; 2010 int cons; 2011 2012 cons = sc->rl_ldata.rl_tx_considx; 2013 if (cons == sc->rl_ldata.rl_tx_prodidx) 2014 return; 2015 2016 ifp = sc->rl_ifp; 2017 /* Invalidate the TX descriptor list */ 2018 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, 2019 sc->rl_ldata.rl_tx_list_map, 2020 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2021 2022 for (; cons != sc->rl_ldata.rl_tx_prodidx; 2023 cons = RL_TX_DESC_NXT(sc, cons)) { 2024 txstat = le32toh(sc->rl_ldata.rl_tx_list[cons].rl_cmdstat); 2025 if (txstat & RL_TDESC_STAT_OWN) 2026 break; 2027 /* 2028 * We only stash mbufs in the last descriptor 2029 * in a fragment chain, which also happens to 2030 * be the only place where the TX status bits 2031 * are valid. 
2032 */ 2033 if (txstat & RL_TDESC_CMD_EOF) { 2034 txd = &sc->rl_ldata.rl_tx_desc[cons]; 2035 bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, 2036 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 2037 bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, 2038 txd->tx_dmamap); 2039 KASSERT(txd->tx_m != NULL, 2040 ("%s: freeing NULL mbufs!", __func__)); 2041 m_freem(txd->tx_m); 2042 txd->tx_m = NULL; 2043 if (txstat & (RL_TDESC_STAT_EXCESSCOL| 2044 RL_TDESC_STAT_COLCNT)) 2045 ifp->if_collisions++; 2046 if (txstat & RL_TDESC_STAT_TXERRSUM) 2047 ifp->if_oerrors++; 2048 else 2049 ifp->if_opackets++; 2050 } 2051 sc->rl_ldata.rl_tx_free++; 2052 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2053 } 2054 sc->rl_ldata.rl_tx_considx = cons; 2055 2056 /* No changes made to the TX ring, so no flush needed */ 2057 2058 if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt) { 2059 #ifdef RE_TX_MODERATION 2060 /* 2061 * If not all descriptors have been reaped yet, reload 2062 * the timer so that we will eventually get another 2063 * interrupt that will cause us to re-enter this routine. 2064 * This is done in case the transmitter has gone idle. 2065 */ 2066 CSR_WRITE_4(sc, RL_TIMERCNT, 1); 2067 #endif 2068 } else 2069 sc->rl_watchdog_timer = 0; 2070 } 2071 2072 static void 2073 re_tick(void *xsc) 2074 { 2075 struct rl_softc *sc; 2076 struct mii_data *mii; 2077 2078 sc = xsc; 2079 2080 RL_LOCK_ASSERT(sc); 2081 2082 mii = device_get_softc(sc->rl_miibus); 2083 mii_tick(mii); 2084 if ((sc->rl_flags & RL_FLAG_LINK) == 0) 2085 re_miibus_statchg(sc->rl_dev); 2086 /* 2087 * Reclaim transmitted frames here. Technically it is not 2088 * necessary to do here but it ensures periodic reclamation 2089 * regardless of Tx completion interrupt which seems to be 2090 * lost on PCIe based controllers under certain situations. 2091 */ 2092 re_txeof(sc); 2093 re_watchdog(sc); 2094 callout_reset(&sc->rl_stat_callout, hz, re_tick, sc); 2095 } 2096 2097 #ifdef DEVICE_POLLING 2098 static int 2099 re_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 2100 { 2101 struct rl_softc *sc = ifp->if_softc; 2102 int rx_npkts = 0; 2103 2104 RL_LOCK(sc); 2105 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2106 rx_npkts = re_poll_locked(ifp, cmd, count); 2107 RL_UNLOCK(sc); 2108 return (rx_npkts); 2109 } 2110 2111 static int 2112 re_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count) 2113 { 2114 struct rl_softc *sc = ifp->if_softc; 2115 int rx_npkts; 2116 2117 RL_LOCK_ASSERT(sc); 2118 2119 sc->rxcycles = count; 2120 re_rxeof(sc, &rx_npkts); 2121 re_txeof(sc); 2122 2123 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2124 taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_txtask); 2125 2126 if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */ 2127 u_int16_t status; 2128 2129 status = CSR_READ_2(sc, RL_ISR); 2130 if (status == 0xffff) 2131 return (rx_npkts); 2132 if (status) 2133 CSR_WRITE_2(sc, RL_ISR, status); 2134 if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) && 2135 (sc->rl_flags & RL_FLAG_PCIE)) 2136 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); 2137 2138 /* 2139 * XXX check behaviour on receiver stalls. 
2140 */ 2141 2142 if (status & RL_ISR_SYSTEM_ERR) 2143 re_init_locked(sc); 2144 } 2145 return (rx_npkts); 2146 } 2147 #endif /* DEVICE_POLLING */ 2148 2149 static int 2150 re_intr(void *arg) 2151 { 2152 struct rl_softc *sc; 2153 uint16_t status; 2154 2155 sc = arg; 2156 2157 status = CSR_READ_2(sc, RL_ISR); 2158 if (status == 0xFFFF || (status & RL_INTRS_CPLUS) == 0) 2159 return (FILTER_STRAY); 2160 CSR_WRITE_2(sc, RL_IMR, 0); 2161 2162 taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_inttask); 2163 2164 return (FILTER_HANDLED); 2165 } 2166 2167 static void 2168 re_int_task(void *arg, int npending) 2169 { 2170 struct rl_softc *sc; 2171 struct ifnet *ifp; 2172 u_int16_t status; 2173 int rval = 0; 2174 2175 sc = arg; 2176 ifp = sc->rl_ifp; 2177 2178 RL_LOCK(sc); 2179 2180 status = CSR_READ_2(sc, RL_ISR); 2181 CSR_WRITE_2(sc, RL_ISR, status); 2182 2183 if (sc->suspended || 2184 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 2185 RL_UNLOCK(sc); 2186 return; 2187 } 2188 2189 #ifdef DEVICE_POLLING 2190 if (ifp->if_capenable & IFCAP_POLLING) { 2191 RL_UNLOCK(sc); 2192 return; 2193 } 2194 #endif 2195 2196 if (status & (RL_ISR_RX_OK|RL_ISR_RX_ERR|RL_ISR_FIFO_OFLOW)) 2197 rval = re_rxeof(sc, NULL); 2198 2199 /* 2200 * Some chips will ignore a second TX request issued 2201 * while an existing transmission is in progress. If 2202 * the transmitter goes idle but there are still 2203 * packets waiting to be sent, we need to restart the 2204 * channel here to flush them out. This only seems to 2205 * be required with the PCIe devices. 2206 */ 2207 if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) && 2208 (sc->rl_flags & RL_FLAG_PCIE)) 2209 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); 2210 if (status & ( 2211 #ifdef RE_TX_MODERATION 2212 RL_ISR_TIMEOUT_EXPIRED| 2213 #else 2214 RL_ISR_TX_OK| 2215 #endif 2216 RL_ISR_TX_ERR|RL_ISR_TX_DESC_UNAVAIL)) 2217 re_txeof(sc); 2218 2219 if (status & RL_ISR_SYSTEM_ERR) 2220 re_init_locked(sc); 2221 2222 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2223 taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_txtask); 2224 2225 RL_UNLOCK(sc); 2226 2227 if ((CSR_READ_2(sc, RL_ISR) & RL_INTRS_CPLUS) || rval) { 2228 taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_inttask); 2229 return; 2230 } 2231 2232 CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS); 2233 } 2234 2235 static int 2236 re_encap(struct rl_softc *sc, struct mbuf **m_head) 2237 { 2238 struct rl_txdesc *txd, *txd_last; 2239 bus_dma_segment_t segs[RL_NTXSEGS]; 2240 bus_dmamap_t map; 2241 struct mbuf *m_new; 2242 struct rl_desc *desc; 2243 int nsegs, prod; 2244 int i, error, ei, si; 2245 int padlen; 2246 uint32_t cmdstat, csum_flags, vlanctl; 2247 2248 RL_LOCK_ASSERT(sc); 2249 M_ASSERTPKTHDR((*m_head)); 2250 2251 /* 2252 * With some of the RealTek chips, using the checksum offload 2253 * support in conjunction with the autopadding feature results 2254 * in the transmission of corrupt frames. For example, if we 2255 * need to send a really small IP fragment that's less than 60 2256 * bytes in size, and IP header checksumming is enabled, the 2257 * resulting ethernet frame that appears on the wire will 2258 * have garbled payload. To work around this, if TX IP checksum 2259 * offload is enabled, we always manually pad short frames out 2260 * to the minimum ethernet frame size. 
2261 */ 2262 if ((sc->rl_flags & RL_FLAG_AUTOPAD) == 0 && 2263 (*m_head)->m_pkthdr.len < RL_IP4CSUMTX_PADLEN && 2264 ((*m_head)->m_pkthdr.csum_flags & CSUM_IP) != 0) { 2265 padlen = RL_MIN_FRAMELEN - (*m_head)->m_pkthdr.len; 2266 if (M_WRITABLE(*m_head) == 0) { 2267 /* Get a writable copy. */ 2268 m_new = m_dup(*m_head, M_DONTWAIT); 2269 m_freem(*m_head); 2270 if (m_new == NULL) { 2271 *m_head = NULL; 2272 return (ENOBUFS); 2273 } 2274 *m_head = m_new; 2275 } 2276 if ((*m_head)->m_next != NULL || 2277 M_TRAILINGSPACE(*m_head) < padlen) { 2278 m_new = m_defrag(*m_head, M_DONTWAIT); 2279 if (m_new == NULL) { 2280 m_freem(*m_head); 2281 *m_head = NULL; 2282 return (ENOBUFS); 2283 } 2284 } else 2285 m_new = *m_head; 2286 2287 /* 2288 * Manually pad short frames, and zero the pad space 2289 * to avoid leaking data. 2290 */ 2291 bzero(mtod(m_new, char *) + m_new->m_pkthdr.len, padlen); 2292 m_new->m_pkthdr.len += padlen; 2293 m_new->m_len = m_new->m_pkthdr.len; 2294 *m_head = m_new; 2295 } 2296 2297 prod = sc->rl_ldata.rl_tx_prodidx; 2298 txd = &sc->rl_ldata.rl_tx_desc[prod]; 2299 error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap, 2300 *m_head, segs, &nsegs, BUS_DMA_NOWAIT); 2301 if (error == EFBIG) { 2302 m_new = m_collapse(*m_head, M_DONTWAIT, RL_NTXSEGS); 2303 if (m_new == NULL) { 2304 m_freem(*m_head); 2305 *m_head = NULL; 2306 return (ENOBUFS); 2307 } 2308 *m_head = m_new; 2309 error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag, 2310 txd->tx_dmamap, *m_head, segs, &nsegs, BUS_DMA_NOWAIT); 2311 if (error != 0) { 2312 m_freem(*m_head); 2313 *m_head = NULL; 2314 return (error); 2315 } 2316 } else if (error != 0) 2317 return (error); 2318 if (nsegs == 0) { 2319 m_freem(*m_head); 2320 *m_head = NULL; 2321 return (EIO); 2322 } 2323 2324 /* Check for number of available descriptors. */ 2325 if (sc->rl_ldata.rl_tx_free - nsegs <= 1) { 2326 bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap); 2327 return (ENOBUFS); 2328 } 2329 2330 bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap, 2331 BUS_DMASYNC_PREWRITE); 2332 2333 /* 2334 * Set up checksum offload. Note: checksum offload bits must 2335 * appear in all descriptors of a multi-descriptor transmit 2336 * attempt. This is according to testing done with an 8169 2337 * chip. This is a requirement. 2338 */ 2339 vlanctl = 0; 2340 csum_flags = 0; 2341 if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) 2342 csum_flags = RL_TDESC_CMD_LGSEND | 2343 ((uint32_t)(*m_head)->m_pkthdr.tso_segsz << 2344 RL_TDESC_CMD_MSSVAL_SHIFT); 2345 else { 2346 /* 2347 * Unconditionally enable IP checksum if TCP or UDP 2348 * checksum is required. Otherwise, TCP/UDP checksum 2349 * does't make effects. 2350 */ 2351 if (((*m_head)->m_pkthdr.csum_flags & RE_CSUM_FEATURES) != 0) { 2352 if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) { 2353 csum_flags |= RL_TDESC_CMD_IPCSUM; 2354 if (((*m_head)->m_pkthdr.csum_flags & 2355 CSUM_TCP) != 0) 2356 csum_flags |= RL_TDESC_CMD_TCPCSUM; 2357 if (((*m_head)->m_pkthdr.csum_flags & 2358 CSUM_UDP) != 0) 2359 csum_flags |= RL_TDESC_CMD_UDPCSUM; 2360 } else { 2361 vlanctl |= RL_TDESC_CMD_IPCSUMV2; 2362 if (((*m_head)->m_pkthdr.csum_flags & 2363 CSUM_TCP) != 0) 2364 vlanctl |= RL_TDESC_CMD_TCPCSUMV2; 2365 if (((*m_head)->m_pkthdr.csum_flags & 2366 CSUM_UDP) != 0) 2367 vlanctl |= RL_TDESC_CMD_UDPCSUMV2; 2368 } 2369 } 2370 } 2371 2372 /* 2373 * Set up hardware VLAN tagging. Note: vlan tag info must 2374 * appear in all descriptors of a multi-descriptor 2375 * transmission attempt. 
2376 */ 2377 if ((*m_head)->m_flags & M_VLANTAG) 2378 vlanctl |= bswap16((*m_head)->m_pkthdr.ether_vtag) | 2379 RL_TDESC_VLANCTL_TAG; 2380 2381 si = prod; 2382 for (i = 0; i < nsegs; i++, prod = RL_TX_DESC_NXT(sc, prod)) { 2383 desc = &sc->rl_ldata.rl_tx_list[prod]; 2384 desc->rl_vlanctl = htole32(vlanctl); 2385 desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[i].ds_addr)); 2386 desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[i].ds_addr)); 2387 cmdstat = segs[i].ds_len; 2388 if (i != 0) 2389 cmdstat |= RL_TDESC_CMD_OWN; 2390 if (prod == sc->rl_ldata.rl_tx_desc_cnt - 1) 2391 cmdstat |= RL_TDESC_CMD_EOR; 2392 desc->rl_cmdstat = htole32(cmdstat | csum_flags); 2393 sc->rl_ldata.rl_tx_free--; 2394 } 2395 /* Update producer index. */ 2396 sc->rl_ldata.rl_tx_prodidx = prod; 2397 2398 /* Set EOF on the last descriptor. */ 2399 ei = RL_TX_DESC_PRV(sc, prod); 2400 desc = &sc->rl_ldata.rl_tx_list[ei]; 2401 desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF); 2402 2403 desc = &sc->rl_ldata.rl_tx_list[si]; 2404 /* Set SOF and transfer ownership of packet to the chip. */ 2405 desc->rl_cmdstat |= htole32(RL_TDESC_CMD_OWN | RL_TDESC_CMD_SOF); 2406 2407 /* 2408 * Insure that the map for this transmission 2409 * is placed at the array index of the last descriptor 2410 * in this chain. (Swap last and first dmamaps.) 2411 */ 2412 txd_last = &sc->rl_ldata.rl_tx_desc[ei]; 2413 map = txd->tx_dmamap; 2414 txd->tx_dmamap = txd_last->tx_dmamap; 2415 txd_last->tx_dmamap = map; 2416 txd_last->tx_m = *m_head; 2417 2418 return (0); 2419 } 2420 2421 static void 2422 re_tx_task(void *arg, int npending) 2423 { 2424 struct ifnet *ifp; 2425 2426 ifp = arg; 2427 re_start(ifp); 2428 } 2429 2430 /* 2431 * Main transmit routine for C+ and gigE NICs. 2432 */ 2433 static void 2434 re_start(struct ifnet *ifp) 2435 { 2436 struct rl_softc *sc; 2437 struct mbuf *m_head; 2438 int queued; 2439 2440 sc = ifp->if_softc; 2441 2442 RL_LOCK(sc); 2443 2444 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 2445 IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0) { 2446 RL_UNLOCK(sc); 2447 return; 2448 } 2449 2450 for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && 2451 sc->rl_ldata.rl_tx_free > 1;) { 2452 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 2453 if (m_head == NULL) 2454 break; 2455 2456 if (re_encap(sc, &m_head) != 0) { 2457 if (m_head == NULL) 2458 break; 2459 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 2460 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2461 break; 2462 } 2463 2464 /* 2465 * If there's a BPF listener, bounce a copy of this frame 2466 * to him. 2467 */ 2468 ETHER_BPF_MTAP(ifp, m_head); 2469 2470 queued++; 2471 } 2472 2473 if (queued == 0) { 2474 #ifdef RE_TX_MODERATION 2475 if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt) 2476 CSR_WRITE_4(sc, RL_TIMERCNT, 1); 2477 #endif 2478 RL_UNLOCK(sc); 2479 return; 2480 } 2481 2482 /* Flush the TX descriptors */ 2483 2484 bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, 2485 sc->rl_ldata.rl_tx_list_map, 2486 BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); 2487 2488 CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); 2489 2490 #ifdef RE_TX_MODERATION 2491 /* 2492 * Use the countdown timer for interrupt moderation. 2493 * 'TX done' interrupts are disabled. Instead, we reset the 2494 * countdown timer, which will begin counting until it hits 2495 * the value in the TIMERINT register, and then trigger an 2496 * interrupt. Each time we write to the TIMERCNT register, 2497 * the timer count is reset to 0. 
2498 */ 2499 CSR_WRITE_4(sc, RL_TIMERCNT, 1); 2500 #endif 2501 2502 /* 2503 * Set a timeout in case the chip goes out to lunch. 2504 */ 2505 sc->rl_watchdog_timer = 5; 2506 2507 RL_UNLOCK(sc); 2508 } 2509 2510 static void 2511 re_init(void *xsc) 2512 { 2513 struct rl_softc *sc = xsc; 2514 2515 RL_LOCK(sc); 2516 re_init_locked(sc); 2517 RL_UNLOCK(sc); 2518 } 2519 2520 static void 2521 re_init_locked(struct rl_softc *sc) 2522 { 2523 struct ifnet *ifp = sc->rl_ifp; 2524 struct mii_data *mii; 2525 uint32_t reg; 2526 uint16_t cfg; 2527 union { 2528 uint32_t align_dummy; 2529 u_char eaddr[ETHER_ADDR_LEN]; 2530 } eaddr; 2531 2532 RL_LOCK_ASSERT(sc); 2533 2534 mii = device_get_softc(sc->rl_miibus); 2535 2536 /* 2537 * Cancel pending I/O and free all RX/TX buffers. 2538 */ 2539 re_stop(sc); 2540 2541 /* Put controller into known state. */ 2542 re_reset(sc); 2543 2544 /* 2545 * Enable C+ RX and TX mode, as well as VLAN stripping and 2546 * RX checksum offload. We must configure the C+ register 2547 * before all others. 2548 */ 2549 cfg = RL_CPLUSCMD_PCI_MRW; 2550 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 2551 cfg |= RL_CPLUSCMD_RXCSUM_ENB; 2552 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 2553 cfg |= RL_CPLUSCMD_VLANSTRIP; 2554 if ((sc->rl_flags & RL_FLAG_MACSTAT) != 0) { 2555 cfg |= RL_CPLUSCMD_MACSTAT_DIS; 2556 /* XXX magic. */ 2557 cfg |= 0x0001; 2558 } else 2559 cfg |= RL_CPLUSCMD_RXENB | RL_CPLUSCMD_TXENB; 2560 CSR_WRITE_2(sc, RL_CPLUS_CMD, cfg); 2561 if (sc->rl_hwrev == RL_HWREV_8169_8110SC || 2562 sc->rl_hwrev == RL_HWREV_8169_8110SCE) { 2563 reg = 0x000fff00; 2564 if ((CSR_READ_1(sc, RL_CFG2) & RL_CFG2_PCI66MHZ) != 0) 2565 reg |= 0x000000ff; 2566 if (sc->rl_hwrev == RL_HWREV_8169_8110SCE) 2567 reg |= 0x00f00000; 2568 CSR_WRITE_4(sc, 0x7c, reg); 2569 /* Disable interrupt mitigation. */ 2570 CSR_WRITE_2(sc, 0xe2, 0); 2571 } 2572 /* 2573 * Disable TSO if interface MTU size is greater than MSS 2574 * allowed in controller. 2575 */ 2576 if (ifp->if_mtu > RL_TSO_MTU && (ifp->if_capenable & IFCAP_TSO4) != 0) { 2577 ifp->if_capenable &= ~IFCAP_TSO4; 2578 ifp->if_hwassist &= ~CSUM_TSO; 2579 } 2580 2581 /* 2582 * Init our MAC address. Even though the chipset 2583 * documentation doesn't mention it, we need to enter "Config 2584 * register write enable" mode to modify the ID registers. 2585 */ 2586 /* Copy MAC address on stack to align. */ 2587 bcopy(IF_LLADDR(ifp), eaddr.eaddr, ETHER_ADDR_LEN); 2588 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG); 2589 CSR_WRITE_4(sc, RL_IDR0, 2590 htole32(*(u_int32_t *)(&eaddr.eaddr[0]))); 2591 CSR_WRITE_4(sc, RL_IDR4, 2592 htole32(*(u_int32_t *)(&eaddr.eaddr[4]))); 2593 CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); 2594 2595 /* 2596 * For C+ mode, initialize the RX descriptors and mbufs. 2597 */ 2598 re_rx_list_init(sc); 2599 re_tx_list_init(sc); 2600 2601 /* 2602 * Load the addresses of the RX and TX lists into the chip. 2603 */ 2604 2605 CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI, 2606 RL_ADDR_HI(sc->rl_ldata.rl_rx_list_addr)); 2607 CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO, 2608 RL_ADDR_LO(sc->rl_ldata.rl_rx_list_addr)); 2609 2610 CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI, 2611 RL_ADDR_HI(sc->rl_ldata.rl_tx_list_addr)); 2612 CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO, 2613 RL_ADDR_LO(sc->rl_ldata.rl_tx_list_addr)); 2614 2615 /* 2616 * Enable transmit and receive. 2617 */ 2618 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); 2619 2620 /* 2621 * Set the initial TX configuration. 
2622 */ 2623 if (sc->rl_testmode) { 2624 if (sc->rl_type == RL_8169) 2625 CSR_WRITE_4(sc, RL_TXCFG, 2626 RL_TXCFG_CONFIG|RL_LOOPTEST_ON); 2627 else 2628 CSR_WRITE_4(sc, RL_TXCFG, 2629 RL_TXCFG_CONFIG|RL_LOOPTEST_ON_CPLUS); 2630 } else 2631 CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG); 2632 2633 CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16); 2634 2635 /* 2636 * Set the initial RX configuration. 2637 */ 2638 re_set_rxmode(sc); 2639 2640 #ifdef DEVICE_POLLING 2641 /* 2642 * Disable interrupts if we are polling. 2643 */ 2644 if (ifp->if_capenable & IFCAP_POLLING) 2645 CSR_WRITE_2(sc, RL_IMR, 0); 2646 else /* otherwise ... */ 2647 #endif 2648 2649 /* 2650 * Enable interrupts. 2651 */ 2652 if (sc->rl_testmode) 2653 CSR_WRITE_2(sc, RL_IMR, 0); 2654 else 2655 CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS); 2656 CSR_WRITE_2(sc, RL_ISR, RL_INTRS_CPLUS); 2657 2658 /* Set initial TX threshold */ 2659 sc->rl_txthresh = RL_TX_THRESH_INIT; 2660 2661 /* Start RX/TX process. */ 2662 CSR_WRITE_4(sc, RL_MISSEDPKT, 0); 2663 #ifdef notdef 2664 /* Enable receiver and transmitter. */ 2665 CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); 2666 #endif 2667 2668 #ifdef RE_TX_MODERATION 2669 /* 2670 * Initialize the timer interrupt register so that 2671 * a timer interrupt will be generated once the timer 2672 * reaches a certain number of ticks. The timer is 2673 * reloaded on each transmit. This gives us TX interrupt 2674 * moderation, which dramatically improves TX frame rate. 2675 */ 2676 if (sc->rl_type == RL_8169) 2677 CSR_WRITE_4(sc, RL_TIMERINT_8169, 0x800); 2678 else 2679 CSR_WRITE_4(sc, RL_TIMERINT, 0x400); 2680 #endif 2681 2682 /* 2683 * For 8169 gigE NICs, set the max allowed RX packet 2684 * size so we can receive jumbo frames. 2685 */ 2686 if (sc->rl_type == RL_8169) 2687 CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383); 2688 2689 if (sc->rl_testmode) 2690 return; 2691 2692 mii_mediachg(mii); 2693 2694 CSR_WRITE_1(sc, RL_CFG1, CSR_READ_1(sc, RL_CFG1) | RL_CFG1_DRVLOAD); 2695 2696 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2697 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2698 2699 sc->rl_flags &= ~RL_FLAG_LINK; 2700 sc->rl_watchdog_timer = 0; 2701 callout_reset(&sc->rl_stat_callout, hz, re_tick, sc); 2702 } 2703 2704 /* 2705 * Set media options. 2706 */ 2707 static int 2708 re_ifmedia_upd(struct ifnet *ifp) 2709 { 2710 struct rl_softc *sc; 2711 struct mii_data *mii; 2712 int error; 2713 2714 sc = ifp->if_softc; 2715 mii = device_get_softc(sc->rl_miibus); 2716 RL_LOCK(sc); 2717 error = mii_mediachg(mii); 2718 RL_UNLOCK(sc); 2719 2720 return (error); 2721 } 2722 2723 /* 2724 * Report current media status. 
2725 */ 2726 static void 2727 re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2728 { 2729 struct rl_softc *sc; 2730 struct mii_data *mii; 2731 2732 sc = ifp->if_softc; 2733 mii = device_get_softc(sc->rl_miibus); 2734 2735 RL_LOCK(sc); 2736 mii_pollstat(mii); 2737 RL_UNLOCK(sc); 2738 ifmr->ifm_active = mii->mii_media_active; 2739 ifmr->ifm_status = mii->mii_media_status; 2740 } 2741 2742 static int 2743 re_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 2744 { 2745 struct rl_softc *sc = ifp->if_softc; 2746 struct ifreq *ifr = (struct ifreq *) data; 2747 struct mii_data *mii; 2748 int error = 0; 2749 2750 switch (command) { 2751 case SIOCSIFMTU: 2752 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > RL_JUMBO_MTU) { 2753 error = EINVAL; 2754 break; 2755 } 2756 if ((sc->rl_flags & RL_FLAG_NOJUMBO) != 0 && 2757 ifr->ifr_mtu > RL_MAX_FRAMELEN) { 2758 error = EINVAL; 2759 break; 2760 } 2761 RL_LOCK(sc); 2762 if (ifp->if_mtu != ifr->ifr_mtu) 2763 ifp->if_mtu = ifr->ifr_mtu; 2764 if (ifp->if_mtu > RL_TSO_MTU && 2765 (ifp->if_capenable & IFCAP_TSO4) != 0) { 2766 ifp->if_capenable &= ~IFCAP_TSO4; 2767 ifp->if_hwassist &= ~CSUM_TSO; 2768 } 2769 RL_UNLOCK(sc); 2770 break; 2771 case SIOCSIFFLAGS: 2772 RL_LOCK(sc); 2773 if ((ifp->if_flags & IFF_UP) != 0) { 2774 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 2775 if (((ifp->if_flags ^ sc->rl_if_flags) 2776 & (IFF_PROMISC | IFF_ALLMULTI)) != 0) 2777 re_set_rxmode(sc); 2778 } else 2779 re_init_locked(sc); 2780 } else { 2781 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 2782 re_stop(sc); 2783 } 2784 sc->rl_if_flags = ifp->if_flags; 2785 RL_UNLOCK(sc); 2786 break; 2787 case SIOCADDMULTI: 2788 case SIOCDELMULTI: 2789 RL_LOCK(sc); 2790 re_set_rxmode(sc); 2791 RL_UNLOCK(sc); 2792 break; 2793 case SIOCGIFMEDIA: 2794 case SIOCSIFMEDIA: 2795 mii = device_get_softc(sc->rl_miibus); 2796 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 2797 break; 2798 case SIOCSIFCAP: 2799 { 2800 int mask, reinit; 2801 2802 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 2803 reinit = 0; 2804 #ifdef DEVICE_POLLING 2805 if (mask & IFCAP_POLLING) { 2806 if (ifr->ifr_reqcap & IFCAP_POLLING) { 2807 error = ether_poll_register(re_poll, ifp); 2808 if (error) 2809 return(error); 2810 RL_LOCK(sc); 2811 /* Disable interrupts */ 2812 CSR_WRITE_2(sc, RL_IMR, 0x0000); 2813 ifp->if_capenable |= IFCAP_POLLING; 2814 RL_UNLOCK(sc); 2815 } else { 2816 error = ether_poll_deregister(ifp); 2817 /* Enable interrupts. 
				RL_LOCK(sc);
				CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
				ifp->if_capenable &= ~IFCAP_POLLING;
				RL_UNLOCK(sc);
			}
		}
#endif /* DEVICE_POLLING */
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= RE_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~RE_CSUM_FEATURES;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if (mask & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if ((IFCAP_TSO4 & ifp->if_capenable) &&
			    (IFCAP_TSO4 & ifp->if_capabilities))
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
			if (ifp->if_mtu > RL_TSO_MTU &&
			    (ifp->if_capenable & IFCAP_TSO4) != 0) {
				ifp->if_capenable &= ~IFCAP_TSO4;
				ifp->if_hwassist &= ~CSUM_TSO;
			}
		}
		if ((mask & IFCAP_WOL) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL) != 0) {
			if ((mask & IFCAP_WOL_UCAST) != 0)
				ifp->if_capenable ^= IFCAP_WOL_UCAST;
			if ((mask & IFCAP_WOL_MCAST) != 0)
				ifp->if_capenable ^= IFCAP_WOL_MCAST;
			if ((mask & IFCAP_WOL_MAGIC) != 0)
				ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		}
		if (reinit && ifp->if_drv_flags & IFF_DRV_RUNNING)
			re_init(sc);
		VLAN_CAPABILITIES(ifp);
	    }
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

static void
re_watchdog(struct rl_softc *sc)
{
	struct ifnet *ifp;

	RL_LOCK_ASSERT(sc);

	if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer != 0)
		return;

	ifp = sc->rl_ifp;
	re_txeof(sc);
	if (sc->rl_ldata.rl_tx_free == sc->rl_ldata.rl_tx_desc_cnt) {
		if_printf(ifp, "watchdog timeout (missed Tx interrupts) "
		    "-- recovering\n");
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_txtask);
		return;
	}

	if_printf(ifp, "watchdog timeout\n");
	ifp->if_oerrors++;

	re_rxeof(sc, NULL);
	re_init_locked(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_txtask);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
re_stop(struct rl_softc *sc)
{
	int i;
	struct ifnet *ifp;
	struct rl_txdesc *txd;
	struct rl_rxdesc *rxd;

	RL_LOCK_ASSERT(sc);

	ifp = sc->rl_ifp;

	sc->rl_watchdog_timer = 0;
	callout_stop(&sc->rl_stat_callout);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	if ((sc->rl_flags & RL_FLAG_CMDSTOP) != 0)
		CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_STOPREQ | RL_CMD_TX_ENB |
		    RL_CMD_RX_ENB);
	else
		CSR_WRITE_1(sc, RL_COMMAND, 0x00);
	DELAY(1000);
	CSR_WRITE_2(sc, RL_IMR, 0x0000);
	CSR_WRITE_2(sc, RL_ISR, 0xFFFF);

	if (sc->rl_head != NULL) {
		m_freem(sc->rl_head);
		sc->rl_head = sc->rl_tail = NULL;
	}

	/* Free the TX list buffers. */

	for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) {
		txd = &sc->rl_ldata.rl_tx_desc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	/* Free the RX list buffers. */

	for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) {
		rxd = &sc->rl_ldata.rl_rx_desc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
}

/*
 * Device suspend routine. Stop the interface and save some PCI
 * settings in case the BIOS doesn't restore them properly on
 * resume.
 */
static int
re_suspend(device_t dev)
{
	struct rl_softc *sc;

	sc = device_get_softc(dev);

	RL_LOCK(sc);
	re_stop(sc);
	re_setwol(sc);
	sc->suspended = 1;
	RL_UNLOCK(sc);

	return (0);
}

/*
 * Device resume routine. Restore some PCI settings in case the BIOS
 * doesn't, re-enable busmastering, and restart the interface if
 * appropriate.
 */
static int
re_resume(device_t dev)
{
	struct rl_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	RL_LOCK(sc);

	ifp = sc->rl_ifp;
	/* Take controller out of sleep mode. */
	if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
		if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80)
			CSR_WRITE_1(sc, RL_GPIO,
			    CSR_READ_1(sc, RL_GPIO) | 0x01);
	}

	/* reinitialize interface if necessary */
	if (ifp->if_flags & IFF_UP)
		re_init_locked(sc);

	/*
	 * Clear WOL matching such that normal Rx filtering
	 * wouldn't interfere with WOL patterns.
	 */
	re_clrwol(sc);
	sc->suspended = 0;
	RL_UNLOCK(sc);

	return (0);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
re_shutdown(device_t dev)
{
	struct rl_softc *sc;

	sc = device_get_softc(dev);

	RL_LOCK(sc);
	re_stop(sc);
	/*
	 * Mark interface as down since otherwise we will panic if
	 * interrupt comes in later on, which can happen in some
	 * cases.
	 */
	sc->rl_ifp->if_flags &= ~IFF_UP;
	re_setwol(sc);
	RL_UNLOCK(sc);

	return (0);
}

static void
re_setwol(struct rl_softc *sc)
{
	struct ifnet *ifp;
	int pmc;
	uint16_t pmstat;
	uint8_t v;

	RL_LOCK_ASSERT(sc);

	if (pci_find_extcap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
		return;

	ifp = sc->rl_ifp;
	/* Put controller into sleep mode. */
	if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
		if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80)
			CSR_WRITE_1(sc, RL_GPIO,
			    CSR_READ_1(sc, RL_GPIO) & ~0x01);
	}
	if ((ifp->if_capenable & IFCAP_WOL) != 0 &&
	    (sc->rl_flags & RL_FLAG_WOLRXENB) != 0)
		CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RX_ENB);
	/* Enable config register write. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);

	/* Enable PME. */
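	/*
	 * Note: the CFG1 and CFG3 writes below only take effect while
	 * config register writes are enabled (the RL_EE_MODE write
	 * above); CFG5, by contrast, is apparently writable even after
	 * RL_EEMODE_OFF restores the lock.
	 */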
	v = CSR_READ_1(sc, RL_CFG1);
	v &= ~RL_CFG1_PME;
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		v |= RL_CFG1_PME;
	CSR_WRITE_1(sc, RL_CFG1, v);

	v = CSR_READ_1(sc, RL_CFG3);
	v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
		v |= RL_CFG3_WOL_MAGIC;
	CSR_WRITE_1(sc, RL_CFG3, v);

	/* Config register write done. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	v = CSR_READ_1(sc, RL_CFG5);
	v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST);
	v &= ~RL_CFG5_WOL_LANWAKE;
	if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0)
		v |= RL_CFG5_WOL_UCAST;
	if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
		v |= RL_CFG5_WOL_MCAST | RL_CFG5_WOL_BCAST;
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		v |= RL_CFG5_WOL_LANWAKE;
	CSR_WRITE_1(sc, RL_CFG5, v);

	/*
	 * It seems that the hardware resets its link speed to 100Mbps in
	 * power down mode, so switching to 100Mbps in the driver is not
	 * needed.
	 */

	/* Request PME if WOL is requested. */
	pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
}

static void
re_clrwol(struct rl_softc *sc)
{
	int pmc;
	uint8_t v;

	RL_LOCK_ASSERT(sc);

	if (pci_find_extcap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
		return;

	/* Enable config register write. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);

	v = CSR_READ_1(sc, RL_CFG3);
	v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC);
	CSR_WRITE_1(sc, RL_CFG3, v);

	/* Config register write done. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	v = CSR_READ_1(sc, RL_CFG5);
	v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST);
	v &= ~RL_CFG5_WOL_LANWAKE;
	CSR_WRITE_1(sc, RL_CFG5, v);
}