/*-
 * Copyright (c) 1997, 1998-2003
 *	Bill Paul <wpaul@windriver.com>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * RealTek 8139C+/8169/8169S/8110S/8168/8111/8101E PCI NIC driver
 *
 * Written by Bill Paul <wpaul@windriver.com>
 * Senior Networking Software Engineer
 * Wind River Systems
 */

/*
 * This driver is designed to support RealTek's next generation of
 * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently
 * seven devices in this family: the RTL8139C+, the RTL8169, the RTL8169S,
 * the RTL8110S, the RTL8168, the RTL8111 and the RTL8101E.
 *
 * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible
 * with the older 8139 family, however it also supports a special
 * C+ mode of operation that provides several new performance enhancing
 * features. These include:
 *
 *	o Descriptor based DMA mechanism. Each descriptor represents
 *	  a single packet fragment. Data buffers may be aligned on
 *	  any byte boundary.
 *
 *	o 64-bit DMA
 *
 *	o TCP/IP checksum offload for both RX and TX
 *
 *	o High and normal priority transmit DMA rings
 *
 *	o VLAN tag insertion and extraction
 *
 *	o TCP large send (segmentation offload)
 *
 * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+
 * programming API is fairly straightforward. The RX filtering, EEPROM
 * access and PHY access are the same as on the older 8139 series
 * chips.
 *
 * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the
 * same programming API and feature set as the 8139C+ with the following
 * differences and additions:
 *
 *	o 1000Mbps mode
 *
 *	o Jumbo frames
 *
 *	o GMII and TBI ports/registers for interfacing with copper
 *	  or fiber PHYs
 *
 *	o RX and TX DMA rings can have up to 1024 descriptors
 *	  (the 8139C+ allows a maximum of 64)
 *
 *	o Slight differences in register layout from the 8139C+
 *
 * The TX start and timer interrupt registers are at different locations
 * on the 8169 than they are on the 8139C+. Also, the status word in the
 * RX descriptor has a slightly different bit layout. The 8169 does not
 * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska'
 * copper gigE PHY.
 *
 * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs
 * (the 'S' stands for 'single-chip'). These devices have the same
 * programming API as the older 8169, but also have some vendor-specific
 * registers for the on-board PHY. The 8110S is a LAN-on-motherboard
 * part designed to be pin-compatible with the RealTek 8100 10/100 chip.
 *
 * This driver takes advantage of the RX and TX checksum offload and
 * VLAN tag insertion/extraction features. It also implements TX
 * interrupt moderation using the timer interrupt registers, which
 * significantly reduces TX interrupt load. There is also support
 * for jumbo frames, however the 8169/8169S/8110S cannot transmit
 * jumbo frames larger than 7440 bytes, so the maximum MTU possible
 * with this driver is 7422 bytes.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/endian.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/taskqueue.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

MODULE_DEPEND(re, pci, 1, 1, 1);
MODULE_DEPEND(re, ether, 1, 1, 1);
MODULE_DEPEND(re, miibus, 1, 1, 1);

/* "device miibus" required. See GENERIC if you get errors here. */
#include "miibus_if.h"

/*
 * Default to using PIO access for this driver.
 */
#define RE_USEIOSPACE

#include <pci/if_rlreg.h>

/* Tunables. */
static int msi_disable = 0;
TUNABLE_INT("hw.re.msi_disable", &msi_disable);
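
/*
 * Like any loader tunable, this is picked up from the kernel environment
 * at boot; e.g. setting hw.re.msi_disable=1 in /boot/loader.conf makes
 * the driver skip MSI allocation and fall back to a legacy INTx interrupt.
 */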

#define RE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

/*
 * Various supported device vendors/types and their names.
 */
static struct rl_type re_devs[] = {
	{ DLINK_VENDORID, DLINK_DEVICEID_528T, RL_HWREV_8169S,
	    "D-Link DGE-528(T) Gigabit Ethernet Adapter" },
	{ DLINK_VENDORID, DLINK_DEVICEID_528T, RL_HWREV_8169_8110SB,
	    "D-Link DGE-528(T) Rev.B1 Gigabit Ethernet Adapter" },
	{ RT_VENDORID, RT_DEVICEID_8139, RL_HWREV_8139CPLUS,
	    "RealTek 8139C+ 10/100BaseTX" },
	{ RT_VENDORID, RT_DEVICEID_8101E, RL_HWREV_8101E,
	    "RealTek 8101E PCIe 10/100baseTX" },
	{ RT_VENDORID, RT_DEVICEID_8168, RL_HWREV_8168_SPIN1,
	    "RealTek 8168/8111B PCIe Gigabit Ethernet" },
	{ RT_VENDORID, RT_DEVICEID_8168, RL_HWREV_8168_SPIN2,
	    "RealTek 8168/8111B PCIe Gigabit Ethernet" },
	{ RT_VENDORID, RT_DEVICEID_8168, RL_HWREV_8168_SPIN3,
	    "RealTek 8168/8111B PCIe Gigabit Ethernet" },
	{ RT_VENDORID, RT_DEVICEID_8169, RL_HWREV_8169,
	    "RealTek 8169 Gigabit Ethernet" },
	{ RT_VENDORID, RT_DEVICEID_8169, RL_HWREV_8169S,
	    "RealTek 8169S Single-chip Gigabit Ethernet" },
	{ RT_VENDORID, RT_DEVICEID_8169, RL_HWREV_8169_8110SB,
	    "RealTek 8169SB/8110SB Single-chip Gigabit Ethernet" },
	{ RT_VENDORID, RT_DEVICEID_8169, RL_HWREV_8169_8110SC,
	    "RealTek 8169SC/8110SC Single-chip Gigabit Ethernet" },
	{ RT_VENDORID, RT_DEVICEID_8169SC, RL_HWREV_8169_8110SC,
	    "RealTek 8169SC/8110SC Single-chip Gigabit Ethernet" },
	{ RT_VENDORID, RT_DEVICEID_8169, RL_HWREV_8110S,
	    "RealTek 8110S Single-chip Gigabit Ethernet" },
	{ COREGA_VENDORID, COREGA_DEVICEID_CGLAPCIGT, RL_HWREV_8169S,
	    "Corega CG-LAPCIGT (RTL8169S) Gigabit Ethernet" },
	{ LINKSYS_VENDORID, LINKSYS_DEVICEID_EG1032, RL_HWREV_8169S,
	    "Linksys EG1032 (RTL8169S) Gigabit Ethernet" },
	{ USR_VENDORID, USR_DEVICEID_997902, RL_HWREV_8169S,
	    "US Robotics 997902 (RTL8169S) Gigabit Ethernet" },
	{ 0, 0, 0, NULL }
};

static struct rl_hwrev re_hwrevs[] = {
	{ RL_HWREV_8139, RL_8139, "" },
	{ RL_HWREV_8139A, RL_8139, "A" },
	{ RL_HWREV_8139AG, RL_8139, "A-G" },
	{ RL_HWREV_8139B, RL_8139, "B" },
	{ RL_HWREV_8130, RL_8139, "8130" },
	{ RL_HWREV_8139C, RL_8139, "C" },
	{ RL_HWREV_8139D, RL_8139, "8139D/8100B/8100C" },
	{ RL_HWREV_8139CPLUS, RL_8139CPLUS, "C+" },
	{ RL_HWREV_8168_SPIN1, RL_8169, "8168" },
	{ RL_HWREV_8169, RL_8169, "8169" },
	{ RL_HWREV_8169S, RL_8169, "8169S" },
	{ RL_HWREV_8110S, RL_8169, "8110S" },
	{ RL_HWREV_8169_8110SB, RL_8169, "8169SB" },
	{ RL_HWREV_8169_8110SC, RL_8169, "8169SC" },
	{ RL_HWREV_8100, RL_8139, "8100" },
	{ RL_HWREV_8101, RL_8139, "8101" },
	{ RL_HWREV_8100E, RL_8169, "8100E" },
	{ RL_HWREV_8101E, RL_8169, "8101E" },
	{ RL_HWREV_8168_SPIN2, RL_8169, "8168" },
	{ RL_HWREV_8168_SPIN3, RL_8169, "8168" },
	{ 0, 0, NULL }
};

static int re_probe		(device_t);
static int re_attach		(device_t);
static int re_detach		(device_t);

static int re_encap		(struct rl_softc *, struct mbuf **, int *);

static void re_dma_map_addr	(void *, bus_dma_segment_t *, int, int);
static void re_dma_map_desc	(void *, bus_dma_segment_t *, int,
				    bus_size_t, int);
static int re_allocmem		(device_t, struct rl_softc *);
static int re_newbuf		(struct rl_softc *, int, struct mbuf *);
static int re_rx_list_init	(struct rl_softc *);
static int re_tx_list_init	(struct rl_softc *);
#ifdef RE_FIXUP_RX
static __inline void re_fixup_rx
				(struct mbuf *);
#endif
static int re_rxeof		(struct rl_softc *);
static void re_txeof		(struct rl_softc *);
#ifdef DEVICE_POLLING
static void re_poll		(struct ifnet *, enum poll_cmd, int);
static void re_poll_locked	(struct ifnet *, enum poll_cmd, int);
#endif
static int re_intr		(void *);
static void re_tick		(void *);
static void re_tx_task		(void *, int);
static void re_int_task		(void *, int);
static void re_start		(struct ifnet *);
static int re_ioctl		(struct ifnet *, u_long, caddr_t);
static void re_init		(void *);
static void re_init_locked	(struct rl_softc *);
static void re_stop		(struct rl_softc *);
static void re_watchdog		(struct rl_softc *);
static int re_suspend		(device_t);
static int re_resume		(device_t);
static int re_shutdown		(device_t);
static int re_ifmedia_upd	(struct ifnet *);
static void re_ifmedia_sts	(struct ifnet *, struct ifmediareq *);

static void re_eeprom_putbyte	(struct rl_softc *, int);
static void re_eeprom_getword	(struct rl_softc *, int, u_int16_t *);
static void re_read_eeprom	(struct rl_softc *, caddr_t, int, int);
static int re_gmii_readreg	(device_t, int, int);
static int re_gmii_writereg	(device_t, int, int, int);

static int re_miibus_readreg	(device_t, int, int);
static int re_miibus_writereg	(device_t, int, int, int);
static void re_miibus_statchg	(device_t);

static void re_setmulti		(struct rl_softc *);
static void re_reset		(struct rl_softc *);

#ifdef RE_DIAG
static int re_diag		(struct rl_softc *);
#endif

#ifdef RE_USEIOSPACE
#define RL_RES			SYS_RES_IOPORT
#define RL_RID			RL_PCI_LOIO
#else
#define RL_RES			SYS_RES_MEMORY
#define RL_RID			RL_PCI_LOMEM
#endif

static device_method_t re_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		re_probe),
	DEVMETHOD(device_attach,	re_attach),
	DEVMETHOD(device_detach,	re_detach),
	DEVMETHOD(device_suspend,	re_suspend),
	DEVMETHOD(device_resume,	re_resume),
	DEVMETHOD(device_shutdown,	re_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	re_miibus_readreg),
	DEVMETHOD(miibus_writereg,	re_miibus_writereg),
	DEVMETHOD(miibus_statchg,	re_miibus_statchg),

	{ 0, 0 }
};

static driver_t re_driver = {
	"re",
	re_methods,
	sizeof(struct rl_softc)
};

static devclass_t re_devclass;

DRIVER_MODULE(re, pci, re_driver, re_devclass, 0, 0);
DRIVER_MODULE(re, cardbus, re_driver, re_devclass, 0, 0);
DRIVER_MODULE(miibus, re, miibus_driver, miibus_devclass, 0, 0);

#define EE_SET(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
		CSR_READ_1(sc, RL_EECMD) | x)

#define EE_CLR(x)					\
	CSR_WRITE_1(sc, RL_EECMD,			\
		CSR_READ_1(sc, RL_EECMD) & ~x)

/*
 * Send a read command and address to the EEPROM, check for ACK.
 */
static void
re_eeprom_putbyte(sc, addr)
	struct rl_softc *sc;
	int addr;
{
	register int d, i;

	d = addr | (RL_9346_READ << sc->rl_eewidth);

	/*
	 * Feed in each bit and strobe the clock.
	 */

	for (i = 1 << (sc->rl_eewidth + 3); i; i >>= 1) {
		if (d & i) {
			EE_SET(RL_EE_DATAIN);
		} else {
			EE_CLR(RL_EE_DATAIN);
		}
		DELAY(100);
		EE_SET(RL_EE_CLK);
		DELAY(150);
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}

	return;
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static void
re_eeprom_getword(sc, addr, dest)
	struct rl_softc *sc;
	int addr;
	u_int16_t *dest;
{
	register int i;
	u_int16_t word = 0;

	/*
	 * Send address of word we want to read.
	 */
	re_eeprom_putbyte(sc, addr);

	/*
	 * Start reading bits from EEPROM.
	 */
	for (i = 0x8000; i; i >>= 1) {
		EE_SET(RL_EE_CLK);
		DELAY(100);
		if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
			word |= i;
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}

	*dest = word;

	return;
}

/*
 * Read a sequence of words from the EEPROM.
 */
static void
re_read_eeprom(sc, dest, off, cnt)
	struct rl_softc *sc;
	caddr_t dest;
	int off;
	int cnt;
{
	int i;
	u_int16_t word = 0, *ptr;

	CSR_SETBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);

	DELAY(100);

	for (i = 0; i < cnt; i++) {
		CSR_SETBIT_1(sc, RL_EECMD, RL_EE_SEL);
		re_eeprom_getword(sc, off + i, &word);
		CSR_CLRBIT_1(sc, RL_EECMD, RL_EE_SEL);
		ptr = (u_int16_t *)(dest + (i * 2));
		*ptr = word;
	}

	CSR_CLRBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM);

	return;
}
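
/*
 * Callers read one 16-bit word at a time; for example, re_attach()
 * below pulls the three station address words out of the EEPROM with
 * re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3).
 */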

static int
re_gmii_readreg(dev, phy, reg)
	device_t dev;
	int phy, reg;
{
	struct rl_softc *sc;
	u_int32_t rval;
	int i;

	if (phy != 1)
		return (0);

	sc = device_get_softc(dev);

	/* Let the rgephy driver read the GMEDIASTAT register */

	if (reg == RL_GMEDIASTAT) {
		rval = CSR_READ_1(sc, RL_GMEDIASTAT);
		return (rval);
	}

	CSR_WRITE_4(sc, RL_PHYAR, reg << 16);
	DELAY(1000);

	for (i = 0; i < RL_TIMEOUT; i++) {
		rval = CSR_READ_4(sc, RL_PHYAR);
		if (rval & RL_PHYAR_BUSY)
			break;
		DELAY(100);
	}

	if (i == RL_TIMEOUT) {
		device_printf(sc->rl_dev, "PHY read failed\n");
		return (0);
	}

	return (rval & RL_PHYAR_PHYDATA);
}

static int
re_gmii_writereg(dev, phy, reg, data)
	device_t dev;
	int phy, reg, data;
{
	struct rl_softc *sc;
	u_int32_t rval;
	int i;

	sc = device_get_softc(dev);

	CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) |
	    (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY);
	DELAY(1000);

	for (i = 0; i < RL_TIMEOUT; i++) {
		rval = CSR_READ_4(sc, RL_PHYAR);
		if (!(rval & RL_PHYAR_BUSY))
			break;
		DELAY(100);
	}

	if (i == RL_TIMEOUT) {
		device_printf(sc->rl_dev, "PHY write failed\n");
		return (0);
	}

	return (0);
}

static int
re_miibus_readreg(dev, phy, reg)
	device_t dev;
	int phy, reg;
{
	struct rl_softc *sc;
	u_int16_t rval = 0;
	u_int16_t re8139_reg = 0;

	sc = device_get_softc(dev);

	if (sc->rl_type == RL_8169) {
		rval = re_gmii_readreg(dev, phy, reg);
		return (rval);
	}

	/* Pretend the internal PHY is only at address 0 */
	if (phy) {
		return (0);
	}
	switch (reg) {
	case MII_BMCR:
		re8139_reg = RL_BMCR;
		break;
	case MII_BMSR:
		re8139_reg = RL_BMSR;
		break;
	case MII_ANAR:
		re8139_reg = RL_ANAR;
		break;
	case MII_ANER:
		re8139_reg = RL_ANER;
		break;
	case MII_ANLPAR:
		re8139_reg = RL_LPAR;
		break;
	case MII_PHYIDR1:
	case MII_PHYIDR2:
		return (0);
	/*
	 * Allow the rlphy driver to read the media status
	 * register. If we have a link partner which does not
	 * support NWAY, this is the register which will tell
	 * us the results of parallel detection.
	 */
	case RL_MEDIASTAT:
		rval = CSR_READ_1(sc, RL_MEDIASTAT);
		return (rval);
	default:
		device_printf(sc->rl_dev, "bad phy register\n");
		return (0);
	}
	rval = CSR_READ_2(sc, re8139_reg);
	if (sc->rl_type == RL_8139CPLUS && re8139_reg == RL_BMCR) {
		/* 8139C+ has different bit layout. */
		rval &= ~(BMCR_LOOP | BMCR_ISO);
	}
	return (rval);
}

static int
re_miibus_writereg(dev, phy, reg, data)
	device_t dev;
	int phy, reg, data;
{
	struct rl_softc *sc;
	u_int16_t re8139_reg = 0;
	int rval = 0;

	sc = device_get_softc(dev);

	if (sc->rl_type == RL_8169) {
		rval = re_gmii_writereg(dev, phy, reg, data);
		return (rval);
	}

	/* Pretend the internal PHY is only at address 0 */
	if (phy)
		return (0);

	switch (reg) {
	case MII_BMCR:
		re8139_reg = RL_BMCR;
		if (sc->rl_type == RL_8139CPLUS) {
			/* 8139C+ has different bit layout. */
			data &= ~(BMCR_LOOP | BMCR_ISO);
		}
		break;
	case MII_BMSR:
		re8139_reg = RL_BMSR;
		break;
	case MII_ANAR:
		re8139_reg = RL_ANAR;
		break;
	case MII_ANER:
		re8139_reg = RL_ANER;
		break;
	case MII_ANLPAR:
		re8139_reg = RL_LPAR;
		break;
	case MII_PHYIDR1:
	case MII_PHYIDR2:
		return (0);
	default:
		device_printf(sc->rl_dev, "bad phy register\n");
		return (0);
	}
	CSR_WRITE_2(sc, re8139_reg, data);
	return (0);
}
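
/*
 * miibus callback for media status changes. Link state is tracked from
 * re_tick() instead, so this hook is intentionally left empty.
 */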
static void
re_miibus_statchg(dev)
	device_t dev;
{

}

/*
 * Program the 64-bit multicast hash filter.
 */
static void
re_setmulti(sc)
	struct rl_softc *sc;
{
	struct ifnet *ifp;
	int h = 0;
	u_int32_t hashes[2] = { 0, 0 };
	struct ifmultiaddr *ifma;
	u_int32_t rxfilt;
	int mcnt = 0;
	u_int32_t hwrev;

	RL_LOCK_ASSERT(sc);

	ifp = sc->rl_ifp;

	rxfilt = CSR_READ_4(sc, RL_RXCFG);
	rxfilt &= ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_MULTI);
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= RL_RXCFG_RX_ALLPHYS;
		if (ifp->if_flags & IFF_ALLMULTI)
			rxfilt |= RL_RXCFG_RX_MULTI;
		CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
		CSR_WRITE_4(sc, RL_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, RL_MAR4, 0xFFFFFFFF);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, RL_MAR0, 0);
	CSR_WRITE_4(sc, RL_MAR4, 0);

	/* now program new ones */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		mcnt++;
	}
	IF_ADDR_UNLOCK(ifp);

	if (mcnt)
		rxfilt |= RL_RXCFG_RX_MULTI;
	else
		rxfilt &= ~RL_RXCFG_RX_MULTI;

	CSR_WRITE_4(sc, RL_RXCFG, rxfilt);

	/*
	 * For some unfathomable reason, RealTek decided to reverse
	 * the order of the multicast hash registers in the PCI Express
	 * parts. This means we have to write the hash pattern in reverse
	 * order for those devices.
	 */

	hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV;

	switch (hwrev) {
	case RL_HWREV_8100E:
	case RL_HWREV_8101E:
	case RL_HWREV_8168_SPIN1:
	case RL_HWREV_8168_SPIN2:
	case RL_HWREV_8168_SPIN3:
		CSR_WRITE_4(sc, RL_MAR0, bswap32(hashes[1]));
		CSR_WRITE_4(sc, RL_MAR4, bswap32(hashes[0]));
		break;
	default:
		CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
		CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
		break;
	}
}

static void
re_reset(sc)
	struct rl_softc *sc;
{
	register int i;

	RL_LOCK_ASSERT(sc);

	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);

	for (i = 0; i < RL_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
			break;
	}
	if (i == RL_TIMEOUT)
		device_printf(sc->rl_dev, "reset never completed!\n");

	CSR_WRITE_1(sc, 0x82, 1);
}

#ifdef RE_DIAG

/*
 * The following routine is designed to test for a defect on some
 * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64#
 * lines connected to the bus, however for a 32-bit only card, they
 * should be pulled high. The result of this defect is that the
 * NIC will not work right if you plug it into a 64-bit slot: DMA
 * operations will be done with 64-bit transfers, which will fail
 * because the 64-bit data lines aren't connected.
 *
 * There's no way to work around this (short of taking a soldering
 * iron to the board), however we can detect it. The method we use
 * here is to put the NIC into digital loopback mode, set the receiver
 * to promiscuous mode, and then try to send a frame. We then compare
 * the frame data we sent to what was received. If the data matches,
 * then the NIC is working correctly, otherwise we know the user has
 * a defective NIC which has been mistakenly plugged into a 64-bit PCI
 * slot. In the latter case, there's no way the NIC can work correctly,
 * so we print out a message on the console and abort the device attach.
 */

static int
re_diag(sc)
	struct rl_softc *sc;
{
	struct ifnet *ifp = sc->rl_ifp;
	struct mbuf *m0;
	struct ether_header *eh;
	struct rl_desc *cur_rx;
	u_int16_t status;
	u_int32_t rxstat;
	int total_len, i, error = 0, phyaddr;
	u_int8_t dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' };
	u_int8_t src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' };

	/* Allocate a single mbuf */
	MGETHDR(m0, M_DONTWAIT, MT_DATA);
	if (m0 == NULL)
		return (ENOBUFS);

	RL_LOCK(sc);

	/*
	 * Initialize the NIC in test mode. This sets the chip up
	 * so that it can send and receive frames, but performs the
	 * following special functions:
	 * - Puts receiver in promiscuous mode
	 * - Enables digital loopback mode
	 * - Leaves interrupts turned off
	 */

	ifp->if_flags |= IFF_PROMISC;
	sc->rl_testmode = 1;
	re_reset(sc);
	re_init_locked(sc);
	sc->rl_link = 1;
	if (sc->rl_type == RL_8169)
		phyaddr = 1;
	else
		phyaddr = 0;

	re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_RESET);
	for (i = 0; i < RL_TIMEOUT; i++) {
		status = re_miibus_readreg(sc->rl_dev, phyaddr, MII_BMCR);
		if (!(status & BMCR_RESET))
			break;
	}

	re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_LOOP);
	CSR_WRITE_2(sc, RL_ISR, RL_INTRS);

	DELAY(100000);

	/* Put some data in the mbuf */

	eh = mtod(m0, struct ether_header *);
	bcopy((char *)&dst, eh->ether_dhost, ETHER_ADDR_LEN);
	bcopy((char *)&src, eh->ether_shost, ETHER_ADDR_LEN);
	eh->ether_type = htons(ETHERTYPE_IP);
	m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN;

	/*
	 * Queue the packet, start transmission.
	 * Note: IF_HANDOFF() ultimately calls re_start() for us.
	 */

	CSR_WRITE_2(sc, RL_ISR, 0xFFFF);
	RL_UNLOCK(sc);
	/* XXX: re_diag must not be called when in ALTQ mode */
	IF_HANDOFF(&ifp->if_snd, m0, ifp);
	RL_LOCK(sc);
	m0 = NULL;

	/* Wait for it to propagate through the chip */

	DELAY(100000);
	for (i = 0; i < RL_TIMEOUT; i++) {
		status = CSR_READ_2(sc, RL_ISR);
		CSR_WRITE_2(sc, RL_ISR, status);
		if ((status & (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) ==
		    (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK))
			break;
		DELAY(10);
	}

	if (i == RL_TIMEOUT) {
		device_printf(sc->rl_dev,
		    "diagnostic failed, failed to receive packet in"
		    " loopback mode\n");
		error = EIO;
		goto done;
	}

	/*
	 * The packet should have been dumped into the first
	 * entry in the RX DMA ring. Grab it from there.
	 */

	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(sc->rl_ldata.rl_mtag,
	    sc->rl_ldata.rl_rx_dmamap[0],
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->rl_ldata.rl_mtag,
	    sc->rl_ldata.rl_rx_dmamap[0]);

	m0 = sc->rl_ldata.rl_rx_mbuf[0];
	sc->rl_ldata.rl_rx_mbuf[0] = NULL;
	eh = mtod(m0, struct ether_header *);

	cur_rx = &sc->rl_ldata.rl_rx_list[0];
	total_len = RL_RXBYTES(cur_rx);
	rxstat = le32toh(cur_rx->rl_cmdstat);

	if (total_len != ETHER_MIN_LEN) {
		device_printf(sc->rl_dev,
		    "diagnostic failed, received short packet\n");
		error = EIO;
		goto done;
	}

	/* Test that the received packet data matches what we sent. */

	if (bcmp((char *)&eh->ether_dhost, (char *)&dst, ETHER_ADDR_LEN) ||
	    bcmp((char *)&eh->ether_shost, (char *)&src, ETHER_ADDR_LEN) ||
	    ntohs(eh->ether_type) != ETHERTYPE_IP) {
		device_printf(sc->rl_dev, "WARNING, DMA FAILURE!\n");
		device_printf(sc->rl_dev, "expected TX data: %6D/%6D/0x%x\n",
		    dst, ":", src, ":", ETHERTYPE_IP);
		device_printf(sc->rl_dev, "received RX data: %6D/%6D/0x%x\n",
		    eh->ether_dhost, ":", eh->ether_shost, ":",
		    ntohs(eh->ether_type));
		device_printf(sc->rl_dev, "You may have a defective 32-bit "
		    "NIC plugged into a 64-bit PCI slot.\n");
		device_printf(sc->rl_dev, "Please re-install the NIC in a "
		    "32-bit slot for proper operation.\n");
		device_printf(sc->rl_dev, "Read the re(4) man page for more "
		    "details.\n");
		error = EIO;
	}

done:
	/* Turn interface off, release resources */

	sc->rl_testmode = 0;
	sc->rl_link = 0;
	ifp->if_flags &= ~IFF_PROMISC;
	re_stop(sc);
	if (m0 != NULL)
		m_freem(m0);

	RL_UNLOCK(sc);

	return (error);
}

#endif

/*
 * Probe for a RealTek 8139C+/8169/8110 chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
re_probe(dev)
	device_t dev;
{
	struct rl_type *t;
	struct rl_softc *sc;
	int rid;
	u_int32_t hwrev;

	t = re_devs;
	sc = device_get_softc(dev);

	while (t->rl_name != NULL) {
		if ((pci_get_vendor(dev) == t->rl_vid) &&
		    (pci_get_device(dev) == t->rl_did)) {
			/*
			 * Only attach to rev. 3 of the Linksys EG1032 adapter.
			 * Rev. 2 is supported by sk(4).
			 */
			if ((t->rl_vid == LINKSYS_VENDORID) &&
			    (t->rl_did == LINKSYS_DEVICEID_EG1032) &&
			    (pci_get_subdevice(dev) !=
			    LINKSYS_SUBDEVICE_EG1032_REV3)) {
				t++;
				continue;
			}

			/*
			 * Temporarily map the I/O space
			 * so we can read the chip ID register.
			 */
			rid = RL_RID;
			sc->rl_res = bus_alloc_resource_any(dev, RL_RES, &rid,
			    RF_ACTIVE);
			if (sc->rl_res == NULL) {
				device_printf(dev,
				    "couldn't map ports/memory\n");
				return (ENXIO);
			}
			sc->rl_btag = rman_get_bustag(sc->rl_res);
			sc->rl_bhandle = rman_get_bushandle(sc->rl_res);
			hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV;
			bus_release_resource(dev, RL_RES,
			    RL_RID, sc->rl_res);
			if (t->rl_basetype == hwrev) {
				device_set_desc(dev, t->rl_name);
				return (BUS_PROBE_DEFAULT);
			}
		}
		t++;
	}

	return (ENXIO);
}

/*
 * This routine takes the segment list provided as the result of
 * a bus_dma_map_load() operation and assigns the addresses/lengths
 * to RealTek DMA descriptors. This can be called either by the RX
 * code or the TX code. In the RX case, we'll probably wind up mapping
 * at most one segment. For the TX case, there could be any number of
 * segments since TX packets may span multiple mbufs. In either case,
 * if the number of segments is larger than the rl_maxsegs limit
 * specified by the caller, we abort the mapping operation. Sadly,
 * whoever designed the buffer mapping API did not provide a way to
 * return an error from here, so we have to fake it a bit.
 */
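
/*
 * The convention used here: the caller presets ctx->rl_maxsegs to the
 * number of descriptors it can spare, and treats rl_maxsegs == 0 on
 * return as the error indication (see re_newbuf() and re_encap() below).
 */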
static void
re_dma_map_desc(arg, segs, nseg, mapsize, error)
	void *arg;
	bus_dma_segment_t *segs;
	int nseg;
	bus_size_t mapsize;
	int error;
{
	struct rl_dmaload_arg *ctx;
	struct rl_desc *d = NULL;
	int i = 0, idx;
	u_int32_t cmdstat;
	int totlen = 0;

	if (error)
		return;

	ctx = arg;

	/* Signal error to caller if there's too many segments */
	if (nseg > ctx->rl_maxsegs) {
		ctx->rl_maxsegs = 0;
		return;
	}

	/*
	 * Map the segment array into descriptors. Note that we set the
	 * start-of-frame and end-of-frame markers for either TX or RX, but
	 * they really only have meaning in the TX case. (In the RX case,
	 * it's the chip that tells us where packets begin and end.)
	 * We also keep track of the end of the ring and set the
	 * end-of-ring bits as needed, and we set the ownership bits
	 * in all except the very first descriptor. (The caller will
	 * set this descriptor later when it starts transmission or
	 * reception.)
	 */
	idx = ctx->rl_idx;
	for (;;) {
		d = &ctx->rl_ring[idx];
		if (le32toh(d->rl_cmdstat) & RL_RDESC_STAT_OWN) {
			ctx->rl_maxsegs = 0;
			return;
		}
		cmdstat = segs[i].ds_len;
		totlen += segs[i].ds_len;
		d->rl_vlanctl = 0;
		d->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[i].ds_addr));
		d->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[i].ds_addr));
		if (i == 0)
			cmdstat |= RL_TDESC_CMD_SOF;
		else
			cmdstat |= RL_TDESC_CMD_OWN;
		if (idx == (RL_RX_DESC_CNT - 1))
			cmdstat |= RL_TDESC_CMD_EOR;
		d->rl_cmdstat = htole32(cmdstat | ctx->rl_flags);
		i++;
		if (i == nseg)
			break;
		RL_DESC_INC(idx);
	}

	d->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF);
	ctx->rl_maxsegs = nseg;
	ctx->rl_idx = idx;
}

/*
 * Map a single buffer address.
 */

static void
re_dma_map_addr(arg, segs, nseg, error)
	void *arg;
	bus_dma_segment_t *segs;
	int nseg;
	int error;
{
	bus_addr_t *addr;

	if (error)
		return;

	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
	addr = arg;
	*addr = segs->ds_addr;
}
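
/*
 * Allocate the DMA tags, descriptor ring memory and per-buffer DMA maps
 * used by the RX and TX paths.
 */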
static int
re_allocmem(dev, sc)
	device_t dev;
	struct rl_softc *sc;
{
	int error;
	int nseg;
	int i;

	/*
	 * Allocate map for RX mbufs.
	 */
	nseg = 32;
	error = bus_dma_tag_create(sc->rl_parent_tag, ETHER_ALIGN, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
	    NULL, MCLBYTES * nseg, nseg, MCLBYTES, BUS_DMA_ALLOCNOW,
	    NULL, NULL, &sc->rl_ldata.rl_mtag);
	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/*
	 * Allocate map for TX descriptor list.
	 */
	error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
	    NULL, RL_TX_LIST_SZ, 1, RL_TX_LIST_SZ, 0,
	    NULL, NULL, &sc->rl_ldata.rl_tx_list_tag);
	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/* Allocate DMA'able memory for the TX ring */

	error = bus_dmamem_alloc(sc->rl_ldata.rl_tx_list_tag,
	    (void **)&sc->rl_ldata.rl_tx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    &sc->rl_ldata.rl_tx_list_map);
	if (error)
		return (ENOMEM);

	/* Load the map for the TX ring. */

	error = bus_dmamap_load(sc->rl_ldata.rl_tx_list_tag,
	    sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list,
	    RL_TX_LIST_SZ, re_dma_map_addr,
	    &sc->rl_ldata.rl_tx_list_addr, BUS_DMA_NOWAIT);

	/* Create DMA maps for TX buffers */

	for (i = 0; i < RL_TX_DESC_CNT; i++) {
		error = bus_dmamap_create(sc->rl_ldata.rl_mtag, 0,
		    &sc->rl_ldata.rl_tx_dmamap[i]);
		if (error) {
			device_printf(dev, "can't create DMA map for TX\n");
			return (ENOMEM);
		}
	}

	/*
	 * Allocate map for RX descriptor list.
	 */
	error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN,
	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL,
	    NULL, RL_RX_LIST_SZ, 1, RL_RX_LIST_SZ, 0,
	    NULL, NULL, &sc->rl_ldata.rl_rx_list_tag);
	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/* Allocate DMA'able memory for the RX ring */

	error = bus_dmamem_alloc(sc->rl_ldata.rl_rx_list_tag,
	    (void **)&sc->rl_ldata.rl_rx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    &sc->rl_ldata.rl_rx_list_map);
	if (error)
		return (ENOMEM);

	/* Load the map for the RX ring. */

	error = bus_dmamap_load(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list,
	    RL_RX_LIST_SZ, re_dma_map_addr,
	    &sc->rl_ldata.rl_rx_list_addr, BUS_DMA_NOWAIT);

	/* Create DMA maps for RX buffers */

	for (i = 0; i < RL_RX_DESC_CNT; i++) {
		error = bus_dmamap_create(sc->rl_ldata.rl_mtag, 0,
		    &sc->rl_ldata.rl_rx_dmamap[i]);
		if (error) {
			device_printf(dev, "can't create DMA map for RX\n");
			return (ENOMEM);
		}
	}

	return (0);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
re_attach(dev)
	device_t dev;
{
	u_char eaddr[ETHER_ADDR_LEN];
	u_int16_t as[ETHER_ADDR_LEN / 2];
	struct rl_softc *sc;
	struct ifnet *ifp;
	struct rl_hwrev *hw_rev;
	int hwrev;
	u_int16_t re_did = 0;
	int error = 0, rid, i;
	int msic, reg;

	sc = device_get_softc(dev);
	sc->rl_dev = dev;

	mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->rl_stat_callout, &sc->rl_mtx, 0);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = RL_RID;
	sc->rl_res = bus_alloc_resource_any(dev, RL_RES, &rid,
	    RF_ACTIVE);

	if (sc->rl_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

	sc->rl_btag = rman_get_bustag(sc->rl_res);
	sc->rl_bhandle = rman_get_bushandle(sc->rl_res);

	msic = 0;
	if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) {
		msic = pci_msi_count(dev);
		if (bootverbose)
			device_printf(dev, "MSI count : %d\n", msic);
	}
	if (msic == RL_MSI_MESSAGES && msi_disable == 0) {
		if (pci_alloc_msi(dev, &msic) == 0) {
			if (msic == RL_MSI_MESSAGES) {
				device_printf(dev, "Using %d MSI messages\n",
				    msic);
				sc->rl_msi = 1;
			} else
				pci_release_msi(dev);
		}
	}

	/* Allocate interrupt */
	if (sc->rl_msi == 0) {
		rid = 0;
		sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (sc->rl_irq[0] == NULL) {
			device_printf(dev, "couldn't allocate IRQ resources\n");
			error = ENXIO;
			goto fail;
		}
	} else {
		for (i = 0, rid = 1; i < RL_MSI_MESSAGES; i++, rid++) {
			sc->rl_irq[i] = bus_alloc_resource_any(dev,
			    SYS_RES_IRQ, &rid, RF_ACTIVE);
			if (sc->rl_irq[i] == NULL) {
				device_printf(dev,
				    "couldn't allocate IRQ resources for "
				    "message %d\n", rid);
				error = ENXIO;
				goto fail;
			}
		}
	}

	/* Reset the adapter. */
	RL_LOCK(sc);
	re_reset(sc);
	RL_UNLOCK(sc);

	hw_rev = re_hwrevs;
	hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV;
	while (hw_rev->rl_desc != NULL) {
		if (hw_rev->rl_rev == hwrev) {
			sc->rl_type = hw_rev->rl_type;
			break;
		}
		hw_rev++;
	}

	sc->rl_eewidth = RL_9356_ADDR_LEN;
	re_read_eeprom(sc, (caddr_t)&re_did, 0, 1);
	if (re_did != 0x8129)
		sc->rl_eewidth = RL_9346_ADDR_LEN;

	/*
	 * Get station address from the EEPROM.
	 */
	re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3);
	for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
		as[i] = le16toh(as[i]);
	bcopy(as, eaddr, sizeof(eaddr));

	if (sc->rl_type == RL_8169) {
		/* Set RX length mask */
		sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN;
		sc->rl_txstart = RL_GTXSTART;
	} else {
		/* Set RX length mask */
		sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN;
		sc->rl_txstart = RL_TXSTART;
	}

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
#define RL_NSEG_NEW 32
	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    MAXBSIZE, RL_NSEG_NEW, BUS_SPACE_MAXSIZE_32BIT, 0,
	    NULL, NULL, &sc->rl_parent_tag);
	if (error)
		goto fail;

	error = re_allocmem(dev, sc);

	if (error)
		goto fail;

	ifp = sc->rl_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}

	/* Do MII setup */
	if (mii_phy_probe(dev, &sc->rl_miibus,
	    re_ifmedia_upd, re_ifmedia_sts)) {
		device_printf(dev, "MII without any phy!\n");
		error = ENXIO;
		goto fail;
	}

	/* Take PHY out of power down mode. */
	if (sc->rl_type == RL_8169) {
		uint32_t rev;

		rev = CSR_READ_4(sc, RL_TXCFG);
		/* HWVERID 0, 1 and 2 : bit26-30, bit23 */
		rev &= 0x7c800000;
		if (rev != 0) {
			/* RTL8169S single chip */
			switch (rev) {
			case RL_HWREV_8169_8110SB:
			case RL_HWREV_8169_8110SC:
			case RL_HWREV_8168_SPIN2:
			case RL_HWREV_8168_SPIN3:
				re_gmii_writereg(dev, 1, 0x1f, 0);
				re_gmii_writereg(dev, 1, 0x0e, 0);
				break;
			default:
				break;
			}
		}
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = re_ioctl;
	ifp->if_start = re_start;
	ifp->if_hwassist = RE_CSUM_FEATURES;
	ifp->if_capabilities = IFCAP_HWCSUM;
	ifp->if_capenable = ifp->if_capabilities;
	ifp->if_init = re_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, RL_IFQ_MAXLEN);
	ifp->if_snd.ifq_drv_maxlen = RL_IFQ_MAXLEN;
	IFQ_SET_READY(&ifp->if_snd);

	TASK_INIT(&sc->rl_txtask, 1, re_tx_task, ifp);
	TASK_INIT(&sc->rl_inttask, 0, re_int_task, sc);

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* VLAN capability setup */
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
	if (ifp->if_capabilities & IFCAP_HWCSUM)
		ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif
	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

#ifdef RE_DIAG
	/*
	 * Perform hardware diagnostic on the original RTL8169.
	 * Some 32-bit cards were incorrectly wired and would
	 * malfunction if plugged into a 64-bit slot.
	 */

	if (hwrev == RL_HWREV_8169) {
		error = re_diag(sc);
		if (error) {
			device_printf(dev,
			    "attach aborted due to hardware diag failure\n");
			ether_ifdetach(ifp);
			goto fail;
		}
	}
#endif

	/* Hook interrupt last to avoid having to lock softc */
	if (sc->rl_msi == 0)
		error = bus_setup_intr(dev, sc->rl_irq[0],
		    INTR_TYPE_NET | INTR_MPSAFE, re_intr, NULL, sc,
		    &sc->rl_intrhand[0]);
	else {
		for (i = 0; i < RL_MSI_MESSAGES; i++) {
			error = bus_setup_intr(dev, sc->rl_irq[i],
			    INTR_TYPE_NET | INTR_MPSAFE, re_intr, NULL, sc,
			    &sc->rl_intrhand[i]);
			if (error != 0)
				break;
		}
	}
	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
	}

fail:

	if (error)
		re_detach(dev);

	return (error);
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
re_detach(dev)
	device_t dev;
{
	struct rl_softc *sc;
	struct ifnet *ifp;
	int i, rid;

	sc = device_get_softc(dev);
	ifp = sc->rl_ifp;
	KASSERT(mtx_initialized(&sc->rl_mtx), ("re mutex not initialized"));

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif
	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		RL_LOCK(sc);
#if 0
		sc->suspended = 1;
#endif
		re_stop(sc);
		RL_UNLOCK(sc);
		callout_drain(&sc->rl_stat_callout);
		taskqueue_drain(taskqueue_fast, &sc->rl_inttask);
		taskqueue_drain(taskqueue_fast, &sc->rl_txtask);
		/*
		 * Force off the IFF_UP flag here, in case someone
		 * still had a BPF descriptor attached to this
		 * interface. If they do, ether_ifdetach() will cause
		 * the BPF code to try and clear the promisc mode
		 * flag, which will bubble down to re_ioctl(),
		 * which will try to call re_init() again. This will
		 * turn the NIC back on and restart the MII ticker,
		 * which will panic the system when the kernel tries
		 * to invoke the re_tick() function that isn't there
		 * anymore.
		 */
		ifp->if_flags &= ~IFF_UP;
		ether_ifdetach(ifp);
	}
	if (sc->rl_miibus)
		device_delete_child(dev, sc->rl_miibus);
	bus_generic_detach(dev);

	/*
	 * The rest is resource deallocation, so we should already be
	 * stopped here.
	 */

	for (i = 0; i < RL_MSI_MESSAGES; i++) {
		if (sc->rl_intrhand[i] != NULL) {
			bus_teardown_intr(dev, sc->rl_irq[i],
			    sc->rl_intrhand[i]);
			sc->rl_intrhand[i] = NULL;
		}
	}
	if (ifp != NULL)
		if_free(ifp);
	if (sc->rl_msi == 0) {
		if (sc->rl_irq[0] != NULL) {
			bus_release_resource(dev, SYS_RES_IRQ, 0,
			    sc->rl_irq[0]);
			sc->rl_irq[0] = NULL;
		}
	} else {
		for (i = 0, rid = 1; i < RL_MSI_MESSAGES; i++, rid++) {
			if (sc->rl_irq[i] != NULL) {
				bus_release_resource(dev, SYS_RES_IRQ, rid,
				    sc->rl_irq[i]);
				sc->rl_irq[i] = NULL;
			}
		}
		pci_release_msi(dev);
	}
	if (sc->rl_res)
		bus_release_resource(dev, RL_RES, RL_RID, sc->rl_res);

	/* Unload and free the RX DMA ring memory and map */

	if (sc->rl_ldata.rl_rx_list_tag) {
		bus_dmamap_unload(sc->rl_ldata.rl_rx_list_tag,
		    sc->rl_ldata.rl_rx_list_map);
		bus_dmamem_free(sc->rl_ldata.rl_rx_list_tag,
		    sc->rl_ldata.rl_rx_list,
		    sc->rl_ldata.rl_rx_list_map);
		bus_dma_tag_destroy(sc->rl_ldata.rl_rx_list_tag);
	}

	/* Unload and free the TX DMA ring memory and map */

	if (sc->rl_ldata.rl_tx_list_tag) {
		bus_dmamap_unload(sc->rl_ldata.rl_tx_list_tag,
		    sc->rl_ldata.rl_tx_list_map);
		bus_dmamem_free(sc->rl_ldata.rl_tx_list_tag,
		    sc->rl_ldata.rl_tx_list,
		    sc->rl_ldata.rl_tx_list_map);
		bus_dma_tag_destroy(sc->rl_ldata.rl_tx_list_tag);
	}

	/* Destroy all the RX and TX buffer maps */

	if (sc->rl_ldata.rl_mtag) {
		for (i = 0; i < RL_TX_DESC_CNT; i++)
			bus_dmamap_destroy(sc->rl_ldata.rl_mtag,
			    sc->rl_ldata.rl_tx_dmamap[i]);
		for (i = 0; i < RL_RX_DESC_CNT; i++)
			bus_dmamap_destroy(sc->rl_ldata.rl_mtag,
			    sc->rl_ldata.rl_rx_dmamap[i]);
		bus_dma_tag_destroy(sc->rl_ldata.rl_mtag);
	}

	/* Unload and free the stats buffer and map */

	if (sc->rl_ldata.rl_stag) {
		bus_dmamap_unload(sc->rl_ldata.rl_stag,
		    sc->rl_ldata.rl_smap);
		bus_dmamem_free(sc->rl_ldata.rl_stag,
		    sc->rl_ldata.rl_stats,
		    sc->rl_ldata.rl_smap);
		bus_dma_tag_destroy(sc->rl_ldata.rl_stag);
	}

	if (sc->rl_parent_tag)
		bus_dma_tag_destroy(sc->rl_parent_tag);

	mtx_destroy(&sc->rl_mtx);

	return (0);
}
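
/*
 * Attach a fresh (or recycled) mbuf cluster to RX descriptor 'idx' and
 * mark the descriptor as owned by the chip.
 */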
static int
re_newbuf(sc, idx, m)
	struct rl_softc *sc;
	int idx;
	struct mbuf *m;
{
	struct rl_dmaload_arg arg;
	struct mbuf *n = NULL;
	int error;

	if (m == NULL) {
		n = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (n == NULL)
			return (ENOBUFS);
		m = n;
	} else
		m->m_data = m->m_ext.ext_buf;

	m->m_len = m->m_pkthdr.len = MCLBYTES;
#ifdef RE_FIXUP_RX
	/*
	 * This is part of an evil trick to deal with non-x86 platforms.
	 * The RealTek chip requires RX buffers to be aligned on 64-bit
	 * boundaries, but that will hose non-x86 machines. To get around
	 * this, we leave some empty space at the start of each buffer
	 * and for non-x86 hosts, we copy the buffer back six bytes
	 * to achieve word alignment. This is slightly more efficient
	 * than allocating a new buffer, copying the contents, and
	 * discarding the old buffer.
	 */
	m_adj(m, RE_ETHER_ALIGN);
#endif
	arg.rl_idx = idx;
	arg.rl_maxsegs = 1;
	arg.rl_flags = 0;
	arg.rl_ring = sc->rl_ldata.rl_rx_list;

	error = bus_dmamap_load_mbuf(sc->rl_ldata.rl_mtag,
	    sc->rl_ldata.rl_rx_dmamap[idx], m, re_dma_map_desc,
	    &arg, BUS_DMA_NOWAIT);
	if (error || arg.rl_maxsegs != 1) {
		if (n != NULL)
			m_freem(n);
		if (arg.rl_maxsegs == 0)
			bus_dmamap_unload(sc->rl_ldata.rl_mtag,
			    sc->rl_ldata.rl_rx_dmamap[idx]);
		return (ENOMEM);
	}

	sc->rl_ldata.rl_rx_list[idx].rl_cmdstat |= htole32(RL_RDESC_CMD_OWN);
	sc->rl_ldata.rl_rx_mbuf[idx] = m;

	bus_dmamap_sync(sc->rl_ldata.rl_mtag,
	    sc->rl_ldata.rl_rx_dmamap[idx],
	    BUS_DMASYNC_PREREAD);

	return (0);
}

#ifdef RE_FIXUP_RX
static __inline void
re_fixup_rx(m)
	struct mbuf *m;
{
	int i;
	uint16_t *src, *dst;

	src = mtod(m, uint16_t *);
	dst = src - (RE_ETHER_ALIGN - ETHER_ALIGN) / sizeof *src;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= RE_ETHER_ALIGN - ETHER_ALIGN;

	return;
}
#endif

static int
re_tx_list_init(sc)
	struct rl_softc *sc;
{

	RL_LOCK_ASSERT(sc);

	bzero((char *)sc->rl_ldata.rl_tx_list, RL_TX_LIST_SZ);
	bzero((char *)&sc->rl_ldata.rl_tx_mbuf,
	    (RL_TX_DESC_CNT * sizeof(struct mbuf *)));

	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
	    sc->rl_ldata.rl_tx_list_map, BUS_DMASYNC_PREWRITE);
	sc->rl_ldata.rl_tx_prodidx = 0;
	sc->rl_ldata.rl_tx_considx = 0;
	sc->rl_ldata.rl_tx_free = RL_TX_DESC_CNT;

	return (0);
}

static int
re_rx_list_init(sc)
	struct rl_softc *sc;
{
	int i;

	bzero((char *)sc->rl_ldata.rl_rx_list, RL_RX_LIST_SZ);
	bzero((char *)&sc->rl_ldata.rl_rx_mbuf,
	    (RL_RX_DESC_CNT * sizeof(struct mbuf *)));

	for (i = 0; i < RL_RX_DESC_CNT; i++) {
		if (re_newbuf(sc, i, NULL) == ENOBUFS)
			return (ENOBUFS);
	}

	/* Flush the RX descriptors */

	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	sc->rl_ldata.rl_rx_prodidx = 0;
	sc->rl_head = sc->rl_tail = NULL;

	return (0);
}

/*
 * RX handler for C+ and 8169. For the gigE chips, we support
 * the reception of jumbo frames that have been fragmented
 * across multiple 2K mbuf cluster buffers.
 */
static int
re_rxeof(sc)
	struct rl_softc *sc;
{
	struct mbuf *m;
	struct ifnet *ifp;
	int i, total_len;
	struct rl_desc *cur_rx;
	u_int32_t rxstat, rxvlan;
	int maxpkt = 16;

	RL_LOCK_ASSERT(sc);

	ifp = sc->rl_ifp;
	i = sc->rl_ldata.rl_rx_prodidx;

	/* Invalidate the descriptor memory */

	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map,
	    BUS_DMASYNC_POSTREAD);

	while (!RL_OWN(&sc->rl_ldata.rl_rx_list[i]) && maxpkt) {
		cur_rx = &sc->rl_ldata.rl_rx_list[i];
		m = sc->rl_ldata.rl_rx_mbuf[i];
		total_len = RL_RXBYTES(cur_rx);
		rxstat = le32toh(cur_rx->rl_cmdstat);
		rxvlan = le32toh(cur_rx->rl_vlanctl);

		/* Invalidate the RX mbuf and unload its map */

		bus_dmamap_sync(sc->rl_ldata.rl_mtag,
		    sc->rl_ldata.rl_rx_dmamap[i],
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->rl_ldata.rl_mtag,
		    sc->rl_ldata.rl_rx_dmamap[i]);

		if (!(rxstat & RL_RDESC_STAT_EOF)) {
			m->m_len = RE_RX_DESC_BUFLEN;
			if (sc->rl_head == NULL)
				sc->rl_head = sc->rl_tail = m;
			else {
				m->m_flags &= ~M_PKTHDR;
				sc->rl_tail->m_next = m;
				sc->rl_tail = m;
			}
			re_newbuf(sc, i, NULL);
			RL_DESC_INC(i);
			continue;
		}

		/*
		 * NOTE: for the 8139C+, the frame length field
		 * is always 12 bits in size, but for the gigE chips,
		 * it is 13 bits (since the max RX frame length is 16K).
		 * Unfortunately, all 32 bits in the status word
		 * were already used, so to make room for the extra
		 * length bit, RealTek took out the 'frame alignment
		 * error' bit and shifted the other status bits
		 * over one slot. The OWN, EOR, FS and LS bits are
		 * still in the same places. We have already extracted
		 * the frame length and checked the OWN bit, so rather
		 * than using an alternate bit mapping, we shift the
		 * status bits one space to the right so we can evaluate
		 * them using the 8169 status as though it was in the
		 * same format as that of the 8139C+.
		 */
		if (sc->rl_type == RL_8169)
			rxstat >>= 1;

		/*
		 * if total_len > 2^13-1, both _RXERRSUM and _GIANT will be
		 * set, but if CRC is clear, it will still be a valid frame.
		 */
		if (rxstat & RL_RDESC_STAT_RXERRSUM && !(total_len > 8191 &&
		    (rxstat & RL_RDESC_STAT_ERRS) == RL_RDESC_STAT_GIANT)) {
			ifp->if_ierrors++;
			/*
			 * If this is part of a multi-fragment packet,
			 * discard all the pieces.
			 */
			if (sc->rl_head != NULL) {
				m_freem(sc->rl_head);
				sc->rl_head = sc->rl_tail = NULL;
			}
			re_newbuf(sc, i, m);
			RL_DESC_INC(i);
			continue;
		}

		/*
		 * If allocating a replacement mbuf fails,
		 * reload the current one.
		 */

		if (re_newbuf(sc, i, NULL)) {
			ifp->if_ierrors++;
			if (sc->rl_head != NULL) {
				m_freem(sc->rl_head);
				sc->rl_head = sc->rl_tail = NULL;
			}
			re_newbuf(sc, i, m);
			RL_DESC_INC(i);
			continue;
		}

		RL_DESC_INC(i);

		if (sc->rl_head != NULL) {
			m->m_len = total_len % RE_RX_DESC_BUFLEN;
			if (m->m_len == 0)
				m->m_len = RE_RX_DESC_BUFLEN;
			/*
			 * Special case: if there's 4 bytes or less
			 * in this buffer, the mbuf can be discarded:
			 * the last 4 bytes are the CRC, which we don't
			 * care about anyway.
			 */
			if (m->m_len <= ETHER_CRC_LEN) {
				sc->rl_tail->m_len -=
				    (ETHER_CRC_LEN - m->m_len);
				m_freem(m);
			} else {
				m->m_len -= ETHER_CRC_LEN;
				m->m_flags &= ~M_PKTHDR;
				sc->rl_tail->m_next = m;
			}
			m = sc->rl_head;
			sc->rl_head = sc->rl_tail = NULL;
			m->m_pkthdr.len = total_len - ETHER_CRC_LEN;
		} else
			m->m_pkthdr.len = m->m_len =
			    (total_len - ETHER_CRC_LEN);

#ifdef RE_FIXUP_RX
		re_fixup_rx(m);
#endif
		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

		/* Do RX checksumming if enabled */

		if (ifp->if_capenable & IFCAP_RXCSUM) {

			/* Check IP header checksum */
			if (rxstat & RL_RDESC_STAT_PROTOID)
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if (!(rxstat & RL_RDESC_STAT_IPSUMBAD))
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;

			/* Check TCP/UDP checksum */
			if ((RL_TCPPKT(rxstat) &&
			    !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) ||
			    (RL_UDPPKT(rxstat) &&
			    !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}
		maxpkt--;
		if (rxvlan & RL_RDESC_VLANCTL_TAG) {
			m->m_pkthdr.ether_vtag =
			    ntohs((rxvlan & RL_RDESC_VLANCTL_DATA));
			m->m_flags |= M_VLANTAG;
		}
		RL_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		RL_LOCK(sc);
	}

	/* Flush the RX DMA ring */

	bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag,
	    sc->rl_ldata.rl_rx_list_map,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	sc->rl_ldata.rl_rx_prodidx = i;

	if (maxpkt)
		return (EAGAIN);

	return (0);
}
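
/*
 * Reclaim transmitted descriptors: free the stashed mbufs, update the
 * error and collision counters and make ring space available to
 * re_start() again.
 */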
static void
re_txeof(sc)
	struct rl_softc *sc;
{
	struct ifnet *ifp;
	u_int32_t txstat;
	int idx;

	ifp = sc->rl_ifp;
	idx = sc->rl_ldata.rl_tx_considx;

	/* Invalidate the TX descriptor list */
	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
	    sc->rl_ldata.rl_tx_list_map,
	    BUS_DMASYNC_POSTREAD);

	while (sc->rl_ldata.rl_tx_free < RL_TX_DESC_CNT) {
		txstat = le32toh(sc->rl_ldata.rl_tx_list[idx].rl_cmdstat);
		if (txstat & RL_TDESC_CMD_OWN)
			break;

		sc->rl_ldata.rl_tx_list[idx].rl_bufaddr_lo = 0;

		/*
		 * We only stash mbufs in the last descriptor
		 * in a fragment chain, which also happens to
		 * be the only place where the TX status bits
		 * are valid.
		 */
		if (txstat & RL_TDESC_CMD_EOF) {
			m_freem(sc->rl_ldata.rl_tx_mbuf[idx]);
			sc->rl_ldata.rl_tx_mbuf[idx] = NULL;
			bus_dmamap_unload(sc->rl_ldata.rl_mtag,
			    sc->rl_ldata.rl_tx_dmamap[idx]);
			if (txstat & (RL_TDESC_STAT_EXCESSCOL|
			    RL_TDESC_STAT_COLCNT))
				ifp->if_collisions++;
			if (txstat & RL_TDESC_STAT_TXERRSUM)
				ifp->if_oerrors++;
			else
				ifp->if_opackets++;
		}
		sc->rl_ldata.rl_tx_free++;
		RL_DESC_INC(idx);
	}
	sc->rl_ldata.rl_tx_considx = idx;

	/* No changes made to the TX ring, so no flush needed */

	if (sc->rl_ldata.rl_tx_free > RL_TX_DESC_THLD)
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	if (sc->rl_ldata.rl_tx_free < RL_TX_DESC_CNT) {
		/*
		 * Some chips will ignore a second TX request issued
		 * while an existing transmission is in progress. If
		 * the transmitter goes idle but there are still
		 * packets waiting to be sent, we need to restart the
		 * channel here to flush them out. This only seems to
		 * be required with the PCIe devices.
		 */
		CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);

#ifdef RE_TX_MODERATION
		/*
		 * If not all descriptors have been reaped yet, reload
		 * the timer so that we will eventually get another
		 * interrupt that will cause us to re-enter this routine.
		 * This is done in case the transmitter has gone idle.
		 */
		CSR_WRITE_4(sc, RL_TIMERCNT, 1);
#endif
	} else
		sc->rl_watchdog_timer = 0;
}
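
/*
 * Periodic callout: drive the watchdog, let the PHY driver update its
 * state via mii_tick() and track link transitions, kicking the TX task
 * when the link comes back up.
 */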
static void
re_tick(xsc)
	void *xsc;
{
	struct rl_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;

	sc = xsc;
	ifp = sc->rl_ifp;

	RL_LOCK_ASSERT(sc);

	re_watchdog(sc);

	mii = device_get_softc(sc->rl_miibus);
	mii_tick(mii);
	if (sc->rl_link) {
		if (!(mii->mii_media_status & IFM_ACTIVE))
			sc->rl_link = 0;
	} else {
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->rl_link = 1;
			if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
				taskqueue_enqueue_fast(taskqueue_fast,
				    &sc->rl_txtask);
		}
	}

	callout_reset(&sc->rl_stat_callout, hz, re_tick, sc);
}

#ifdef DEVICE_POLLING
static void
re_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct rl_softc *sc = ifp->if_softc;

	RL_LOCK(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		re_poll_locked(ifp, cmd, count);
	RL_UNLOCK(sc);
}

static void
re_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct rl_softc *sc = ifp->if_softc;

	RL_LOCK_ASSERT(sc);

	sc->rxcycles = count;
	re_rxeof(sc);
	re_txeof(sc);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_txtask);

	if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
		u_int16_t status;

		status = CSR_READ_2(sc, RL_ISR);
		if (status == 0xffff)
			return;
		if (status)
			CSR_WRITE_2(sc, RL_ISR, status);

		/*
		 * XXX check behaviour on receiver stalls.
		 */

		if (status & RL_ISR_SYSTEM_ERR) {
			re_reset(sc);
			re_init_locked(sc);
		}
	}
}
#endif /* DEVICE_POLLING */
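
/*
 * Interrupt filter: this runs in primary interrupt context, so it only
 * masks further interrupts and defers the real work to the fast
 * taskqueue; re_int_task() unmasks them again when it is done.
 */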
		if (status & RL_ISR_SYSTEM_ERR) {
			re_reset(sc);
			re_init_locked(sc);
		}
	}
}
#endif /* DEVICE_POLLING */

static int
re_intr(arg)
	void			*arg;
{
	struct rl_softc		*sc;
	uint16_t		status;

	sc = arg;

	status = CSR_READ_2(sc, RL_ISR);
	if (status == 0xFFFF || (status & RL_INTRS_CPLUS) == 0)
		return (FILTER_STRAY);
	CSR_WRITE_2(sc, RL_IMR, 0);

	taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_inttask);

	return (FILTER_HANDLED);
}

static void
re_int_task(arg, npending)
	void			*arg;
	int			npending;
{
	struct rl_softc		*sc;
	struct ifnet		*ifp;
	u_int16_t		status;
	int			rval = 0;

	sc = arg;
	ifp = sc->rl_ifp;

	RL_LOCK(sc);

	status = CSR_READ_2(sc, RL_ISR);
	CSR_WRITE_2(sc, RL_ISR, status);

	if (sc->suspended || !(ifp->if_flags & IFF_UP)) {
		RL_UNLOCK(sc);
		return;
	}

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING) {
		RL_UNLOCK(sc);
		return;
	}
#endif

	if (status & (RL_ISR_RX_OK|RL_ISR_RX_ERR|RL_ISR_FIFO_OFLOW))
		rval = re_rxeof(sc);

#ifdef RE_TX_MODERATION
	if (status & (RL_ISR_TIMEOUT_EXPIRED|
#else
	if (status & (RL_ISR_TX_OK|
#endif
	    RL_ISR_TX_ERR|RL_ISR_TX_DESC_UNAVAIL))
		re_txeof(sc);

	if (status & RL_ISR_SYSTEM_ERR) {
		re_reset(sc);
		re_init_locked(sc);
	}

	if (status & RL_ISR_LINKCHG) {
		callout_stop(&sc->rl_stat_callout);
		re_tick(sc);
	}

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_txtask);

	RL_UNLOCK(sc);

	if ((CSR_READ_2(sc, RL_ISR) & RL_INTRS_CPLUS) || rval) {
		taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_inttask);
		return;
	}

	CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);

	return;
}
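
/*
 * Map an outbound mbuf chain into the TX descriptor ring, setting
 * up the checksum offload and VLAN tag fields along the way. On
 * success, *idx is advanced past the descriptors consumed.
 */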
static int
re_encap(sc, m_head, idx)
	struct rl_softc		*sc;
	struct mbuf		**m_head;
	int			*idx;
{
	struct mbuf		*m_new = NULL;
	struct rl_dmaload_arg	arg;
	bus_dmamap_t		map;
	int			error;

	RL_LOCK_ASSERT(sc);

	if (sc->rl_ldata.rl_tx_free <= RL_TX_DESC_THLD)
		return (EFBIG);

	/*
	 * Set up checksum offload. Note: checksum offload bits must
	 * appear in all descriptors of a multi-descriptor transmit
	 * attempt; testing with an 8169 chip shows this is a hard
	 * requirement.
	 */

	arg.rl_flags = 0;

	if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0)
		arg.rl_flags = RL_TDESC_CMD_LGSEND |
		    ((uint32_t)(*m_head)->m_pkthdr.tso_segsz <<
		    RL_TDESC_CMD_MSSVAL_SHIFT);
	else {
		if ((*m_head)->m_pkthdr.csum_flags & CSUM_IP)
			arg.rl_flags |= RL_TDESC_CMD_IPCSUM;
		if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP)
			arg.rl_flags |= RL_TDESC_CMD_TCPCSUM;
		if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP)
			arg.rl_flags |= RL_TDESC_CMD_UDPCSUM;
	}

	arg.rl_idx = *idx;
	arg.rl_maxsegs = sc->rl_ldata.rl_tx_free;
	if (arg.rl_maxsegs > RL_TX_DESC_THLD)
		arg.rl_maxsegs -= RL_TX_DESC_THLD;
	arg.rl_ring = sc->rl_ldata.rl_tx_list;

	map = sc->rl_ldata.rl_tx_dmamap[*idx];

	/*
	 * With some of the RealTek chips, using the checksum offload
	 * support in conjunction with the autopadding feature results
	 * in the transmission of corrupt frames. For example, if we
	 * need to send a really small IP fragment that's less than 60
	 * bytes in size, and IP header checksumming is enabled, the
	 * resulting ethernet frame that appears on the wire will
	 * have a garbled payload. To work around this, if TX checksum
	 * offload is enabled, we always manually pad short frames out
	 * to the minimum ethernet frame size. We do this by pretending
	 * the mbuf chain has too many fragments so the coalescing code
	 * below can assemble the packet into a single buffer that's
	 * padded out to the minimum frame size.
	 *
	 * Note: this appears unnecessary for TCP, and doing it for TCP
	 * with PCIe adapters seems to result in bad checksums.
	 */

	if (arg.rl_flags && !(arg.rl_flags & RL_TDESC_CMD_TCPCSUM) &&
	    (*m_head)->m_pkthdr.len < RL_MIN_FRAMELEN)
		error = EFBIG;
	else
		error = bus_dmamap_load_mbuf(sc->rl_ldata.rl_mtag, map,
		    *m_head, re_dma_map_desc, &arg, BUS_DMA_NOWAIT);

	if (error && error != EFBIG) {
		device_printf(sc->rl_dev, "can't map mbuf (error %d)\n", error);
		return (ENOBUFS);
	}

	/* Too many segments to map, coalesce into a single mbuf */

	if (error || arg.rl_maxsegs == 0) {
		if (arg.rl_maxsegs == 0)
			bus_dmamap_unload(sc->rl_ldata.rl_mtag, map);
		m_new = m_defrag(*m_head, M_DONTWAIT);
		if (m_new == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m_new;

		/*
		 * Manually pad short frames, and zero the pad space
		 * to avoid leaking data.
		 */
		if (m_new->m_pkthdr.len < RL_MIN_FRAMELEN) {
			bzero(mtod(m_new, char *) + m_new->m_pkthdr.len,
			    RL_MIN_FRAMELEN - m_new->m_pkthdr.len);
			m_new->m_pkthdr.len = RL_MIN_FRAMELEN;
			m_new->m_len = m_new->m_pkthdr.len;
		}

		/* Note that we'll run over RL_TX_DESC_THLD here. */
		arg.rl_maxsegs = sc->rl_ldata.rl_tx_free;
		error = bus_dmamap_load_mbuf(sc->rl_ldata.rl_mtag, map,
		    *m_head, re_dma_map_desc, &arg, BUS_DMA_NOWAIT);
		if (error || arg.rl_maxsegs == 0) {
			device_printf(sc->rl_dev,
			    "can't map defragmented mbuf (error %d)\n", error);
			m_freem(m_new);
			*m_head = NULL;
			if (arg.rl_maxsegs == 0)
				bus_dmamap_unload(sc->rl_ldata.rl_mtag, map);
			return (EFBIG);
		}
	}

	/*
	 * Ensure that the map for this transmission
	 * is placed at the array index of the last descriptor
	 * in this chain. (Swap last and first dmamaps.)
	 */
	sc->rl_ldata.rl_tx_dmamap[*idx] =
	    sc->rl_ldata.rl_tx_dmamap[arg.rl_idx];
	sc->rl_ldata.rl_tx_dmamap[arg.rl_idx] = map;

	sc->rl_ldata.rl_tx_mbuf[arg.rl_idx] = *m_head;
	sc->rl_ldata.rl_tx_free -= arg.rl_maxsegs;

	/*
	 * Set up hardware VLAN tagging. Note: vlan tag info must
	 * appear in the first descriptor of a multi-descriptor
	 * transmission attempt.
	 */
	if ((*m_head)->m_flags & M_VLANTAG)
		sc->rl_ldata.rl_tx_list[*idx].rl_vlanctl =
		    htole32(htons((*m_head)->m_pkthdr.ether_vtag) |
		    RL_TDESC_VLANCTL_TAG);

	/* Transfer ownership of packet to the chip. */
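
	/*
	 * Note: the OWN bit is set on the last descriptor of the
	 * chain before the first one, so the chip should never pick
	 * up a chain that is only partially built.
	 */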
	sc->rl_ldata.rl_tx_list[arg.rl_idx].rl_cmdstat |=
	    htole32(RL_TDESC_CMD_OWN);
	if (*idx != arg.rl_idx)
		sc->rl_ldata.rl_tx_list[*idx].rl_cmdstat |=
		    htole32(RL_TDESC_CMD_OWN);

	RL_DESC_INC(arg.rl_idx);
	*idx = arg.rl_idx;

	return (0);
}

static void
re_tx_task(arg, npending)
	void			*arg;
	int			npending;
{
	struct ifnet		*ifp;

	ifp = arg;
	re_start(ifp);

	return;
}

/*
 * Main transmit routine for C+ and gigE NICs.
 */
static void
re_start(ifp)
	struct ifnet		*ifp;
{
	struct rl_softc		*sc;
	struct mbuf		*m_head = NULL;
	int			idx, queued = 0;

	sc = ifp->if_softc;

	RL_LOCK(sc);

	if (!sc->rl_link || ifp->if_drv_flags & IFF_DRV_OACTIVE) {
		RL_UNLOCK(sc);
		return;
	}

	idx = sc->rl_ldata.rl_tx_prodidx;

	while (sc->rl_ldata.rl_tx_mbuf[idx] == NULL) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (re_encap(sc, &m_head, &idx)) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		/*
		 * If there's a BPF listener, pass a copy of this
		 * frame to it.
		 */
		ETHER_BPF_MTAP(ifp, m_head);

		queued++;
	}

	if (queued == 0) {
#ifdef RE_TX_MODERATION
		if (sc->rl_ldata.rl_tx_free != RL_TX_DESC_CNT)
			CSR_WRITE_4(sc, RL_TIMERCNT, 1);
#endif
		RL_UNLOCK(sc);
		return;
	}

	/* Flush the TX descriptors */

	bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag,
	    sc->rl_ldata.rl_tx_list_map,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	sc->rl_ldata.rl_tx_prodidx = idx;

	CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START);

#ifdef RE_TX_MODERATION
	/*
	 * Use the countdown timer for interrupt moderation.
	 * 'TX done' interrupts are disabled. Instead, we reset the
	 * countdown timer, which will begin counting until it hits
	 * the value in the TIMERINT register, and then trigger an
	 * interrupt. Each time we write to the TIMERCNT register,
	 * the timer count is reset to 0.
	 */
	CSR_WRITE_4(sc, RL_TIMERCNT, 1);
#endif

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	sc->rl_watchdog_timer = 5;

	RL_UNLOCK(sc);

	return;
}

static void
re_init(xsc)
	void			*xsc;
{
	struct rl_softc		*sc = xsc;

	RL_LOCK(sc);
	re_init_locked(sc);
	RL_UNLOCK(sc);
}
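
/*
 * Bring the interface up: program the C+ command register, the
 * station address and the DMA ring addresses, set the RX filter,
 * then enable the transmitter and receiver. Called with the
 * softc lock held.
 */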
static void
re_init_locked(sc)
	struct rl_softc		*sc;
{
	struct ifnet		*ifp = sc->rl_ifp;
	struct mii_data		*mii;
	u_int32_t		rxcfg = 0;
	union {
		uint32_t align_dummy;
		u_char eaddr[ETHER_ADDR_LEN];
	} eaddr;

	RL_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->rl_miibus);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	re_stop(sc);

	/*
	 * Enable C+ RX and TX mode, as well as VLAN stripping and
	 * RX checksum offload. We must configure the C+ register
	 * before all others.
	 */
	CSR_WRITE_2(sc, RL_CPLUS_CMD, RL_CPLUSCMD_RXENB|
	    RL_CPLUSCMD_TXENB|RL_CPLUSCMD_PCI_MRW|
	    RL_CPLUSCMD_VLANSTRIP|RL_CPLUSCMD_RXCSUM_ENB);

	/*
	 * Initialize our MAC address. Even though the chipset
	 * documentation doesn't mention it, we need to enter "Config
	 * register write enable" mode to modify the ID registers.
	 */
	/* Copy the MAC address onto the stack to align it. */
	bcopy(IF_LLADDR(ifp), eaddr.eaddr, ETHER_ADDR_LEN);
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG);
	CSR_WRITE_4(sc, RL_IDR0,
	    htole32(*(u_int32_t *)(&eaddr.eaddr[0])));
	CSR_WRITE_4(sc, RL_IDR4,
	    htole32(*(u_int32_t *)(&eaddr.eaddr[4])));
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	/*
	 * For C+ mode, initialize the RX descriptors and mbufs.
	 */
	re_rx_list_init(sc);
	re_tx_list_init(sc);

	/*
	 * Load the addresses of the RX and TX lists into the chip.
	 */

	CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI,
	    RL_ADDR_HI(sc->rl_ldata.rl_rx_list_addr));
	CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO,
	    RL_ADDR_LO(sc->rl_ldata.rl_rx_list_addr));

	CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI,
	    RL_ADDR_HI(sc->rl_ldata.rl_tx_list_addr));
	CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO,
	    RL_ADDR_LO(sc->rl_ldata.rl_tx_list_addr));

	/*
	 * Enable transmit and receive.
	 */
	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);

	/*
	 * Set the initial TX and RX configuration.
	 */
	if (sc->rl_testmode) {
		if (sc->rl_type == RL_8169)
			CSR_WRITE_4(sc, RL_TXCFG,
			    RL_TXCFG_CONFIG|RL_LOOPTEST_ON);
		else
			CSR_WRITE_4(sc, RL_TXCFG,
			    RL_TXCFG_CONFIG|RL_LOOPTEST_ON_CPLUS);
	} else
		CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);

	CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16);

	CSR_WRITE_4(sc, RL_RXCFG, RL_RXCFG_CONFIG);

	/* Set the individual bit to receive frames for this host only. */
	rxcfg = CSR_READ_4(sc, RL_RXCFG);
	rxcfg |= RL_RXCFG_RX_INDIV;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		rxcfg |= RL_RXCFG_RX_ALLPHYS;
	else
		rxcfg &= ~RL_RXCFG_RX_ALLPHYS;
	CSR_WRITE_4(sc, RL_RXCFG, rxcfg);

	/*
	 * Set the capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST)
		rxcfg |= RL_RXCFG_RX_BROAD;
	else
		rxcfg &= ~RL_RXCFG_RX_BROAD;
	CSR_WRITE_4(sc, RL_RXCFG, rxcfg);

	/*
	 * Program the multicast filter, if necessary.
	 */
	re_setmulti(sc);

#ifdef DEVICE_POLLING
	/*
	 * Disable interrupts if we are polling.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		CSR_WRITE_2(sc, RL_IMR, 0);
	else	/* otherwise ... */
#endif

	/*
	 * Enable interrupts.
	 */
	if (sc->rl_testmode)
		CSR_WRITE_2(sc, RL_IMR, 0);
	else
		CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
	CSR_WRITE_2(sc, RL_ISR, RL_INTRS_CPLUS);

	/* Set initial TX threshold */
	sc->rl_txthresh = RL_TX_THRESH_INIT;

	/* Start RX/TX process. */
	CSR_WRITE_4(sc, RL_MISSEDPKT, 0);
#ifdef notdef
	/* Enable receiver and transmitter. */
	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);
#endif

#ifdef RE_TX_MODERATION
	/*
	 * Initialize the timer interrupt register so that
	 * a timer interrupt will be generated once the timer
	 * reaches a certain number of ticks. The timer is
	 * reloaded on each transmit. This gives us TX interrupt
	 * moderation, which dramatically improves TX frame rate.
	 */
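	/*
	 * The countdown itself is armed by the RL_TIMERCNT writes
	 * in re_start() and re_txeof(); the values programmed here
	 * only set the tick count at which the interrupt fires.
	 */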
	if (sc->rl_type == RL_8169)
		CSR_WRITE_4(sc, RL_TIMERINT_8169, 0x800);
	else
		CSR_WRITE_4(sc, RL_TIMERINT, 0x400);
#endif

	/*
	 * For 8169 gigE NICs, set the max allowed RX packet
	 * size so we can receive jumbo frames.
	 */
	if (sc->rl_type == RL_8169)
		CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383);

	if (sc->rl_testmode)
		return;

	mii_mediachg(mii);

	CSR_WRITE_1(sc, RL_CFG1, CSR_READ_1(sc, RL_CFG1) | RL_CFG1_DRVLOAD);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	sc->rl_link = 0;
	sc->rl_watchdog_timer = 0;
	callout_reset(&sc->rl_stat_callout, hz, re_tick, sc);
}

/*
 * Set media options.
 */
static int
re_ifmedia_upd(ifp)
	struct ifnet		*ifp;
{
	struct rl_softc		*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->rl_miibus);
	RL_LOCK(sc);
	mii_mediachg(mii);
	RL_UNLOCK(sc);

	return (0);
}

/*
 * Report current media status.
 */
static void
re_ifmedia_sts(ifp, ifmr)
	struct ifnet		*ifp;
	struct ifmediareq	*ifmr;
{
	struct rl_softc		*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->rl_miibus);

	RL_LOCK(sc);
	mii_pollstat(mii);
	RL_UNLOCK(sc);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}
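
/*
 * Handle socket ioctl requests. Media, MTU, flag, multicast and
 * capability changes are processed here; anything else is passed
 * through to ether_ioctl().
 */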
static int
re_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct rl_softc		*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	struct mii_data		*mii;
	int			error = 0;

	switch (command) {
	case SIOCSIFMTU:
		RL_LOCK(sc);
		if (ifr->ifr_mtu > RL_JUMBO_MTU)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		RL_UNLOCK(sc);
		break;
	case SIOCSIFFLAGS:
		RL_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				if (((ifp->if_flags ^ sc->rl_if_flags)
				    & IFF_PROMISC) != 0)
					re_setmulti(sc);
			} else
				re_init_locked(sc);
		} else {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
				re_stop(sc);
		}
		sc->rl_if_flags = ifp->if_flags;
		RL_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		RL_LOCK(sc);
		re_setmulti(sc);
		RL_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->rl_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
	{
		int mask, reinit;

		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		reinit = 0;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(re_poll, ifp);
				if (error)
					return (error);
				RL_LOCK(sc);
				/* Disable interrupts */
				CSR_WRITE_2(sc, RL_IMR, 0x0000);
				ifp->if_capenable |= IFCAP_POLLING;
				RL_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupts. */
				RL_LOCK(sc);
				CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
				ifp->if_capenable &= ~IFCAP_POLLING;
				RL_UNLOCK(sc);
			}
		}
#endif /* DEVICE_POLLING */
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= RE_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~RE_CSUM_FEATURES;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if (mask & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if ((IFCAP_TSO4 & ifp->if_capenable) &&
			    (IFCAP_TSO4 & ifp->if_capabilities))
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if (reinit && ifp->if_drv_flags & IFF_DRV_RUNNING)
			re_init(sc);
		VLAN_CAPABILITIES(ifp);
	}
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

static void
re_watchdog(sc)
	struct rl_softc		*sc;
{

	RL_LOCK_ASSERT(sc);

	if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer != 0)
		return;

	device_printf(sc->rl_dev, "watchdog timeout\n");
	sc->rl_ifp->if_oerrors++;

	re_txeof(sc);
	re_rxeof(sc);
	re_init_locked(sc);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
re_stop(sc)
	struct rl_softc		*sc;
{
	register int		i;
	struct ifnet		*ifp;

	RL_LOCK_ASSERT(sc);

	ifp = sc->rl_ifp;

	sc->rl_watchdog_timer = 0;
	callout_stop(&sc->rl_stat_callout);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	CSR_WRITE_1(sc, RL_COMMAND, 0x00);
	CSR_WRITE_2(sc, RL_IMR, 0x0000);
	CSR_WRITE_2(sc, RL_ISR, 0xFFFF);

	if (sc->rl_head != NULL) {
		m_freem(sc->rl_head);
		sc->rl_head = sc->rl_tail = NULL;
	}

	/* Free the TX list buffers. */

	for (i = 0; i < RL_TX_DESC_CNT; i++) {
		if (sc->rl_ldata.rl_tx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->rl_ldata.rl_mtag,
			    sc->rl_ldata.rl_tx_dmamap[i]);
			m_freem(sc->rl_ldata.rl_tx_mbuf[i]);
			sc->rl_ldata.rl_tx_mbuf[i] = NULL;
		}
	}

	/* Free the RX list buffers. */

	for (i = 0; i < RL_RX_DESC_CNT; i++) {
		if (sc->rl_ldata.rl_rx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->rl_ldata.rl_mtag,
			    sc->rl_ldata.rl_rx_dmamap[i]);
			m_freem(sc->rl_ldata.rl_rx_mbuf[i]);
			sc->rl_ldata.rl_rx_mbuf[i] = NULL;
		}
	}
}

/*
 * Device suspend routine. Stop the interface and save some PCI
 * settings in case the BIOS doesn't restore them properly on
 * resume.
 */
static int
re_suspend(dev)
	device_t		dev;
{
	struct rl_softc		*sc;

	sc = device_get_softc(dev);

	RL_LOCK(sc);
	re_stop(sc);
	sc->suspended = 1;
	RL_UNLOCK(sc);

	return (0);
}

/*
 * Device resume routine. Restore some PCI settings in case the BIOS
 * doesn't, re-enable busmastering, and restart the interface if
 * appropriate.
 */
2795 */ 2796 static int 2797 re_resume(dev) 2798 device_t dev; 2799 { 2800 struct rl_softc *sc; 2801 struct ifnet *ifp; 2802 2803 sc = device_get_softc(dev); 2804 2805 RL_LOCK(sc); 2806 2807 ifp = sc->rl_ifp; 2808 2809 /* reinitialize interface if necessary */ 2810 if (ifp->if_flags & IFF_UP) 2811 re_init_locked(sc); 2812 2813 sc->suspended = 0; 2814 RL_UNLOCK(sc); 2815 2816 return (0); 2817 } 2818 2819 /* 2820 * Stop all chip I/O so that the kernel's probe routines don't 2821 * get confused by errant DMAs when rebooting. 2822 */ 2823 static int 2824 re_shutdown(dev) 2825 device_t dev; 2826 { 2827 struct rl_softc *sc; 2828 2829 sc = device_get_softc(dev); 2830 2831 RL_LOCK(sc); 2832 re_stop(sc); 2833 /* 2834 * Mark interface as down since otherwise we will panic if 2835 * interrupt comes in later on, which can happen in some 2836 * cases. 2837 */ 2838 sc->rl_ifp->if_flags &= ~IFF_UP; 2839 RL_UNLOCK(sc); 2840 2841 return (0); 2842 } 2843