/*-
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2000, 2001
 *	Bill Paul <wpaul@bsdi.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * National Semiconductor DP83820/DP83821 gigabit ethernet driver
 * for FreeBSD. Datasheets are available from:
 *
 * http://www.national.com/ds/DP/DP83820.pdf
 * http://www.national.com/ds/DP/DP83821.pdf
 *
 * These chips are used on several low cost gigabit ethernet NICs
 * sold by D-Link, Addtron, SMC and Asante. Both parts are
 * virtually the same, except the 83820 is a 64-bit/32-bit part,
 * while the 83821 is 32-bit only.
 *
 * Many cards also use National gigE transceivers, such as the
 * DP83891, DP83861 and DP83862 gigPHYTER parts. The DP83861 datasheet
 * contains a full register description that applies to all of these
 * components:
 *
 * http://www.national.com/ds/DP/DP83861.pdf
 *
 * Written by Bill Paul <wpaul@bsdi.com>
 * BSDi Open Source Solutions
 */

/*
 * The NatSemi DP83820 and 83821 controllers are enhanced versions
 * of the NatSemi MacPHYTER 10/100 devices. They support 10, 100
 * and 1000Mbps speeds with 1000baseX (ten bit interface), MII and GMII
 * ports. Other features include 8K TX FIFO and 32K RX FIFO, TCP/IP
 * hardware checksum offload (IPv4 only), VLAN tagging and filtering,
 * priority TX and RX queues, a 2048 bit multicast hash filter, 4 RX pattern
 * matching buffers, one perfect address filter buffer and interrupt
 * moderation. The 83820 supports both 64-bit and 32-bit addressing
 * and data transfers: the 64-bit support can be toggled on or off
 * via software. This affects the size of certain fields in the DMA
 * descriptors.
 *
 * There are two bugs/misfeatures in the 83820/83821 that I have
 * discovered so far:
 *
 * - Receive buffers must be aligned on 64-bit boundaries, which means
 *   you must resort to copying data in order to fix up the payload
 *   alignment.
 *
 * - In order to transmit jumbo frames larger than 8170 bytes, you have
 *   to turn off transmit checksum offloading, because the chip can't
 *   compute the checksum on an outgoing frame unless it fits entirely
 *   within the TX FIFO, which is only 8192 bytes in size. If you have
 *   TX checksum offload enabled and you attempt to transmit a frame
 *   larger than 8170 bytes, the transmitter will wedge.
 *
 * To work around the latter problem, TX checksum offload is disabled
 * if the user selects an MTU larger than 8152 (8170 - 18).
 */
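
/*
 * Illustrative sketch only (not a quote of this driver): the kind of
 * MTU test the workaround above implies, as it might be applied when
 * a SIOCSIFMTU request arrives. The 8152 threshold is 8170 minus the
 * 18-byte Ethernet header/CRC overhead; the surrounding ioctl
 * plumbing ('ifr' etc.) is assumed, not shown in this section.
 */
#if 0
	if (ifr->ifr_mtu > 8152) {
		/* Frame may not fit in the 8K TX FIFO; no TX csum. */
		ifp->if_capenable &= ~IFCAP_TXCSUM;
		ifp->if_hwassist &= ~NGE_CSUM_FEATURES;
	} else if ((ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
		ifp->if_capenable |= IFCAP_TXCSUM;
		ifp->if_hwassist |= NGE_CSUM_FEATURES;
	}
#endif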

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <dev/mii/mii.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>

#include <dev/nge/if_ngereg.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

MODULE_DEPEND(nge, pci, 1, 1, 1);
MODULE_DEPEND(nge, ether, 1, 1, 1);
MODULE_DEPEND(nge, miibus, 1, 1, 1);

#define NGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

/*
 * Various supported device vendors/types and their names.
 */
static const struct nge_type nge_devs[] = {
	{ NGE_VENDORID, NGE_DEVICEID,
	    "National Semiconductor Gigabit Ethernet" },
	{ 0, 0, NULL }
};

static int nge_probe(device_t);
static int nge_attach(device_t);
static int nge_detach(device_t);
static int nge_shutdown(device_t);
static int nge_suspend(device_t);
static int nge_resume(device_t);

static __inline void nge_discard_rxbuf(struct nge_softc *, int);
static int nge_newbuf(struct nge_softc *, int);
static int nge_encap(struct nge_softc *, struct mbuf **);
#ifndef __NO_STRICT_ALIGNMENT
static __inline void nge_fixup_rx(struct mbuf *);
#endif
static int nge_rxeof(struct nge_softc *);
static void nge_txeof(struct nge_softc *);
static void nge_intr(void *);
static void nge_tick(void *);
static void nge_stats_update(struct nge_softc *);
static void nge_start(struct ifnet *);
static void nge_start_locked(struct ifnet *);
static int nge_ioctl(struct ifnet *, u_long, caddr_t);
static void nge_init(void *);
static void nge_init_locked(struct nge_softc *);
static int nge_stop_mac(struct nge_softc *);
static void nge_stop(struct nge_softc *);
static void nge_wol(struct nge_softc *);
static void nge_watchdog(struct nge_softc *);
static int nge_mediachange(struct ifnet *);
static void nge_mediastatus(struct ifnet *, struct ifmediareq *);

static void nge_delay(struct nge_softc *);
static void nge_eeprom_idle(struct nge_softc *);
static void nge_eeprom_putbyte(struct nge_softc *, int);
static void nge_eeprom_getword(struct nge_softc *, int, uint16_t *);
static void nge_read_eeprom(struct nge_softc *, caddr_t, int, int);

static int nge_miibus_readreg(device_t, int, int);
static int nge_miibus_writereg(device_t, int, int, int);
static void nge_miibus_statchg(device_t);

static void nge_rxfilter(struct nge_softc *);
static void nge_reset(struct nge_softc *);
static void nge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int nge_dma_alloc(struct nge_softc *);
static void nge_dma_free(struct nge_softc *);
static int nge_list_rx_init(struct nge_softc *);
static int nge_list_tx_init(struct nge_softc *);
static void nge_sysctl_node(struct nge_softc *);
static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_nge_int_holdoff(SYSCTL_HANDLER_ARGS);

/*
 * MII bit-bang glue
 */
static uint32_t nge_mii_bitbang_read(device_t);
static void nge_mii_bitbang_write(device_t, uint32_t);

static const struct mii_bitbang_ops nge_mii_bitbang_ops = {
	nge_mii_bitbang_read,
	nge_mii_bitbang_write,
	{
		NGE_MEAR_MII_DATA,	/* MII_BIT_MDO */
		NGE_MEAR_MII_DATA,	/* MII_BIT_MDI */
		NGE_MEAR_MII_CLK,	/* MII_BIT_MDC */
		NGE_MEAR_MII_DIR,	/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};

static device_method_t nge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nge_probe),
	DEVMETHOD(device_attach,	nge_attach),
	DEVMETHOD(device_detach,	nge_detach),
	DEVMETHOD(device_shutdown,	nge_shutdown),
	DEVMETHOD(device_suspend,	nge_suspend),
	DEVMETHOD(device_resume,	nge_resume),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	nge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	nge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	nge_miibus_statchg),

	DEVMETHOD_END
};

static driver_t nge_driver = {
236 "nge", 237 nge_methods, 238 sizeof(struct nge_softc) 239 }; 240 241 static devclass_t nge_devclass; 242 243 DRIVER_MODULE(nge, pci, nge_driver, nge_devclass, 0, 0); 244 DRIVER_MODULE(miibus, nge, miibus_driver, miibus_devclass, 0, 0); 245 246 #define NGE_SETBIT(sc, reg, x) \ 247 CSR_WRITE_4(sc, reg, \ 248 CSR_READ_4(sc, reg) | (x)) 249 250 #define NGE_CLRBIT(sc, reg, x) \ 251 CSR_WRITE_4(sc, reg, \ 252 CSR_READ_4(sc, reg) & ~(x)) 253 254 #define SIO_SET(x) \ 255 CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) | (x)) 256 257 #define SIO_CLR(x) \ 258 CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) & ~(x)) 259 260 static void 261 nge_delay(struct nge_softc *sc) 262 { 263 int idx; 264 265 for (idx = (300 / 33) + 1; idx > 0; idx--) 266 CSR_READ_4(sc, NGE_CSR); 267 } 268 269 static void 270 nge_eeprom_idle(struct nge_softc *sc) 271 { 272 int i; 273 274 SIO_SET(NGE_MEAR_EE_CSEL); 275 nge_delay(sc); 276 SIO_SET(NGE_MEAR_EE_CLK); 277 nge_delay(sc); 278 279 for (i = 0; i < 25; i++) { 280 SIO_CLR(NGE_MEAR_EE_CLK); 281 nge_delay(sc); 282 SIO_SET(NGE_MEAR_EE_CLK); 283 nge_delay(sc); 284 } 285 286 SIO_CLR(NGE_MEAR_EE_CLK); 287 nge_delay(sc); 288 SIO_CLR(NGE_MEAR_EE_CSEL); 289 nge_delay(sc); 290 CSR_WRITE_4(sc, NGE_MEAR, 0x00000000); 291 } 292 293 /* 294 * Send a read command and address to the EEPROM, check for ACK. 295 */ 296 static void 297 nge_eeprom_putbyte(struct nge_softc *sc, int addr) 298 { 299 int d, i; 300 301 d = addr | NGE_EECMD_READ; 302 303 /* 304 * Feed in each bit and stobe the clock. 305 */ 306 for (i = 0x400; i; i >>= 1) { 307 if (d & i) { 308 SIO_SET(NGE_MEAR_EE_DIN); 309 } else { 310 SIO_CLR(NGE_MEAR_EE_DIN); 311 } 312 nge_delay(sc); 313 SIO_SET(NGE_MEAR_EE_CLK); 314 nge_delay(sc); 315 SIO_CLR(NGE_MEAR_EE_CLK); 316 nge_delay(sc); 317 } 318 } 319 320 /* 321 * Read a word of data stored in the EEPROM at address 'addr.' 322 */ 323 static void 324 nge_eeprom_getword(struct nge_softc *sc, int addr, uint16_t *dest) 325 { 326 int i; 327 uint16_t word = 0; 328 329 /* Force EEPROM to idle state. */ 330 nge_eeprom_idle(sc); 331 332 /* Enter EEPROM access mode. */ 333 nge_delay(sc); 334 SIO_CLR(NGE_MEAR_EE_CLK); 335 nge_delay(sc); 336 SIO_SET(NGE_MEAR_EE_CSEL); 337 nge_delay(sc); 338 339 /* 340 * Send address of word we want to read. 341 */ 342 nge_eeprom_putbyte(sc, addr); 343 344 /* 345 * Start reading bits from EEPROM. 346 */ 347 for (i = 0x8000; i; i >>= 1) { 348 SIO_SET(NGE_MEAR_EE_CLK); 349 nge_delay(sc); 350 if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_EE_DOUT) 351 word |= i; 352 nge_delay(sc); 353 SIO_CLR(NGE_MEAR_EE_CLK); 354 nge_delay(sc); 355 } 356 357 /* Turn off EEPROM access mode. */ 358 nge_eeprom_idle(sc); 359 360 *dest = word; 361 } 362 363 /* 364 * Read a sequence of words from the EEPROM. 365 */ 366 static void 367 nge_read_eeprom(struct nge_softc *sc, caddr_t dest, int off, int cnt) 368 { 369 int i; 370 uint16_t word = 0, *ptr; 371 372 for (i = 0; i < cnt; i++) { 373 nge_eeprom_getword(sc, off + i, &word); 374 ptr = (uint16_t *)(dest + (i * 2)); 375 *ptr = word; 376 } 377 } 378 379 /* 380 * Read the MII serial port for the MII bit-bang module. 381 */ 382 static uint32_t 383 nge_mii_bitbang_read(device_t dev) 384 { 385 struct nge_softc *sc; 386 uint32_t val; 387 388 sc = device_get_softc(dev); 389 390 val = CSR_READ_4(sc, NGE_MEAR); 391 CSR_BARRIER_4(sc, NGE_MEAR, 392 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 393 394 return (val); 395 } 396 397 /* 398 * Write the MII serial port for the MII bit-bang module. 

/*
 * Read the MII serial port for the MII bit-bang module.
 */
static uint32_t
nge_mii_bitbang_read(device_t dev)
{
	struct nge_softc *sc;
	uint32_t val;

	sc = device_get_softc(dev);

	val = CSR_READ_4(sc, NGE_MEAR);
	CSR_BARRIER_4(sc, NGE_MEAR,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);

	return (val);
}

/*
 * Write the MII serial port for the MII bit-bang module.
 */
static void
nge_mii_bitbang_write(device_t dev, uint32_t val)
{
	struct nge_softc *sc;

	sc = device_get_softc(dev);

	CSR_WRITE_4(sc, NGE_MEAR, val);
	CSR_BARRIER_4(sc, NGE_MEAR,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}

static int
nge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct nge_softc *sc;
	int rv;

	sc = device_get_softc(dev);
	if ((sc->nge_flags & NGE_FLAG_TBI) != 0) {
		/* Pretend PHY is at address 0. */
		if (phy != 0)
			return (0);
		switch (reg) {
		case MII_BMCR:
			reg = NGE_TBI_BMCR;
			break;
		case MII_BMSR:
			/* 83820/83821 has different bit layout for BMSR. */
			rv = BMSR_ANEG | BMSR_EXTCAP | BMSR_EXTSTAT;
			reg = CSR_READ_4(sc, NGE_TBI_BMSR);
			if ((reg & NGE_TBIBMSR_ANEG_DONE) != 0)
				rv |= BMSR_ACOMP;
			if ((reg & NGE_TBIBMSR_LINKSTAT) != 0)
				rv |= BMSR_LINK;
			return (rv);
		case MII_ANAR:
			reg = NGE_TBI_ANAR;
			break;
		case MII_ANLPAR:
			reg = NGE_TBI_ANLPAR;
			break;
		case MII_ANER:
			reg = NGE_TBI_ANER;
			break;
		case MII_EXTSR:
			reg = NGE_TBI_ESR;
			break;
		case MII_PHYIDR1:
		case MII_PHYIDR2:
			return (0);
		default:
			device_printf(sc->nge_dev,
			    "bad phy register read : %d\n", reg);
			return (0);
		}
		return (CSR_READ_4(sc, reg));
	}

	return (mii_bitbang_readreg(dev, &nge_mii_bitbang_ops, phy, reg));
}

static int
nge_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct nge_softc *sc;

	sc = device_get_softc(dev);
	if ((sc->nge_flags & NGE_FLAG_TBI) != 0) {
		/* Pretend PHY is at address 0. */
		if (phy != 0)
			return (0);
		switch (reg) {
		case MII_BMCR:
			reg = NGE_TBI_BMCR;
			break;
		case MII_BMSR:
			return (0);
		case MII_ANAR:
			reg = NGE_TBI_ANAR;
			break;
		case MII_ANLPAR:
			reg = NGE_TBI_ANLPAR;
			break;
		case MII_ANER:
			reg = NGE_TBI_ANER;
			break;
		case MII_EXTSR:
			reg = NGE_TBI_ESR;
			break;
		case MII_PHYIDR1:
		case MII_PHYIDR2:
			return (0);
		default:
			device_printf(sc->nge_dev,
			    "bad phy register write : %d\n", reg);
			return (0);
		}
		CSR_WRITE_4(sc, reg, data);
		return (0);
	}

	mii_bitbang_writereg(dev, &nge_mii_bitbang_ops, phy, reg, data);

	return (0);
}

/*
 * media status/link state change handler.
 */
static void
nge_miibus_statchg(device_t dev)
{
	struct nge_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	struct nge_txdesc *txd;
	uint32_t done, reg, status;
	int i;

	sc = device_get_softc(dev);
	NGE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->nge_miibus);
	ifp = sc->nge_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	sc->nge_flags &= ~NGE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
	    (IFM_AVALID | IFM_ACTIVE)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
		case IFM_1000_T:
		case IFM_1000_SX:
		case IFM_1000_LX:
		case IFM_1000_CX:
			sc->nge_flags |= NGE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop Tx/Rx MACs. */
	if (nge_stop_mac(sc) == ETIMEDOUT)
		device_printf(sc->nge_dev,
		    "%s: unable to stop Tx/Rx MAC\n", __func__);
	nge_txeof(sc);
	nge_rxeof(sc);
	if (sc->nge_head != NULL) {
		m_freem(sc->nge_head);
		sc->nge_head = sc->nge_tail = NULL;
	}

	/* Release queued frames. */
	for (i = 0; i < NGE_TX_RING_CNT; i++) {
		txd = &sc->nge_cdata.nge_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->nge_cdata.nge_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->nge_cdata.nge_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	/* Program MAC with resolved speed/duplex. */
	if ((sc->nge_flags & NGE_FLAG_LINK) != 0) {
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
			NGE_SETBIT(sc, NGE_TX_CFG,
			    (NGE_TXCFG_IGN_HBEAT | NGE_TXCFG_IGN_CARR));
			NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
#ifdef notyet
			/* Enable flow-control. */
			if ((IFM_OPTIONS(mii->mii_media_active) &
			    (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) != 0)
				NGE_SETBIT(sc, NGE_PAUSECSR,
				    NGE_PAUSECSR_PAUSE_ENB);
#endif
		} else {
			NGE_CLRBIT(sc, NGE_TX_CFG,
			    (NGE_TXCFG_IGN_HBEAT | NGE_TXCFG_IGN_CARR));
			NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
			NGE_CLRBIT(sc, NGE_PAUSECSR, NGE_PAUSECSR_PAUSE_ENB);
		}
		/* If we have a 1000Mbps link, set the mode_1000 bit. */
		reg = CSR_READ_4(sc, NGE_CFG);
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_SX:
		case IFM_1000_LX:
		case IFM_1000_CX:
		case IFM_1000_T:
			reg |= NGE_CFG_MODE_1000;
			break;
		default:
			reg &= ~NGE_CFG_MODE_1000;
			break;
		}
		CSR_WRITE_4(sc, NGE_CFG, reg);

		/* Reset Tx/Rx MAC. */
		reg = CSR_READ_4(sc, NGE_CSR);
		reg |= NGE_CSR_TX_RESET | NGE_CSR_RX_RESET;
		CSR_WRITE_4(sc, NGE_CSR, reg);
		/* Check the completion of reset. */
		done = 0;
		for (i = 0; i < NGE_TIMEOUT; i++) {
			DELAY(1);
			status = CSR_READ_4(sc, NGE_ISR);
			if ((status & NGE_ISR_RX_RESET_DONE) != 0)
				done |= NGE_ISR_RX_RESET_DONE;
			if ((status & NGE_ISR_TX_RESET_DONE) != 0)
				done |= NGE_ISR_TX_RESET_DONE;
			if (done ==
			    (NGE_ISR_TX_RESET_DONE | NGE_ISR_RX_RESET_DONE))
				break;
		}
		if (i == NGE_TIMEOUT)
			device_printf(sc->nge_dev,
			    "%s: unable to reset Tx/Rx MAC\n", __func__);
		/* Reuse Rx buffer and reset consumer pointer. */
		sc->nge_cdata.nge_rx_cons = 0;
		/*
		 * It seems that resetting the Rx/Tx MAC also resets
		 * the Tx/Rx descriptor pointer registers, so the
		 * Tx/Rx list addresses have to be reloaded.
		 */
		CSR_WRITE_4(sc, NGE_RX_LISTPTR_HI,
		    NGE_ADDR_HI(sc->nge_rdata.nge_rx_ring_paddr));
		CSR_WRITE_4(sc, NGE_RX_LISTPTR_LO,
		    NGE_ADDR_LO(sc->nge_rdata.nge_rx_ring_paddr));
		CSR_WRITE_4(sc, NGE_TX_LISTPTR_HI,
		    NGE_ADDR_HI(sc->nge_rdata.nge_tx_ring_paddr));
		CSR_WRITE_4(sc, NGE_TX_LISTPTR_LO,
		    NGE_ADDR_LO(sc->nge_rdata.nge_tx_ring_paddr));
		/* Reinitialize Tx buffers. */
		nge_list_tx_init(sc);

		/* Restart Rx MAC. */
		reg = CSR_READ_4(sc, NGE_CSR);
		reg |= NGE_CSR_RX_ENABLE;
		CSR_WRITE_4(sc, NGE_CSR, reg);
		for (i = 0; i < NGE_TIMEOUT; i++) {
			if ((CSR_READ_4(sc, NGE_CSR) & NGE_CSR_RX_ENABLE) != 0)
				break;
			DELAY(1);
		}
		if (i == NGE_TIMEOUT)
			device_printf(sc->nge_dev,
			    "%s: unable to restart Rx MAC\n", __func__);
	}

	/* Data LED off for TBI mode */
	if ((sc->nge_flags & NGE_FLAG_TBI) != 0)
		CSR_WRITE_4(sc, NGE_GPIO,
		    CSR_READ_4(sc, NGE_GPIO) & ~NGE_GPIO_GP3_OUT);
}

static void
nge_rxfilter(struct nge_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t h, i, rxfilt;
	int bit, index;

	NGE_LOCK_ASSERT(sc);
	ifp = sc->nge_ifp;

	/* Make sure to stop Rx filtering. */
	rxfilt = CSR_READ_4(sc, NGE_RXFILT_CTL);
	rxfilt &= ~NGE_RXFILTCTL_ENABLE;
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, rxfilt);
	CSR_BARRIER_4(sc, NGE_RXFILT_CTL, BUS_SPACE_BARRIER_WRITE);

	rxfilt &= ~(NGE_RXFILTCTL_ALLMULTI | NGE_RXFILTCTL_ALLPHYS);
	rxfilt &= ~NGE_RXFILTCTL_BROAD;
	/*
	 * We don't want to use the hash table for matching unicast
	 * addresses.
	 */
	rxfilt &= ~(NGE_RXFILTCTL_MCHASH | NGE_RXFILTCTL_UCHASH);

	/*
	 * For the NatSemi chip, we have to explicitly enable the
	 * reception of ARP frames, as well as turn on the 'perfect
	 * match' filter where we store the station address, otherwise
	 * we won't receive unicasts meant for this host.
	 */
	rxfilt |= NGE_RXFILTCTL_ARP | NGE_RXFILTCTL_PERFECT;

	/*
	 * Set the capture broadcast bit to capture broadcast frames.
	 */
	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		rxfilt |= NGE_RXFILTCTL_BROAD;

	if ((ifp->if_flags & IFF_PROMISC) != 0 ||
	    (ifp->if_flags & IFF_ALLMULTI) != 0) {
		rxfilt |= NGE_RXFILTCTL_ALLMULTI;
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			rxfilt |= NGE_RXFILTCTL_ALLPHYS;
		goto done;
	}

	/*
	 * We have to explicitly enable the multicast hash table
	 * on the NatSemi chip if we want to use it, which we do.
	 */
	rxfilt |= NGE_RXFILTCTL_MCHASH;

	/* first, zot all the existing hash bits */
	for (i = 0; i < NGE_MCAST_FILTER_LEN; i += 2) {
		CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_MCAST_LO + i);
		CSR_WRITE_4(sc, NGE_RXFILT_DATA, 0);
	}

	/*
	 * From the 11 bits returned by the crc routine, the top 7
	 * bits represent the 16-bit word in the mcast hash table
	 * that needs to be updated, and the lower 4 bits represent
	 * which bit within that word needs to be set.
	 */
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 21;
		index = (h >> 4) & 0x7F;
		bit = h & 0xF;
		CSR_WRITE_4(sc, NGE_RXFILT_CTL,
		    NGE_FILTADDR_MCAST_LO + (index * 2));
		NGE_SETBIT(sc, NGE_RXFILT_DATA, (1 << bit));
	}
	if_maddr_runlock(ifp);

done:
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, rxfilt);
	/* Turn the receive filter on. */
	rxfilt |= NGE_RXFILTCTL_ENABLE;
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, rxfilt);
	CSR_BARRIER_4(sc, NGE_RXFILT_CTL, BUS_SPACE_BARRIER_WRITE);
}
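
/*
 * Worked example (illustrative only) of the hash mapping used in
 * nge_rxfilter() above: for an address whose big-endian CRC32,
 * shifted right by 21, yields the 11-bit value h = 0x5A3
 * (binary 101 1010 0011), the coordinates come out as
 *
 *	index = (h >> 4) & 0x7F = 0x5A	(16-bit filter word 90)
 *	bit   = h & 0xF         = 0x3	(bit 3 within that word)
 *
 * so the loop sets bit 3 of hash-table word 90. 'lladdr' below is a
 * hypothetical stand-in for one multicast link-level address.
 */
#if 0
	uint32_t h = ether_crc32_be(lladdr, ETHER_ADDR_LEN) >> 21;
	int index = (h >> 4) & 0x7F;	/* which 16-bit filter word */
	int bit = h & 0xF;		/* which bit within that word */
#endif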

static void
nge_reset(struct nge_softc *sc)
{
	uint32_t v;
	int i;

	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RESET);

	for (i = 0; i < NGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, NGE_CSR) & NGE_CSR_RESET))
			break;
		DELAY(1);
	}

	if (i == NGE_TIMEOUT)
		device_printf(sc->nge_dev, "reset never completed\n");

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	/*
	 * If this is a NatSemi chip, make sure to clear
	 * PME mode.
	 */
	CSR_WRITE_4(sc, NGE_CLKRUN, NGE_CLKRUN_PMESTS);
	CSR_WRITE_4(sc, NGE_CLKRUN, 0);

	/* Clear WOL events which may interfere with normal Rx filter operation. */
	CSR_WRITE_4(sc, NGE_WOLCSR, 0);

	/*
	 * Only the DP83820 supports 64-bit addressing/data transfers, and
	 * 64-bit addressing requires different descriptor structures.
	 * To keep things simple, disable 64-bit addressing/data transfers.
	 */
	v = CSR_READ_4(sc, NGE_CFG);
	v &= ~(NGE_CFG_64BIT_ADDR_ENB | NGE_CFG_64BIT_DATA_ENB);
	CSR_WRITE_4(sc, NGE_CFG, v);
}

/*
 * Probe for a NatSemi chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
nge_probe(device_t dev)
{
	const struct nge_type *t;

	t = nge_devs;

	while (t->nge_name != NULL) {
		if ((pci_get_vendor(dev) == t->nge_vid) &&
		    (pci_get_device(dev) == t->nge_did)) {
			device_set_desc(dev, t->nge_name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return (ENXIO);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
nge_attach(device_t dev)
{
	uint8_t eaddr[ETHER_ADDR_LEN];
	uint16_t ea[ETHER_ADDR_LEN/2], ea_temp, reg;
	struct nge_softc *sc;
	struct ifnet *ifp;
	int error, i, rid;

	error = 0;
	sc = device_get_softc(dev);
	sc->nge_dev = dev;

	NGE_LOCK_INIT(sc, device_get_nameunit(dev));
	callout_init_mtx(&sc->nge_stat_ch, &sc->nge_mtx, 0);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

#ifdef NGE_USEIOSPACE
	sc->nge_res_type = SYS_RES_IOPORT;
	sc->nge_res_id = PCIR_BAR(0);
#else
	sc->nge_res_type = SYS_RES_MEMORY;
	sc->nge_res_id = PCIR_BAR(1);
#endif
	sc->nge_res = bus_alloc_resource_any(dev, sc->nge_res_type,
	    &sc->nge_res_id, RF_ACTIVE);

	if (sc->nge_res == NULL) {
		if (sc->nge_res_type == SYS_RES_MEMORY) {
			sc->nge_res_type = SYS_RES_IOPORT;
			sc->nge_res_id = PCIR_BAR(0);
		} else {
			sc->nge_res_type = SYS_RES_MEMORY;
			sc->nge_res_id = PCIR_BAR(1);
		}
		sc->nge_res = bus_alloc_resource_any(dev, sc->nge_res_type,
		    &sc->nge_res_id, RF_ACTIVE);
		if (sc->nge_res == NULL) {
			device_printf(dev, "couldn't allocate %s resources\n",
			    sc->nge_res_type == SYS_RES_MEMORY ? "memory" :
			    "I/O");
			NGE_LOCK_DESTROY(sc);
			return (ENXIO);
		}
	}

	/* Allocate interrupt */
	rid = 0;
	sc->nge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->nge_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Enable MWI. */
	reg = pci_read_config(dev, PCIR_COMMAND, 2);
	reg |= PCIM_CMD_MWRICEN;
	pci_write_config(dev, PCIR_COMMAND, reg, 2);

	/* Reset the adapter. */
	nge_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	nge_read_eeprom(sc, (caddr_t)ea, NGE_EE_NODEADDR, 3);
	for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
		ea[i] = le16toh(ea[i]);
	ea_temp = ea[0];
	ea[0] = ea[2];
	ea[2] = ea_temp;
	bcopy(ea, eaddr, sizeof(eaddr));

	if (nge_dma_alloc(sc) != 0) {
		error = ENXIO;
		goto fail;
	}

	nge_sysctl_node(sc);

	ifp = sc->nge_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not allocate ifnet structure\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nge_ioctl;
	ifp->if_start = nge_start;
	ifp->if_init = nge_init;
	ifp->if_snd.ifq_drv_maxlen = NGE_TX_RING_CNT - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_hwassist = NGE_CSUM_FEATURES;
	ifp->if_capabilities = IFCAP_HWCSUM;
	/*
	 * It seems that some hardware doesn't provide the 3.3V auxiliary
	 * supply (3VAUX) needed to drive PME, so checking the PCI power
	 * management capability is necessary.
	 */
	if (pci_find_cap(sc->nge_dev, PCIY_PMG, &i) == 0)
		ifp->if_capabilities |= IFCAP_WOL;
	ifp->if_capenable = ifp->if_capabilities;

	if ((CSR_READ_4(sc, NGE_CFG) & NGE_CFG_TBI_EN) != 0) {
		sc->nge_flags |= NGE_FLAG_TBI;
		device_printf(dev, "Using TBI\n");
		/* Configure GPIO. */
		CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
		    | NGE_GPIO_GP4_OUT
		    | NGE_GPIO_GP1_OUTENB | NGE_GPIO_GP2_OUTENB
		    | NGE_GPIO_GP3_OUTENB
		    | NGE_GPIO_GP3_IN | NGE_GPIO_GP4_IN);
	}

	/*
	 * Do MII setup.
	 */
	error = mii_attach(dev, &sc->nge_miibus, ifp, nge_mediachange,
	    nge_mediastatus, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* VLAN capability setup. */
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
	ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif
	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/*
	 * Hookup IRQ last.
	 */
	error = bus_setup_intr(dev, sc->nge_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, nge_intr, sc, &sc->nge_intrhand);
	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		goto fail;
	}

fail:
	if (error != 0)
		nge_detach(dev);
	return (error);
}

static int
nge_detach(device_t dev)
{
	struct nge_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->nge_ifp;

#ifdef DEVICE_POLLING
	if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	if (device_is_attached(dev)) {
		NGE_LOCK(sc);
		sc->nge_flags |= NGE_FLAG_DETACH;
		nge_stop(sc);
		NGE_UNLOCK(sc);
		callout_drain(&sc->nge_stat_ch);
		if (ifp != NULL)
			ether_ifdetach(ifp);
	}

	if (sc->nge_miibus != NULL) {
		device_delete_child(dev, sc->nge_miibus);
		sc->nge_miibus = NULL;
	}
	bus_generic_detach(dev);
	if (sc->nge_intrhand != NULL)
		bus_teardown_intr(dev, sc->nge_irq, sc->nge_intrhand);
	if (sc->nge_irq != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq);
	if (sc->nge_res != NULL)
		bus_release_resource(dev, sc->nge_res_type, sc->nge_res_id,
		    sc->nge_res);

	nge_dma_free(sc);
	if (ifp != NULL)
		if_free(ifp);

	NGE_LOCK_DESTROY(sc);

	return (0);
}

struct nge_dmamap_arg {
	bus_addr_t	nge_busaddr;
};

static void
nge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct nge_dmamap_arg *ctx;

	if (error != 0)
		return;
	ctx = arg;
	ctx->nge_busaddr = segs[0].ds_addr;
}

static int
nge_dma_alloc(struct nge_softc *sc)
{
	struct nge_dmamap_arg ctx;
	struct nge_txdesc *txd;
	struct nge_rxdesc *rxd;
	int error, i;

	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->nge_dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->nge_cdata.nge_parent_tag);
	if (error != 0) {
		device_printf(sc->nge_dev, "failed to create parent DMA tag\n");
		goto fail;
	}
	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(sc->nge_cdata.nge_parent_tag,/* parent */
	    NGE_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    NGE_TX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    NGE_TX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->nge_cdata.nge_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->nge_dev, "failed to create Tx ring DMA tag\n");
		goto fail;
	}
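
/*
 * Illustrative sketch only: the bus_dma(9) pattern nge_dma_alloc()
 * repeats for each ring and buffer pool. A child tag is derived from
 * the parent, memory is allocated against it, and the map is loaded
 * through a callback that captures the bus address. 'tag_example' is
 * a hypothetical name; the real tags live in sc->nge_cdata.
 */
#if 0
	bus_dma_tag_t tag_example;

	error = bus_dma_tag_create(sc->nge_cdata.nge_parent_tag,
	    NGE_RING_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL, NGE_RX_RING_SIZE, 1, NGE_RX_RING_SIZE, 0,
	    NULL, NULL, &tag_example);
	/* ...then bus_dmamem_alloc() and bus_dmamap_load() with
	 * nge_dmamap_cb() to recover the ring's physical address. */
#endif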

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(sc->nge_cdata.nge_parent_tag,/* parent */
	    NGE_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    NGE_RX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    NGE_RX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->nge_cdata.nge_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->nge_dev,
		    "failed to create Rx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->nge_cdata.nge_parent_tag,/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * NGE_MAXTXSEGS,	/* maxsize */
	    NGE_MAXTXSEGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->nge_cdata.nge_tx_tag);
	if (error != 0) {
		device_printf(sc->nge_dev, "failed to create Tx DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(sc->nge_cdata.nge_parent_tag,/* parent */
	    NGE_RX_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->nge_cdata.nge_rx_tag);
	if (error != 0) {
		device_printf(sc->nge_dev, "failed to create Rx DMA tag\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->nge_cdata.nge_tx_ring_tag,
	    (void **)&sc->nge_rdata.nge_tx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->nge_cdata.nge_tx_ring_map);
	if (error != 0) {
		device_printf(sc->nge_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}

	ctx.nge_busaddr = 0;
	error = bus_dmamap_load(sc->nge_cdata.nge_tx_ring_tag,
	    sc->nge_cdata.nge_tx_ring_map, sc->nge_rdata.nge_tx_ring,
	    NGE_TX_RING_SIZE, nge_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.nge_busaddr == 0) {
		device_printf(sc->nge_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	sc->nge_rdata.nge_tx_ring_paddr = ctx.nge_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->nge_cdata.nge_rx_ring_tag,
	    (void **)&sc->nge_rdata.nge_rx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->nge_cdata.nge_rx_ring_map);
	if (error != 0) {
		device_printf(sc->nge_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}

	ctx.nge_busaddr = 0;
	error = bus_dmamap_load(sc->nge_cdata.nge_rx_ring_tag,
	    sc->nge_cdata.nge_rx_ring_map, sc->nge_rdata.nge_rx_ring,
	    NGE_RX_RING_SIZE, nge_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.nge_busaddr == 0) {
		device_printf(sc->nge_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc->nge_rdata.nge_rx_ring_paddr = ctx.nge_busaddr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < NGE_TX_RING_CNT; i++) {
		txd = &sc->nge_cdata.nge_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->nge_cdata.nge_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->nge_dev,
			    "failed to create Tx dmamap\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->nge_cdata.nge_rx_tag, 0,
	    &sc->nge_cdata.nge_rx_sparemap)) != 0) {
		device_printf(sc->nge_dev,
		    "failed to create spare Rx dmamap\n");
		goto fail;
	}
	for (i = 0; i < NGE_RX_RING_CNT; i++) {
		rxd = &sc->nge_cdata.nge_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->nge_cdata.nge_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->nge_dev,
			    "failed to create Rx dmamap\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
nge_dma_free(struct nge_softc *sc)
{
	struct nge_txdesc *txd;
	struct nge_rxdesc *rxd;
	int i;

	/* Tx ring. */
	if (sc->nge_cdata.nge_tx_ring_tag) {
		if (sc->nge_rdata.nge_tx_ring_paddr)
			bus_dmamap_unload(sc->nge_cdata.nge_tx_ring_tag,
			    sc->nge_cdata.nge_tx_ring_map);
		if (sc->nge_rdata.nge_tx_ring)
			bus_dmamem_free(sc->nge_cdata.nge_tx_ring_tag,
			    sc->nge_rdata.nge_tx_ring,
			    sc->nge_cdata.nge_tx_ring_map);
		sc->nge_rdata.nge_tx_ring = NULL;
		sc->nge_rdata.nge_tx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->nge_cdata.nge_tx_ring_tag);
		sc->nge_cdata.nge_tx_ring_tag = NULL;
	}
	/* Rx ring. */
	if (sc->nge_cdata.nge_rx_ring_tag) {
		if (sc->nge_rdata.nge_rx_ring_paddr)
			bus_dmamap_unload(sc->nge_cdata.nge_rx_ring_tag,
			    sc->nge_cdata.nge_rx_ring_map);
		if (sc->nge_rdata.nge_rx_ring)
			bus_dmamem_free(sc->nge_cdata.nge_rx_ring_tag,
			    sc->nge_rdata.nge_rx_ring,
			    sc->nge_cdata.nge_rx_ring_map);
		sc->nge_rdata.nge_rx_ring = NULL;
		sc->nge_rdata.nge_rx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->nge_cdata.nge_rx_ring_tag);
		sc->nge_cdata.nge_rx_ring_tag = NULL;
	}
	/* Tx buffers. */
	if (sc->nge_cdata.nge_tx_tag) {
		for (i = 0; i < NGE_TX_RING_CNT; i++) {
			txd = &sc->nge_cdata.nge_txdesc[i];
			if (txd->tx_dmamap) {
				bus_dmamap_destroy(sc->nge_cdata.nge_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->nge_cdata.nge_tx_tag);
		sc->nge_cdata.nge_tx_tag = NULL;
	}
	/* Rx buffers. */
	if (sc->nge_cdata.nge_rx_tag) {
		for (i = 0; i < NGE_RX_RING_CNT; i++) {
			rxd = &sc->nge_cdata.nge_rxdesc[i];
			if (rxd->rx_dmamap) {
				bus_dmamap_destroy(sc->nge_cdata.nge_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->nge_cdata.nge_rx_sparemap) {
			bus_dmamap_destroy(sc->nge_cdata.nge_rx_tag,
			    sc->nge_cdata.nge_rx_sparemap);
			sc->nge_cdata.nge_rx_sparemap = 0;
		}
		bus_dma_tag_destroy(sc->nge_cdata.nge_rx_tag);
		sc->nge_cdata.nge_rx_tag = NULL;
	}

	if (sc->nge_cdata.nge_parent_tag) {
		bus_dma_tag_destroy(sc->nge_cdata.nge_parent_tag);
		sc->nge_cdata.nge_parent_tag = NULL;
	}
}

/*
 * Initialize the transmit descriptors.
 */
static int
nge_list_tx_init(struct nge_softc *sc)
{
	struct nge_ring_data *rd;
	struct nge_txdesc *txd;
	bus_addr_t addr;
	int i;

	sc->nge_cdata.nge_tx_prod = 0;
	sc->nge_cdata.nge_tx_cons = 0;
	sc->nge_cdata.nge_tx_cnt = 0;

	rd = &sc->nge_rdata;
	bzero(rd->nge_tx_ring, sizeof(struct nge_desc) * NGE_TX_RING_CNT);
	for (i = 0; i < NGE_TX_RING_CNT; i++) {
		if (i == NGE_TX_RING_CNT - 1)
			addr = NGE_TX_RING_ADDR(sc, 0);
		else
			addr = NGE_TX_RING_ADDR(sc, i + 1);
		rd->nge_tx_ring[i].nge_next = htole32(NGE_ADDR_LO(addr));
		txd = &sc->nge_cdata.nge_txdesc[i];
		txd->tx_m = NULL;
	}

	bus_dmamap_sync(sc->nge_cdata.nge_tx_ring_tag,
	    sc->nge_cdata.nge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int
nge_list_rx_init(struct nge_softc *sc)
{
	struct nge_ring_data *rd;
	bus_addr_t addr;
	int i;

	sc->nge_cdata.nge_rx_cons = 0;
	sc->nge_head = sc->nge_tail = NULL;

	rd = &sc->nge_rdata;
	bzero(rd->nge_rx_ring, sizeof(struct nge_desc) * NGE_RX_RING_CNT);
	for (i = 0; i < NGE_RX_RING_CNT; i++) {
		if (nge_newbuf(sc, i) != 0)
			return (ENOBUFS);
		if (i == NGE_RX_RING_CNT - 1)
			addr = NGE_RX_RING_ADDR(sc, 0);
		else
			addr = NGE_RX_RING_ADDR(sc, i + 1);
		rd->nge_rx_ring[i].nge_next = htole32(NGE_ADDR_LO(addr));
	}

	bus_dmamap_sync(sc->nge_cdata.nge_rx_ring_tag,
	    sc->nge_cdata.nge_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

static __inline void
nge_discard_rxbuf(struct nge_softc *sc, int idx)
{
	struct nge_desc *desc;

	desc = &sc->nge_rdata.nge_rx_ring[idx];
	desc->nge_cmdsts = htole32(MCLBYTES - sizeof(uint64_t));
	desc->nge_extsts = 0;
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
1372 */ 1373 static int 1374 nge_newbuf(struct nge_softc *sc, int idx) 1375 { 1376 struct nge_desc *desc; 1377 struct nge_rxdesc *rxd; 1378 struct mbuf *m; 1379 bus_dma_segment_t segs[1]; 1380 bus_dmamap_t map; 1381 int nsegs; 1382 1383 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 1384 if (m == NULL) 1385 return (ENOBUFS); 1386 m->m_len = m->m_pkthdr.len = MCLBYTES; 1387 m_adj(m, sizeof(uint64_t)); 1388 1389 if (bus_dmamap_load_mbuf_sg(sc->nge_cdata.nge_rx_tag, 1390 sc->nge_cdata.nge_rx_sparemap, m, segs, &nsegs, 0) != 0) { 1391 m_freem(m); 1392 return (ENOBUFS); 1393 } 1394 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1395 1396 rxd = &sc->nge_cdata.nge_rxdesc[idx]; 1397 if (rxd->rx_m != NULL) { 1398 bus_dmamap_sync(sc->nge_cdata.nge_rx_tag, rxd->rx_dmamap, 1399 BUS_DMASYNC_POSTREAD); 1400 bus_dmamap_unload(sc->nge_cdata.nge_rx_tag, rxd->rx_dmamap); 1401 } 1402 map = rxd->rx_dmamap; 1403 rxd->rx_dmamap = sc->nge_cdata.nge_rx_sparemap; 1404 sc->nge_cdata.nge_rx_sparemap = map; 1405 bus_dmamap_sync(sc->nge_cdata.nge_rx_tag, rxd->rx_dmamap, 1406 BUS_DMASYNC_PREREAD); 1407 rxd->rx_m = m; 1408 desc = &sc->nge_rdata.nge_rx_ring[idx]; 1409 desc->nge_ptr = htole32(NGE_ADDR_LO(segs[0].ds_addr)); 1410 desc->nge_cmdsts = htole32(segs[0].ds_len); 1411 desc->nge_extsts = 0; 1412 1413 return (0); 1414 } 1415 1416 #ifndef __NO_STRICT_ALIGNMENT 1417 static __inline void 1418 nge_fixup_rx(struct mbuf *m) 1419 { 1420 int i; 1421 uint16_t *src, *dst; 1422 1423 src = mtod(m, uint16_t *); 1424 dst = src - 1; 1425 1426 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) 1427 *dst++ = *src++; 1428 1429 m->m_data -= ETHER_ALIGN; 1430 } 1431 #endif 1432 1433 /* 1434 * A frame has been uploaded: pass the resulting mbuf chain up to 1435 * the higher level protocols. 
1436 */ 1437 static int 1438 nge_rxeof(struct nge_softc *sc) 1439 { 1440 struct mbuf *m; 1441 struct ifnet *ifp; 1442 struct nge_desc *cur_rx; 1443 struct nge_rxdesc *rxd; 1444 int cons, prog, rx_npkts, total_len; 1445 uint32_t cmdsts, extsts; 1446 1447 NGE_LOCK_ASSERT(sc); 1448 1449 ifp = sc->nge_ifp; 1450 cons = sc->nge_cdata.nge_rx_cons; 1451 rx_npkts = 0; 1452 1453 bus_dmamap_sync(sc->nge_cdata.nge_rx_ring_tag, 1454 sc->nge_cdata.nge_rx_ring_map, 1455 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1456 1457 for (prog = 0; prog < NGE_RX_RING_CNT && 1458 (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0; 1459 NGE_INC(cons, NGE_RX_RING_CNT)) { 1460 #ifdef DEVICE_POLLING 1461 if (ifp->if_capenable & IFCAP_POLLING) { 1462 if (sc->rxcycles <= 0) 1463 break; 1464 sc->rxcycles--; 1465 } 1466 #endif 1467 cur_rx = &sc->nge_rdata.nge_rx_ring[cons]; 1468 cmdsts = le32toh(cur_rx->nge_cmdsts); 1469 extsts = le32toh(cur_rx->nge_extsts); 1470 if ((cmdsts & NGE_CMDSTS_OWN) == 0) 1471 break; 1472 prog++; 1473 rxd = &sc->nge_cdata.nge_rxdesc[cons]; 1474 m = rxd->rx_m; 1475 total_len = cmdsts & NGE_CMDSTS_BUFLEN; 1476 1477 if ((cmdsts & NGE_CMDSTS_MORE) != 0) { 1478 if (nge_newbuf(sc, cons) != 0) { 1479 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 1480 if (sc->nge_head != NULL) { 1481 m_freem(sc->nge_head); 1482 sc->nge_head = sc->nge_tail = NULL; 1483 } 1484 nge_discard_rxbuf(sc, cons); 1485 continue; 1486 } 1487 m->m_len = total_len; 1488 if (sc->nge_head == NULL) { 1489 m->m_pkthdr.len = total_len; 1490 sc->nge_head = sc->nge_tail = m; 1491 } else { 1492 m->m_flags &= ~M_PKTHDR; 1493 sc->nge_head->m_pkthdr.len += total_len; 1494 sc->nge_tail->m_next = m; 1495 sc->nge_tail = m; 1496 } 1497 continue; 1498 } 1499 1500 /* 1501 * If an error occurs, update stats, clear the 1502 * status word and leave the mbuf cluster in place: 1503 * it should simply get re-used next time this descriptor 1504 * comes up in the ring. 1505 */ 1506 if ((cmdsts & NGE_CMDSTS_PKT_OK) == 0) { 1507 if ((cmdsts & NGE_RXSTAT_RUNT) && 1508 total_len >= (ETHER_MIN_LEN - ETHER_CRC_LEN - 4)) { 1509 /* 1510 * Work-around hardware bug, accept runt frames 1511 * if its length is larger than or equal to 56. 1512 */ 1513 } else { 1514 /* 1515 * Input error counters are updated by hardware. 1516 */ 1517 if (sc->nge_head != NULL) { 1518 m_freem(sc->nge_head); 1519 sc->nge_head = sc->nge_tail = NULL; 1520 } 1521 nge_discard_rxbuf(sc, cons); 1522 continue; 1523 } 1524 } 1525 1526 /* Try conjure up a replacement mbuf. */ 1527 1528 if (nge_newbuf(sc, cons) != 0) { 1529 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 1530 if (sc->nge_head != NULL) { 1531 m_freem(sc->nge_head); 1532 sc->nge_head = sc->nge_tail = NULL; 1533 } 1534 nge_discard_rxbuf(sc, cons); 1535 continue; 1536 } 1537 1538 /* Chain received mbufs. */ 1539 if (sc->nge_head != NULL) { 1540 m->m_len = total_len; 1541 m->m_flags &= ~M_PKTHDR; 1542 sc->nge_tail->m_next = m; 1543 m = sc->nge_head; 1544 m->m_pkthdr.len += total_len; 1545 sc->nge_head = sc->nge_tail = NULL; 1546 } else 1547 m->m_pkthdr.len = m->m_len = total_len; 1548 1549 /* 1550 * Ok. NatSemi really screwed up here. This is the 1551 * only gigE chip I know of with alignment constraints 1552 * on receive buffers. RX buffers must be 64-bit aligned. 1553 */ 1554 /* 1555 * By popular demand, ignore the alignment problems 1556 * on the non-strict alignment platform. 
		 * incurred due to unaligned accesses is much smaller
		 * than the hit produced by forcing buffer copies all
		 * the time, especially with jumbo frames. We still
		 * need to fix up the alignment everywhere else though.
		 */
#ifndef __NO_STRICT_ALIGNMENT
		nge_fixup_rx(m);
#endif
		m->m_pkthdr.rcvif = ifp;
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			/* Do IP checksum checking. */
			if ((extsts & NGE_RXEXTSTS_IPPKT) != 0)
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if ((extsts & NGE_RXEXTSTS_IPCSUMERR) == 0)
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			if ((extsts & NGE_RXEXTSTS_TCPPKT &&
			    !(extsts & NGE_RXEXTSTS_TCPCSUMERR)) ||
			    (extsts & NGE_RXEXTSTS_UDPPKT &&
			    !(extsts & NGE_RXEXTSTS_UDPCSUMERR))) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		/*
		 * If we received a packet with a vlan tag, pass it
		 * to vlan_input() instead of ether_input().
		 */
		if ((extsts & NGE_RXEXTSTS_VLANPKT) != 0 &&
		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
			m->m_pkthdr.ether_vtag =
			    bswap16(extsts & NGE_RXEXTSTS_VTCI);
			m->m_flags |= M_VLANTAG;
		}
		NGE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		NGE_LOCK(sc);
		rx_npkts++;
	}

	if (prog > 0) {
		sc->nge_cdata.nge_rx_cons = cons;
		bus_dmamap_sync(sc->nge_cdata.nge_rx_ring_tag,
		    sc->nge_cdata.nge_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	return (rx_npkts);
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
static void
nge_txeof(struct nge_softc *sc)
{
	struct nge_desc *cur_tx;
	struct nge_txdesc *txd;
	struct ifnet *ifp;
	uint32_t cmdsts;
	int cons, prod;

	NGE_LOCK_ASSERT(sc);
	ifp = sc->nge_ifp;

	cons = sc->nge_cdata.nge_tx_cons;
	prod = sc->nge_cdata.nge_tx_prod;
	if (cons == prod)
		return;

	bus_dmamap_sync(sc->nge_cdata.nge_tx_ring_tag,
	    sc->nge_cdata.nge_tx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
1637 */ 1638 for (; cons != prod; NGE_INC(cons, NGE_TX_RING_CNT)) { 1639 cur_tx = &sc->nge_rdata.nge_tx_ring[cons]; 1640 cmdsts = le32toh(cur_tx->nge_cmdsts); 1641 if ((cmdsts & NGE_CMDSTS_OWN) != 0) 1642 break; 1643 sc->nge_cdata.nge_tx_cnt--; 1644 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1645 if ((cmdsts & NGE_CMDSTS_MORE) != 0) 1646 continue; 1647 1648 txd = &sc->nge_cdata.nge_txdesc[cons]; 1649 bus_dmamap_sync(sc->nge_cdata.nge_tx_tag, txd->tx_dmamap, 1650 BUS_DMASYNC_POSTWRITE); 1651 bus_dmamap_unload(sc->nge_cdata.nge_tx_tag, txd->tx_dmamap); 1652 if ((cmdsts & NGE_CMDSTS_PKT_OK) == 0) { 1653 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1654 if ((cmdsts & NGE_TXSTAT_EXCESSCOLLS) != 0) 1655 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); 1656 if ((cmdsts & NGE_TXSTAT_OUTOFWINCOLL) != 0) 1657 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); 1658 } else 1659 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); 1660 1661 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (cmdsts & NGE_TXSTAT_COLLCNT) >> 16); 1662 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!\n", 1663 __func__)); 1664 m_freem(txd->tx_m); 1665 txd->tx_m = NULL; 1666 } 1667 1668 sc->nge_cdata.nge_tx_cons = cons; 1669 if (sc->nge_cdata.nge_tx_cnt == 0) 1670 sc->nge_watchdog_timer = 0; 1671 } 1672 1673 static void 1674 nge_tick(void *xsc) 1675 { 1676 struct nge_softc *sc; 1677 struct mii_data *mii; 1678 1679 sc = xsc; 1680 NGE_LOCK_ASSERT(sc); 1681 mii = device_get_softc(sc->nge_miibus); 1682 mii_tick(mii); 1683 /* 1684 * For PHYs that does not reset established link, it is 1685 * necessary to check whether driver still have a valid 1686 * link(e.g link state change callback is not called). 1687 * Otherwise, driver think it lost link because driver 1688 * initialization routine clears link state flag. 1689 */ 1690 if ((sc->nge_flags & NGE_FLAG_LINK) == 0) 1691 nge_miibus_statchg(sc->nge_dev); 1692 nge_stats_update(sc); 1693 nge_watchdog(sc); 1694 callout_reset(&sc->nge_stat_ch, hz, nge_tick, sc); 1695 } 1696 1697 static void 1698 nge_stats_update(struct nge_softc *sc) 1699 { 1700 struct ifnet *ifp; 1701 struct nge_stats now, *stats, *nstats; 1702 1703 NGE_LOCK_ASSERT(sc); 1704 1705 ifp = sc->nge_ifp; 1706 stats = &now; 1707 stats->rx_pkts_errs = 1708 CSR_READ_4(sc, NGE_MIB_RXERRPKT) & 0xFFFF; 1709 stats->rx_crc_errs = 1710 CSR_READ_4(sc, NGE_MIB_RXERRFCS) & 0xFFFF; 1711 stats->rx_fifo_oflows = 1712 CSR_READ_4(sc, NGE_MIB_RXERRMISSEDPKT) & 0xFFFF; 1713 stats->rx_align_errs = 1714 CSR_READ_4(sc, NGE_MIB_RXERRALIGN) & 0xFFFF; 1715 stats->rx_sym_errs = 1716 CSR_READ_4(sc, NGE_MIB_RXERRSYM) & 0xFFFF; 1717 stats->rx_pkts_jumbos = 1718 CSR_READ_4(sc, NGE_MIB_RXERRGIANT) & 0xFFFF; 1719 stats->rx_len_errs = 1720 CSR_READ_4(sc, NGE_MIB_RXERRRANGLEN) & 0xFFFF; 1721 stats->rx_unctl_frames = 1722 CSR_READ_4(sc, NGE_MIB_RXBADOPCODE) & 0xFFFF; 1723 stats->rx_pause = 1724 CSR_READ_4(sc, NGE_MIB_RXPAUSEPKTS) & 0xFFFF; 1725 stats->tx_pause = 1726 CSR_READ_4(sc, NGE_MIB_TXPAUSEPKTS) & 0xFFFF; 1727 stats->tx_seq_errs = 1728 CSR_READ_4(sc, NGE_MIB_TXERRSQE) & 0xFF; 1729 1730 /* 1731 * Since we've accept errored frames exclude Rx length errors. 
1732 */ 1733 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1734 stats->rx_pkts_errs + stats->rx_crc_errs + 1735 stats->rx_fifo_oflows + stats->rx_sym_errs); 1736 1737 nstats = &sc->nge_stats; 1738 nstats->rx_pkts_errs += stats->rx_pkts_errs; 1739 nstats->rx_crc_errs += stats->rx_crc_errs; 1740 nstats->rx_fifo_oflows += stats->rx_fifo_oflows; 1741 nstats->rx_align_errs += stats->rx_align_errs; 1742 nstats->rx_sym_errs += stats->rx_sym_errs; 1743 nstats->rx_pkts_jumbos += stats->rx_pkts_jumbos; 1744 nstats->rx_len_errs += stats->rx_len_errs; 1745 nstats->rx_unctl_frames += stats->rx_unctl_frames; 1746 nstats->rx_pause += stats->rx_pause; 1747 nstats->tx_pause += stats->tx_pause; 1748 nstats->tx_seq_errs += stats->tx_seq_errs; 1749 } 1750 1751 #ifdef DEVICE_POLLING 1752 static poll_handler_t nge_poll; 1753 1754 static int 1755 nge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 1756 { 1757 struct nge_softc *sc; 1758 int rx_npkts = 0; 1759 1760 sc = ifp->if_softc; 1761 1762 NGE_LOCK(sc); 1763 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1764 NGE_UNLOCK(sc); 1765 return (rx_npkts); 1766 } 1767 1768 /* 1769 * On the nge, reading the status register also clears it. 1770 * So before returning to intr mode we must make sure that all 1771 * possible pending sources of interrupts have been served. 1772 * In practice this means run to completion the *eof routines, 1773 * and then call the interrupt routine. 1774 */ 1775 sc->rxcycles = count; 1776 rx_npkts = nge_rxeof(sc); 1777 nge_txeof(sc); 1778 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1779 nge_start_locked(ifp); 1780 1781 if (sc->rxcycles > 0 || cmd == POLL_AND_CHECK_STATUS) { 1782 uint32_t status; 1783 1784 /* Reading the ISR register clears all interrupts. */ 1785 status = CSR_READ_4(sc, NGE_ISR); 1786 1787 if ((status & (NGE_ISR_RX_ERR|NGE_ISR_RX_OFLOW)) != 0) 1788 rx_npkts += nge_rxeof(sc); 1789 1790 if ((status & NGE_ISR_RX_IDLE) != 0) 1791 NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE); 1792 1793 if ((status & NGE_ISR_SYSERR) != 0) { 1794 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1795 nge_init_locked(sc); 1796 } 1797 } 1798 NGE_UNLOCK(sc); 1799 return (rx_npkts); 1800 } 1801 #endif /* DEVICE_POLLING */ 1802 1803 static void 1804 nge_intr(void *arg) 1805 { 1806 struct nge_softc *sc; 1807 struct ifnet *ifp; 1808 uint32_t status; 1809 1810 sc = (struct nge_softc *)arg; 1811 ifp = sc->nge_ifp; 1812 1813 NGE_LOCK(sc); 1814 1815 if ((sc->nge_flags & NGE_FLAG_SUSPENDED) != 0) 1816 goto done_locked; 1817 1818 /* Reading the ISR register clears all interrupts. */ 1819 status = CSR_READ_4(sc, NGE_ISR); 1820 if (status == 0xffffffff || (status & NGE_INTRS) == 0) 1821 goto done_locked; 1822 #ifdef DEVICE_POLLING 1823 if ((ifp->if_capenable & IFCAP_POLLING) != 0) 1824 goto done_locked; 1825 #endif 1826 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 1827 goto done_locked; 1828 1829 /* Disable interrupts. 
	CSR_WRITE_4(sc, NGE_IER, 0);

	/* Data LED on for TBI mode */
	if ((sc->nge_flags & NGE_FLAG_TBI) != 0)
		CSR_WRITE_4(sc, NGE_GPIO,
		    CSR_READ_4(sc, NGE_GPIO) | NGE_GPIO_GP3_OUT);

	for (; (status & NGE_INTRS) != 0;) {
		if ((status & (NGE_ISR_TX_DESC_OK | NGE_ISR_TX_ERR |
		    NGE_ISR_TX_OK | NGE_ISR_TX_IDLE)) != 0)
			nge_txeof(sc);

		if ((status & (NGE_ISR_RX_DESC_OK | NGE_ISR_RX_ERR |
		    NGE_ISR_RX_OFLOW | NGE_ISR_RX_FIFO_OFLOW |
		    NGE_ISR_RX_IDLE | NGE_ISR_RX_OK)) != 0)
			nge_rxeof(sc);

		if ((status & NGE_ISR_RX_IDLE) != 0)
			NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);

		if ((status & NGE_ISR_SYSERR) != 0) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			nge_init_locked(sc);
		}
		/* Reading the ISR register clears all interrupts. */
		status = CSR_READ_4(sc, NGE_ISR);
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, NGE_IER, 1);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		nge_start_locked(ifp);

	/* Data LED off for TBI mode */
	if ((sc->nge_flags & NGE_FLAG_TBI) != 0)
		CSR_WRITE_4(sc, NGE_GPIO,
		    CSR_READ_4(sc, NGE_GPIO) & ~NGE_GPIO_GP3_OUT);

done_locked:
	NGE_UNLOCK(sc);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
nge_encap(struct nge_softc *sc, struct mbuf **m_head)
{
	struct nge_txdesc *txd, *txd_last;
	struct nge_desc *desc;
	struct mbuf *m;
	bus_dmamap_t map;
	bus_dma_segment_t txsegs[NGE_MAXTXSEGS];
	int error, i, nsegs, prod, si;

	NGE_LOCK_ASSERT(sc);

	m = *m_head;
	prod = sc->nge_cdata.nge_tx_prod;
	txd = &sc->nge_cdata.nge_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;
	error = bus_dmamap_load_mbuf_sg(sc->nge_cdata.nge_tx_tag, map,
	    *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_NOWAIT, NGE_MAXTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->nge_cdata.nge_tx_tag,
		    map, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check number of available descriptors. */
	if (sc->nge_cdata.nge_tx_cnt + nsegs >= (NGE_TX_RING_CNT - 1)) {
		bus_dmamap_unload(sc->nge_cdata.nge_tx_tag, map);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->nge_cdata.nge_tx_tag, map, BUS_DMASYNC_PREWRITE);

	si = prod;
	for (i = 0; i < nsegs; i++) {
		desc = &sc->nge_rdata.nge_tx_ring[prod];
		desc->nge_ptr = htole32(NGE_ADDR_LO(txsegs[i].ds_addr));
		if (i == 0)
			desc->nge_cmdsts = htole32(txsegs[i].ds_len |
			    NGE_CMDSTS_MORE);
		else
			desc->nge_cmdsts = htole32(txsegs[i].ds_len |
			    NGE_CMDSTS_MORE | NGE_CMDSTS_OWN);
		desc->nge_extsts = 0;
		sc->nge_cdata.nge_tx_cnt++;
		NGE_INC(prod, NGE_TX_RING_CNT);
	}
	/* Update producer index. */
	sc->nge_cdata.nge_tx_prod = prod;

	prod = (prod + NGE_TX_RING_CNT - 1) % NGE_TX_RING_CNT;
	desc = &sc->nge_rdata.nge_tx_ring[prod];
	/* Check if we have a VLAN tag to insert. */
	if ((m->m_flags & M_VLANTAG) != 0)
		desc->nge_extsts |= htole32(NGE_TXEXTSTS_VLANPKT |
		    bswap16(m->m_pkthdr.ether_vtag));
	/* Set EOP on the last descriptor. */
	desc->nge_cmdsts &= htole32(~NGE_CMDSTS_MORE);

	/* Set checksum offload in the first descriptor. */
	desc = &sc->nge_rdata.nge_tx_ring[si];
	if ((m->m_pkthdr.csum_flags & NGE_CSUM_FEATURES) != 0) {
		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
			desc->nge_extsts |= htole32(NGE_TXEXTSTS_IPCSUM);
		if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
			desc->nge_extsts |= htole32(NGE_TXEXTSTS_TCPCSUM);
		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
			desc->nge_extsts |= htole32(NGE_TXEXTSTS_UDPCSUM);
	}
	/* Lastly, turn over ownership of the first descriptor to hardware. */
	desc->nge_cmdsts |= htole32(NGE_CMDSTS_OWN);

	txd = &sc->nge_cdata.nge_txdesc[prod];
	map = txd_last->tx_dmamap;
	txd_last->tx_dmamap = txd->tx_dmamap;
	txd->tx_dmamap = map;
	txd->tx_m = m;

	return (0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */

static void
nge_start(struct ifnet *ifp)
{
	struct nge_softc *sc;

	sc = ifp->if_softc;
	NGE_LOCK(sc);
	nge_start_locked(ifp);
	NGE_UNLOCK(sc);
}

static void
nge_start_locked(struct ifnet *ifp)
{
	struct nge_softc *sc;
	struct mbuf *m_head;
	int enq;

	sc = ifp->if_softc;

	NGE_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->nge_flags & NGE_FLAG_LINK) == 0)
		return;

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
	    sc->nge_cdata.nge_tx_cnt < NGE_TX_RING_CNT - 2; ) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (nge_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		bus_dmamap_sync(sc->nge_cdata.nge_tx_ring_tag,
		    sc->nge_cdata.nge_tx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/* Transmit */
		NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_ENABLE);

		/* Set a timeout in case the chip goes out to lunch. */
		sc->nge_watchdog_timer = 5;
	}
}

static void
nge_init(void *xsc)
{
	struct nge_softc *sc = xsc;

	NGE_LOCK(sc);
	nge_init_locked(sc);
	NGE_UNLOCK(sc);
}

static void
nge_init_locked(struct nge_softc *sc)
{
	struct ifnet *ifp = sc->nge_ifp;
	struct mii_data *mii;
	uint8_t *eaddr;
	uint32_t reg;

	NGE_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	nge_stop(sc);

	/* Reset the adapter. */
	nge_reset(sc);

	/* Disable the Rx filter before programming it. */
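	/*
	 * NGE_RXFILT_CTL also serves as the address window for the
	 * NGE_RXFILT_DATA writes below (the PAR0/PAR1/PAR2 selects), so
	 * the filter is quiesced before the station address is loaded
	 * through it.
	 */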
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, 0);
	CSR_BARRIER_4(sc, NGE_RXFILT_CTL, BUS_SPACE_BARRIER_WRITE);

	mii = device_get_softc(sc->nge_miibus);

	/* Set MAC address. */
	eaddr = IF_LLADDR(sc->nge_ifp);
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR0);
	CSR_WRITE_4(sc, NGE_RXFILT_DATA, (eaddr[1] << 8) | eaddr[0]);
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR1);
	CSR_WRITE_4(sc, NGE_RXFILT_DATA, (eaddr[3] << 8) | eaddr[2]);
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR2);
	CSR_WRITE_4(sc, NGE_RXFILT_DATA, (eaddr[5] << 8) | eaddr[4]);

	/* Init circular RX list. */
	if (nge_list_rx_init(sc) == ENOBUFS) {
		device_printf(sc->nge_dev, "initialization failed: no "
		    "memory for rx buffers\n");
		nge_stop(sc);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	nge_list_tx_init(sc);

	/* Set Rx filter. */
	nge_rxfilter(sc);

	/* Disable PRIQ ctl. */
	CSR_WRITE_4(sc, NGE_PRIOQCTL, 0);

	/*
	 * Set pause frame parameters.
	 * Rx stat FIFO hi-threshold : 2 or more packets
	 * Rx stat FIFO lo-threshold : less than 2 packets
	 * Rx data FIFO hi-threshold : 2K or more bytes
	 * Rx data FIFO lo-threshold : less than 2K bytes
	 * pause time : (512ns * 0xffff) -> 33.55ms
	 */
	CSR_WRITE_4(sc, NGE_PAUSECSR,
	    NGE_PAUSECSR_PAUSE_ON_MCAST |
	    NGE_PAUSECSR_PAUSE_ON_DA |
	    ((1 << 24) & NGE_PAUSECSR_RX_STATFIFO_THR_HI) |
	    ((1 << 22) & NGE_PAUSECSR_RX_STATFIFO_THR_LO) |
	    ((1 << 20) & NGE_PAUSECSR_RX_DATAFIFO_THR_HI) |
	    ((1 << 18) & NGE_PAUSECSR_RX_DATAFIFO_THR_LO) |
	    NGE_PAUSECSR_CNT);

	/*
	 * Load the address of the RX and TX lists.
	 */
	CSR_WRITE_4(sc, NGE_RX_LISTPTR_HI,
	    NGE_ADDR_HI(sc->nge_rdata.nge_rx_ring_paddr));
	CSR_WRITE_4(sc, NGE_RX_LISTPTR_LO,
	    NGE_ADDR_LO(sc->nge_rdata.nge_rx_ring_paddr));
	CSR_WRITE_4(sc, NGE_TX_LISTPTR_HI,
	    NGE_ADDR_HI(sc->nge_rdata.nge_tx_ring_paddr));
	CSR_WRITE_4(sc, NGE_TX_LISTPTR_LO,
	    NGE_ADDR_LO(sc->nge_rdata.nge_tx_ring_paddr));

	/* Set RX configuration. */
	CSR_WRITE_4(sc, NGE_RX_CFG, NGE_RXCFG);

	CSR_WRITE_4(sc, NGE_VLAN_IP_RXCTL, 0);
	/*
	 * Enable hardware checksum validation for all IPv4
	 * packets, do not reject packets with bad checksums.
	 */
	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
		NGE_SETBIT(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_IPCSUM_ENB);

	/*
	 * Tell the chip to detect and strip VLAN tag info from
	 * received frames. The tag will be provided in the extsts
	 * field in the RX descriptors.
	 */
	NGE_SETBIT(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_TAG_DETECT_ENB);
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
		NGE_SETBIT(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_TAG_STRIP_ENB);

	/* Set TX configuration. */
	CSR_WRITE_4(sc, NGE_TX_CFG, NGE_TXCFG);

	/*
	 * Enable TX IPv4 checksumming on a per-packet basis.
	 */
	CSR_WRITE_4(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_CSUM_PER_PKT);

	/*
	 * Tell the chip to insert VLAN tags on a per-packet basis as
	 * dictated by the code in the frame encapsulation routine.
	 */
	NGE_SETBIT(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_TAG_PER_PKT);

	/*
	 * Enable the delivery of PHY interrupts based on
	 * link/speed/duplex status changes. Also enable the
	 * extsts field in the DMA descriptors (needed for
	 * TCP/IP checksum offload on transmit).
	 */
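	/*
	 * Without NGE_CFG_EXTSTS_ENB the extsts words that nge_encap()
	 * fills in would not be honored, so the per-packet checksum and
	 * VLAN settings programmed above depend on this bit.
	 */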
	NGE_SETBIT(sc, NGE_CFG, NGE_CFG_PHYINTR_SPD |
	    NGE_CFG_PHYINTR_LNK | NGE_CFG_PHYINTR_DUP | NGE_CFG_EXTSTS_ENB);

	/*
	 * Configure interrupt holdoff (moderation). We can
	 * have the chip delay interrupt delivery for a certain
	 * period. Units are in 100us, and the max setting
	 * is 25500us (0xFF x 100us). Default is a 100us holdoff.
	 */
	CSR_WRITE_4(sc, NGE_IHR, sc->nge_int_holdoff);

	/*
	 * Enable MAC statistics counters and clear them.
	 */
	reg = CSR_READ_4(sc, NGE_MIBCTL);
	reg &= ~NGE_MIBCTL_FREEZE_CNT;
	reg |= NGE_MIBCTL_CLEAR_CNT;
	CSR_WRITE_4(sc, NGE_MIBCTL, reg);

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, NGE_IMR, NGE_INTRS);
#ifdef DEVICE_POLLING
	/*
	 * ... only enable interrupts if we are not polling, and make
	 * sure they are off otherwise.
	 */
	if ((ifp->if_capenable & IFCAP_POLLING) != 0)
		CSR_WRITE_4(sc, NGE_IER, 0);
	else
#endif
	CSR_WRITE_4(sc, NGE_IER, 1);

	sc->nge_flags &= ~NGE_FLAG_LINK;
	mii_mediachg(mii);

	sc->nge_watchdog_timer = 0;
	callout_reset(&sc->nge_stat_ch, hz, nge_tick, sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}

/*
 * Set media options.
 */
static int
nge_mediachange(struct ifnet *ifp)
{
	struct nge_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = ifp->if_softc;
	NGE_LOCK(sc);
	mii = device_get_softc(sc->nge_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	error = mii_mediachg(mii);
	NGE_UNLOCK(sc);

	return (error);
}

/*
 * Report current media status.
 */
static void
nge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nge_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	NGE_LOCK(sc);
	mii = device_get_softc(sc->nge_miibus);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	NGE_UNLOCK(sc);
}

static int
nge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct nge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int error = 0, mask;

	switch (command) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NGE_JUMBO_MTU)
			error = EINVAL;
		else {
			NGE_LOCK(sc);
			ifp->if_mtu = ifr->ifr_mtu;
			/*
			 * Workaround: if the MTU is larger than 8152
			 * (8170, the largest frame the chip can checksum,
			 * minus the 18 bytes of Ethernet header and CRC),
			 * turn off TX checksum offloading.
			 */
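			/*
			 * The arithmetic behind 8152: the chip can only
			 * checksum a frame that fits in its 8192 byte TX
			 * FIFO, which limits checksummed frames to 8170
			 * bytes; subtracting the 14 byte Ethernet header
			 * and 4 byte CRC leaves 8170 - 18 = 8152.
			 */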
2283 */ 2284 if (ifr->ifr_mtu >= 8152) { 2285 ifp->if_capenable &= ~IFCAP_TXCSUM; 2286 ifp->if_hwassist &= ~NGE_CSUM_FEATURES; 2287 } else { 2288 ifp->if_capenable |= IFCAP_TXCSUM; 2289 ifp->if_hwassist |= NGE_CSUM_FEATURES; 2290 } 2291 NGE_UNLOCK(sc); 2292 VLAN_CAPABILITIES(ifp); 2293 } 2294 break; 2295 case SIOCSIFFLAGS: 2296 NGE_LOCK(sc); 2297 if ((ifp->if_flags & IFF_UP) != 0) { 2298 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 2299 if ((ifp->if_flags ^ sc->nge_if_flags) & 2300 (IFF_PROMISC | IFF_ALLMULTI)) 2301 nge_rxfilter(sc); 2302 } else { 2303 if ((sc->nge_flags & NGE_FLAG_DETACH) == 0) 2304 nge_init_locked(sc); 2305 } 2306 } else { 2307 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 2308 nge_stop(sc); 2309 } 2310 sc->nge_if_flags = ifp->if_flags; 2311 NGE_UNLOCK(sc); 2312 error = 0; 2313 break; 2314 case SIOCADDMULTI: 2315 case SIOCDELMULTI: 2316 NGE_LOCK(sc); 2317 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 2318 nge_rxfilter(sc); 2319 NGE_UNLOCK(sc); 2320 break; 2321 case SIOCGIFMEDIA: 2322 case SIOCSIFMEDIA: 2323 mii = device_get_softc(sc->nge_miibus); 2324 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 2325 break; 2326 case SIOCSIFCAP: 2327 NGE_LOCK(sc); 2328 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 2329 #ifdef DEVICE_POLLING 2330 if ((mask & IFCAP_POLLING) != 0 && 2331 (IFCAP_POLLING & ifp->if_capabilities) != 0) { 2332 ifp->if_capenable ^= IFCAP_POLLING; 2333 if ((IFCAP_POLLING & ifp->if_capenable) != 0) { 2334 error = ether_poll_register(nge_poll, ifp); 2335 if (error != 0) { 2336 NGE_UNLOCK(sc); 2337 break; 2338 } 2339 /* Disable interrupts. */ 2340 CSR_WRITE_4(sc, NGE_IER, 0); 2341 } else { 2342 error = ether_poll_deregister(ifp); 2343 /* Enable interrupts. */ 2344 CSR_WRITE_4(sc, NGE_IER, 1); 2345 } 2346 } 2347 #endif /* DEVICE_POLLING */ 2348 if ((mask & IFCAP_TXCSUM) != 0 && 2349 (IFCAP_TXCSUM & ifp->if_capabilities) != 0) { 2350 ifp->if_capenable ^= IFCAP_TXCSUM; 2351 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0) 2352 ifp->if_hwassist |= NGE_CSUM_FEATURES; 2353 else 2354 ifp->if_hwassist &= ~NGE_CSUM_FEATURES; 2355 } 2356 if ((mask & IFCAP_RXCSUM) != 0 && 2357 (IFCAP_RXCSUM & ifp->if_capabilities) != 0) 2358 ifp->if_capenable ^= IFCAP_RXCSUM; 2359 2360 if ((mask & IFCAP_WOL) != 0 && 2361 (ifp->if_capabilities & IFCAP_WOL) != 0) { 2362 if ((mask & IFCAP_WOL_UCAST) != 0) 2363 ifp->if_capenable ^= IFCAP_WOL_UCAST; 2364 if ((mask & IFCAP_WOL_MCAST) != 0) 2365 ifp->if_capenable ^= IFCAP_WOL_MCAST; 2366 if ((mask & IFCAP_WOL_MAGIC) != 0) 2367 ifp->if_capenable ^= IFCAP_WOL_MAGIC; 2368 } 2369 2370 if ((mask & IFCAP_VLAN_HWCSUM) != 0 && 2371 (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0) 2372 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; 2373 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && 2374 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) { 2375 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 2376 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 2377 if ((ifp->if_capenable & 2378 IFCAP_VLAN_HWTAGGING) != 0) 2379 NGE_SETBIT(sc, 2380 NGE_VLAN_IP_RXCTL, 2381 NGE_VIPRXCTL_TAG_STRIP_ENB); 2382 else 2383 NGE_CLRBIT(sc, 2384 NGE_VLAN_IP_RXCTL, 2385 NGE_VIPRXCTL_TAG_STRIP_ENB); 2386 } 2387 } 2388 /* 2389 * Both VLAN hardware tagging and checksum offload is 2390 * required to do checksum offload on VLAN interface. 
2391 */ 2392 if ((ifp->if_capenable & IFCAP_TXCSUM) == 0) 2393 ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM; 2394 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) 2395 ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM; 2396 NGE_UNLOCK(sc); 2397 VLAN_CAPABILITIES(ifp); 2398 break; 2399 default: 2400 error = ether_ioctl(ifp, command, data); 2401 break; 2402 } 2403 2404 return (error); 2405 } 2406 2407 static void 2408 nge_watchdog(struct nge_softc *sc) 2409 { 2410 struct ifnet *ifp; 2411 2412 NGE_LOCK_ASSERT(sc); 2413 2414 if (sc->nge_watchdog_timer == 0 || --sc->nge_watchdog_timer) 2415 return; 2416 2417 ifp = sc->nge_ifp; 2418 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2419 if_printf(ifp, "watchdog timeout\n"); 2420 2421 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2422 nge_init_locked(sc); 2423 2424 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2425 nge_start_locked(ifp); 2426 } 2427 2428 static int 2429 nge_stop_mac(struct nge_softc *sc) 2430 { 2431 uint32_t reg; 2432 int i; 2433 2434 NGE_LOCK_ASSERT(sc); 2435 2436 reg = CSR_READ_4(sc, NGE_CSR); 2437 if ((reg & (NGE_CSR_TX_ENABLE | NGE_CSR_RX_ENABLE)) != 0) { 2438 reg &= ~(NGE_CSR_TX_ENABLE | NGE_CSR_RX_ENABLE); 2439 reg |= NGE_CSR_TX_DISABLE | NGE_CSR_RX_DISABLE; 2440 CSR_WRITE_4(sc, NGE_CSR, reg); 2441 for (i = 0; i < NGE_TIMEOUT; i++) { 2442 DELAY(1); 2443 if ((CSR_READ_4(sc, NGE_CSR) & 2444 (NGE_CSR_RX_ENABLE | NGE_CSR_TX_ENABLE)) == 0) 2445 break; 2446 } 2447 if (i == NGE_TIMEOUT) 2448 return (ETIMEDOUT); 2449 } 2450 2451 return (0); 2452 } 2453 2454 /* 2455 * Stop the adapter and free any mbufs allocated to the 2456 * RX and TX lists. 2457 */ 2458 static void 2459 nge_stop(struct nge_softc *sc) 2460 { 2461 struct nge_txdesc *txd; 2462 struct nge_rxdesc *rxd; 2463 int i; 2464 struct ifnet *ifp; 2465 2466 NGE_LOCK_ASSERT(sc); 2467 ifp = sc->nge_ifp; 2468 2469 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 2470 sc->nge_flags &= ~NGE_FLAG_LINK; 2471 callout_stop(&sc->nge_stat_ch); 2472 sc->nge_watchdog_timer = 0; 2473 2474 CSR_WRITE_4(sc, NGE_IER, 0); 2475 CSR_WRITE_4(sc, NGE_IMR, 0); 2476 if (nge_stop_mac(sc) == ETIMEDOUT) 2477 device_printf(sc->nge_dev, 2478 "%s: unable to stop Tx/Rx MAC\n", __func__); 2479 CSR_WRITE_4(sc, NGE_TX_LISTPTR_HI, 0); 2480 CSR_WRITE_4(sc, NGE_TX_LISTPTR_LO, 0); 2481 CSR_WRITE_4(sc, NGE_RX_LISTPTR_HI, 0); 2482 CSR_WRITE_4(sc, NGE_RX_LISTPTR_LO, 0); 2483 nge_stats_update(sc); 2484 if (sc->nge_head != NULL) { 2485 m_freem(sc->nge_head); 2486 sc->nge_head = sc->nge_tail = NULL; 2487 } 2488 2489 /* 2490 * Free RX and TX mbufs still in the queues. 2491 */ 2492 for (i = 0; i < NGE_RX_RING_CNT; i++) { 2493 rxd = &sc->nge_cdata.nge_rxdesc[i]; 2494 if (rxd->rx_m != NULL) { 2495 bus_dmamap_sync(sc->nge_cdata.nge_rx_tag, 2496 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 2497 bus_dmamap_unload(sc->nge_cdata.nge_rx_tag, 2498 rxd->rx_dmamap); 2499 m_freem(rxd->rx_m); 2500 rxd->rx_m = NULL; 2501 } 2502 } 2503 for (i = 0; i < NGE_TX_RING_CNT; i++) { 2504 txd = &sc->nge_cdata.nge_txdesc[i]; 2505 if (txd->tx_m != NULL) { 2506 bus_dmamap_sync(sc->nge_cdata.nge_tx_tag, 2507 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 2508 bus_dmamap_unload(sc->nge_cdata.nge_tx_tag, 2509 txd->tx_dmamap); 2510 m_freem(txd->tx_m); 2511 txd->tx_m = NULL; 2512 } 2513 } 2514 } 2515 2516 /* 2517 * Before setting WOL bits, caller should have stopped Receiver. 
2518 */ 2519 static void 2520 nge_wol(struct nge_softc *sc) 2521 { 2522 struct ifnet *ifp; 2523 uint32_t reg; 2524 uint16_t pmstat; 2525 int pmc; 2526 2527 NGE_LOCK_ASSERT(sc); 2528 2529 if (pci_find_cap(sc->nge_dev, PCIY_PMG, &pmc) != 0) 2530 return; 2531 2532 ifp = sc->nge_ifp; 2533 if ((ifp->if_capenable & IFCAP_WOL) == 0) { 2534 /* Disable WOL & disconnect CLKRUN to save power. */ 2535 CSR_WRITE_4(sc, NGE_WOLCSR, 0); 2536 CSR_WRITE_4(sc, NGE_CLKRUN, 0); 2537 } else { 2538 if (nge_stop_mac(sc) == ETIMEDOUT) 2539 device_printf(sc->nge_dev, 2540 "%s: unable to stop Tx/Rx MAC\n", __func__); 2541 /* 2542 * Make sure wake frames will be buffered in the Rx FIFO. 2543 * (i.e. Silent Rx mode.) 2544 */ 2545 CSR_WRITE_4(sc, NGE_RX_LISTPTR_HI, 0); 2546 CSR_BARRIER_4(sc, NGE_RX_LISTPTR_HI, BUS_SPACE_BARRIER_WRITE); 2547 CSR_WRITE_4(sc, NGE_RX_LISTPTR_LO, 0); 2548 CSR_BARRIER_4(sc, NGE_RX_LISTPTR_LO, BUS_SPACE_BARRIER_WRITE); 2549 /* Enable Rx again. */ 2550 NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE); 2551 CSR_BARRIER_4(sc, NGE_CSR, BUS_SPACE_BARRIER_WRITE); 2552 2553 /* Configure WOL events. */ 2554 reg = 0; 2555 if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0) 2556 reg |= NGE_WOLCSR_WAKE_ON_UNICAST; 2557 if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0) 2558 reg |= NGE_WOLCSR_WAKE_ON_MULTICAST; 2559 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) 2560 reg |= NGE_WOLCSR_WAKE_ON_MAGICPKT; 2561 CSR_WRITE_4(sc, NGE_WOLCSR, reg); 2562 2563 /* Activate CLKRUN. */ 2564 reg = CSR_READ_4(sc, NGE_CLKRUN); 2565 reg |= NGE_CLKRUN_PMEENB | NGE_CLNRUN_CLKRUN_ENB; 2566 CSR_WRITE_4(sc, NGE_CLKRUN, reg); 2567 } 2568 2569 /* Request PME. */ 2570 pmstat = pci_read_config(sc->nge_dev, pmc + PCIR_POWER_STATUS, 2); 2571 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 2572 if ((ifp->if_capenable & IFCAP_WOL) != 0) 2573 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 2574 pci_write_config(sc->nge_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); 2575 } 2576 2577 /* 2578 * Stop all chip I/O so that the kernel's probe routines don't 2579 * get confused by errant DMAs when rebooting. 2580 */ 2581 static int 2582 nge_shutdown(device_t dev) 2583 { 2584 2585 return (nge_suspend(dev)); 2586 } 2587 2588 static int 2589 nge_suspend(device_t dev) 2590 { 2591 struct nge_softc *sc; 2592 2593 sc = device_get_softc(dev); 2594 2595 NGE_LOCK(sc); 2596 nge_stop(sc); 2597 nge_wol(sc); 2598 sc->nge_flags |= NGE_FLAG_SUSPENDED; 2599 NGE_UNLOCK(sc); 2600 2601 return (0); 2602 } 2603 2604 static int 2605 nge_resume(device_t dev) 2606 { 2607 struct nge_softc *sc; 2608 struct ifnet *ifp; 2609 uint16_t pmstat; 2610 int pmc; 2611 2612 sc = device_get_softc(dev); 2613 2614 NGE_LOCK(sc); 2615 ifp = sc->nge_ifp; 2616 if (pci_find_cap(sc->nge_dev, PCIY_PMG, &pmc) == 0) { 2617 /* Disable PME and clear PME status. 
		pmstat = pci_read_config(sc->nge_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
			pmstat &= ~PCIM_PSTAT_PMEENABLE;
			pci_write_config(sc->nge_dev,
			    pmc + PCIR_POWER_STATUS, pmstat, 2);
		}
	}
	if (ifp->if_flags & IFF_UP) {
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		nge_init_locked(sc);
	}

	sc->nge_flags &= ~NGE_FLAG_SUSPENDED;
	NGE_UNLOCK(sc);

	return (0);
}

#define	NGE_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)

static void
nge_sysctl_node(struct nge_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *parent;
	struct sysctl_oid *tree;
	struct nge_stats *stats;
	int error;

	ctx = device_get_sysctl_ctx(sc->nge_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->nge_dev));
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_holdoff",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->nge_int_holdoff, 0,
	    sysctl_hw_nge_int_holdoff, "I", "NGE interrupt moderation");
	/* Pull in device tunables. */
	sc->nge_int_holdoff = NGE_INT_HOLDOFF_DEFAULT;
	error = resource_int_value(device_get_name(sc->nge_dev),
	    device_get_unit(sc->nge_dev), "int_holdoff", &sc->nge_int_holdoff);
	if (error == 0) {
		if (sc->nge_int_holdoff < NGE_INT_HOLDOFF_MIN ||
		    sc->nge_int_holdoff > NGE_INT_HOLDOFF_MAX) {
			device_printf(sc->nge_dev,
			    "int_holdoff value out of range; "
			    "using default: %d (%d us)\n",
			    NGE_INT_HOLDOFF_DEFAULT,
			    NGE_INT_HOLDOFF_DEFAULT * 100);
			sc->nge_int_holdoff = NGE_INT_HOLDOFF_DEFAULT;
		}
	}

	stats = &sc->nge_stats;
	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "NGE statistics");
	parent = SYSCTL_CHILDREN(tree);

	/* Rx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
	    NULL, "Rx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	NGE_SYSCTL_STAT_ADD32(ctx, child, "pkts_errs",
	    &stats->rx_pkts_errs,
	    "Packet errors including both wire errors and FIFO overruns");
	NGE_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
	    &stats->rx_crc_errs, "CRC errors");
	NGE_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
	    &stats->rx_fifo_oflows, "FIFO overflows");
	NGE_SYSCTL_STAT_ADD32(ctx, child, "align_errs",
	    &stats->rx_align_errs, "Frame alignment errors");
	NGE_SYSCTL_STAT_ADD32(ctx, child, "sym_errs",
	    &stats->rx_sym_errs, "One or more symbol errors");
	NGE_SYSCTL_STAT_ADD32(ctx, child, "pkts_jumbos",
	    &stats->rx_pkts_jumbos,
	    "Packets received with length greater than 1518 bytes");
	NGE_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
	    &stats->rx_len_errs, "In Range Length errors");
	NGE_SYSCTL_STAT_ADD32(ctx, child, "unctl_frames",
	    &stats->rx_unctl_frames, "Control frames with unsupported opcode");
	NGE_SYSCTL_STAT_ADD32(ctx, child, "pause",
	    &stats->rx_pause, "Pause frames");

	/* Tx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
	    NULL, "Tx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	NGE_SYSCTL_STAT_ADD32(ctx, child, "pause",
	    &stats->tx_pause, "Pause frames");
	NGE_SYSCTL_STAT_ADD32(ctx, child, "seq_errs",
	    &stats->tx_seq_errs,
	    "Loss of collision heartbeat during transmission");
}

#undef NGE_SYSCTL_STAT_ADD32

static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (arg1 == NULL)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;

	return (0);
}

static int
sysctl_hw_nge_int_holdoff(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req, NGE_INT_HOLDOFF_MIN,
	    NGE_INT_HOLDOFF_MAX));
}
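/*
 * Example use of the interrupt moderation knob registered above (a
 * sketch; the unit number and value are illustrative):
 *
 *	# sysctl dev.nge.0.int_holdoff=3	(i.e. a 300us holdoff)
 *
 * The same value may be supplied as a device hint read at attach time:
 *
 *	hint.nge.0.int_holdoff="3"		(in /boot/device.hints)
 */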