/*-
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2000, 2001
 *	Bill Paul <wpaul@bsdi.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * National Semiconductor DP83820/DP83821 gigabit ethernet driver
 * for FreeBSD. Datasheets are available from:
 *
 * http://www.national.com/ds/DP/DP83820.pdf
 * http://www.national.com/ds/DP/DP83821.pdf
 *
 * These chips are used on several low cost gigabit ethernet NICs
 * sold by D-Link, Addtron, SMC and Asante. Both parts are
 * virtually the same, except the 83820 is a 64-bit/32-bit part,
 * while the 83821 is 32-bit only.
 *
 * Many cards also use National gigE transceivers, such as the
 * DP83891, DP83861 and DP83862 gigPHYTER parts. The DP83861 datasheet
 * contains a full register description that applies to all of these
 * components:
 *
 * http://www.national.com/ds/DP/DP83861.pdf
 *
 * Written by Bill Paul <wpaul@bsdi.com>
 * BSDi Open Source Solutions
 */

/*
 * The NatSemi DP83820 and 83821 controllers are enhanced versions
 * of the NatSemi MacPHYTER 10/100 devices. They support 10, 100
 * and 1000Mbps speeds with 1000baseX (ten bit interface), MII and GMII
 * ports. Other features include 8K TX FIFO and 32K RX FIFO, TCP/IP
 * hardware checksum offload (IPv4 only), VLAN tagging and filtering,
 * priority TX and RX queues, a 2048 bit multicast hash filter, 4 RX pattern
 * matching buffers, one perfect address filter buffer and interrupt
 * moderation. The 83820 supports both 64-bit and 32-bit addressing
 * and data transfers: the 64-bit support can be toggled on or off
 * via software. This affects the size of certain fields in the DMA
 * descriptors.
 *
 * There are two bugs/misfeatures in the 83820/83821 that I have
 * discovered so far:
 *
 * - Receive buffers must be aligned on 64-bit boundaries, which means
 *   you must resort to copying data in order to fix up the payload
 *   alignment.
 *
 * - In order to transmit jumbo frames larger than 8170 bytes, you have
 *   to turn off transmit checksum offloading, because the chip can't
 *   compute the checksum on an outgoing frame unless it fits entirely
 *   within the TX FIFO, which is only 8192 bytes in size. If you have
 *   TX checksum offload enabled and you attempt to transmit a
 *   frame larger than 8170 bytes, the transmitter will wedge.
 *
 * To work around the latter problem, TX checksum offload is disabled
 * if the user selects an MTU larger than 8152 (8170 - 18).
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <dev/mii/mii.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>

#include <dev/nge/if_ngereg.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

MODULE_DEPEND(nge, pci, 1, 1, 1);
MODULE_DEPEND(nge, ether, 1, 1, 1);
MODULE_DEPEND(nge, miibus, 1, 1, 1);

#define NGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
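
/*
 * A note on the 8152-byte MTU cutoff mentioned above: the 18 bytes
 * subtracted from 8170 account for the Ethernet header (ETHER_HDR_LEN,
 * 14 bytes) plus the frame CRC (ETHER_CRC_LEN, 4 bytes), so 8152 is
 * the largest payload for which the whole frame still fits within the
 * limits of the Tx checksum engine.
 */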

/*
 * Various supported device vendors/types and their names.
 */
static const struct nge_type nge_devs[] = {
	{ NGE_VENDORID, NGE_DEVICEID,
	    "National Semiconductor Gigabit Ethernet" },
	{ 0, 0, NULL }
};

static int nge_probe(device_t);
static int nge_attach(device_t);
static int nge_detach(device_t);
static int nge_shutdown(device_t);
static int nge_suspend(device_t);
static int nge_resume(device_t);

static __inline void nge_discard_rxbuf(struct nge_softc *, int);
static int nge_newbuf(struct nge_softc *, int);
static int nge_encap(struct nge_softc *, struct mbuf **);
#ifndef __NO_STRICT_ALIGNMENT
static __inline void nge_fixup_rx(struct mbuf *);
#endif
static int nge_rxeof(struct nge_softc *);
static void nge_txeof(struct nge_softc *);
static void nge_intr(void *);
static void nge_tick(void *);
static void nge_stats_update(struct nge_softc *);
static void nge_start(struct ifnet *);
static void nge_start_locked(struct ifnet *);
static int nge_ioctl(struct ifnet *, u_long, caddr_t);
static void nge_init(void *);
static void nge_init_locked(struct nge_softc *);
static int nge_stop_mac(struct nge_softc *);
static void nge_stop(struct nge_softc *);
static void nge_wol(struct nge_softc *);
static void nge_watchdog(struct nge_softc *);
static int nge_mediachange(struct ifnet *);
static void nge_mediastatus(struct ifnet *, struct ifmediareq *);

static void nge_delay(struct nge_softc *);
static void nge_eeprom_idle(struct nge_softc *);
static void nge_eeprom_putbyte(struct nge_softc *, int);
static void nge_eeprom_getword(struct nge_softc *, int, uint16_t *);
static void nge_read_eeprom(struct nge_softc *, caddr_t, int, int);

static int nge_miibus_readreg(device_t, int, int);
static int nge_miibus_writereg(device_t, int, int, int);
static void nge_miibus_statchg(device_t);

static void nge_rxfilter(struct nge_softc *);
static void nge_reset(struct nge_softc *);
static void nge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int nge_dma_alloc(struct nge_softc *);
static void nge_dma_free(struct nge_softc *);
static int nge_list_rx_init(struct nge_softc *);
static int nge_list_tx_init(struct nge_softc *);
static void nge_sysctl_node(struct nge_softc *);
static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_nge_int_holdoff(SYSCTL_HANDLER_ARGS);

/*
 * MII bit-bang glue
 */
static uint32_t nge_mii_bitbang_read(device_t);
static void nge_mii_bitbang_write(device_t, uint32_t);

static const struct mii_bitbang_ops nge_mii_bitbang_ops = {
	nge_mii_bitbang_read,
	nge_mii_bitbang_write,
	{
		NGE_MEAR_MII_DATA,	/* MII_BIT_MDO */
		NGE_MEAR_MII_DATA,	/* MII_BIT_MDI */
		NGE_MEAR_MII_CLK,	/* MII_BIT_MDC */
		NGE_MEAR_MII_DIR,	/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};
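
/*
 * The table above hands the generic mii_bitbang module the NGE_MEAR
 * register bits to wiggle for each MDIO signal. The same MEAR data
 * bit serves as both MDO and MDI; which way it drives is selected by
 * NGE_MEAR_MII_DIR, and the zero in the PHY-to-host slot means no
 * extra bit needs to be set to turn the bus around for reads.
 */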

static device_method_t nge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nge_probe),
	DEVMETHOD(device_attach,	nge_attach),
	DEVMETHOD(device_detach,	nge_detach),
	DEVMETHOD(device_shutdown,	nge_shutdown),
	DEVMETHOD(device_suspend,	nge_suspend),
	DEVMETHOD(device_resume,	nge_resume),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	nge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	nge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	nge_miibus_statchg),

	{ NULL, NULL }
};

static driver_t nge_driver = {
	"nge",
	nge_methods,
	sizeof(struct nge_softc)
};

static devclass_t nge_devclass;

DRIVER_MODULE(nge, pci, nge_driver, nge_devclass, 0, 0);
DRIVER_MODULE(miibus, nge, miibus_driver, miibus_devclass, 0, 0);

#define NGE_SETBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) | (x))

#define NGE_CLRBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) & ~(x))

#define SIO_SET(x)					\
	CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) | (x))

#define SIO_CLR(x)					\
	CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) & ~(x))

static void
nge_delay(struct nge_softc *sc)
{
	int idx;

	for (idx = (300 / 33) + 1; idx > 0; idx--)
		CSR_READ_4(sc, NGE_CSR);
}

static void
nge_eeprom_idle(struct nge_softc *sc)
{
	int i;

	SIO_SET(NGE_MEAR_EE_CSEL);
	nge_delay(sc);
	SIO_SET(NGE_MEAR_EE_CLK);
	nge_delay(sc);

	for (i = 0; i < 25; i++) {
		SIO_CLR(NGE_MEAR_EE_CLK);
		nge_delay(sc);
		SIO_SET(NGE_MEAR_EE_CLK);
		nge_delay(sc);
	}

	SIO_CLR(NGE_MEAR_EE_CLK);
	nge_delay(sc);
	SIO_CLR(NGE_MEAR_EE_CSEL);
	nge_delay(sc);
	CSR_WRITE_4(sc, NGE_MEAR, 0x00000000);
}

/*
 * Send a read command and address to the EEPROM, check for ACK.
 */
static void
nge_eeprom_putbyte(struct nge_softc *sc, int addr)
{
	int d, i;

	d = addr | NGE_EECMD_READ;

	/*
	 * Feed in each bit and strobe the clock.
	 */
	for (i = 0x400; i; i >>= 1) {
		if (d & i) {
			SIO_SET(NGE_MEAR_EE_DIN);
		} else {
			SIO_CLR(NGE_MEAR_EE_DIN);
		}
		nge_delay(sc);
		SIO_SET(NGE_MEAR_EE_CLK);
		nge_delay(sc);
		SIO_CLR(NGE_MEAR_EE_CLK);
		nge_delay(sc);
	}
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static void
nge_eeprom_getword(struct nge_softc *sc, int addr, uint16_t *dest)
{
	int i;
	uint16_t word = 0;

	/* Force EEPROM to idle state. */
	nge_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	nge_delay(sc);
	SIO_CLR(NGE_MEAR_EE_CLK);
	nge_delay(sc);
	SIO_SET(NGE_MEAR_EE_CSEL);
	nge_delay(sc);

	/*
	 * Send address of word we want to read.
	 */
	nge_eeprom_putbyte(sc, addr);

	/*
	 * Start reading bits from EEPROM.
	 */
	for (i = 0x8000; i; i >>= 1) {
		SIO_SET(NGE_MEAR_EE_CLK);
		nge_delay(sc);
		if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_EE_DOUT)
			word |= i;
		nge_delay(sc);
		SIO_CLR(NGE_MEAR_EE_CLK);
		nge_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	nge_eeprom_idle(sc);

	*dest = word;
}

/*
 * Read a sequence of words from the EEPROM.
 */
static void
nge_read_eeprom(struct nge_softc *sc, caddr_t dest, int off, int cnt)
{
	int i;
	uint16_t word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		nge_eeprom_getword(sc, off + i, &word);
		ptr = (uint16_t *)(dest + (i * 2));
		*ptr = word;
	}
}
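
/*
 * A note on the bit-bang protocol above: nge_eeprom_putbyte() shifts
 * out eleven bits, MSB first (hence the 0x400 starting mask), covering
 * the NGE_EECMD_READ opcode OR'd with the word address, and
 * nge_eeprom_getword() then clocks in a 16-bit result MSB first (the
 * 0x8000 mask). Everything is done by toggling the EE_CLK/EE_DIN bits
 * in the MEAR register, with nge_delay() reads pacing each transition.
 */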

/*
 * Read the MII serial port for the MII bit-bang module.
 */
static uint32_t
nge_mii_bitbang_read(device_t dev)
{
	struct nge_softc *sc;
	uint32_t val;

	sc = device_get_softc(dev);

	val = CSR_READ_4(sc, NGE_MEAR);
	CSR_BARRIER_4(sc, NGE_MEAR,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);

	return (val);
}

/*
 * Write the MII serial port for the MII bit-bang module.
 */
static void
nge_mii_bitbang_write(device_t dev, uint32_t val)
{
	struct nge_softc *sc;

	sc = device_get_softc(dev);

	CSR_WRITE_4(sc, NGE_MEAR, val);
	CSR_BARRIER_4(sc, NGE_MEAR,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
}

static int
nge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct nge_softc *sc;
	int rv;

	sc = device_get_softc(dev);
	if ((sc->nge_flags & NGE_FLAG_TBI) != 0) {
		/* Pretend PHY is at address 0. */
		if (phy != 0)
			return (0);
		switch (reg) {
		case MII_BMCR:
			reg = NGE_TBI_BMCR;
			break;
		case MII_BMSR:
			/* 83820/83821 has different bit layout for BMSR. */
			rv = BMSR_ANEG | BMSR_EXTCAP | BMSR_EXTSTAT;
			reg = CSR_READ_4(sc, NGE_TBI_BMSR);
			if ((reg & NGE_TBIBMSR_ANEG_DONE) != 0)
				rv |= BMSR_ACOMP;
			if ((reg & NGE_TBIBMSR_LINKSTAT) != 0)
				rv |= BMSR_LINK;
			return (rv);
		case MII_ANAR:
			reg = NGE_TBI_ANAR;
			break;
		case MII_ANLPAR:
			reg = NGE_TBI_ANLPAR;
			break;
		case MII_ANER:
			reg = NGE_TBI_ANER;
			break;
		case MII_EXTSR:
			reg = NGE_TBI_ESR;
			break;
		case MII_PHYIDR1:
		case MII_PHYIDR2:
			return (0);
		default:
			device_printf(sc->nge_dev,
			    "bad phy register read : %d\n", reg);
			return (0);
		}
		return (CSR_READ_4(sc, reg));
	}

	return (mii_bitbang_readreg(dev, &nge_mii_bitbang_ops, phy, reg));
}

static int
nge_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct nge_softc *sc;

	sc = device_get_softc(dev);
	if ((sc->nge_flags & NGE_FLAG_TBI) != 0) {
		/* Pretend PHY is at address 0. */
		if (phy != 0)
			return (0);
		switch (reg) {
		case MII_BMCR:
			reg = NGE_TBI_BMCR;
			break;
		case MII_BMSR:
			return (0);
		case MII_ANAR:
			reg = NGE_TBI_ANAR;
			break;
		case MII_ANLPAR:
			reg = NGE_TBI_ANLPAR;
			break;
		case MII_ANER:
			reg = NGE_TBI_ANER;
			break;
		case MII_EXTSR:
			reg = NGE_TBI_ESR;
			break;
		case MII_PHYIDR1:
		case MII_PHYIDR2:
			return (0);
		default:
			device_printf(sc->nge_dev,
			    "bad phy register write : %d\n", reg);
			return (0);
		}
		CSR_WRITE_4(sc, reg, data);
		return (0);
	}

	mii_bitbang_writereg(dev, &nge_mii_bitbang_ops, phy, reg, data);

	return (0);
}
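
/*
 * In TBI (ten bit interface) mode there is no external MII PHY, so the
 * two routines above emulate one at address 0 by translating standard
 * MII register numbers onto the chip's TBI registers. Only BMSR needs
 * special handling: its status bits are assembled by hand because the
 * 83820/83821 lays them out differently.
 */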

/*
 * media status/link state change handler.
 */
static void
nge_miibus_statchg(device_t dev)
{
	struct nge_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	struct nge_txdesc *txd;
	uint32_t done, reg, status;
	int i;

	sc = device_get_softc(dev);
	NGE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->nge_miibus);
	ifp = sc->nge_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	sc->nge_flags &= ~NGE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
	    (IFM_AVALID | IFM_ACTIVE)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
		case IFM_1000_T:
		case IFM_1000_SX:
		case IFM_1000_LX:
		case IFM_1000_CX:
			sc->nge_flags |= NGE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop Tx/Rx MACs. */
	if (nge_stop_mac(sc) == ETIMEDOUT)
		device_printf(sc->nge_dev,
		    "%s: unable to stop Tx/Rx MAC\n", __func__);
	nge_txeof(sc);
	nge_rxeof(sc);
	if (sc->nge_head != NULL) {
		m_freem(sc->nge_head);
		sc->nge_head = sc->nge_tail = NULL;
	}

	/* Release queued frames. */
	for (i = 0; i < NGE_TX_RING_CNT; i++) {
		txd = &sc->nge_cdata.nge_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->nge_cdata.nge_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->nge_cdata.nge_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	/* Program MAC with resolved speed/duplex. */
	if ((sc->nge_flags & NGE_FLAG_LINK) != 0) {
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
			NGE_SETBIT(sc, NGE_TX_CFG,
			    (NGE_TXCFG_IGN_HBEAT | NGE_TXCFG_IGN_CARR));
			NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
#ifdef notyet
			/* Enable flow-control. */
			if ((IFM_OPTIONS(mii->mii_media_active) &
			    (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) != 0)
				NGE_SETBIT(sc, NGE_PAUSECSR,
				    NGE_PAUSECSR_PAUSE_ENB);
#endif
		} else {
			NGE_CLRBIT(sc, NGE_TX_CFG,
			    (NGE_TXCFG_IGN_HBEAT | NGE_TXCFG_IGN_CARR));
			NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
			NGE_CLRBIT(sc, NGE_PAUSECSR, NGE_PAUSECSR_PAUSE_ENB);
		}
		/* If we have a 1000Mbps link, set the mode_1000 bit. */
		reg = CSR_READ_4(sc, NGE_CFG);
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_SX:
		case IFM_1000_LX:
		case IFM_1000_CX:
		case IFM_1000_T:
			reg |= NGE_CFG_MODE_1000;
			break;
		default:
			reg &= ~NGE_CFG_MODE_1000;
			break;
		}
		CSR_WRITE_4(sc, NGE_CFG, reg);

		/* Reset Tx/Rx MAC. */
		reg = CSR_READ_4(sc, NGE_CSR);
		reg |= NGE_CSR_TX_RESET | NGE_CSR_RX_RESET;
		CSR_WRITE_4(sc, NGE_CSR, reg);
		/* Check the completion of reset. */
		done = 0;
		for (i = 0; i < NGE_TIMEOUT; i++) {
			DELAY(1);
			status = CSR_READ_4(sc, NGE_ISR);
			if ((status & NGE_ISR_RX_RESET_DONE) != 0)
				done |= NGE_ISR_RX_RESET_DONE;
			if ((status & NGE_ISR_TX_RESET_DONE) != 0)
				done |= NGE_ISR_TX_RESET_DONE;
			if (done ==
			    (NGE_ISR_TX_RESET_DONE | NGE_ISR_RX_RESET_DONE))
				break;
		}
		if (i == NGE_TIMEOUT)
			device_printf(sc->nge_dev,
			    "%s: unable to reset Tx/Rx MAC\n", __func__);
		/* Reuse Rx buffer and reset consumer pointer. */
		sc->nge_cdata.nge_rx_cons = 0;
		/*
		 * It seems that resetting the Rx/Tx MAC also resets
		 * the Tx/Rx descriptor pointer registers, so the
		 * Tx/Rx list addresses need to be reloaded.
		 */
		CSR_WRITE_4(sc, NGE_RX_LISTPTR_HI,
		    NGE_ADDR_HI(sc->nge_rdata.nge_rx_ring_paddr));
		CSR_WRITE_4(sc, NGE_RX_LISTPTR_LO,
		    NGE_ADDR_LO(sc->nge_rdata.nge_rx_ring_paddr));
		CSR_WRITE_4(sc, NGE_TX_LISTPTR_HI,
		    NGE_ADDR_HI(sc->nge_rdata.nge_tx_ring_paddr));
		CSR_WRITE_4(sc, NGE_TX_LISTPTR_LO,
		    NGE_ADDR_LO(sc->nge_rdata.nge_tx_ring_paddr));
		/* Reinitialize Tx buffers. */
		nge_list_tx_init(sc);

		/* Restart Rx MAC. */
		reg = CSR_READ_4(sc, NGE_CSR);
		reg |= NGE_CSR_RX_ENABLE;
		CSR_WRITE_4(sc, NGE_CSR, reg);
		for (i = 0; i < NGE_TIMEOUT; i++) {
			if ((CSR_READ_4(sc, NGE_CSR) & NGE_CSR_RX_ENABLE) != 0)
				break;
			DELAY(1);
		}
		if (i == NGE_TIMEOUT)
			device_printf(sc->nge_dev,
			    "%s: unable to restart Rx MAC\n", __func__);
	}

	/* Data LED off for TBI mode */
	if ((sc->nge_flags & NGE_FLAG_TBI) != 0)
		CSR_WRITE_4(sc, NGE_GPIO,
		    CSR_READ_4(sc, NGE_GPIO) & ~NGE_GPIO_GP3_OUT);
}

static void
nge_rxfilter(struct nge_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t h, i, rxfilt;
	int bit, index;

	NGE_LOCK_ASSERT(sc);
	ifp = sc->nge_ifp;

	/* Make sure to stop Rx filtering. */
	rxfilt = CSR_READ_4(sc, NGE_RXFILT_CTL);
	rxfilt &= ~NGE_RXFILTCTL_ENABLE;
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, rxfilt);
	CSR_BARRIER_4(sc, NGE_RXFILT_CTL, BUS_SPACE_BARRIER_WRITE);

	rxfilt &= ~(NGE_RXFILTCTL_ALLMULTI | NGE_RXFILTCTL_ALLPHYS);
	rxfilt &= ~NGE_RXFILTCTL_BROAD;
	/*
	 * We don't want to use the hash table for matching unicast
	 * addresses.
	 */
	rxfilt &= ~(NGE_RXFILTCTL_MCHASH | NGE_RXFILTCTL_UCHASH);

	/*
	 * For the NatSemi chip, we have to explicitly enable the
	 * reception of ARP frames, as well as turn on the 'perfect
	 * match' filter where we store the station address, otherwise
	 * we won't receive unicasts meant for this host.
	 */
	rxfilt |= NGE_RXFILTCTL_ARP | NGE_RXFILTCTL_PERFECT;

	/*
	 * Set the capture broadcast bit to capture broadcast frames.
	 */
	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		rxfilt |= NGE_RXFILTCTL_BROAD;

	if ((ifp->if_flags & IFF_PROMISC) != 0 ||
	    (ifp->if_flags & IFF_ALLMULTI) != 0) {
		rxfilt |= NGE_RXFILTCTL_ALLMULTI;
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			rxfilt |= NGE_RXFILTCTL_ALLPHYS;
		goto done;
	}

	/*
	 * We have to explicitly enable the multicast hash table
	 * on the NatSemi chip if we want to use it, which we do.
	 */
	rxfilt |= NGE_RXFILTCTL_MCHASH;

	/* first, zot all the existing hash bits */
	for (i = 0; i < NGE_MCAST_FILTER_LEN; i += 2) {
		CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_MCAST_LO + i);
		CSR_WRITE_4(sc, NGE_RXFILT_DATA, 0);
	}

	/*
	 * From the 11 bits returned by the crc routine, the top 7
	 * bits represent the 16-bit word in the mcast hash table
	 * that needs to be updated, and the lower 4 bits represent
	 * which bit within that word needs to be set.
	 */
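	/*
	 * Worked example (values are illustrative): if
	 * ether_crc32_be() >> 21 yields h = 0x5a3, then
	 * index = (h >> 4) & 0x7F = 0x5a = 90 and bit = 3, so the
	 * filter word at NGE_FILTADDR_MCAST_LO + 180 gets bit 3 set.
	 * 128 words of 16 bits make up the full 2048-bit hash table.
	 */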
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 21;
		index = (h >> 4) & 0x7F;
		bit = h & 0xF;
		CSR_WRITE_4(sc, NGE_RXFILT_CTL,
		    NGE_FILTADDR_MCAST_LO + (index * 2));
		NGE_SETBIT(sc, NGE_RXFILT_DATA, (1 << bit));
	}
	if_maddr_runlock(ifp);

done:
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, rxfilt);
	/* Turn the receive filter on. */
	rxfilt |= NGE_RXFILTCTL_ENABLE;
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, rxfilt);
	CSR_BARRIER_4(sc, NGE_RXFILT_CTL, BUS_SPACE_BARRIER_WRITE);
}

static void
nge_reset(struct nge_softc *sc)
{
	uint32_t v;
	int i;

	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RESET);

	for (i = 0; i < NGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, NGE_CSR) & NGE_CSR_RESET))
			break;
		DELAY(1);
	}

	if (i == NGE_TIMEOUT)
		device_printf(sc->nge_dev, "reset never completed\n");

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	/*
	 * If this is a NatSemi chip, make sure to clear
	 * PME mode.
	 */
	CSR_WRITE_4(sc, NGE_CLKRUN, NGE_CLKRUN_PMESTS);
	CSR_WRITE_4(sc, NGE_CLKRUN, 0);

	/* Clear WOL events which may interfere with normal Rx filter operation. */
	CSR_WRITE_4(sc, NGE_WOLCSR, 0);

	/*
	 * Only the DP83820 supports 64-bit addressing/data transfers, and
	 * 64-bit addressing requires different descriptor structures.
	 * To keep things simple, disable 64-bit addressing/data transfers.
	 */
	v = CSR_READ_4(sc, NGE_CFG);
	v &= ~(NGE_CFG_64BIT_ADDR_ENB | NGE_CFG_64BIT_DATA_ENB);
	CSR_WRITE_4(sc, NGE_CFG, v);
}

/*
 * Probe for a NatSemi chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
nge_probe(device_t dev)
{
	const struct nge_type *t;

	t = nge_devs;

	while (t->nge_name != NULL) {
		if ((pci_get_vendor(dev) == t->nge_vid) &&
		    (pci_get_device(dev) == t->nge_did)) {
			device_set_desc(dev, t->nge_name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return (ENXIO);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
nge_attach(device_t dev)
{
	uint8_t eaddr[ETHER_ADDR_LEN];
	uint16_t ea[ETHER_ADDR_LEN/2], ea_temp, reg;
	struct nge_softc *sc;
	struct ifnet *ifp;
	int error, i, rid;

	error = 0;
	sc = device_get_softc(dev);
	sc->nge_dev = dev;

	NGE_LOCK_INIT(sc, device_get_nameunit(dev));
	callout_init_mtx(&sc->nge_stat_ch, &sc->nge_mtx, 0);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

#ifdef NGE_USEIOSPACE
	sc->nge_res_type = SYS_RES_IOPORT;
	sc->nge_res_id = PCIR_BAR(0);
#else
	sc->nge_res_type = SYS_RES_MEMORY;
	sc->nge_res_id = PCIR_BAR(1);
#endif
	sc->nge_res = bus_alloc_resource_any(dev, sc->nge_res_type,
	    &sc->nge_res_id, RF_ACTIVE);

	if (sc->nge_res == NULL) {
		if (sc->nge_res_type == SYS_RES_MEMORY) {
			sc->nge_res_type = SYS_RES_IOPORT;
			sc->nge_res_id = PCIR_BAR(0);
		} else {
			sc->nge_res_type = SYS_RES_MEMORY;
			sc->nge_res_id = PCIR_BAR(1);
		}
		sc->nge_res = bus_alloc_resource_any(dev, sc->nge_res_type,
		    &sc->nge_res_id, RF_ACTIVE);
		if (sc->nge_res == NULL) {
			device_printf(dev, "couldn't allocate %s resources\n",
			    sc->nge_res_type == SYS_RES_MEMORY ? "memory" :
			    "I/O");
			NGE_LOCK_DESTROY(sc);
			return (ENXIO);
		}
	}

	/* Allocate interrupt */
	rid = 0;
	sc->nge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->nge_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Enable MWI. */
	reg = pci_read_config(dev, PCIR_COMMAND, 2);
	reg |= PCIM_CMD_MWRICEN;
	pci_write_config(dev, PCIR_COMMAND, reg, 2);

	/* Reset the adapter. */
	nge_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	nge_read_eeprom(sc, (caddr_t)ea, NGE_EE_NODEADDR, 3);
	for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
		ea[i] = le16toh(ea[i]);
	ea_temp = ea[0];
	ea[0] = ea[2];
	ea[2] = ea_temp;
	bcopy(ea, eaddr, sizeof(eaddr));
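	/*
	 * The word swap above reflects how the EEPROM appears to store
	 * the station address: three little-endian 16-bit words with
	 * the first and last words reversed relative to the on-wire
	 * byte order, so words 0 and 2 are exchanged after the
	 * le16toh() conversion.
	 */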
	if (nge_dma_alloc(sc) != 0) {
		error = ENXIO;
		goto fail;
	}

	nge_sysctl_node(sc);

	ifp = sc->nge_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not allocate ifnet structure\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nge_ioctl;
	ifp->if_start = nge_start;
	ifp->if_init = nge_init;
	ifp->if_snd.ifq_drv_maxlen = NGE_TX_RING_CNT - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_hwassist = NGE_CSUM_FEATURES;
	ifp->if_capabilities = IFCAP_HWCSUM;
	/*
	 * It seems that some hardware doesn't provide the 3.3V auxiliary
	 * supply (3VAUX) needed to drive PME, so checking the PCI power
	 * management capability is necessary.
	 */
	if (pci_find_cap(sc->nge_dev, PCIY_PMG, &i) == 0)
		ifp->if_capabilities |= IFCAP_WOL;
	ifp->if_capenable = ifp->if_capabilities;

	if ((CSR_READ_4(sc, NGE_CFG) & NGE_CFG_TBI_EN) != 0) {
		sc->nge_flags |= NGE_FLAG_TBI;
		device_printf(dev, "Using TBI\n");
		/* Configure GPIO. */
		CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
		    | NGE_GPIO_GP4_OUT
		    | NGE_GPIO_GP1_OUTENB | NGE_GPIO_GP2_OUTENB
		    | NGE_GPIO_GP3_OUTENB
		    | NGE_GPIO_GP3_IN | NGE_GPIO_GP4_IN);
	}

	/*
	 * Do MII setup.
	 */
	error = mii_attach(dev, &sc->nge_miibus, ifp, nge_mediachange,
	    nge_mediastatus, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* VLAN capability setup. */
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
	ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif
	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/*
	 * Hookup IRQ last.
	 */
	error = bus_setup_intr(dev, sc->nge_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, nge_intr, sc, &sc->nge_intrhand);
	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		goto fail;
	}

fail:
	if (error != 0)
		nge_detach(dev);
	return (error);
}

static int
nge_detach(device_t dev)
{
	struct nge_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->nge_ifp;

#ifdef DEVICE_POLLING
	if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	if (device_is_attached(dev)) {
		NGE_LOCK(sc);
		sc->nge_flags |= NGE_FLAG_DETACH;
		nge_stop(sc);
		NGE_UNLOCK(sc);
		callout_drain(&sc->nge_stat_ch);
		if (ifp != NULL)
			ether_ifdetach(ifp);
	}

	if (sc->nge_miibus != NULL) {
		device_delete_child(dev, sc->nge_miibus);
		sc->nge_miibus = NULL;
	}
	bus_generic_detach(dev);
	if (sc->nge_intrhand != NULL)
		bus_teardown_intr(dev, sc->nge_irq, sc->nge_intrhand);
	if (sc->nge_irq != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq);
	if (sc->nge_res != NULL)
		bus_release_resource(dev, sc->nge_res_type, sc->nge_res_id,
		    sc->nge_res);

	nge_dma_free(sc);
	if (ifp != NULL)
		if_free(ifp);

	NGE_LOCK_DESTROY(sc);

	return (0);
}

struct nge_dmamap_arg {
	bus_addr_t nge_busaddr;
};

static void
nge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct nge_dmamap_arg *ctx;

	if (error != 0)
		return;
	ctx = arg;
	ctx->nge_busaddr = segs[0].ds_addr;
}

static int
nge_dma_alloc(struct nge_softc *sc)
{
	struct nge_dmamap_arg ctx;
	struct nge_txdesc *txd;
	struct nge_rxdesc *rxd;
	int error, i;

	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->nge_dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->nge_cdata.nge_parent_tag);
	if (error != 0) {
		device_printf(sc->nge_dev, "failed to create parent DMA tag\n");
		goto fail;
	}
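	/*
	 * The parent tag restricts every child tag to 32-bit bus
	 * addresses (BUS_SPACE_MAXADDR_32BIT for lowaddr). This
	 * matches nge_reset(), which turns off the DP83820's 64-bit
	 * addressing so a single 32-bit descriptor layout works for
	 * both chips.
	 */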
	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(sc->nge_cdata.nge_parent_tag,/* parent */
	    NGE_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    NGE_TX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    NGE_TX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->nge_cdata.nge_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->nge_dev, "failed to create Tx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(sc->nge_cdata.nge_parent_tag,/* parent */
	    NGE_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    NGE_RX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    NGE_RX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->nge_cdata.nge_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->nge_dev,
		    "failed to create Rx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->nge_cdata.nge_parent_tag,/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * NGE_MAXTXSEGS,	/* maxsize */
	    NGE_MAXTXSEGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->nge_cdata.nge_tx_tag);
	if (error != 0) {
		device_printf(sc->nge_dev, "failed to create Tx DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(sc->nge_cdata.nge_parent_tag,/* parent */
	    NGE_RX_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->nge_cdata.nge_rx_tag);
	if (error != 0) {
		device_printf(sc->nge_dev, "failed to create Rx DMA tag\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->nge_cdata.nge_tx_ring_tag,
	    (void **)&sc->nge_rdata.nge_tx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->nge_cdata.nge_tx_ring_map);
	if (error != 0) {
		device_printf(sc->nge_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}

	ctx.nge_busaddr = 0;
	error = bus_dmamap_load(sc->nge_cdata.nge_tx_ring_tag,
	    sc->nge_cdata.nge_tx_ring_map, sc->nge_rdata.nge_tx_ring,
	    NGE_TX_RING_SIZE, nge_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.nge_busaddr == 0) {
		device_printf(sc->nge_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	sc->nge_rdata.nge_tx_ring_paddr = ctx.nge_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->nge_cdata.nge_rx_ring_tag,
	    (void **)&sc->nge_rdata.nge_rx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->nge_cdata.nge_rx_ring_map);
	if (error != 0) {
		device_printf(sc->nge_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}

	ctx.nge_busaddr = 0;
	error = bus_dmamap_load(sc->nge_cdata.nge_rx_ring_tag,
	    sc->nge_cdata.nge_rx_ring_map, sc->nge_rdata.nge_rx_ring,
	    NGE_RX_RING_SIZE, nge_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.nge_busaddr == 0) {
		device_printf(sc->nge_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc->nge_rdata.nge_rx_ring_paddr = ctx.nge_busaddr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < NGE_TX_RING_CNT; i++) {
		txd = &sc->nge_cdata.nge_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->nge_cdata.nge_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->nge_dev,
			    "failed to create Tx dmamap\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->nge_cdata.nge_rx_tag, 0,
	    &sc->nge_cdata.nge_rx_sparemap)) != 0) {
		device_printf(sc->nge_dev,
		    "failed to create spare Rx dmamap\n");
		goto fail;
	}
	for (i = 0; i < NGE_RX_RING_CNT; i++) {
		rxd = &sc->nge_cdata.nge_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->nge_cdata.nge_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->nge_dev,
			    "failed to create Rx dmamap\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
nge_dma_free(struct nge_softc *sc)
{
	struct nge_txdesc *txd;
	struct nge_rxdesc *rxd;
	int i;

	/* Tx ring. */
	if (sc->nge_cdata.nge_tx_ring_tag) {
		if (sc->nge_cdata.nge_tx_ring_map)
			bus_dmamap_unload(sc->nge_cdata.nge_tx_ring_tag,
			    sc->nge_cdata.nge_tx_ring_map);
		if (sc->nge_cdata.nge_tx_ring_map &&
		    sc->nge_rdata.nge_tx_ring)
			bus_dmamem_free(sc->nge_cdata.nge_tx_ring_tag,
			    sc->nge_rdata.nge_tx_ring,
			    sc->nge_cdata.nge_tx_ring_map);
		sc->nge_rdata.nge_tx_ring = NULL;
		sc->nge_cdata.nge_tx_ring_map = NULL;
		bus_dma_tag_destroy(sc->nge_cdata.nge_tx_ring_tag);
		sc->nge_cdata.nge_tx_ring_tag = NULL;
	}
	/* Rx ring. */
	if (sc->nge_cdata.nge_rx_ring_tag) {
		if (sc->nge_cdata.nge_rx_ring_map)
			bus_dmamap_unload(sc->nge_cdata.nge_rx_ring_tag,
			    sc->nge_cdata.nge_rx_ring_map);
		if (sc->nge_cdata.nge_rx_ring_map &&
		    sc->nge_rdata.nge_rx_ring)
			bus_dmamem_free(sc->nge_cdata.nge_rx_ring_tag,
			    sc->nge_rdata.nge_rx_ring,
			    sc->nge_cdata.nge_rx_ring_map);
		sc->nge_rdata.nge_rx_ring = NULL;
		sc->nge_cdata.nge_rx_ring_map = NULL;
		bus_dma_tag_destroy(sc->nge_cdata.nge_rx_ring_tag);
		sc->nge_cdata.nge_rx_ring_tag = NULL;
	}
	/* Tx buffers. */
	if (sc->nge_cdata.nge_tx_tag) {
		for (i = 0; i < NGE_TX_RING_CNT; i++) {
			txd = &sc->nge_cdata.nge_txdesc[i];
			if (txd->tx_dmamap) {
				bus_dmamap_destroy(sc->nge_cdata.nge_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->nge_cdata.nge_tx_tag);
		sc->nge_cdata.nge_tx_tag = NULL;
	}
	/* Rx buffers. */
	if (sc->nge_cdata.nge_rx_tag) {
		for (i = 0; i < NGE_RX_RING_CNT; i++) {
			rxd = &sc->nge_cdata.nge_rxdesc[i];
			if (rxd->rx_dmamap) {
				bus_dmamap_destroy(sc->nge_cdata.nge_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->nge_cdata.nge_rx_sparemap) {
			bus_dmamap_destroy(sc->nge_cdata.nge_rx_tag,
			    sc->nge_cdata.nge_rx_sparemap);
			sc->nge_cdata.nge_rx_sparemap = 0;
		}
		bus_dma_tag_destroy(sc->nge_cdata.nge_rx_tag);
		sc->nge_cdata.nge_rx_tag = NULL;
	}

	if (sc->nge_cdata.nge_parent_tag) {
		bus_dma_tag_destroy(sc->nge_cdata.nge_parent_tag);
		sc->nge_cdata.nge_parent_tag = NULL;
	}
}

/*
 * Initialize the transmit descriptors.
 */
static int
nge_list_tx_init(struct nge_softc *sc)
{
	struct nge_ring_data *rd;
	struct nge_txdesc *txd;
	bus_addr_t addr;
	int i;

	sc->nge_cdata.nge_tx_prod = 0;
	sc->nge_cdata.nge_tx_cons = 0;
	sc->nge_cdata.nge_tx_cnt = 0;

	rd = &sc->nge_rdata;
	bzero(rd->nge_tx_ring, sizeof(struct nge_desc) * NGE_TX_RING_CNT);
	for (i = 0; i < NGE_TX_RING_CNT; i++) {
		if (i == NGE_TX_RING_CNT - 1)
			addr = NGE_TX_RING_ADDR(sc, 0);
		else
			addr = NGE_TX_RING_ADDR(sc, i + 1);
		rd->nge_tx_ring[i].nge_next = htole32(NGE_ADDR_LO(addr));
		txd = &sc->nge_cdata.nge_txdesc[i];
		txd->tx_m = NULL;
	}

	bus_dmamap_sync(sc->nge_cdata.nge_tx_ring_tag,
	    sc->nge_cdata.nge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int
nge_list_rx_init(struct nge_softc *sc)
{
	struct nge_ring_data *rd;
	bus_addr_t addr;
	int i;

	sc->nge_cdata.nge_rx_cons = 0;
	sc->nge_head = sc->nge_tail = NULL;

	rd = &sc->nge_rdata;
	bzero(rd->nge_rx_ring, sizeof(struct nge_desc) * NGE_RX_RING_CNT);
	for (i = 0; i < NGE_RX_RING_CNT; i++) {
		if (nge_newbuf(sc, i) != 0)
			return (ENOBUFS);
		if (i == NGE_RX_RING_CNT - 1)
			addr = NGE_RX_RING_ADDR(sc, 0);
		else
			addr = NGE_RX_RING_ADDR(sc, i + 1);
		rd->nge_rx_ring[i].nge_next = htole32(NGE_ADDR_LO(addr));
	}

	bus_dmamap_sync(sc->nge_cdata.nge_rx_ring_tag,
	    sc->nge_cdata.nge_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

static __inline void
nge_discard_rxbuf(struct nge_softc *sc, int idx)
{
	struct nge_desc *desc;

	desc = &sc->nge_rdata.nge_rx_ring[idx];
	desc->nge_cmdsts = htole32(MCLBYTES - sizeof(uint64_t));
	desc->nge_extsts = 0;
}
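
/*
 * nge_newbuf() below uses a spare DMA map: the replacement mbuf is
 * loaded into nge_rx_sparemap first, and only on success are the maps
 * swapped, so a failed allocation leaves the old mbuf mapped and the
 * ring entry still usable.
 */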
/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
static int
nge_newbuf(struct nge_softc *sc, int idx)
{
	struct nge_desc *desc;
	struct nge_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, sizeof(uint64_t));

	if (bus_dmamap_load_mbuf_sg(sc->nge_cdata.nge_rx_tag,
	    sc->nge_cdata.nge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc->nge_cdata.nge_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->nge_cdata.nge_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->nge_cdata.nge_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->nge_cdata.nge_rx_sparemap;
	sc->nge_cdata.nge_rx_sparemap = map;
	bus_dmamap_sync(sc->nge_cdata.nge_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	desc = &sc->nge_rdata.nge_rx_ring[idx];
	desc->nge_ptr = htole32(NGE_ADDR_LO(segs[0].ds_addr));
	desc->nge_cmdsts = htole32(segs[0].ds_len);
	desc->nge_extsts = 0;

	return (0);
}

#ifndef __NO_STRICT_ALIGNMENT
static __inline void
nge_fixup_rx(struct mbuf *m)
{
	int i;
	uint16_t *src, *dst;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= ETHER_ALIGN;
}
#endif
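
/*
 * How nge_fixup_rx() above repairs alignment: the chip insists on
 * 64-bit aligned Rx buffers, which leaves the 14-byte Ethernet header,
 * and hence the IP header, misaligned for strict-alignment CPUs.
 * Copying m_len/2 + 1 16-bit words down by one word slides the whole
 * frame back ETHER_ALIGN (2) bytes, putting the IP header on a 32-bit
 * boundary at the cost of touching every byte of the packet.
 */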
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static int
nge_rxeof(struct nge_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct nge_desc *cur_rx;
	struct nge_rxdesc *rxd;
	int cons, prog, rx_npkts, total_len;
	uint32_t cmdsts, extsts;

	NGE_LOCK_ASSERT(sc);

	ifp = sc->nge_ifp;
	cons = sc->nge_cdata.nge_rx_cons;
	rx_npkts = 0;

	bus_dmamap_sync(sc->nge_cdata.nge_rx_ring_tag,
	    sc->nge_cdata.nge_rx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (prog = 0; prog < NGE_RX_RING_CNT &&
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
	    NGE_INC(cons, NGE_RX_RING_CNT)) {
#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif
		cur_rx = &sc->nge_rdata.nge_rx_ring[cons];
		cmdsts = le32toh(cur_rx->nge_cmdsts);
		extsts = le32toh(cur_rx->nge_extsts);
		if ((cmdsts & NGE_CMDSTS_OWN) == 0)
			break;
		prog++;
		rxd = &sc->nge_cdata.nge_rxdesc[cons];
		m = rxd->rx_m;
		total_len = cmdsts & NGE_CMDSTS_BUFLEN;

		if ((cmdsts & NGE_CMDSTS_MORE) != 0) {
			if (nge_newbuf(sc, cons) != 0) {
				ifp->if_iqdrops++;
				if (sc->nge_head != NULL) {
					m_freem(sc->nge_head);
					sc->nge_head = sc->nge_tail = NULL;
				}
				nge_discard_rxbuf(sc, cons);
				continue;
			}
			m->m_len = total_len;
			if (sc->nge_head == NULL) {
				m->m_pkthdr.len = total_len;
				sc->nge_head = sc->nge_tail = m;
			} else {
				m->m_flags &= ~M_PKTHDR;
				sc->nge_head->m_pkthdr.len += total_len;
				sc->nge_tail->m_next = m;
				sc->nge_tail = m;
			}
			continue;
		}

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if ((cmdsts & NGE_CMDSTS_PKT_OK) == 0) {
			if ((cmdsts & NGE_RXSTAT_RUNT) &&
			    total_len >= (ETHER_MIN_LEN - ETHER_CRC_LEN - 4)) {
				/*
				 * Work around a hardware bug: accept runt
				 * frames whose length is larger than or
				 * equal to 56.
				 */
			} else {
				/*
				 * Input error counters are updated by hardware.
				 */
				if (sc->nge_head != NULL) {
					m_freem(sc->nge_head);
					sc->nge_head = sc->nge_tail = NULL;
				}
				nge_discard_rxbuf(sc, cons);
				continue;
			}
		}

		/* Try to conjure up a replacement mbuf. */

		if (nge_newbuf(sc, cons) != 0) {
			ifp->if_iqdrops++;
			if (sc->nge_head != NULL) {
				m_freem(sc->nge_head);
				sc->nge_head = sc->nge_tail = NULL;
			}
			nge_discard_rxbuf(sc, cons);
			continue;
		}

		/* Chain received mbufs. */
		if (sc->nge_head != NULL) {
			m->m_len = total_len;
			m->m_flags &= ~M_PKTHDR;
			sc->nge_tail->m_next = m;
			m = sc->nge_head;
			m->m_pkthdr.len += total_len;
			sc->nge_head = sc->nge_tail = NULL;
		} else
			m->m_pkthdr.len = m->m_len = total_len;

		/*
		 * Ok. NatSemi really screwed up here. This is the
		 * only gigE chip I know of with alignment constraints
		 * on receive buffers. RX buffers must be 64-bit aligned.
		 */
		/*
		 * By popular demand, ignore the alignment problems
		 * on the non-strict alignment platform. The performance hit
		 * incurred due to unaligned accesses is much smaller
		 * than the hit produced by forcing buffer copies all
		 * the time, especially with jumbo frames. We still
		 * need to fix up the alignment everywhere else though.
		 */
#ifndef __NO_STRICT_ALIGNMENT
		nge_fixup_rx(m);
#endif
		m->m_pkthdr.rcvif = ifp;
		ifp->if_ipackets++;

		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			/* Do IP checksum checking. */
			if ((extsts & NGE_RXEXTSTS_IPPKT) != 0)
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if ((extsts & NGE_RXEXTSTS_IPCSUMERR) == 0)
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			if ((extsts & NGE_RXEXTSTS_TCPPKT &&
			    !(extsts & NGE_RXEXTSTS_TCPCSUMERR)) ||
			    (extsts & NGE_RXEXTSTS_UDPPKT &&
			    !(extsts & NGE_RXEXTSTS_UDPCSUMERR))) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		/*
		 * If we received a packet with a vlan tag, pass it
		 * to vlan_input() instead of ether_input().
		 */
		if ((extsts & NGE_RXEXTSTS_VLANPKT) != 0 &&
		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
			m->m_pkthdr.ether_vtag =
			    bswap16(extsts & NGE_RXEXTSTS_VTCI);
			m->m_flags |= M_VLANTAG;
		}
		NGE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		NGE_LOCK(sc);
		rx_npkts++;
	}

	if (prog > 0) {
		sc->nge_cdata.nge_rx_cons = cons;
		bus_dmamap_sync(sc->nge_cdata.nge_rx_ring_tag,
		    sc->nge_cdata.nge_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	return (rx_npkts);
}
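
/*
 * A note on NGE_CMDSTS_OWN as used by this driver: in the Rx ring a
 * set OWN bit means the hardware has filled the descriptor and handed
 * it back, while in the Tx ring (below) a set OWN bit means the
 * hardware has not finished with the descriptor yet, so both
 * completion loops key off the same bit with opposite polarity.
 */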

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
static void
nge_txeof(struct nge_softc *sc)
{
	struct nge_desc *cur_tx;
	struct nge_txdesc *txd;
	struct ifnet *ifp;
	uint32_t cmdsts;
	int cons, prod;

	NGE_LOCK_ASSERT(sc);
	ifp = sc->nge_ifp;

	cons = sc->nge_cdata.nge_tx_cons;
	prod = sc->nge_cdata.nge_tx_prod;
	if (cons == prod)
		return;

	bus_dmamap_sync(sc->nge_cdata.nge_tx_ring_tag,
	    sc->nge_cdata.nge_tx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (; cons != prod; NGE_INC(cons, NGE_TX_RING_CNT)) {
		cur_tx = &sc->nge_rdata.nge_tx_ring[cons];
		cmdsts = le32toh(cur_tx->nge_cmdsts);
		if ((cmdsts & NGE_CMDSTS_OWN) != 0)
			break;
		sc->nge_cdata.nge_tx_cnt--;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		if ((cmdsts & NGE_CMDSTS_MORE) != 0)
			continue;

		txd = &sc->nge_cdata.nge_txdesc[cons];
		bus_dmamap_sync(sc->nge_cdata.nge_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->nge_cdata.nge_tx_tag, txd->tx_dmamap);
		if ((cmdsts & NGE_CMDSTS_PKT_OK) == 0) {
			ifp->if_oerrors++;
			if ((cmdsts & NGE_TXSTAT_EXCESSCOLLS) != 0)
				ifp->if_collisions++;
			if ((cmdsts & NGE_TXSTAT_OUTOFWINCOLL) != 0)
				ifp->if_collisions++;
		} else
			ifp->if_opackets++;

		ifp->if_collisions += (cmdsts & NGE_TXSTAT_COLLCNT) >> 16;
		KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!\n",
		    __func__));
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
	}

	sc->nge_cdata.nge_tx_cons = cons;
	if (sc->nge_cdata.nge_tx_cnt == 0)
		sc->nge_watchdog_timer = 0;
}

static void
nge_tick(void *xsc)
{
	struct nge_softc *sc;
	struct mii_data *mii;

	sc = xsc;
	NGE_LOCK_ASSERT(sc);
	mii = device_get_softc(sc->nge_miibus);
	mii_tick(mii);
	/*
	 * For PHYs that do not reset an established link, it is
	 * necessary to check whether the driver still has a valid
	 * link (e.g. the link state change callback was not called).
	 * Otherwise, the driver would think it lost the link because
	 * the driver initialization routine clears the link state flag.
	 */
	if ((sc->nge_flags & NGE_FLAG_LINK) == 0)
		nge_miibus_statchg(sc->nge_dev);
	nge_stats_update(sc);
	nge_watchdog(sc);
	callout_reset(&sc->nge_stat_ch, hz, nge_tick, sc);
}

static void
nge_stats_update(struct nge_softc *sc)
{
	struct ifnet *ifp;
	struct nge_stats now, *stats, *nstats;

	NGE_LOCK_ASSERT(sc);

	ifp = sc->nge_ifp;
	stats = &now;
	stats->rx_pkts_errs =
	    CSR_READ_4(sc, NGE_MIB_RXERRPKT) & 0xFFFF;
	stats->rx_crc_errs =
	    CSR_READ_4(sc, NGE_MIB_RXERRFCS) & 0xFFFF;
	stats->rx_fifo_oflows =
	    CSR_READ_4(sc, NGE_MIB_RXERRMISSEDPKT) & 0xFFFF;
	stats->rx_align_errs =
	    CSR_READ_4(sc, NGE_MIB_RXERRALIGN) & 0xFFFF;
	stats->rx_sym_errs =
	    CSR_READ_4(sc, NGE_MIB_RXERRSYM) & 0xFFFF;
	stats->rx_pkts_jumbos =
	    CSR_READ_4(sc, NGE_MIB_RXERRGIANT) & 0xFFFF;
	stats->rx_len_errs =
	    CSR_READ_4(sc, NGE_MIB_RXERRRANGLEN) & 0xFFFF;
	stats->rx_unctl_frames =
	    CSR_READ_4(sc, NGE_MIB_RXBADOPCODE) & 0xFFFF;
	stats->rx_pause =
	    CSR_READ_4(sc, NGE_MIB_RXPAUSEPKTS) & 0xFFFF;
	stats->tx_pause =
	    CSR_READ_4(sc, NGE_MIB_TXPAUSEPKTS) & 0xFFFF;
	stats->tx_seq_errs =
	    CSR_READ_4(sc, NGE_MIB_TXERRSQE) & 0xFF;

	/*
	 * Since we accept errored frames, exclude Rx length errors
	 * from the input error count.
	 */
	ifp->if_ierrors += stats->rx_pkts_errs + stats->rx_crc_errs +
	    stats->rx_fifo_oflows + stats->rx_sym_errs;

	nstats = &sc->nge_stats;
	nstats->rx_pkts_errs += stats->rx_pkts_errs;
	nstats->rx_crc_errs += stats->rx_crc_errs;
	nstats->rx_fifo_oflows += stats->rx_fifo_oflows;
	nstats->rx_align_errs += stats->rx_align_errs;
	nstats->rx_sym_errs += stats->rx_sym_errs;
	nstats->rx_pkts_jumbos += stats->rx_pkts_jumbos;
	nstats->rx_len_errs += stats->rx_len_errs;
	nstats->rx_unctl_frames += stats->rx_unctl_frames;
	nstats->rx_pause += stats->rx_pause;
	nstats->tx_pause += stats->tx_pause;
	nstats->tx_seq_errs += stats->tx_seq_errs;
}

#ifdef DEVICE_POLLING
static poll_handler_t nge_poll;

static int
nge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct nge_softc *sc;
	int rx_npkts = 0;

	sc = ifp->if_softc;

	NGE_LOCK(sc);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		NGE_UNLOCK(sc);
		return (rx_npkts);
	}

	/*
	 * On the nge, reading the status register also clears it.
	 * So before returning to intr mode we must make sure that all
	 * possible pending sources of interrupts have been served.
	 * In practice this means run to completion the *eof routines,
	 * and then call the interrupt routine.
	 */
	sc->rxcycles = count;
	rx_npkts = nge_rxeof(sc);
	nge_txeof(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		nge_start_locked(ifp);

	if (sc->rxcycles > 0 || cmd == POLL_AND_CHECK_STATUS) {
		uint32_t status;

		/* Reading the ISR register clears all interrupts. */
		status = CSR_READ_4(sc, NGE_ISR);

		if ((status & (NGE_ISR_RX_ERR|NGE_ISR_RX_OFLOW)) != 0)
			rx_npkts += nge_rxeof(sc);

		if ((status & NGE_ISR_RX_IDLE) != 0)
			NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);

		if ((status & NGE_ISR_SYSERR) != 0) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			nge_init_locked(sc);
		}
	}
	NGE_UNLOCK(sc);
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */

static void
nge_intr(void *arg)
{
	struct nge_softc *sc;
	struct ifnet *ifp;
	uint32_t status;

	sc = (struct nge_softc *)arg;
	ifp = sc->nge_ifp;

	NGE_LOCK(sc);

	if ((sc->nge_flags & NGE_FLAG_SUSPENDED) != 0)
		goto done_locked;

	/* Reading the ISR register clears all interrupts. */
	status = CSR_READ_4(sc, NGE_ISR);
	if (status == 0xffffffff || (status & NGE_INTRS) == 0)
		goto done_locked;
#ifdef DEVICE_POLLING
	if ((ifp->if_capenable & IFCAP_POLLING) != 0)
		goto done_locked;
#endif
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		goto done_locked;
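	/*
	 * Interrupts are masked via IER for the duration of the
	 * service loop below and re-enabled on the way out; since
	 * reading ISR clears the pending bits, the loop keeps
	 * re-reading it until no interesting status remains.
	 */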
	/* Disable interrupts. */
	CSR_WRITE_4(sc, NGE_IER, 0);

	/* Data LED on for TBI mode */
	if ((sc->nge_flags & NGE_FLAG_TBI) != 0)
		CSR_WRITE_4(sc, NGE_GPIO,
		    CSR_READ_4(sc, NGE_GPIO) | NGE_GPIO_GP3_OUT);

	for (; (status & NGE_INTRS) != 0;) {
		if ((status & (NGE_ISR_TX_DESC_OK | NGE_ISR_TX_ERR |
		    NGE_ISR_TX_OK | NGE_ISR_TX_IDLE)) != 0)
			nge_txeof(sc);

		if ((status & (NGE_ISR_RX_DESC_OK | NGE_ISR_RX_ERR |
		    NGE_ISR_RX_OFLOW | NGE_ISR_RX_FIFO_OFLOW |
		    NGE_ISR_RX_IDLE | NGE_ISR_RX_OK)) != 0)
			nge_rxeof(sc);

		if ((status & NGE_ISR_RX_IDLE) != 0)
			NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);

		if ((status & NGE_ISR_SYSERR) != 0) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			nge_init_locked(sc);
		}
		/* Reading the ISR register clears all interrupts. */
		status = CSR_READ_4(sc, NGE_ISR);
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, NGE_IER, 1);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		nge_start_locked(ifp);

	/* Data LED off for TBI mode */
	if ((sc->nge_flags & NGE_FLAG_TBI) != 0)
		CSR_WRITE_4(sc, NGE_GPIO,
		    CSR_READ_4(sc, NGE_GPIO) & ~NGE_GPIO_GP3_OUT);

done_locked:
	NGE_UNLOCK(sc);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
nge_encap(struct nge_softc *sc, struct mbuf **m_head)
{
	struct nge_txdesc *txd, *txd_last;
	struct nge_desc *desc;
	struct mbuf *m;
	bus_dmamap_t map;
	bus_dma_segment_t txsegs[NGE_MAXTXSEGS];
	int error, i, nsegs, prod, si;

	NGE_LOCK_ASSERT(sc);

	m = *m_head;
	prod = sc->nge_cdata.nge_tx_prod;
	txd = &sc->nge_cdata.nge_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;
	error = bus_dmamap_load_mbuf_sg(sc->nge_cdata.nge_tx_tag, map,
	    *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_DONTWAIT, NGE_MAXTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->nge_cdata.nge_tx_tag,
		    map, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check number of available descriptors. */
	if (sc->nge_cdata.nge_tx_cnt + nsegs >= (NGE_TX_RING_CNT - 1)) {
		bus_dmamap_unload(sc->nge_cdata.nge_tx_tag, map);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->nge_cdata.nge_tx_tag, map, BUS_DMASYNC_PREWRITE);

	si = prod;
	for (i = 0; i < nsegs; i++) {
		desc = &sc->nge_rdata.nge_tx_ring[prod];
		desc->nge_ptr = htole32(NGE_ADDR_LO(txsegs[i].ds_addr));
		if (i == 0)
			desc->nge_cmdsts = htole32(txsegs[i].ds_len |
			    NGE_CMDSTS_MORE);
		else
			desc->nge_cmdsts = htole32(txsegs[i].ds_len |
			    NGE_CMDSTS_MORE | NGE_CMDSTS_OWN);
		desc->nge_extsts = 0;
		sc->nge_cdata.nge_tx_cnt++;
		NGE_INC(prod, NGE_TX_RING_CNT);
	}
	/* Update producer index. */
	sc->nge_cdata.nge_tx_prod = prod;
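	/*
	 * Note the OWN handling: every descriptor except the first
	 * was given to the hardware in the loop above, and the first
	 * one only gets OWN set at the very end below, so the chip
	 * never sees a partially built chain. The modular step-back
	 * that follows locates the last descriptor actually filled.
	 */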
/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
nge_encap(struct nge_softc *sc, struct mbuf **m_head)
{
	struct nge_txdesc *txd, *txd_last;
	struct nge_desc *desc;
	struct mbuf *m;
	bus_dmamap_t map;
	bus_dma_segment_t txsegs[NGE_MAXTXSEGS];
	int error, i, nsegs, prod, si;

	NGE_LOCK_ASSERT(sc);

	m = *m_head;
	prod = sc->nge_cdata.nge_tx_prod;
	txd = &sc->nge_cdata.nge_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;
	error = bus_dmamap_load_mbuf_sg(sc->nge_cdata.nge_tx_tag, map,
	    *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_DONTWAIT, NGE_MAXTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->nge_cdata.nge_tx_tag,
		    map, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check the number of available descriptors. */
	if (sc->nge_cdata.nge_tx_cnt + nsegs >= (NGE_TX_RING_CNT - 1)) {
		bus_dmamap_unload(sc->nge_cdata.nge_tx_tag, map);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->nge_cdata.nge_tx_tag, map, BUS_DMASYNC_PREWRITE);

	si = prod;
	for (i = 0; i < nsegs; i++) {
		desc = &sc->nge_rdata.nge_tx_ring[prod];
		desc->nge_ptr = htole32(NGE_ADDR_LO(txsegs[i].ds_addr));
		if (i == 0)
			desc->nge_cmdsts = htole32(txsegs[i].ds_len |
			    NGE_CMDSTS_MORE);
		else
			desc->nge_cmdsts = htole32(txsegs[i].ds_len |
			    NGE_CMDSTS_MORE | NGE_CMDSTS_OWN);
		desc->nge_extsts = 0;
		sc->nge_cdata.nge_tx_cnt++;
		NGE_INC(prod, NGE_TX_RING_CNT);
	}
	/* Update the producer index. */
	sc->nge_cdata.nge_tx_prod = prod;

	prod = (prod + NGE_TX_RING_CNT - 1) % NGE_TX_RING_CNT;
	desc = &sc->nge_rdata.nge_tx_ring[prod];
	/* Check if we have a VLAN tag to insert. */
	if ((m->m_flags & M_VLANTAG) != 0)
		desc->nge_extsts |= htole32(NGE_TXEXTSTS_VLANPKT |
		    bswap16(m->m_pkthdr.ether_vtag));
	/* Set EOP on the last descriptor. */
	desc->nge_cmdsts &= htole32(~NGE_CMDSTS_MORE);

	/* Set checksum offload in the first descriptor. */
	desc = &sc->nge_rdata.nge_tx_ring[si];
	if ((m->m_pkthdr.csum_flags & NGE_CSUM_FEATURES) != 0) {
		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
			desc->nge_extsts |= htole32(NGE_TXEXTSTS_IPCSUM);
		if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
			desc->nge_extsts |= htole32(NGE_TXEXTSTS_TCPCSUM);
		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
			desc->nge_extsts |= htole32(NGE_TXEXTSTS_UDPCSUM);
	}
	/* Lastly, turn over ownership of the first descriptor to hardware. */
	desc->nge_cmdsts |= htole32(NGE_CMDSTS_OWN);

	txd = &sc->nge_cdata.nge_txdesc[prod];
	map = txd_last->tx_dmamap;
	txd_last->tx_dmamap = txd->tx_dmamap;
	txd->tx_dmamap = map;
	txd->tx_m = m;

	return (0);
}
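/*
 * Worked example for the VLAN path in nge_encap() above: a frame
 * tagged with VLAN id 5 carries 0x0005 in m_pkthdr.ether_vtag (host
 * order), and the chip expects the tag byte-swapped in the extended
 * status word, so the last descriptor ends up carrying
 * NGE_TXEXTSTS_VLANPKT | 0x0500 in nge_extsts.
 */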
/*
 * Main transmit routine.  To avoid having to do mbuf copies, we put
 * pointers to the mbuf data regions directly in the transmit lists.
 * We also save a copy of the pointers since the transmit list fragment
 * pointers are physical addresses.
 */
static void
nge_start(struct ifnet *ifp)
{
	struct nge_softc *sc;

	sc = ifp->if_softc;
	NGE_LOCK(sc);
	nge_start_locked(ifp);
	NGE_UNLOCK(sc);
}

static void
nge_start_locked(struct ifnet *ifp)
{
	struct nge_softc *sc;
	struct mbuf *m_head;
	int enq;

	sc = ifp->if_softc;

	NGE_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->nge_flags & NGE_FLAG_LINK) == 0)
		return;

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
	    sc->nge_cdata.nge_tx_cnt < NGE_TX_RING_CNT - 2; ) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring.  If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (nge_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to it.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		bus_dmamap_sync(sc->nge_cdata.nge_tx_ring_tag,
		    sc->nge_cdata.nge_tx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/* Transmit. */
		NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_ENABLE);

		/* Set a timeout in case the chip goes out to lunch. */
		sc->nge_watchdog_timer = 5;
	}
}

static void
nge_init(void *xsc)
{
	struct nge_softc *sc = xsc;

	NGE_LOCK(sc);
	nge_init_locked(sc);
	NGE_UNLOCK(sc);
}

static void
nge_init_locked(struct nge_softc *sc)
{
	struct ifnet *ifp = sc->nge_ifp;
	struct mii_data *mii;
	uint8_t *eaddr;
	uint32_t reg;

	NGE_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	nge_stop(sc);

	/* Reset the adapter. */
	nge_reset(sc);

	/* Disable the Rx filter prior to programming it. */
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, 0);
	CSR_BARRIER_4(sc, NGE_RXFILT_CTL, BUS_SPACE_BARRIER_WRITE);

	mii = device_get_softc(sc->nge_miibus);

	/* Set the MAC address. */
	eaddr = IF_LLADDR(sc->nge_ifp);
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR0);
	CSR_WRITE_4(sc, NGE_RXFILT_DATA, (eaddr[1] << 8) | eaddr[0]);
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR1);
	CSR_WRITE_4(sc, NGE_RXFILT_DATA, (eaddr[3] << 8) | eaddr[2]);
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR2);
	CSR_WRITE_4(sc, NGE_RXFILT_DATA, (eaddr[5] << 8) | eaddr[4]);

	/* Init the circular RX list. */
	if (nge_list_rx_init(sc) == ENOBUFS) {
		device_printf(sc->nge_dev, "initialization failed: no "
		    "memory for rx buffers\n");
		nge_stop(sc);
		return;
	}

	/*
	 * Init the TX descriptors.
	 */
	nge_list_tx_init(sc);

	/*
	 * For the NatSemi chip, we have to explicitly enable the
	 * reception of ARP frames, as well as turn on the 'perfect
	 * match' filter where we store the station address; otherwise
	 * we won't receive unicasts meant for this host.
	 */
	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ARP);
	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_PERFECT);

	/*
	 * Set the capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST) {
		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD);
	} else {
		NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD);
	}

	/* Turn the receive filter on. */
	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ENABLE);

	/* Set the Rx filter. */
	nge_rxfilter(sc);

	/* Disable PRIQ ctl. */
	CSR_WRITE_4(sc, NGE_PRIOQCTL, 0);

	/*
	 * Set pause frame parameters.
	 * Rx stat FIFO hi-threshold : 2 or more packets
	 * Rx stat FIFO lo-threshold : less than 2 packets
	 * Rx data FIFO hi-threshold : 2K or more bytes
	 * Rx data FIFO lo-threshold : less than 2K bytes
	 * pause time : (512ns * 0xffff) -> 33.55ms
	 */
	CSR_WRITE_4(sc, NGE_PAUSECSR,
	    NGE_PAUSECSR_PAUSE_ON_MCAST |
	    NGE_PAUSECSR_PAUSE_ON_DA |
	    ((1 << 24) & NGE_PAUSECSR_RX_STATFIFO_THR_HI) |
	    ((1 << 22) & NGE_PAUSECSR_RX_STATFIFO_THR_LO) |
	    ((1 << 20) & NGE_PAUSECSR_RX_DATAFIFO_THR_HI) |
	    ((1 << 18) & NGE_PAUSECSR_RX_DATAFIFO_THR_LO) |
	    NGE_PAUSECSR_CNT);

	/*
	 * Load the addresses of the RX and TX lists.
	 */
	CSR_WRITE_4(sc, NGE_RX_LISTPTR_HI,
	    NGE_ADDR_HI(sc->nge_rdata.nge_rx_ring_paddr));
	CSR_WRITE_4(sc, NGE_RX_LISTPTR_LO,
	    NGE_ADDR_LO(sc->nge_rdata.nge_rx_ring_paddr));
	CSR_WRITE_4(sc, NGE_TX_LISTPTR_HI,
	    NGE_ADDR_HI(sc->nge_rdata.nge_tx_ring_paddr));
	CSR_WRITE_4(sc, NGE_TX_LISTPTR_LO,
	    NGE_ADDR_LO(sc->nge_rdata.nge_tx_ring_paddr));
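	/*
	 * Example of the HI/LO split above: if the RX ring sat at
	 * physical address 0x1_0000_0000 on a 64-bit 83820,
	 * NGE_ADDR_HI() would yield 0x1 and NGE_ADDR_LO() 0x0; on the
	 * 32-bit-only 83821 the ring address always fits in 32 bits,
	 * so the HI register is effectively written with zero.
	 */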
	/* Set the RX configuration. */
	CSR_WRITE_4(sc, NGE_RX_CFG, NGE_RXCFG);

	CSR_WRITE_4(sc, NGE_VLAN_IP_RXCTL, 0);
	/*
	 * Enable hardware checksum validation for all IPv4
	 * packets; do not reject packets with bad checksums.
	 */
	if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
		NGE_SETBIT(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_IPCSUM_ENB);

	/*
	 * Tell the chip to detect and strip VLAN tag info from
	 * received frames.  The tag will be provided in the extsts
	 * field in the RX descriptors.
	 */
	NGE_SETBIT(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_TAG_DETECT_ENB);
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
		NGE_SETBIT(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_TAG_STRIP_ENB);

	/* Set the TX configuration. */
	CSR_WRITE_4(sc, NGE_TX_CFG, NGE_TXCFG);

	/*
	 * Enable TX IPv4 checksumming on a per-packet basis.
	 */
	CSR_WRITE_4(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_CSUM_PER_PKT);

	/*
	 * Tell the chip to insert VLAN tags on a per-packet basis as
	 * dictated by the code in the frame encapsulation routine.
	 */
	NGE_SETBIT(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_TAG_PER_PKT);

	/*
	 * Enable the delivery of PHY interrupts based on
	 * link/speed/duplex status changes.  Also enable the
	 * extsts field in the DMA descriptors (needed for
	 * TCP/IP checksum offload on transmit).
	 */
	NGE_SETBIT(sc, NGE_CFG, NGE_CFG_PHYINTR_SPD |
	    NGE_CFG_PHYINTR_LNK | NGE_CFG_PHYINTR_DUP | NGE_CFG_EXTSTS_ENB);

	/*
	 * Configure interrupt holdoff (moderation).  We can
	 * have the chip delay interrupt delivery for a certain
	 * period.  Units are in 100us, and the max setting
	 * is 25500us (0xFF x 100us).  The default is a 100us holdoff.
	 */
	CSR_WRITE_4(sc, NGE_IHR, sc->nge_int_holdoff);

	/*
	 * Enable and clear the MAC statistics counters.
	 */
	reg = CSR_READ_4(sc, NGE_MIBCTL);
	reg &= ~NGE_MIBCTL_FREEZE_CNT;
	reg |= NGE_MIBCTL_CLEAR_CNT;
	CSR_WRITE_4(sc, NGE_MIBCTL, reg);

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, NGE_IMR, NGE_INTRS);
#ifdef DEVICE_POLLING
	/*
	 * ... but only enable interrupts if we are not polling;
	 * make sure they are off otherwise.
	 */
	if ((ifp->if_capenable & IFCAP_POLLING) != 0)
		CSR_WRITE_4(sc, NGE_IER, 0);
	else
#endif
		CSR_WRITE_4(sc, NGE_IER, 1);

	sc->nge_flags &= ~NGE_FLAG_LINK;
	mii_mediachg(mii);

	sc->nge_watchdog_timer = 0;
	callout_reset(&sc->nge_stat_ch, hz, nge_tick, sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}
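/*
 * Example of tuning the holdoff programmed in nge_init_locked(): a
 * hypothetical
 *
 *	# sysctl dev.nge.0.int_holdoff=5
 *
 * (the OID created in nge_sysctl_node() below) requests roughly
 * 5 x 100us = 500us of interrupt holdoff, taking effect the next time
 * the interface is (re)initialized.
 */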
/*
 * Set media options.
 */
static int
nge_mediachange(struct ifnet *ifp)
{
	struct nge_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = ifp->if_softc;
	NGE_LOCK(sc);
	mii = device_get_softc(sc->nge_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	error = mii_mediachg(mii);
	NGE_UNLOCK(sc);

	return (error);
}

/*
 * Report the current media status.
 */
static void
nge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nge_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	NGE_LOCK(sc);
	mii = device_get_softc(sc->nge_miibus);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	NGE_UNLOCK(sc);
}

static int
nge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct nge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0, mask;

	switch (command) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NGE_JUMBO_MTU)
			error = EINVAL;
		else {
			NGE_LOCK(sc);
			ifp->if_mtu = ifr->ifr_mtu;
			/*
			 * Workaround: if the MTU is 8152 or larger
			 * (TX FIFO size minus 64 minus 18), turn off
			 * TX checksum offloading.
			 */
			if (ifr->ifr_mtu >= 8152) {
				ifp->if_capenable &= ~IFCAP_TXCSUM;
				ifp->if_hwassist &= ~NGE_CSUM_FEATURES;
			} else {
				ifp->if_capenable |= IFCAP_TXCSUM;
				ifp->if_hwassist |= NGE_CSUM_FEATURES;
			}
			NGE_UNLOCK(sc);
			VLAN_CAPABILITIES(ifp);
		}
		break;
	case SIOCSIFFLAGS:
		NGE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				if ((ifp->if_flags ^ sc->nge_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					nge_rxfilter(sc);
			} else {
				if ((sc->nge_flags & NGE_FLAG_DETACH) == 0)
					nge_init_locked(sc);
			}
		} else {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
				nge_stop(sc);
		}
		sc->nge_if_flags = ifp->if_flags;
		NGE_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		NGE_LOCK(sc);
		nge_rxfilter(sc);
		NGE_UNLOCK(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->nge_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
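	/*
	 * Example of the jumbo-frame workaround in the SIOCSIFMTU case
	 * above: a hypothetical "ifconfig nge0 mtu 9000" succeeds, but
	 * since 9000 >= 8152 the driver clears IFCAP_TXCSUM and strips
	 * NGE_CSUM_FEATURES from if_hwassist, so outgoing checksums are
	 * computed in software instead of wedging the transmitter.
	 */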
	case SIOCSIFCAP:
		NGE_LOCK(sc);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if ((mask & IFCAP_POLLING) != 0 &&
		    (IFCAP_POLLING & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_POLLING;
			if ((IFCAP_POLLING & ifp->if_capenable) != 0) {
				error = ether_poll_register(nge_poll, ifp);
				if (error != 0) {
					NGE_UNLOCK(sc);
					break;
				}
				/* Disable interrupts. */
				CSR_WRITE_4(sc, NGE_IER, 0);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupts. */
				CSR_WRITE_4(sc, NGE_IER, 1);
			}
		}
#endif /* DEVICE_POLLING */
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
				ifp->if_hwassist |= NGE_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~NGE_CSUM_FEATURES;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (IFCAP_RXCSUM & ifp->if_capabilities) != 0)
			ifp->if_capenable ^= IFCAP_RXCSUM;

		if ((mask & IFCAP_WOL) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL) != 0) {
			if ((mask & IFCAP_WOL_UCAST) != 0)
				ifp->if_capenable ^= IFCAP_WOL_UCAST;
			if ((mask & IFCAP_WOL_MCAST) != 0)
				ifp->if_capenable ^= IFCAP_WOL_MCAST;
			if ((mask & IFCAP_WOL_MAGIC) != 0)
				ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		}

		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				if ((ifp->if_capenable &
				    IFCAP_VLAN_HWTAGGING) != 0)
					NGE_SETBIT(sc,
					    NGE_VLAN_IP_RXCTL,
					    NGE_VIPRXCTL_TAG_STRIP_ENB);
				else
					NGE_CLRBIT(sc,
					    NGE_VLAN_IP_RXCTL,
					    NGE_VIPRXCTL_TAG_STRIP_ENB);
			}
		}
		/*
		 * Both VLAN hardware tagging and checksum offload are
		 * required to do checksum offload on a VLAN interface.
		 */
		if ((ifp->if_capenable & IFCAP_TXCSUM) == 0)
			ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;
		if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
			ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;
		NGE_UNLOCK(sc);
		VLAN_CAPABILITIES(ifp);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

static void
nge_watchdog(struct nge_softc *sc)
{
	struct ifnet *ifp;

	NGE_LOCK_ASSERT(sc);

	if (sc->nge_watchdog_timer == 0 || --sc->nge_watchdog_timer)
		return;

	ifp = sc->nge_ifp;
	ifp->if_oerrors++;
	if_printf(ifp, "watchdog timeout\n");

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	nge_init_locked(sc);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		nge_start_locked(ifp);
}

static int
nge_stop_mac(struct nge_softc *sc)
{
	uint32_t reg;
	int i;

	NGE_LOCK_ASSERT(sc);

	reg = CSR_READ_4(sc, NGE_CSR);
	if ((reg & (NGE_CSR_TX_ENABLE | NGE_CSR_RX_ENABLE)) != 0) {
		reg &= ~(NGE_CSR_TX_ENABLE | NGE_CSR_RX_ENABLE);
		reg |= NGE_CSR_TX_DISABLE | NGE_CSR_RX_DISABLE;
		CSR_WRITE_4(sc, NGE_CSR, reg);
		for (i = 0; i < NGE_TIMEOUT; i++) {
			DELAY(1);
			if ((CSR_READ_4(sc, NGE_CSR) &
			    (NGE_CSR_RX_ENABLE | NGE_CSR_TX_ENABLE)) == 0)
				break;
		}
		if (i == NGE_TIMEOUT)
			return (ETIMEDOUT);
	}

	return (0);
}
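/*
 * nge_stop_mac() above is the usual disable-and-poll idiom: request
 * TX/RX disable, then busy-wait (DELAY(1) per iteration, at most
 * NGE_TIMEOUT iterations) for the enable bits to clear, and report
 * ETIMEDOUT if the MAC never quiesces.
 */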
/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
nge_stop(struct nge_softc *sc)
{
	struct nge_txdesc *txd;
	struct nge_rxdesc *rxd;
	int i;
	struct ifnet *ifp;

	NGE_LOCK_ASSERT(sc);
	ifp = sc->nge_ifp;

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->nge_flags &= ~NGE_FLAG_LINK;
	callout_stop(&sc->nge_stat_ch);
	sc->nge_watchdog_timer = 0;

	CSR_WRITE_4(sc, NGE_IER, 0);
	CSR_WRITE_4(sc, NGE_IMR, 0);
	if (nge_stop_mac(sc) == ETIMEDOUT)
		device_printf(sc->nge_dev,
		    "%s: unable to stop Tx/Rx MAC\n", __func__);
	CSR_WRITE_4(sc, NGE_TX_LISTPTR_HI, 0);
	CSR_WRITE_4(sc, NGE_TX_LISTPTR_LO, 0);
	CSR_WRITE_4(sc, NGE_RX_LISTPTR_HI, 0);
	CSR_WRITE_4(sc, NGE_RX_LISTPTR_LO, 0);
	nge_stats_update(sc);
	if (sc->nge_head != NULL) {
		m_freem(sc->nge_head);
		sc->nge_head = sc->nge_tail = NULL;
	}

	/*
	 * Free any RX and TX mbufs still in the queues.
	 */
	for (i = 0; i < NGE_RX_RING_CNT; i++) {
		rxd = &sc->nge_cdata.nge_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->nge_cdata.nge_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->nge_cdata.nge_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < NGE_TX_RING_CNT; i++) {
		txd = &sc->nge_cdata.nge_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->nge_cdata.nge_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->nge_cdata.nge_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}
}

/*
 * Before setting the WOL bits, the caller should have stopped the receiver.
 */
static void
nge_wol(struct nge_softc *sc)
{
	struct ifnet *ifp;
	uint32_t reg;
	uint16_t pmstat;
	int pmc;

	NGE_LOCK_ASSERT(sc);

	if (pci_find_cap(sc->nge_dev, PCIY_PMG, &pmc) != 0)
		return;

	ifp = sc->nge_ifp;
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* Disable WOL and disconnect CLKRUN to save power. */
		CSR_WRITE_4(sc, NGE_WOLCSR, 0);
		CSR_WRITE_4(sc, NGE_CLKRUN, 0);
	} else {
		if (nge_stop_mac(sc) == ETIMEDOUT)
			device_printf(sc->nge_dev,
			    "%s: unable to stop Tx/Rx MAC\n", __func__);
		/*
		 * Make sure wake frames will be buffered in the Rx FIFO
		 * (i.e. silent Rx mode).
		 */
		CSR_WRITE_4(sc, NGE_RX_LISTPTR_HI, 0);
		CSR_BARRIER_4(sc, NGE_RX_LISTPTR_HI, BUS_SPACE_BARRIER_WRITE);
		CSR_WRITE_4(sc, NGE_RX_LISTPTR_LO, 0);
		CSR_BARRIER_4(sc, NGE_RX_LISTPTR_LO, BUS_SPACE_BARRIER_WRITE);
		/* Enable Rx again. */
		NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);
		CSR_BARRIER_4(sc, NGE_CSR, BUS_SPACE_BARRIER_WRITE);

		/* Configure WOL events. */
		reg = 0;
		if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0)
			reg |= NGE_WOLCSR_WAKE_ON_UNICAST;
		if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
			reg |= NGE_WOLCSR_WAKE_ON_MULTICAST;
		if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
			reg |= NGE_WOLCSR_WAKE_ON_MAGICPKT;
		CSR_WRITE_4(sc, NGE_WOLCSR, reg);

		/* Activate CLKRUN. */
		reg = CSR_READ_4(sc, NGE_CLKRUN);
		reg |= NGE_CLKRUN_PMEENB | NGE_CLNRUN_CLKRUN_ENB;
		CSR_WRITE_4(sc, NGE_CLKRUN, reg);
	}
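	/*
	 * Example: a hypothetical "ifconfig nge0 wol_magic" sets
	 * IFCAP_WOL_MAGIC, which the block above translates into
	 * NGE_WOLCSR_WAKE_ON_MAGICPKT before PME is requested below.
	 */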
	/* Request PME. */
	pmstat = pci_read_config(sc->nge_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->nge_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
nge_shutdown(device_t dev)
{

	return (nge_suspend(dev));
}

static int
nge_suspend(device_t dev)
{
	struct nge_softc *sc;

	sc = device_get_softc(dev);

	NGE_LOCK(sc);
	nge_stop(sc);
	nge_wol(sc);
	sc->nge_flags |= NGE_FLAG_SUSPENDED;
	NGE_UNLOCK(sc);

	return (0);
}

static int
nge_resume(device_t dev)
{
	struct nge_softc *sc;
	struct ifnet *ifp;
	uint16_t pmstat;
	int pmc;

	sc = device_get_softc(dev);

	NGE_LOCK(sc);
	ifp = sc->nge_ifp;
	if (pci_find_cap(sc->nge_dev, PCIY_PMG, &pmc) == 0) {
		/* Disable PME and clear PME status. */
		pmstat = pci_read_config(sc->nge_dev,
		    pmc + PCIR_POWER_STATUS, 2);
		if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
			pmstat &= ~PCIM_PSTAT_PMEENABLE;
			pci_write_config(sc->nge_dev,
			    pmc + PCIR_POWER_STATUS, pmstat, 2);
		}
	}
	if (ifp->if_flags & IFF_UP) {
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		nge_init_locked(sc);
	}

	sc->nge_flags &= ~NGE_FLAG_SUSPENDED;
	NGE_UNLOCK(sc);

	return (0);
}

#define	NGE_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)

static void
nge_sysctl_node(struct nge_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *parent;
	struct sysctl_oid *tree;
	struct nge_stats *stats;
	int error;

	ctx = device_get_sysctl_ctx(sc->nge_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->nge_dev));
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_holdoff",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->nge_int_holdoff, 0,
	    sysctl_hw_nge_int_holdoff, "I", "NGE interrupt moderation");
	/* Pull in the device tunable. */
	sc->nge_int_holdoff = NGE_INT_HOLDOFF_DEFAULT;
	error = resource_int_value(device_get_name(sc->nge_dev),
	    device_get_unit(sc->nge_dev), "int_holdoff", &sc->nge_int_holdoff);
	if (error == 0) {
		if (sc->nge_int_holdoff < NGE_INT_HOLDOFF_MIN ||
		    sc->nge_int_holdoff > NGE_INT_HOLDOFF_MAX) {
			device_printf(sc->nge_dev,
			    "int_holdoff value out of range; "
			    "using default: %d (%d us)\n",
			    NGE_INT_HOLDOFF_DEFAULT,
			    NGE_INT_HOLDOFF_DEFAULT * 100);
			sc->nge_int_holdoff = NGE_INT_HOLDOFF_DEFAULT;
		}
	}

	stats = &sc->nge_stats;
	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "NGE statistics");
	parent = SYSCTL_CHILDREN(tree);
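	/*
	 * Example: the tunable consumed above can be seeded from
	 * /boot/device.hints (or loader.conf), e.g. a hypothetical
	 *
	 *	hint.nge.0.int_holdoff="10"
	 *
	 * which resource_int_value() picks up at attach time for a
	 * 10 x 100us = 1ms holdoff.
	 */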
	/* Rx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
	    NULL, "Rx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	NGE_SYSCTL_STAT_ADD32(ctx, child, "pkts_errs",
	    &stats->rx_pkts_errs,
	    "Packet errors including both wire errors and FIFO overruns");
	NGE_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
	    &stats->rx_crc_errs, "CRC errors");
	NGE_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows",
	    &stats->rx_fifo_oflows, "FIFO overflows");
	NGE_SYSCTL_STAT_ADD32(ctx, child, "align_errs",
	    &stats->rx_align_errs, "Frame alignment errors");
	NGE_SYSCTL_STAT_ADD32(ctx, child, "sym_errs",
	    &stats->rx_sym_errs, "One or more symbol errors");
	NGE_SYSCTL_STAT_ADD32(ctx, child, "pkts_jumbos",
	    &stats->rx_pkts_jumbos,
	    "Packets received with length greater than 1518 bytes");
	NGE_SYSCTL_STAT_ADD32(ctx, child, "len_errs",
	    &stats->rx_len_errs, "In Range Length errors");
	NGE_SYSCTL_STAT_ADD32(ctx, child, "unctl_frames",
	    &stats->rx_unctl_frames, "Control frames with unsupported opcode");
	NGE_SYSCTL_STAT_ADD32(ctx, child, "pause",
	    &stats->rx_pause, "Pause frames");

	/* Tx statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
	    NULL, "Tx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	NGE_SYSCTL_STAT_ADD32(ctx, child, "pause",
	    &stats->tx_pause, "Pause frames");
	NGE_SYSCTL_STAT_ADD32(ctx, child, "seq_errs",
	    &stats->tx_seq_errs,
	    "Loss of collision heartbeat during transmission");
}

#undef NGE_SYSCTL_STAT_ADD32

static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (arg1 == NULL)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;

	return (0);
}

static int
sysctl_hw_nge_int_holdoff(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req, NGE_INT_HOLDOFF_MIN,
	    NGE_INT_HOLDOFF_MAX));
}
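/*
 * Example: the MAC counters registered in nge_sysctl_node() appear
 * under the per-device sysctl tree, so a hypothetical
 *
 *	# sysctl dev.nge.0.stats
 *
 * dumps the accumulated Rx/Tx MAC statistics at runtime.
 */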