/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2000, 2001
 *	Bill Paul <wpaul@bsdi.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * National Semiconductor DP83820/DP83821 gigabit ethernet driver
 * for FreeBSD.  Datasheets are available from:
 *
 * http://www.national.com/ds/DP/DP83820.pdf
 * http://www.national.com/ds/DP/DP83821.pdf
 *
 * These chips are used on several low cost gigabit ethernet NICs
 * sold by D-Link, Addtron, SMC and Asante.  Both parts are
 * virtually the same, except the 83820 is a 64-bit/32-bit part,
 * while the 83821 is 32-bit only.
 *
 * Many cards also use National gigE transceivers, such as the
 * DP83891, DP83861 and DP83862 gigPHYTER parts.  The DP83861 datasheet
 * contains a full register description that applies to all of these
 * components:
 *
 * http://www.national.com/ds/DP/DP83861.pdf
 *
 * Written by Bill Paul <wpaul@bsdi.com>
 * BSDi Open Source Solutions
 */

/*
 * The NatSemi DP83820 and 83821 controllers are enhanced versions
 * of the NatSemi MacPHYTER 10/100 devices.  They support 10, 100
 * and 1000Mbps speeds with 1000baseX (ten bit interface), MII and GMII
 * ports.  Other features include 8K TX FIFO and 32K RX FIFO, TCP/IP
 * hardware checksum offload (IPv4 only), VLAN tagging and filtering,
 * priority TX and RX queues, a 2048 bit multicast hash filter, 4 RX pattern
 * matching buffers, one perfect address filter buffer and interrupt
 * moderation.  The 83820 supports both 64-bit and 32-bit addressing
 * and data transfers: the 64-bit support can be toggled on or off
 * via software.  This affects the size of certain fields in the DMA
 * descriptors.
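 * Note that this driver runs the 83820 with 64-bit mode disabled;
 * see nge_reset() below.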
 *
 * There are two bugs/misfeatures in the 83820/83821 that I have
 * discovered so far:
 *
 * - Receive buffers must be aligned on 64-bit boundaries, which means
 *   you must resort to copying data in order to fix up the payload
 *   alignment.
 *
 * - In order to transmit jumbo frames larger than 8170 bytes, you have
 *   to turn off transmit checksum offloading, because the chip can't
 *   compute the checksum on an outgoing frame unless it fits entirely
 *   within the TX FIFO, which is only 8192 bytes in size.  If you have
 *   TX checksum offload enabled and you attempt to transmit a frame
 *   larger than 8170 bytes, the transmitter will wedge.
 *
 * To work around the latter problem, TX checksum offload is disabled
 * if the user selects an MTU larger than 8152 (8170 - 18).
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <dev/mii/mii.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>

#include <dev/nge/if_ngereg.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

MODULE_DEPEND(nge, pci, 1, 1, 1);
MODULE_DEPEND(nge, ether, 1, 1, 1);
MODULE_DEPEND(nge, miibus, 1, 1, 1);

#define NGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

/*
 * Various supported device vendors/types and their names.
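 * The list below is terminated by an all-zero sentinel entry.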
 */
static const struct nge_type nge_devs[] = {
	{ NGE_VENDORID, NGE_DEVICEID,
	    "National Semiconductor Gigabit Ethernet" },
	{ 0, 0, NULL }
};

static int nge_probe(device_t);
static int nge_attach(device_t);
static int nge_detach(device_t);
static int nge_shutdown(device_t);
static int nge_suspend(device_t);
static int nge_resume(device_t);

static __inline void nge_discard_rxbuf(struct nge_softc *, int);
static int nge_newbuf(struct nge_softc *, int);
static int nge_encap(struct nge_softc *, struct mbuf **);
#ifndef __NO_STRICT_ALIGNMENT
static __inline void nge_fixup_rx(struct mbuf *);
#endif
static int nge_rxeof(struct nge_softc *);
static void nge_txeof(struct nge_softc *);
static void nge_intr(void *);
static void nge_tick(void *);
static void nge_stats_update(struct nge_softc *);
static void nge_start(struct ifnet *);
static void nge_start_locked(struct ifnet *);
static int nge_ioctl(struct ifnet *, u_long, caddr_t);
static void nge_init(void *);
static void nge_init_locked(struct nge_softc *);
static int nge_stop_mac(struct nge_softc *);
static void nge_stop(struct nge_softc *);
static void nge_wol(struct nge_softc *);
static void nge_watchdog(struct nge_softc *);
static int nge_mediachange(struct ifnet *);
static void nge_mediastatus(struct ifnet *, struct ifmediareq *);

static void nge_delay(struct nge_softc *);
static void nge_eeprom_idle(struct nge_softc *);
static void nge_eeprom_putbyte(struct nge_softc *, int);
static void nge_eeprom_getword(struct nge_softc *, int, uint16_t *);
static void nge_read_eeprom(struct nge_softc *, caddr_t, int, int);

static int nge_miibus_readreg(device_t, int, int);
static int nge_miibus_writereg(device_t, int, int, int);
static void nge_miibus_statchg(device_t);

static void nge_rxfilter(struct nge_softc *);
static void nge_reset(struct nge_softc *);
static void nge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int nge_dma_alloc(struct nge_softc *);
static void nge_dma_free(struct nge_softc *);
static int nge_list_rx_init(struct nge_softc *);
static int nge_list_tx_init(struct nge_softc *);
static void nge_sysctl_node(struct nge_softc *);
static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_nge_int_holdoff(SYSCTL_HANDLER_ARGS);

/*
 * MII bit-bang glue
 */
static uint32_t nge_mii_bitbang_read(device_t);
static void nge_mii_bitbang_write(device_t, uint32_t);

static const struct mii_bitbang_ops nge_mii_bitbang_ops = {
	nge_mii_bitbang_read,
	nge_mii_bitbang_write,
	{
		NGE_MEAR_MII_DATA,	/* MII_BIT_MDO */
		NGE_MEAR_MII_DATA,	/* MII_BIT_MDI */
		NGE_MEAR_MII_CLK,	/* MII_BIT_MDC */
		NGE_MEAR_MII_DIR,	/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};

static device_method_t nge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nge_probe),
	DEVMETHOD(device_attach,	nge_attach),
	DEVMETHOD(device_detach,	nge_detach),
	DEVMETHOD(device_shutdown,	nge_shutdown),
	DEVMETHOD(device_suspend,	nge_suspend),
	DEVMETHOD(device_resume,	nge_resume),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	nge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	nge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	nge_miibus_statchg),

	DEVMETHOD_END
};

static driver_t nge_driver = {
	"nge",
	nge_methods,
	sizeof(struct nge_softc)
};

static devclass_t nge_devclass;

DRIVER_MODULE(nge, pci, nge_driver, nge_devclass, 0, 0);
DRIVER_MODULE(miibus, nge, miibus_driver, miibus_devclass, 0, 0);

#define NGE_SETBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
	    CSR_READ_4(sc, reg) | (x))

#define NGE_CLRBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
	    CSR_READ_4(sc, reg) & ~(x))

#define SIO_SET(x)					\
	CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) | (x))

#define SIO_CLR(x)					\
	CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) & ~(x))

static void
nge_delay(struct nge_softc *sc)
{
	int idx;

	for (idx = (300 / 33) + 1; idx > 0; idx--)
		CSR_READ_4(sc, NGE_CSR);
}

static void
nge_eeprom_idle(struct nge_softc *sc)
{
	int i;

	SIO_SET(NGE_MEAR_EE_CSEL);
	nge_delay(sc);
	SIO_SET(NGE_MEAR_EE_CLK);
	nge_delay(sc);

	for (i = 0; i < 25; i++) {
		SIO_CLR(NGE_MEAR_EE_CLK);
		nge_delay(sc);
		SIO_SET(NGE_MEAR_EE_CLK);
		nge_delay(sc);
	}

	SIO_CLR(NGE_MEAR_EE_CLK);
	nge_delay(sc);
	SIO_CLR(NGE_MEAR_EE_CSEL);
	nge_delay(sc);
	CSR_WRITE_4(sc, NGE_MEAR, 0x00000000);
}

/*
 * Send a read command and address to the EEPROM, check for ACK.
 */
static void
nge_eeprom_putbyte(struct nge_softc *sc, int addr)
{
	int d, i;

	d = addr | NGE_EECMD_READ;

	/*
	 * Feed in each bit and strobe the clock.
	 */
	for (i = 0x400; i; i >>= 1) {
		if (d & i) {
			SIO_SET(NGE_MEAR_EE_DIN);
		} else {
			SIO_CLR(NGE_MEAR_EE_DIN);
		}
		nge_delay(sc);
		SIO_SET(NGE_MEAR_EE_CLK);
		nge_delay(sc);
		SIO_CLR(NGE_MEAR_EE_CLK);
		nge_delay(sc);
	}
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static void
nge_eeprom_getword(struct nge_softc *sc, int addr, uint16_t *dest)
{
	int i;
	uint16_t word = 0;

	/* Force EEPROM to idle state. */
	nge_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	nge_delay(sc);
	SIO_CLR(NGE_MEAR_EE_CLK);
	nge_delay(sc);
	SIO_SET(NGE_MEAR_EE_CSEL);
	nge_delay(sc);

	/*
	 * Send address of word we want to read.
	 */
	nge_eeprom_putbyte(sc, addr);

	/*
	 * Start reading bits from EEPROM.
	 */
	for (i = 0x8000; i; i >>= 1) {
		SIO_SET(NGE_MEAR_EE_CLK);
		nge_delay(sc);
		if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_EE_DOUT)
			word |= i;
		nge_delay(sc);
		SIO_CLR(NGE_MEAR_EE_CLK);
		nge_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	nge_eeprom_idle(sc);

	*dest = word;
}

/*
 * Read a sequence of words from the EEPROM.
 */
static void
nge_read_eeprom(struct nge_softc *sc, caddr_t dest, int off, int cnt)
{
	int i;
	uint16_t word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		nge_eeprom_getword(sc, off + i, &word);
		ptr = (uint16_t *)(dest + (i * 2));
		*ptr = word;
	}
}

/*
 * Read the MII serial port for the MII bit-bang module.
 */
static uint32_t
nge_mii_bitbang_read(device_t dev)
{
	struct nge_softc *sc;
	uint32_t val;

	sc = device_get_softc(dev);

	val = CSR_READ_4(sc, NGE_MEAR);
	CSR_BARRIER_4(sc, NGE_MEAR,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);

	return (val);
}

/*
 * Write the MII serial port for the MII bit-bang module.
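 * A barrier follows each MEAR access so the bit-bang sequencing is not
 * reordered with respect to other register accesses.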
401 */ 402 static void 403 nge_mii_bitbang_write(device_t dev, uint32_t val) 404 { 405 struct nge_softc *sc; 406 407 sc = device_get_softc(dev); 408 409 CSR_WRITE_4(sc, NGE_MEAR, val); 410 CSR_BARRIER_4(sc, NGE_MEAR, 411 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 412 } 413 414 static int 415 nge_miibus_readreg(device_t dev, int phy, int reg) 416 { 417 struct nge_softc *sc; 418 int rv; 419 420 sc = device_get_softc(dev); 421 if ((sc->nge_flags & NGE_FLAG_TBI) != 0) { 422 /* Pretend PHY is at address 0. */ 423 if (phy != 0) 424 return (0); 425 switch (reg) { 426 case MII_BMCR: 427 reg = NGE_TBI_BMCR; 428 break; 429 case MII_BMSR: 430 /* 83820/83821 has different bit layout for BMSR. */ 431 rv = BMSR_ANEG | BMSR_EXTCAP | BMSR_EXTSTAT; 432 reg = CSR_READ_4(sc, NGE_TBI_BMSR); 433 if ((reg & NGE_TBIBMSR_ANEG_DONE) != 0) 434 rv |= BMSR_ACOMP; 435 if ((reg & NGE_TBIBMSR_LINKSTAT) != 0) 436 rv |= BMSR_LINK; 437 return (rv); 438 case MII_ANAR: 439 reg = NGE_TBI_ANAR; 440 break; 441 case MII_ANLPAR: 442 reg = NGE_TBI_ANLPAR; 443 break; 444 case MII_ANER: 445 reg = NGE_TBI_ANER; 446 break; 447 case MII_EXTSR: 448 reg = NGE_TBI_ESR; 449 break; 450 case MII_PHYIDR1: 451 case MII_PHYIDR2: 452 return (0); 453 default: 454 device_printf(sc->nge_dev, 455 "bad phy register read : %d\n", reg); 456 return (0); 457 } 458 return (CSR_READ_4(sc, reg)); 459 } 460 461 return (mii_bitbang_readreg(dev, &nge_mii_bitbang_ops, phy, reg)); 462 } 463 464 static int 465 nge_miibus_writereg(device_t dev, int phy, int reg, int data) 466 { 467 struct nge_softc *sc; 468 469 sc = device_get_softc(dev); 470 if ((sc->nge_flags & NGE_FLAG_TBI) != 0) { 471 /* Pretend PHY is at address 0. */ 472 if (phy != 0) 473 return (0); 474 switch (reg) { 475 case MII_BMCR: 476 reg = NGE_TBI_BMCR; 477 break; 478 case MII_BMSR: 479 return (0); 480 case MII_ANAR: 481 reg = NGE_TBI_ANAR; 482 break; 483 case MII_ANLPAR: 484 reg = NGE_TBI_ANLPAR; 485 break; 486 case MII_ANER: 487 reg = NGE_TBI_ANER; 488 break; 489 case MII_EXTSR: 490 reg = NGE_TBI_ESR; 491 break; 492 case MII_PHYIDR1: 493 case MII_PHYIDR2: 494 return (0); 495 default: 496 device_printf(sc->nge_dev, 497 "bad phy register write : %d\n", reg); 498 return (0); 499 } 500 CSR_WRITE_4(sc, reg, data); 501 return (0); 502 } 503 504 mii_bitbang_writereg(dev, &nge_mii_bitbang_ops, phy, reg, data); 505 506 return (0); 507 } 508 509 /* 510 * media status/link state change handler. 511 */ 512 static void 513 nge_miibus_statchg(device_t dev) 514 { 515 struct nge_softc *sc; 516 struct mii_data *mii; 517 struct ifnet *ifp; 518 struct nge_txdesc *txd; 519 uint32_t done, reg, status; 520 int i; 521 522 sc = device_get_softc(dev); 523 NGE_LOCK_ASSERT(sc); 524 525 mii = device_get_softc(sc->nge_miibus); 526 ifp = sc->nge_ifp; 527 if (mii == NULL || ifp == NULL || 528 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 529 return; 530 531 sc->nge_flags &= ~NGE_FLAG_LINK; 532 if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) == 533 (IFM_AVALID | IFM_ACTIVE)) { 534 switch (IFM_SUBTYPE(mii->mii_media_active)) { 535 case IFM_10_T: 536 case IFM_100_TX: 537 case IFM_1000_T: 538 case IFM_1000_SX: 539 case IFM_1000_LX: 540 case IFM_1000_CX: 541 sc->nge_flags |= NGE_FLAG_LINK; 542 break; 543 default: 544 break; 545 } 546 } 547 548 /* Stop Tx/Rx MACs. 
	 */
	if (nge_stop_mac(sc) == ETIMEDOUT)
		device_printf(sc->nge_dev,
		    "%s: unable to stop Tx/Rx MAC\n", __func__);
	nge_txeof(sc);
	nge_rxeof(sc);
	if (sc->nge_head != NULL) {
		m_freem(sc->nge_head);
		sc->nge_head = sc->nge_tail = NULL;
	}

	/* Release queued frames. */
	for (i = 0; i < NGE_TX_RING_CNT; i++) {
		txd = &sc->nge_cdata.nge_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->nge_cdata.nge_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->nge_cdata.nge_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	/* Program MAC with resolved speed/duplex. */
	if ((sc->nge_flags & NGE_FLAG_LINK) != 0) {
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
			NGE_SETBIT(sc, NGE_TX_CFG,
			    (NGE_TXCFG_IGN_HBEAT | NGE_TXCFG_IGN_CARR));
			NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
#ifdef notyet
			/* Enable flow-control. */
			if ((IFM_OPTIONS(mii->mii_media_active) &
			    (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) != 0)
				NGE_SETBIT(sc, NGE_PAUSECSR,
				    NGE_PAUSECSR_PAUSE_ENB);
#endif
		} else {
			NGE_CLRBIT(sc, NGE_TX_CFG,
			    (NGE_TXCFG_IGN_HBEAT | NGE_TXCFG_IGN_CARR));
			NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
			NGE_CLRBIT(sc, NGE_PAUSECSR, NGE_PAUSECSR_PAUSE_ENB);
		}
		/* If we have a 1000Mbps link, set the mode_1000 bit. */
		reg = CSR_READ_4(sc, NGE_CFG);
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_SX:
		case IFM_1000_LX:
		case IFM_1000_CX:
		case IFM_1000_T:
			reg |= NGE_CFG_MODE_1000;
			break;
		default:
			reg &= ~NGE_CFG_MODE_1000;
			break;
		}
		CSR_WRITE_4(sc, NGE_CFG, reg);

		/* Reset Tx/Rx MAC. */
		reg = CSR_READ_4(sc, NGE_CSR);
		reg |= NGE_CSR_TX_RESET | NGE_CSR_RX_RESET;
		CSR_WRITE_4(sc, NGE_CSR, reg);
		/* Check the completion of reset. */
		done = 0;
		for (i = 0; i < NGE_TIMEOUT; i++) {
			DELAY(1);
			status = CSR_READ_4(sc, NGE_ISR);
			if ((status & NGE_ISR_RX_RESET_DONE) != 0)
				done |= NGE_ISR_RX_RESET_DONE;
			if ((status & NGE_ISR_TX_RESET_DONE) != 0)
				done |= NGE_ISR_TX_RESET_DONE;
			if (done ==
			    (NGE_ISR_TX_RESET_DONE | NGE_ISR_RX_RESET_DONE))
				break;
		}
		if (i == NGE_TIMEOUT)
			device_printf(sc->nge_dev,
			    "%s: unable to reset Tx/Rx MAC\n", __func__);
		/* Reuse Rx buffer and reset consumer pointer. */
		sc->nge_cdata.nge_rx_cons = 0;
		/*
		 * It seems that resetting the Rx/Tx MAC also resets the
		 * Tx/Rx descriptor pointer registers, so the Tx/Rx list
		 * addresses need to be reloaded.
		 */
		CSR_WRITE_4(sc, NGE_RX_LISTPTR_HI,
		    NGE_ADDR_HI(sc->nge_rdata.nge_rx_ring_paddr));
		CSR_WRITE_4(sc, NGE_RX_LISTPTR_LO,
		    NGE_ADDR_LO(sc->nge_rdata.nge_rx_ring_paddr));
		CSR_WRITE_4(sc, NGE_TX_LISTPTR_HI,
		    NGE_ADDR_HI(sc->nge_rdata.nge_tx_ring_paddr));
		CSR_WRITE_4(sc, NGE_TX_LISTPTR_LO,
		    NGE_ADDR_LO(sc->nge_rdata.nge_tx_ring_paddr));
		/* Reinitialize Tx buffers. */
		nge_list_tx_init(sc);

		/*
		 * Restart Rx MAC.
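		 * The Tx MAC is deliberately left disabled here;
		 * nge_start_locked() sets NGE_CSR_TX_ENABLE once
		 * frames are queued for transmission.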
		 */
		reg = CSR_READ_4(sc, NGE_CSR);
		reg |= NGE_CSR_RX_ENABLE;
		CSR_WRITE_4(sc, NGE_CSR, reg);
		for (i = 0; i < NGE_TIMEOUT; i++) {
			if ((CSR_READ_4(sc, NGE_CSR) & NGE_CSR_RX_ENABLE) != 0)
				break;
			DELAY(1);
		}
		if (i == NGE_TIMEOUT)
			device_printf(sc->nge_dev,
			    "%s: unable to restart Rx MAC\n", __func__);
	}

	/* Data LED off for TBI mode */
	if ((sc->nge_flags & NGE_FLAG_TBI) != 0)
		CSR_WRITE_4(sc, NGE_GPIO,
		    CSR_READ_4(sc, NGE_GPIO) & ~NGE_GPIO_GP3_OUT);
}

static u_int
nge_write_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct nge_softc *sc = arg;
	uint32_t h;
	int bit, index;

	/*
	 * From the 11 bits returned by the crc routine, the top 7
	 * bits represent the 16-bit word in the mcast hash table
	 * that needs to be updated, and the lower 4 bits represent
	 * which bit within that word needs to be set.
	 */
	h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 21;
	index = (h >> 4) & 0x7F;
	bit = h & 0xF;
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_MCAST_LO + (index * 2));
	NGE_SETBIT(sc, NGE_RXFILT_DATA, (1 << bit));

	return (1);
}

static void
nge_rxfilter(struct nge_softc *sc)
{
	struct ifnet *ifp;
	uint32_t i, rxfilt;

	NGE_LOCK_ASSERT(sc);
	ifp = sc->nge_ifp;

	/* Make sure to stop Rx filtering. */
	rxfilt = CSR_READ_4(sc, NGE_RXFILT_CTL);
	rxfilt &= ~NGE_RXFILTCTL_ENABLE;
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, rxfilt);
	CSR_BARRIER_4(sc, NGE_RXFILT_CTL, BUS_SPACE_BARRIER_WRITE);

	rxfilt &= ~(NGE_RXFILTCTL_ALLMULTI | NGE_RXFILTCTL_ALLPHYS);
	rxfilt &= ~NGE_RXFILTCTL_BROAD;
	/*
	 * We don't want to use the hash table for matching unicast
	 * addresses.
	 */
	rxfilt &= ~(NGE_RXFILTCTL_MCHASH | NGE_RXFILTCTL_UCHASH);

	/*
	 * For the NatSemi chip, we have to explicitly enable the
	 * reception of ARP frames, as well as turn on the 'perfect
	 * match' filter where we store the station address, otherwise
	 * we won't receive unicasts meant for this host.
	 */
	rxfilt |= NGE_RXFILTCTL_ARP | NGE_RXFILTCTL_PERFECT;

	/*
	 * Set the capture broadcast bit to capture broadcast frames.
	 */
	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		rxfilt |= NGE_RXFILTCTL_BROAD;

	if ((ifp->if_flags & IFF_PROMISC) != 0 ||
	    (ifp->if_flags & IFF_ALLMULTI) != 0) {
		rxfilt |= NGE_RXFILTCTL_ALLMULTI;
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			rxfilt |= NGE_RXFILTCTL_ALLPHYS;
		goto done;
	}

	/*
	 * We have to explicitly enable the multicast hash table
	 * on the NatSemi chip if we want to use it, which we do.
	 */
	rxfilt |= NGE_RXFILTCTL_MCHASH;

	/* first, zot all the existing hash bits */
	for (i = 0; i < NGE_MCAST_FILTER_LEN; i += 2) {
		CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_MCAST_LO + i);
		CSR_WRITE_4(sc, NGE_RXFILT_DATA, 0);
	}

	if_foreach_llmaddr(ifp, nge_write_maddr, sc);
done:
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, rxfilt);
	/*
	 * Turn the receive filter on.
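	 * The control register is written twice: first with the final
	 * filter configuration and then with the enable bit set, so the
	 * configuration is in place before reception resumes.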
	 */
	rxfilt |= NGE_RXFILTCTL_ENABLE;
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, rxfilt);
	CSR_BARRIER_4(sc, NGE_RXFILT_CTL, BUS_SPACE_BARRIER_WRITE);
}

static void
nge_reset(struct nge_softc *sc)
{
	uint32_t v;
	int i;

	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RESET);

	for (i = 0; i < NGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, NGE_CSR) & NGE_CSR_RESET))
			break;
		DELAY(1);
	}

	if (i == NGE_TIMEOUT)
		device_printf(sc->nge_dev, "reset never completed\n");

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	/*
	 * If this is a NatSemi chip, make sure to clear
	 * PME mode.
	 */
	CSR_WRITE_4(sc, NGE_CLKRUN, NGE_CLKRUN_PMESTS);
	CSR_WRITE_4(sc, NGE_CLKRUN, 0);

	/* Clear WOL events which may interfere with normal Rx filter operation. */
	CSR_WRITE_4(sc, NGE_WOLCSR, 0);

	/*
	 * Only the DP83820 supports 64-bit addressing/data transfers, and
	 * 64-bit addressing requires different descriptor structures.
	 * To keep things simple, disable 64-bit addressing/data transfers.
	 */
	v = CSR_READ_4(sc, NGE_CFG);
	v &= ~(NGE_CFG_64BIT_ADDR_ENB | NGE_CFG_64BIT_DATA_ENB);
	CSR_WRITE_4(sc, NGE_CFG, v);
}

/*
 * Probe for a NatSemi chip.  Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
nge_probe(device_t dev)
{
	const struct nge_type *t;

	t = nge_devs;

	while (t->nge_name != NULL) {
		if ((pci_get_vendor(dev) == t->nge_vid) &&
		    (pci_get_device(dev) == t->nge_did)) {
			device_set_desc(dev, t->nge_name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return (ENXIO);
}

/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
nge_attach(device_t dev)
{
	uint8_t eaddr[ETHER_ADDR_LEN];
	uint16_t ea[ETHER_ADDR_LEN/2], ea_temp, reg;
	struct nge_softc *sc;
	struct ifnet *ifp;
	int error, i, rid;

	error = 0;
	sc = device_get_softc(dev);
	sc->nge_dev = dev;

	NGE_LOCK_INIT(sc, device_get_nameunit(dev));
	callout_init_mtx(&sc->nge_stat_ch, &sc->nge_mtx, 0);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

#ifdef NGE_USEIOSPACE
	sc->nge_res_type = SYS_RES_IOPORT;
	sc->nge_res_id = PCIR_BAR(0);
#else
	sc->nge_res_type = SYS_RES_MEMORY;
	sc->nge_res_id = PCIR_BAR(1);
#endif
	sc->nge_res = bus_alloc_resource_any(dev, sc->nge_res_type,
	    &sc->nge_res_id, RF_ACTIVE);

	if (sc->nge_res == NULL) {
		if (sc->nge_res_type == SYS_RES_MEMORY) {
			sc->nge_res_type = SYS_RES_IOPORT;
			sc->nge_res_id = PCIR_BAR(0);
		} else {
			sc->nge_res_type = SYS_RES_MEMORY;
			sc->nge_res_id = PCIR_BAR(1);
		}
		sc->nge_res = bus_alloc_resource_any(dev, sc->nge_res_type,
		    &sc->nge_res_id, RF_ACTIVE);
		if (sc->nge_res == NULL) {
			device_printf(dev, "couldn't allocate %s resources\n",
			    sc->nge_res_type == SYS_RES_MEMORY ? "memory" :
			    "I/O");
			NGE_LOCK_DESTROY(sc);
			return (ENXIO);
		}
	}

	/* Allocate interrupt */
	rid = 0;
	sc->nge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->nge_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Enable MWI.
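	 * Memory Write and Invalidate allows the chip to use more
	 * efficient cacheline-sized PCI burst transactions when it
	 * writes to host memory.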
	 */
	reg = pci_read_config(dev, PCIR_COMMAND, 2);
	reg |= PCIM_CMD_MWRICEN;
	pci_write_config(dev, PCIR_COMMAND, reg, 2);

	/* Reset the adapter. */
	nge_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	nge_read_eeprom(sc, (caddr_t)ea, NGE_EE_NODEADDR, 3);
	for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
		ea[i] = le16toh(ea[i]);
	ea_temp = ea[0];
	ea[0] = ea[2];
	ea[2] = ea_temp;
	bcopy(ea, eaddr, sizeof(eaddr));

	if (nge_dma_alloc(sc) != 0) {
		error = ENXIO;
		goto fail;
	}

	nge_sysctl_node(sc);

	ifp = sc->nge_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not allocate ifnet structure\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nge_ioctl;
	ifp->if_start = nge_start;
	ifp->if_init = nge_init;
	ifp->if_snd.ifq_drv_maxlen = NGE_TX_RING_CNT - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_hwassist = NGE_CSUM_FEATURES;
	ifp->if_capabilities = IFCAP_HWCSUM;
	/*
	 * It seems that some hardware doesn't provide the 3.3V auxiliary
	 * supply (3VAUX) to drive PME, so checking the PCI power
	 * management capability is necessary.
	 */
	if (pci_find_cap(sc->nge_dev, PCIY_PMG, &i) == 0)
		ifp->if_capabilities |= IFCAP_WOL;
	ifp->if_capenable = ifp->if_capabilities;

	if ((CSR_READ_4(sc, NGE_CFG) & NGE_CFG_TBI_EN) != 0) {
		sc->nge_flags |= NGE_FLAG_TBI;
		device_printf(dev, "Using TBI\n");
		/* Configure GPIO. */
		CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
		    | NGE_GPIO_GP4_OUT
		    | NGE_GPIO_GP1_OUTENB | NGE_GPIO_GP2_OUTENB
		    | NGE_GPIO_GP3_OUTENB
		    | NGE_GPIO_GP3_IN | NGE_GPIO_GP4_IN);
	}

	/*
	 * Do MII setup.
	 */
	error = mii_attach(dev, &sc->nge_miibus, ifp, nge_mediachange,
	    nge_mediastatus, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* VLAN capability setup. */
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
	ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif
	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/*
	 * Hookup IRQ last.
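	 * The handler is installed only after ether_ifattach() so that it
	 * can never run on a partially initialized interface.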
	 */
	error = bus_setup_intr(dev, sc->nge_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, nge_intr, sc, &sc->nge_intrhand);
	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		goto fail;
	}

fail:
	if (error != 0)
		nge_detach(dev);
	return (error);
}

static int
nge_detach(device_t dev)
{
	struct nge_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->nge_ifp;

#ifdef DEVICE_POLLING
	if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	if (device_is_attached(dev)) {
		NGE_LOCK(sc);
		sc->nge_flags |= NGE_FLAG_DETACH;
		nge_stop(sc);
		NGE_UNLOCK(sc);
		callout_drain(&sc->nge_stat_ch);
		if (ifp != NULL)
			ether_ifdetach(ifp);
	}

	if (sc->nge_miibus != NULL) {
		device_delete_child(dev, sc->nge_miibus);
		sc->nge_miibus = NULL;
	}
	bus_generic_detach(dev);
	if (sc->nge_intrhand != NULL)
		bus_teardown_intr(dev, sc->nge_irq, sc->nge_intrhand);
	if (sc->nge_irq != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq);
	if (sc->nge_res != NULL)
		bus_release_resource(dev, sc->nge_res_type, sc->nge_res_id,
		    sc->nge_res);

	nge_dma_free(sc);
	if (ifp != NULL)
		if_free(ifp);

	NGE_LOCK_DESTROY(sc);

	return (0);
}

struct nge_dmamap_arg {
	bus_addr_t	nge_busaddr;
};

static void
nge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct nge_dmamap_arg *ctx;

	if (error != 0)
		return;
	ctx = arg;
	ctx->nge_busaddr = segs[0].ds_addr;
}

static int
nge_dma_alloc(struct nge_softc *sc)
{
	struct nge_dmamap_arg ctx;
	struct nge_txdesc *txd;
	struct nge_rxdesc *rxd;
	int error, i;

	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->nge_dev),	/* parent */
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsize */
	    0,					/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->nge_cdata.nge_parent_tag);
	if (error != 0) {
		device_printf(sc->nge_dev, "failed to create parent DMA tag\n");
		goto fail;
	}
	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(sc->nge_cdata.nge_parent_tag,/* parent */
	    NGE_RING_ALIGN, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    NGE_TX_RING_SIZE,			/* maxsize */
	    1,					/* nsegments */
	    NGE_TX_RING_SIZE,			/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->nge_cdata.nge_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->nge_dev, "failed to create Tx ring DMA tag\n");
		goto fail;
	}

	/*
	 * Create tag for Rx ring.
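	 * As with the Tx ring, the Rx ring must be NGE_RING_ALIGN aligned
	 * and is mapped as a single contiguous segment.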
	 */
	error = bus_dma_tag_create(sc->nge_cdata.nge_parent_tag,/* parent */
	    NGE_RING_ALIGN, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    NGE_RX_RING_SIZE,			/* maxsize */
	    1,					/* nsegments */
	    NGE_RX_RING_SIZE,			/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->nge_cdata.nge_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->nge_dev,
		    "failed to create Rx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->nge_cdata.nge_parent_tag,/* parent */
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    MCLBYTES * NGE_MAXTXSEGS,		/* maxsize */
	    NGE_MAXTXSEGS,			/* nsegments */
	    MCLBYTES,				/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->nge_cdata.nge_tx_tag);
	if (error != 0) {
		device_printf(sc->nge_dev, "failed to create Tx DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(sc->nge_cdata.nge_parent_tag,/* parent */
	    NGE_RX_ALIGN, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    MCLBYTES,				/* maxsize */
	    1,					/* nsegments */
	    MCLBYTES,				/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->nge_cdata.nge_rx_tag);
	if (error != 0) {
		device_printf(sc->nge_dev, "failed to create Rx DMA tag\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->nge_cdata.nge_tx_ring_tag,
	    (void **)&sc->nge_rdata.nge_tx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->nge_cdata.nge_tx_ring_map);
	if (error != 0) {
		device_printf(sc->nge_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}

	ctx.nge_busaddr = 0;
	error = bus_dmamap_load(sc->nge_cdata.nge_tx_ring_tag,
	    sc->nge_cdata.nge_tx_ring_map, sc->nge_rdata.nge_tx_ring,
	    NGE_TX_RING_SIZE, nge_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.nge_busaddr == 0) {
		device_printf(sc->nge_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	sc->nge_rdata.nge_tx_ring_paddr = ctx.nge_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->nge_cdata.nge_rx_ring_tag,
	    (void **)&sc->nge_rdata.nge_rx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->nge_cdata.nge_rx_ring_map);
	if (error != 0) {
		device_printf(sc->nge_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}

	ctx.nge_busaddr = 0;
	error = bus_dmamap_load(sc->nge_cdata.nge_rx_ring_tag,
	    sc->nge_cdata.nge_rx_ring_map, sc->nge_rdata.nge_rx_ring,
	    NGE_RX_RING_SIZE, nge_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.nge_busaddr == 0) {
		device_printf(sc->nge_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc->nge_rdata.nge_rx_ring_paddr = ctx.nge_busaddr;

	/*
	 * Create DMA maps for Tx buffers.
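	 * One map is created per ring slot; a failure here aborts the
	 * attach via the fail label.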
	 */
	for (i = 0; i < NGE_TX_RING_CNT; i++) {
		txd = &sc->nge_cdata.nge_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->nge_cdata.nge_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->nge_dev,
			    "failed to create Tx dmamap\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->nge_cdata.nge_rx_tag, 0,
	    &sc->nge_cdata.nge_rx_sparemap)) != 0) {
		device_printf(sc->nge_dev,
		    "failed to create spare Rx dmamap\n");
		goto fail;
	}
	for (i = 0; i < NGE_RX_RING_CNT; i++) {
		rxd = &sc->nge_cdata.nge_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->nge_cdata.nge_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->nge_dev,
			    "failed to create Rx dmamap\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
nge_dma_free(struct nge_softc *sc)
{
	struct nge_txdesc *txd;
	struct nge_rxdesc *rxd;
	int i;

	/* Tx ring. */
	if (sc->nge_cdata.nge_tx_ring_tag) {
		if (sc->nge_rdata.nge_tx_ring_paddr)
			bus_dmamap_unload(sc->nge_cdata.nge_tx_ring_tag,
			    sc->nge_cdata.nge_tx_ring_map);
		if (sc->nge_rdata.nge_tx_ring)
			bus_dmamem_free(sc->nge_cdata.nge_tx_ring_tag,
			    sc->nge_rdata.nge_tx_ring,
			    sc->nge_cdata.nge_tx_ring_map);
		sc->nge_rdata.nge_tx_ring = NULL;
		sc->nge_rdata.nge_tx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->nge_cdata.nge_tx_ring_tag);
		sc->nge_cdata.nge_tx_ring_tag = NULL;
	}
	/* Rx ring. */
	if (sc->nge_cdata.nge_rx_ring_tag) {
		if (sc->nge_rdata.nge_rx_ring_paddr)
			bus_dmamap_unload(sc->nge_cdata.nge_rx_ring_tag,
			    sc->nge_cdata.nge_rx_ring_map);
		if (sc->nge_rdata.nge_rx_ring)
			bus_dmamem_free(sc->nge_cdata.nge_rx_ring_tag,
			    sc->nge_rdata.nge_rx_ring,
			    sc->nge_cdata.nge_rx_ring_map);
		sc->nge_rdata.nge_rx_ring = NULL;
		sc->nge_rdata.nge_rx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->nge_cdata.nge_rx_ring_tag);
		sc->nge_cdata.nge_rx_ring_tag = NULL;
	}
	/* Tx buffers. */
	if (sc->nge_cdata.nge_tx_tag) {
		for (i = 0; i < NGE_TX_RING_CNT; i++) {
			txd = &sc->nge_cdata.nge_txdesc[i];
			if (txd->tx_dmamap) {
				bus_dmamap_destroy(sc->nge_cdata.nge_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->nge_cdata.nge_tx_tag);
		sc->nge_cdata.nge_tx_tag = NULL;
	}
	/* Rx buffers. */
	if (sc->nge_cdata.nge_rx_tag) {
		for (i = 0; i < NGE_RX_RING_CNT; i++) {
			rxd = &sc->nge_cdata.nge_rxdesc[i];
			if (rxd->rx_dmamap) {
				bus_dmamap_destroy(sc->nge_cdata.nge_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->nge_cdata.nge_rx_sparemap) {
			bus_dmamap_destroy(sc->nge_cdata.nge_rx_tag,
			    sc->nge_cdata.nge_rx_sparemap);
			sc->nge_cdata.nge_rx_sparemap = 0;
		}
		bus_dma_tag_destroy(sc->nge_cdata.nge_rx_tag);
		sc->nge_cdata.nge_rx_tag = NULL;
	}

	if (sc->nge_cdata.nge_parent_tag) {
		bus_dma_tag_destroy(sc->nge_cdata.nge_parent_tag);
		sc->nge_cdata.nge_parent_tag = NULL;
	}
}

/*
 * Initialize the transmit descriptors.
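 * Each descriptor's nge_next field is pointed at the bus address of the
 * following descriptor, with the last entry pointing back at the first
 * to close the ring.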
 */
static int
nge_list_tx_init(struct nge_softc *sc)
{
	struct nge_ring_data *rd;
	struct nge_txdesc *txd;
	bus_addr_t addr;
	int i;

	sc->nge_cdata.nge_tx_prod = 0;
	sc->nge_cdata.nge_tx_cons = 0;
	sc->nge_cdata.nge_tx_cnt = 0;

	rd = &sc->nge_rdata;
	bzero(rd->nge_tx_ring, sizeof(struct nge_desc) * NGE_TX_RING_CNT);
	for (i = 0; i < NGE_TX_RING_CNT; i++) {
		if (i == NGE_TX_RING_CNT - 1)
			addr = NGE_TX_RING_ADDR(sc, 0);
		else
			addr = NGE_TX_RING_ADDR(sc, i + 1);
		rd->nge_tx_ring[i].nge_next = htole32(NGE_ADDR_LO(addr));
		txd = &sc->nge_cdata.nge_txdesc[i];
		txd->tx_m = NULL;
	}

	bus_dmamap_sync(sc->nge_cdata.nge_tx_ring_tag,
	    sc->nge_cdata.nge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Initialize the RX descriptors and allocate mbufs for them.  Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int
nge_list_rx_init(struct nge_softc *sc)
{
	struct nge_ring_data *rd;
	bus_addr_t addr;
	int i;

	sc->nge_cdata.nge_rx_cons = 0;
	sc->nge_head = sc->nge_tail = NULL;

	rd = &sc->nge_rdata;
	bzero(rd->nge_rx_ring, sizeof(struct nge_desc) * NGE_RX_RING_CNT);
	for (i = 0; i < NGE_RX_RING_CNT; i++) {
		if (nge_newbuf(sc, i) != 0)
			return (ENOBUFS);
		if (i == NGE_RX_RING_CNT - 1)
			addr = NGE_RX_RING_ADDR(sc, 0);
		else
			addr = NGE_RX_RING_ADDR(sc, i + 1);
		rd->nge_rx_ring[i].nge_next = htole32(NGE_ADDR_LO(addr));
	}

	bus_dmamap_sync(sc->nge_cdata.nge_rx_ring_tag,
	    sc->nge_cdata.nge_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

static __inline void
nge_discard_rxbuf(struct nge_softc *sc, int idx)
{
	struct nge_desc *desc;

	desc = &sc->nge_rdata.nge_rx_ring[idx];
	desc->nge_cmdsts = htole32(MCLBYTES - sizeof(uint64_t));
	desc->nge_extsts = 0;
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
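 * The new cluster is loaded through the spare DMA map first, so a load
 * failure leaves the old mbuf and its mapping intact.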
 */
static int
nge_newbuf(struct nge_softc *sc, int idx)
{
	struct nge_desc *desc;
	struct nge_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, sizeof(uint64_t));

	if (bus_dmamap_load_mbuf_sg(sc->nge_cdata.nge_rx_tag,
	    sc->nge_cdata.nge_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc->nge_cdata.nge_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->nge_cdata.nge_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->nge_cdata.nge_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->nge_cdata.nge_rx_sparemap;
	sc->nge_cdata.nge_rx_sparemap = map;
	bus_dmamap_sync(sc->nge_cdata.nge_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	desc = &sc->nge_rdata.nge_rx_ring[idx];
	desc->nge_ptr = htole32(NGE_ADDR_LO(segs[0].ds_addr));
	desc->nge_cmdsts = htole32(segs[0].ds_len);
	desc->nge_extsts = 0;

	return (0);
}

#ifndef __NO_STRICT_ALIGNMENT
static __inline void
nge_fixup_rx(struct mbuf *m)
{
	int i;
	uint16_t *src, *dst;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= ETHER_ALIGN;
}
#endif

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
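 * Frames spanning multiple descriptors (NGE_CMDSTS_MORE set) are
 * accumulated on the nge_head/nge_tail chain until the descriptor that
 * completes the packet is reached.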
 */
static int
nge_rxeof(struct nge_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct nge_desc *cur_rx;
	struct nge_rxdesc *rxd;
	int cons, prog, rx_npkts, total_len;
	uint32_t cmdsts, extsts;

	NGE_LOCK_ASSERT(sc);

	ifp = sc->nge_ifp;
	cons = sc->nge_cdata.nge_rx_cons;
	rx_npkts = 0;

	bus_dmamap_sync(sc->nge_cdata.nge_rx_ring_tag,
	    sc->nge_cdata.nge_rx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (prog = 0; prog < NGE_RX_RING_CNT &&
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
	    NGE_INC(cons, NGE_RX_RING_CNT)) {
#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif
		cur_rx = &sc->nge_rdata.nge_rx_ring[cons];
		cmdsts = le32toh(cur_rx->nge_cmdsts);
		extsts = le32toh(cur_rx->nge_extsts);
		if ((cmdsts & NGE_CMDSTS_OWN) == 0)
			break;
		prog++;
		rxd = &sc->nge_cdata.nge_rxdesc[cons];
		m = rxd->rx_m;
		total_len = cmdsts & NGE_CMDSTS_BUFLEN;

		if ((cmdsts & NGE_CMDSTS_MORE) != 0) {
			if (nge_newbuf(sc, cons) != 0) {
				if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
				if (sc->nge_head != NULL) {
					m_freem(sc->nge_head);
					sc->nge_head = sc->nge_tail = NULL;
				}
				nge_discard_rxbuf(sc, cons);
				continue;
			}
			m->m_len = total_len;
			if (sc->nge_head == NULL) {
				m->m_pkthdr.len = total_len;
				sc->nge_head = sc->nge_tail = m;
			} else {
				m->m_flags &= ~M_PKTHDR;
				sc->nge_head->m_pkthdr.len += total_len;
				sc->nge_tail->m_next = m;
				sc->nge_tail = m;
			}
			continue;
		}

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if ((cmdsts & NGE_CMDSTS_PKT_OK) == 0) {
			if ((cmdsts & NGE_RXSTAT_RUNT) &&
			    total_len >= (ETHER_MIN_LEN - ETHER_CRC_LEN - 4)) {
				/*
				 * Work around a hardware bug: accept runt
				 * frames whose length is larger than or
				 * equal to 56.
				 */
			} else {
				/*
				 * Input error counters are updated by
				 * hardware.
				 */
				if (sc->nge_head != NULL) {
					m_freem(sc->nge_head);
					sc->nge_head = sc->nge_tail = NULL;
				}
				nge_discard_rxbuf(sc, cons);
				continue;
			}
		}

		/* Try to conjure up a replacement mbuf. */
		if (nge_newbuf(sc, cons) != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			if (sc->nge_head != NULL) {
				m_freem(sc->nge_head);
				sc->nge_head = sc->nge_tail = NULL;
			}
			nge_discard_rxbuf(sc, cons);
			continue;
		}

		/* Chain received mbufs. */
		if (sc->nge_head != NULL) {
			m->m_len = total_len;
			m->m_flags &= ~M_PKTHDR;
			sc->nge_tail->m_next = m;
			m = sc->nge_head;
			m->m_pkthdr.len += total_len;
			sc->nge_head = sc->nge_tail = NULL;
		} else
			m->m_pkthdr.len = m->m_len = total_len;

		/*
		 * Ok.  NatSemi really screwed up here.  This is the
		 * only gigE chip I know of with alignment constraints
		 * on receive buffers.  RX buffers must be 64-bit aligned.
		 */
		/*
		 * By popular demand, ignore the alignment problems
		 * on the non-strict alignment platform.  The performance hit
		 * incurred due to unaligned accesses is much smaller
		 * than the hit produced by forcing buffer copies all
		 * the time, especially with jumbo frames.  We still
		 * need to fix up the alignment everywhere else though.
		 */
#ifndef __NO_STRICT_ALIGNMENT
		nge_fixup_rx(m);
#endif
		m->m_pkthdr.rcvif = ifp;
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			/* Do IP checksum checking. */
			if ((extsts & NGE_RXEXTSTS_IPPKT) != 0)
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if ((extsts & NGE_RXEXTSTS_IPCSUMERR) == 0)
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			if ((extsts & NGE_RXEXTSTS_TCPPKT &&
			    !(extsts & NGE_RXEXTSTS_TCPCSUMERR)) ||
			    (extsts & NGE_RXEXTSTS_UDPPKT &&
			    !(extsts & NGE_RXEXTSTS_UDPCSUMERR))) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		/*
		 * If we received a packet with a vlan tag, pass it
		 * to vlan_input() instead of ether_input().
		 */
		if ((extsts & NGE_RXEXTSTS_VLANPKT) != 0 &&
		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
			m->m_pkthdr.ether_vtag =
			    bswap16(extsts & NGE_RXEXTSTS_VTCI);
			m->m_flags |= M_VLANTAG;
		}
		NGE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		NGE_LOCK(sc);
		rx_npkts++;
	}

	if (prog > 0) {
		sc->nge_cdata.nge_rx_cons = cons;
		bus_dmamap_sync(sc->nge_cdata.nge_rx_ring_tag,
		    sc->nge_cdata.nge_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	return (rx_npkts);
}

/*
 * A frame was downloaded to the chip.  It's safe for us to clean up
 * the list buffers.
 */
static void
nge_txeof(struct nge_softc *sc)
{
	struct nge_desc *cur_tx;
	struct nge_txdesc *txd;
	struct ifnet *ifp;
	uint32_t cmdsts;
	int cons, prod;

	NGE_LOCK_ASSERT(sc);
	ifp = sc->nge_ifp;

	cons = sc->nge_cdata.nge_tx_cons;
	prod = sc->nge_cdata.nge_tx_prod;
	if (cons == prod)
		return;

	bus_dmamap_sync(sc->nge_cdata.nge_tx_ring_tag,
	    sc->nge_cdata.nge_tx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
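	 * A descriptor still owned by the hardware (NGE_CMDSTS_OWN set)
	 * marks the end of the completed area of the ring.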
	 */
	for (; cons != prod; NGE_INC(cons, NGE_TX_RING_CNT)) {
		cur_tx = &sc->nge_rdata.nge_tx_ring[cons];
		cmdsts = le32toh(cur_tx->nge_cmdsts);
		if ((cmdsts & NGE_CMDSTS_OWN) != 0)
			break;
		sc->nge_cdata.nge_tx_cnt--;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		if ((cmdsts & NGE_CMDSTS_MORE) != 0)
			continue;

		txd = &sc->nge_cdata.nge_txdesc[cons];
		bus_dmamap_sync(sc->nge_cdata.nge_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->nge_cdata.nge_tx_tag, txd->tx_dmamap);
		if ((cmdsts & NGE_CMDSTS_PKT_OK) == 0) {
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			if ((cmdsts & NGE_TXSTAT_EXCESSCOLLS) != 0)
				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
			if ((cmdsts & NGE_TXSTAT_OUTOFWINCOLL) != 0)
				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
		} else
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);

		if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
		    (cmdsts & NGE_TXSTAT_COLLCNT) >> 16);
		KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!\n",
		    __func__));
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
	}

	sc->nge_cdata.nge_tx_cons = cons;
	if (sc->nge_cdata.nge_tx_cnt == 0)
		sc->nge_watchdog_timer = 0;
}

static void
nge_tick(void *xsc)
{
	struct nge_softc *sc;
	struct mii_data *mii;

	sc = xsc;
	NGE_LOCK_ASSERT(sc);
	mii = device_get_softc(sc->nge_miibus);
	mii_tick(mii);
	/*
	 * For PHYs that do not reset an established link, it is
	 * necessary to check whether the driver still has a valid
	 * link (e.g. the link state change callback is not called).
	 * Otherwise, the driver would think it lost the link because
	 * the driver initialization routine clears the link state flag.
	 */
	if ((sc->nge_flags & NGE_FLAG_LINK) == 0)
		nge_miibus_statchg(sc->nge_dev);
	nge_stats_update(sc);
	nge_watchdog(sc);
	callout_reset(&sc->nge_stat_ch, hz, nge_tick, sc);
}

static void
nge_stats_update(struct nge_softc *sc)
{
	struct ifnet *ifp;
	struct nge_stats now, *stats, *nstats;

	NGE_LOCK_ASSERT(sc);

	ifp = sc->nge_ifp;
	stats = &now;
	stats->rx_pkts_errs =
	    CSR_READ_4(sc, NGE_MIB_RXERRPKT) & 0xFFFF;
	stats->rx_crc_errs =
	    CSR_READ_4(sc, NGE_MIB_RXERRFCS) & 0xFFFF;
	stats->rx_fifo_oflows =
	    CSR_READ_4(sc, NGE_MIB_RXERRMISSEDPKT) & 0xFFFF;
	stats->rx_align_errs =
	    CSR_READ_4(sc, NGE_MIB_RXERRALIGN) & 0xFFFF;
	stats->rx_sym_errs =
	    CSR_READ_4(sc, NGE_MIB_RXERRSYM) & 0xFFFF;
	stats->rx_pkts_jumbos =
	    CSR_READ_4(sc, NGE_MIB_RXERRGIANT) & 0xFFFF;
	stats->rx_len_errs =
	    CSR_READ_4(sc, NGE_MIB_RXERRRANGLEN) & 0xFFFF;
	stats->rx_unctl_frames =
	    CSR_READ_4(sc, NGE_MIB_RXBADOPCODE) & 0xFFFF;
	stats->rx_pause =
	    CSR_READ_4(sc, NGE_MIB_RXPAUSEPKTS) & 0xFFFF;
	stats->tx_pause =
	    CSR_READ_4(sc, NGE_MIB_TXPAUSEPKTS) & 0xFFFF;
	stats->tx_seq_errs =
	    CSR_READ_4(sc, NGE_MIB_TXERRSQE) & 0xFF;

	/*
	 * Since we accept errored frames, exclude Rx length errors.
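	 * Length errors can cover frames deliberately accepted in
	 * nge_rxeof() (e.g. the runt work-around), so counting them here
	 * would tally frames that were in fact delivered.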
	 */
	if_inc_counter(ifp, IFCOUNTER_IERRORS,
	    stats->rx_pkts_errs + stats->rx_crc_errs +
	    stats->rx_fifo_oflows + stats->rx_sym_errs);

	nstats = &sc->nge_stats;
	nstats->rx_pkts_errs += stats->rx_pkts_errs;
	nstats->rx_crc_errs += stats->rx_crc_errs;
	nstats->rx_fifo_oflows += stats->rx_fifo_oflows;
	nstats->rx_align_errs += stats->rx_align_errs;
	nstats->rx_sym_errs += stats->rx_sym_errs;
	nstats->rx_pkts_jumbos += stats->rx_pkts_jumbos;
	nstats->rx_len_errs += stats->rx_len_errs;
	nstats->rx_unctl_frames += stats->rx_unctl_frames;
	nstats->rx_pause += stats->rx_pause;
	nstats->tx_pause += stats->tx_pause;
	nstats->tx_seq_errs += stats->tx_seq_errs;
}

#ifdef DEVICE_POLLING
static poll_handler_t nge_poll;

static int
nge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct nge_softc *sc;
	int rx_npkts = 0;

	sc = ifp->if_softc;

	NGE_LOCK(sc);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		NGE_UNLOCK(sc);
		return (rx_npkts);
	}

	/*
	 * On the nge, reading the status register also clears it.
	 * So before returning to intr mode we must make sure that all
	 * possible pending sources of interrupts have been served.
	 * In practice this means run to completion the *eof routines,
	 * and then call the interrupt routine.
	 */
	sc->rxcycles = count;
	rx_npkts = nge_rxeof(sc);
	nge_txeof(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		nge_start_locked(ifp);

	if (sc->rxcycles > 0 || cmd == POLL_AND_CHECK_STATUS) {
		uint32_t status;

		/* Reading the ISR register clears all interrupts. */
		status = CSR_READ_4(sc, NGE_ISR);

		if ((status & (NGE_ISR_RX_ERR|NGE_ISR_RX_OFLOW)) != 0)
			rx_npkts += nge_rxeof(sc);

		if ((status & NGE_ISR_RX_IDLE) != 0)
			NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);

		if ((status & NGE_ISR_SYSERR) != 0) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			nge_init_locked(sc);
		}
	}
	NGE_UNLOCK(sc);
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */

static void
nge_intr(void *arg)
{
	struct nge_softc *sc;
	struct ifnet *ifp;
	uint32_t status;

	sc = (struct nge_softc *)arg;
	ifp = sc->nge_ifp;

	NGE_LOCK(sc);

	if ((sc->nge_flags & NGE_FLAG_SUSPENDED) != 0)
		goto done_locked;

	/* Reading the ISR register clears all interrupts. */
	status = CSR_READ_4(sc, NGE_ISR);
	if (status == 0xffffffff || (status & NGE_INTRS) == 0)
		goto done_locked;
#ifdef DEVICE_POLLING
	if ((ifp->if_capenable & IFCAP_POLLING) != 0)
		goto done_locked;
#endif
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		goto done_locked;

	/*
	 * Disable interrupts.
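	 * The ISR is re-read in the loop below until no interrupt
	 * sources remain pending; the IER is written again once the
	 * loop terminates.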
	 */
	CSR_WRITE_4(sc, NGE_IER, 0);

	/* Data LED on for TBI mode */
	if ((sc->nge_flags & NGE_FLAG_TBI) != 0)
		CSR_WRITE_4(sc, NGE_GPIO,
		    CSR_READ_4(sc, NGE_GPIO) | NGE_GPIO_GP3_OUT);

	for (; (status & NGE_INTRS) != 0;) {
		if ((status & (NGE_ISR_TX_DESC_OK | NGE_ISR_TX_ERR |
		    NGE_ISR_TX_OK | NGE_ISR_TX_IDLE)) != 0)
			nge_txeof(sc);

		if ((status & (NGE_ISR_RX_DESC_OK | NGE_ISR_RX_ERR |
		    NGE_ISR_RX_OFLOW | NGE_ISR_RX_FIFO_OFLOW |
		    NGE_ISR_RX_IDLE | NGE_ISR_RX_OK)) != 0)
			nge_rxeof(sc);

		if ((status & NGE_ISR_RX_IDLE) != 0)
			NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);

		if ((status & NGE_ISR_SYSERR) != 0) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			nge_init_locked(sc);
		}
		/* Reading the ISR register clears all interrupts. */
		status = CSR_READ_4(sc, NGE_ISR);
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, NGE_IER, 1);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		nge_start_locked(ifp);

	/* Data LED off for TBI mode */
	if ((sc->nge_flags & NGE_FLAG_TBI) != 0)
		CSR_WRITE_4(sc, NGE_GPIO,
		    CSR_READ_4(sc, NGE_GPIO) & ~NGE_GPIO_GP3_OUT);

done_locked:
	NGE_UNLOCK(sc);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
nge_encap(struct nge_softc *sc, struct mbuf **m_head)
{
	struct nge_txdesc *txd, *txd_last;
	struct nge_desc *desc;
	struct mbuf *m;
	bus_dmamap_t map;
	bus_dma_segment_t txsegs[NGE_MAXTXSEGS];
	int error, i, nsegs, prod, si;

	NGE_LOCK_ASSERT(sc);

	m = *m_head;
	prod = sc->nge_cdata.nge_tx_prod;
	txd = &sc->nge_cdata.nge_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;
	error = bus_dmamap_load_mbuf_sg(sc->nge_cdata.nge_tx_tag, map,
	    *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_NOWAIT, NGE_MAXTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->nge_cdata.nge_tx_tag,
		    map, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check number of available descriptors. */
	if (sc->nge_cdata.nge_tx_cnt + nsegs >= (NGE_TX_RING_CNT - 1)) {
		bus_dmamap_unload(sc->nge_cdata.nge_tx_tag, map);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->nge_cdata.nge_tx_tag, map, BUS_DMASYNC_PREWRITE);

	si = prod;
	for (i = 0; i < nsegs; i++) {
		desc = &sc->nge_rdata.nge_tx_ring[prod];
		desc->nge_ptr = htole32(NGE_ADDR_LO(txsegs[i].ds_addr));
		if (i == 0)
			desc->nge_cmdsts = htole32(txsegs[i].ds_len |
			    NGE_CMDSTS_MORE);
		else
			desc->nge_cmdsts = htole32(txsegs[i].ds_len |
			    NGE_CMDSTS_MORE | NGE_CMDSTS_OWN);
		desc->nge_extsts = 0;
		sc->nge_cdata.nge_tx_cnt++;
		NGE_INC(prod, NGE_TX_RING_CNT);
	}
	/* Update producer index. */
	sc->nge_cdata.nge_tx_prod = prod;

	prod = (prod + NGE_TX_RING_CNT - 1) % NGE_TX_RING_CNT;
	desc = &sc->nge_rdata.nge_tx_ring[prod];
	/*
	 * Check if we have a VLAN tag to insert.
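	 * The tag goes into the extended status word of the last
	 * descriptor and is passed through bswap16() since the chip
	 * takes the 802.1Q TCI in network byte order.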
 */
	if ((m->m_flags & M_VLANTAG) != 0)
		desc->nge_extsts |= htole32(NGE_TXEXTSTS_VLANPKT |
		    bswap16(m->m_pkthdr.ether_vtag));
	/* Set EOP on the last descriptor. */
	desc->nge_cmdsts &= htole32(~NGE_CMDSTS_MORE);

	/* Set checksum offload in the first descriptor. */
	desc = &sc->nge_rdata.nge_tx_ring[si];
	if ((m->m_pkthdr.csum_flags & NGE_CSUM_FEATURES) != 0) {
		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
			desc->nge_extsts |= htole32(NGE_TXEXTSTS_IPCSUM);
		if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
			desc->nge_extsts |= htole32(NGE_TXEXTSTS_TCPCSUM);
		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
			desc->nge_extsts |= htole32(NGE_TXEXTSTS_UDPCSUM);
	}
	/* Lastly, turn ownership of the first descriptor over to hardware. */
	desc->nge_cmdsts |= htole32(NGE_CMDSTS_OWN);

	txd = &sc->nge_cdata.nge_txdesc[prod];
	map = txd_last->tx_dmamap;
	txd_last->tx_dmamap = txd->tx_dmamap;
	txd->tx_dmamap = map;
	txd->tx_m = m;

	return (0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */

static void
nge_start(struct ifnet *ifp)
{
	struct nge_softc *sc;

	sc = ifp->if_softc;
	NGE_LOCK(sc);
	nge_start_locked(ifp);
	NGE_UNLOCK(sc);
}

static void
nge_start_locked(struct ifnet *ifp)
{
	struct nge_softc *sc;
	struct mbuf *m_head;
	int enq;

	sc = ifp->if_softc;

	NGE_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->nge_flags & NGE_FLAG_LINK) == 0)
		return;

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
	    sc->nge_cdata.nge_tx_cnt < NGE_TX_RING_CNT - 2; ) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (nge_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to it.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		bus_dmamap_sync(sc->nge_cdata.nge_tx_ring_tag,
		    sc->nge_cdata.nge_tx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/* Transmit */
		NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_ENABLE);

		/* Set a timeout in case the chip goes out to lunch. */
		sc->nge_watchdog_timer = 5;
	}
}

static void
nge_init(void *xsc)
{
	struct nge_softc *sc = xsc;

	NGE_LOCK(sc);
	nge_init_locked(sc);
	NGE_UNLOCK(sc);
}

static void
nge_init_locked(struct nge_softc *sc)
{
	struct ifnet *ifp = sc->nge_ifp;
	struct mii_data *mii;
	uint8_t *eaddr;
	uint32_t reg;

	NGE_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	nge_stop(sc);

	/* Reset the adapter. */
	nge_reset(sc);

	/* Disable the Rx filter before reprogramming it.
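	 * The filter memory is reached through an indirect register
	 * pair: a target address goes into NGE_RXFILT_CTL and 16 bits
	 * of data into NGE_RXFILT_DATA, the same access pattern the
	 * station address programming below uses:
	 *
	 *	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR0);
	 *	CSR_WRITE_4(sc, NGE_RXFILT_DATA, (eaddr[1] << 8) | eaddr[0]);
	 *
	 * Keeping the filter disabled while its table is rewritten
	 * should prevent the receiver from matching against half-updated
	 * entries (an assumption about the hardware's behaviour, but the
	 * conservative ordering costs nothing).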
*/ 2081 CSR_WRITE_4(sc, NGE_RXFILT_CTL, 0); 2082 CSR_BARRIER_4(sc, NGE_RXFILT_CTL, BUS_SPACE_BARRIER_WRITE); 2083 2084 mii = device_get_softc(sc->nge_miibus); 2085 2086 /* Set MAC address. */ 2087 eaddr = IF_LLADDR(sc->nge_ifp); 2088 CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR0); 2089 CSR_WRITE_4(sc, NGE_RXFILT_DATA, (eaddr[1] << 8) | eaddr[0]); 2090 CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR1); 2091 CSR_WRITE_4(sc, NGE_RXFILT_DATA, (eaddr[3] << 8) | eaddr[2]); 2092 CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR2); 2093 CSR_WRITE_4(sc, NGE_RXFILT_DATA, (eaddr[5] << 8) | eaddr[4]); 2094 2095 /* Init circular RX list. */ 2096 if (nge_list_rx_init(sc) == ENOBUFS) { 2097 device_printf(sc->nge_dev, "initialization failed: no " 2098 "memory for rx buffers\n"); 2099 nge_stop(sc); 2100 return; 2101 } 2102 2103 /* 2104 * Init tx descriptors. 2105 */ 2106 nge_list_tx_init(sc); 2107 2108 /* Set Rx filter. */ 2109 nge_rxfilter(sc); 2110 2111 /* Disable PRIQ ctl. */ 2112 CSR_WRITE_4(sc, NGE_PRIOQCTL, 0); 2113 2114 /* 2115 * Set pause frames parameters. 2116 * Rx stat FIFO hi-threshold : 2 or more packets 2117 * Rx stat FIFO lo-threshold : less than 2 packets 2118 * Rx data FIFO hi-threshold : 2K or more bytes 2119 * Rx data FIFO lo-threshold : less than 2K bytes 2120 * pause time : (512ns * 0xffff) -> 33.55ms 2121 */ 2122 CSR_WRITE_4(sc, NGE_PAUSECSR, 2123 NGE_PAUSECSR_PAUSE_ON_MCAST | 2124 NGE_PAUSECSR_PAUSE_ON_DA | 2125 ((1 << 24) & NGE_PAUSECSR_RX_STATFIFO_THR_HI) | 2126 ((1 << 22) & NGE_PAUSECSR_RX_STATFIFO_THR_LO) | 2127 ((1 << 20) & NGE_PAUSECSR_RX_DATAFIFO_THR_HI) | 2128 ((1 << 18) & NGE_PAUSECSR_RX_DATAFIFO_THR_LO) | 2129 NGE_PAUSECSR_CNT); 2130 2131 /* 2132 * Load the address of the RX and TX lists. 2133 */ 2134 CSR_WRITE_4(sc, NGE_RX_LISTPTR_HI, 2135 NGE_ADDR_HI(sc->nge_rdata.nge_rx_ring_paddr)); 2136 CSR_WRITE_4(sc, NGE_RX_LISTPTR_LO, 2137 NGE_ADDR_LO(sc->nge_rdata.nge_rx_ring_paddr)); 2138 CSR_WRITE_4(sc, NGE_TX_LISTPTR_HI, 2139 NGE_ADDR_HI(sc->nge_rdata.nge_tx_ring_paddr)); 2140 CSR_WRITE_4(sc, NGE_TX_LISTPTR_LO, 2141 NGE_ADDR_LO(sc->nge_rdata.nge_tx_ring_paddr)); 2142 2143 /* Set RX configuration. */ 2144 CSR_WRITE_4(sc, NGE_RX_CFG, NGE_RXCFG); 2145 2146 CSR_WRITE_4(sc, NGE_VLAN_IP_RXCTL, 0); 2147 /* 2148 * Enable hardware checksum validation for all IPv4 2149 * packets, do not reject packets with bad checksums. 2150 */ 2151 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 2152 NGE_SETBIT(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_IPCSUM_ENB); 2153 2154 /* 2155 * Tell the chip to detect and strip VLAN tag info from 2156 * received frames. The tag will be provided in the extsts 2157 * field in the RX descriptors. 2158 */ 2159 NGE_SETBIT(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_TAG_DETECT_ENB); 2160 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 2161 NGE_SETBIT(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_TAG_STRIP_ENB); 2162 2163 /* Set TX configuration. */ 2164 CSR_WRITE_4(sc, NGE_TX_CFG, NGE_TXCFG); 2165 2166 /* 2167 * Enable TX IPv4 checksumming on a per-packet basis. 2168 */ 2169 CSR_WRITE_4(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_CSUM_PER_PKT); 2170 2171 /* 2172 * Tell the chip to insert VLAN tags on a per-packet basis as 2173 * dictated by the code in the frame encapsulation routine. 2174 */ 2175 NGE_SETBIT(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_TAG_PER_PKT); 2176 2177 /* 2178 * Enable the delivery of PHY interrupts based on 2179 * link/speed/duplex status changes. Also enable the 2180 * extsts field in the DMA descriptors (needed for 2181 * TCP/IP checksum offload on transmit). 
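	 * With NGE_CFG_EXTSTS_ENB set, the chip consults the extended
	 * status word in each descriptor; nge_encap() uses it to request
	 * per-frame checksum insertion, e.g. (condensed from the encap
	 * path above):
	 *
	 *	if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
	 *		desc->nge_extsts |= htole32(NGE_TXEXTSTS_IPCSUM);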
 */
	NGE_SETBIT(sc, NGE_CFG, NGE_CFG_PHYINTR_SPD |
	    NGE_CFG_PHYINTR_LNK | NGE_CFG_PHYINTR_DUP | NGE_CFG_EXTSTS_ENB);

	/*
	 * Configure interrupt holdoff (moderation). We can
	 * have the chip delay interrupt delivery for a certain
	 * period. Units are in 100us, and the max setting
	 * is 25500us (0xFF x 100us). Default is a 100us holdoff.
	 */
	CSR_WRITE_4(sc, NGE_IHR, sc->nge_int_holdoff);

	/*
	 * Enable MAC statistics counters and clear them.
	 */
	reg = CSR_READ_4(sc, NGE_MIBCTL);
	reg &= ~NGE_MIBCTL_FREEZE_CNT;
	reg |= NGE_MIBCTL_CLEAR_CNT;
	CSR_WRITE_4(sc, NGE_MIBCTL, reg);

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, NGE_IMR, NGE_INTRS);
#ifdef DEVICE_POLLING
	/*
	 * ... but only enable interrupt delivery if we are not polling;
	 * make sure it is off otherwise.
	 */
	if ((ifp->if_capenable & IFCAP_POLLING) != 0)
		CSR_WRITE_4(sc, NGE_IER, 0);
	else
#endif
	CSR_WRITE_4(sc, NGE_IER, 1);

	sc->nge_flags &= ~NGE_FLAG_LINK;
	mii_mediachg(mii);

	sc->nge_watchdog_timer = 0;
	callout_reset(&sc->nge_stat_ch, hz, nge_tick, sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}

/*
 * Set media options.
 */
static int
nge_mediachange(struct ifnet *ifp)
{
	struct nge_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = ifp->if_softc;
	NGE_LOCK(sc);
	mii = device_get_softc(sc->nge_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	error = mii_mediachg(mii);
	NGE_UNLOCK(sc);

	return (error);
}

/*
 * Report current media status.
 */
static void
nge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct nge_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	NGE_LOCK(sc);
	mii = device_get_softc(sc->nge_miibus);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	NGE_UNLOCK(sc);
}

static int
nge_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct nge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int error = 0, mask;

	switch (command) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NGE_JUMBO_MTU)
			error = EINVAL;
		else {
			NGE_LOCK(sc);
			ifp->if_mtu = ifr->ifr_mtu;
			/*
			 * Workaround: if the MTU is larger than 8152
			 * (8170 - 18: the largest frame the chip can
			 * checksum from its 8192 byte TX FIFO, less the
			 * 18 bytes of Ethernet header and CRC), turn off
			 * TX checksum offloading.
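			 *
			 * Worked out (a restatement of the limit derived
			 * in the comment at the top of this file):
			 *
			 *	8152 (MTU) + 14 (header) + 4 (CRC) = 8170,
			 *
			 * the largest frame the transmitter can still
			 * checksum entirely from the TX FIFO.  Anything
			 * bigger must go out with software checksums or
			 * the transmitter wedges.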
			 */
			if (ifr->ifr_mtu >= 8152) {
				ifp->if_capenable &= ~IFCAP_TXCSUM;
				ifp->if_hwassist &= ~NGE_CSUM_FEATURES;
			} else {
				ifp->if_capenable |= IFCAP_TXCSUM;
				ifp->if_hwassist |= NGE_CSUM_FEATURES;
			}
			NGE_UNLOCK(sc);
			VLAN_CAPABILITIES(ifp);
		}
		break;
	case SIOCSIFFLAGS:
		NGE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				if ((ifp->if_flags ^ sc->nge_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					nge_rxfilter(sc);
			} else {
				if ((sc->nge_flags & NGE_FLAG_DETACH) == 0)
					nge_init_locked(sc);
			}
		} else {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
				nge_stop(sc);
		}
		sc->nge_if_flags = ifp->if_flags;
		NGE_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		NGE_LOCK(sc);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			nge_rxfilter(sc);
		NGE_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->nge_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		NGE_LOCK(sc);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if ((mask & IFCAP_POLLING) != 0 &&
		    (IFCAP_POLLING & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_POLLING;
			if ((IFCAP_POLLING & ifp->if_capenable) != 0) {
				error = ether_poll_register(nge_poll, ifp);
				if (error != 0) {
					NGE_UNLOCK(sc);
					break;
				}
				/* Disable interrupts. */
				CSR_WRITE_4(sc, NGE_IER, 0);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupts. */
				CSR_WRITE_4(sc, NGE_IER, 1);
			}
		}
#endif /* DEVICE_POLLING */
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
				ifp->if_hwassist |= NGE_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~NGE_CSUM_FEATURES;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (IFCAP_RXCSUM & ifp->if_capabilities) != 0)
			ifp->if_capenable ^= IFCAP_RXCSUM;

		if ((mask & IFCAP_WOL) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL) != 0) {
			if ((mask & IFCAP_WOL_UCAST) != 0)
				ifp->if_capenable ^= IFCAP_WOL_UCAST;
			if ((mask & IFCAP_WOL_MCAST) != 0)
				ifp->if_capenable ^= IFCAP_WOL_MCAST;
			if ((mask & IFCAP_WOL_MAGIC) != 0)
				ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		}

		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				if ((ifp->if_capenable &
				    IFCAP_VLAN_HWTAGGING) != 0)
					NGE_SETBIT(sc,
					    NGE_VLAN_IP_RXCTL,
					    NGE_VIPRXCTL_TAG_STRIP_ENB);
				else
					NGE_CLRBIT(sc,
					    NGE_VLAN_IP_RXCTL,
					    NGE_VIPRXCTL_TAG_STRIP_ENB);
			}
		}
		/*
		 * Both VLAN hardware tagging and checksum offload are
		 * required to do checksum offload on a VLAN interface.
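		 * If either piece is turned off, checksum offload for
		 * vlan(4) children must go with it, which is what the two
		 * tests below enforce.  For example (an illustrative
		 * command; the behaviour follows from this code):
		 *
		 *	ifconfig nge0 -txcsum	# also clears vlanhwcsum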
2394 */ 2395 if ((ifp->if_capenable & IFCAP_TXCSUM) == 0) 2396 ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM; 2397 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) 2398 ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM; 2399 NGE_UNLOCK(sc); 2400 VLAN_CAPABILITIES(ifp); 2401 break; 2402 default: 2403 error = ether_ioctl(ifp, command, data); 2404 break; 2405 } 2406 2407 return (error); 2408 } 2409 2410 static void 2411 nge_watchdog(struct nge_softc *sc) 2412 { 2413 struct ifnet *ifp; 2414 2415 NGE_LOCK_ASSERT(sc); 2416 2417 if (sc->nge_watchdog_timer == 0 || --sc->nge_watchdog_timer) 2418 return; 2419 2420 ifp = sc->nge_ifp; 2421 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2422 if_printf(ifp, "watchdog timeout\n"); 2423 2424 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2425 nge_init_locked(sc); 2426 2427 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2428 nge_start_locked(ifp); 2429 } 2430 2431 static int 2432 nge_stop_mac(struct nge_softc *sc) 2433 { 2434 uint32_t reg; 2435 int i; 2436 2437 NGE_LOCK_ASSERT(sc); 2438 2439 reg = CSR_READ_4(sc, NGE_CSR); 2440 if ((reg & (NGE_CSR_TX_ENABLE | NGE_CSR_RX_ENABLE)) != 0) { 2441 reg &= ~(NGE_CSR_TX_ENABLE | NGE_CSR_RX_ENABLE); 2442 reg |= NGE_CSR_TX_DISABLE | NGE_CSR_RX_DISABLE; 2443 CSR_WRITE_4(sc, NGE_CSR, reg); 2444 for (i = 0; i < NGE_TIMEOUT; i++) { 2445 DELAY(1); 2446 if ((CSR_READ_4(sc, NGE_CSR) & 2447 (NGE_CSR_RX_ENABLE | NGE_CSR_TX_ENABLE)) == 0) 2448 break; 2449 } 2450 if (i == NGE_TIMEOUT) 2451 return (ETIMEDOUT); 2452 } 2453 2454 return (0); 2455 } 2456 2457 /* 2458 * Stop the adapter and free any mbufs allocated to the 2459 * RX and TX lists. 2460 */ 2461 static void 2462 nge_stop(struct nge_softc *sc) 2463 { 2464 struct nge_txdesc *txd; 2465 struct nge_rxdesc *rxd; 2466 int i; 2467 struct ifnet *ifp; 2468 2469 NGE_LOCK_ASSERT(sc); 2470 ifp = sc->nge_ifp; 2471 2472 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 2473 sc->nge_flags &= ~NGE_FLAG_LINK; 2474 callout_stop(&sc->nge_stat_ch); 2475 sc->nge_watchdog_timer = 0; 2476 2477 CSR_WRITE_4(sc, NGE_IER, 0); 2478 CSR_WRITE_4(sc, NGE_IMR, 0); 2479 if (nge_stop_mac(sc) == ETIMEDOUT) 2480 device_printf(sc->nge_dev, 2481 "%s: unable to stop Tx/Rx MAC\n", __func__); 2482 CSR_WRITE_4(sc, NGE_TX_LISTPTR_HI, 0); 2483 CSR_WRITE_4(sc, NGE_TX_LISTPTR_LO, 0); 2484 CSR_WRITE_4(sc, NGE_RX_LISTPTR_HI, 0); 2485 CSR_WRITE_4(sc, NGE_RX_LISTPTR_LO, 0); 2486 nge_stats_update(sc); 2487 if (sc->nge_head != NULL) { 2488 m_freem(sc->nge_head); 2489 sc->nge_head = sc->nge_tail = NULL; 2490 } 2491 2492 /* 2493 * Free RX and TX mbufs still in the queues. 2494 */ 2495 for (i = 0; i < NGE_RX_RING_CNT; i++) { 2496 rxd = &sc->nge_cdata.nge_rxdesc[i]; 2497 if (rxd->rx_m != NULL) { 2498 bus_dmamap_sync(sc->nge_cdata.nge_rx_tag, 2499 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 2500 bus_dmamap_unload(sc->nge_cdata.nge_rx_tag, 2501 rxd->rx_dmamap); 2502 m_freem(rxd->rx_m); 2503 rxd->rx_m = NULL; 2504 } 2505 } 2506 for (i = 0; i < NGE_TX_RING_CNT; i++) { 2507 txd = &sc->nge_cdata.nge_txdesc[i]; 2508 if (txd->tx_m != NULL) { 2509 bus_dmamap_sync(sc->nge_cdata.nge_tx_tag, 2510 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 2511 bus_dmamap_unload(sc->nge_cdata.nge_tx_tag, 2512 txd->tx_dmamap); 2513 m_freem(txd->tx_m); 2514 txd->tx_m = NULL; 2515 } 2516 } 2517 } 2518 2519 /* 2520 * Before setting WOL bits, caller should have stopped Receiver. 
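 * (That is: the caller must halt the receiver first; nge_suspend()
 * below does exactly that.)  In outline, the suspend path is (a
 * summary of the code below, not additional logic):
 *
 *	NGE_LOCK(sc);
 *	nge_stop(sc);		// halt DMA, reclaim queued mbufs
 *	nge_wol(sc);		// arm wake events, request PME
 *	sc->nge_flags |= NGE_FLAG_SUSPENDED;
 *	NGE_UNLOCK(sc);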
2521 */ 2522 static void 2523 nge_wol(struct nge_softc *sc) 2524 { 2525 struct ifnet *ifp; 2526 uint32_t reg; 2527 uint16_t pmstat; 2528 int pmc; 2529 2530 NGE_LOCK_ASSERT(sc); 2531 2532 if (pci_find_cap(sc->nge_dev, PCIY_PMG, &pmc) != 0) 2533 return; 2534 2535 ifp = sc->nge_ifp; 2536 if ((ifp->if_capenable & IFCAP_WOL) == 0) { 2537 /* Disable WOL & disconnect CLKRUN to save power. */ 2538 CSR_WRITE_4(sc, NGE_WOLCSR, 0); 2539 CSR_WRITE_4(sc, NGE_CLKRUN, 0); 2540 } else { 2541 if (nge_stop_mac(sc) == ETIMEDOUT) 2542 device_printf(sc->nge_dev, 2543 "%s: unable to stop Tx/Rx MAC\n", __func__); 2544 /* 2545 * Make sure wake frames will be buffered in the Rx FIFO. 2546 * (i.e. Silent Rx mode.) 2547 */ 2548 CSR_WRITE_4(sc, NGE_RX_LISTPTR_HI, 0); 2549 CSR_BARRIER_4(sc, NGE_RX_LISTPTR_HI, BUS_SPACE_BARRIER_WRITE); 2550 CSR_WRITE_4(sc, NGE_RX_LISTPTR_LO, 0); 2551 CSR_BARRIER_4(sc, NGE_RX_LISTPTR_LO, BUS_SPACE_BARRIER_WRITE); 2552 /* Enable Rx again. */ 2553 NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE); 2554 CSR_BARRIER_4(sc, NGE_CSR, BUS_SPACE_BARRIER_WRITE); 2555 2556 /* Configure WOL events. */ 2557 reg = 0; 2558 if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0) 2559 reg |= NGE_WOLCSR_WAKE_ON_UNICAST; 2560 if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0) 2561 reg |= NGE_WOLCSR_WAKE_ON_MULTICAST; 2562 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) 2563 reg |= NGE_WOLCSR_WAKE_ON_MAGICPKT; 2564 CSR_WRITE_4(sc, NGE_WOLCSR, reg); 2565 2566 /* Activate CLKRUN. */ 2567 reg = CSR_READ_4(sc, NGE_CLKRUN); 2568 reg |= NGE_CLKRUN_PMEENB | NGE_CLNRUN_CLKRUN_ENB; 2569 CSR_WRITE_4(sc, NGE_CLKRUN, reg); 2570 } 2571 2572 /* Request PME. */ 2573 pmstat = pci_read_config(sc->nge_dev, pmc + PCIR_POWER_STATUS, 2); 2574 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 2575 if ((ifp->if_capenable & IFCAP_WOL) != 0) 2576 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 2577 pci_write_config(sc->nge_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); 2578 } 2579 2580 /* 2581 * Stop all chip I/O so that the kernel's probe routines don't 2582 * get confused by errant DMAs when rebooting. 2583 */ 2584 static int 2585 nge_shutdown(device_t dev) 2586 { 2587 2588 return (nge_suspend(dev)); 2589 } 2590 2591 static int 2592 nge_suspend(device_t dev) 2593 { 2594 struct nge_softc *sc; 2595 2596 sc = device_get_softc(dev); 2597 2598 NGE_LOCK(sc); 2599 nge_stop(sc); 2600 nge_wol(sc); 2601 sc->nge_flags |= NGE_FLAG_SUSPENDED; 2602 NGE_UNLOCK(sc); 2603 2604 return (0); 2605 } 2606 2607 static int 2608 nge_resume(device_t dev) 2609 { 2610 struct nge_softc *sc; 2611 struct ifnet *ifp; 2612 uint16_t pmstat; 2613 int pmc; 2614 2615 sc = device_get_softc(dev); 2616 2617 NGE_LOCK(sc); 2618 ifp = sc->nge_ifp; 2619 if (pci_find_cap(sc->nge_dev, PCIY_PMG, &pmc) == 0) { 2620 /* Disable PME and clear PME status. 
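		 * The PME status bit in the power management
		 * control/status register is write-one-to-clear (standard
		 * PCI power management behaviour), so writing back the
		 * value just read, with PCIM_PSTAT_PMEENABLE cleared,
		 * both acknowledges any pending wake event and disarms
		 * further PME assertion.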
*/ 2621 pmstat = pci_read_config(sc->nge_dev, 2622 pmc + PCIR_POWER_STATUS, 2); 2623 if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) { 2624 pmstat &= ~PCIM_PSTAT_PMEENABLE; 2625 pci_write_config(sc->nge_dev, 2626 pmc + PCIR_POWER_STATUS, pmstat, 2); 2627 } 2628 } 2629 if (ifp->if_flags & IFF_UP) { 2630 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2631 nge_init_locked(sc); 2632 } 2633 2634 sc->nge_flags &= ~NGE_FLAG_SUSPENDED; 2635 NGE_UNLOCK(sc); 2636 2637 return (0); 2638 } 2639 2640 #define NGE_SYSCTL_STAT_ADD32(c, h, n, p, d) \ 2641 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) 2642 2643 static void 2644 nge_sysctl_node(struct nge_softc *sc) 2645 { 2646 struct sysctl_ctx_list *ctx; 2647 struct sysctl_oid_list *child, *parent; 2648 struct sysctl_oid *tree; 2649 struct nge_stats *stats; 2650 int error; 2651 2652 ctx = device_get_sysctl_ctx(sc->nge_dev); 2653 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->nge_dev)); 2654 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_holdoff", 2655 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->nge_int_holdoff, 2656 0, sysctl_hw_nge_int_holdoff, "I", "NGE interrupt moderation"); 2657 /* Pull in device tunables. */ 2658 sc->nge_int_holdoff = NGE_INT_HOLDOFF_DEFAULT; 2659 error = resource_int_value(device_get_name(sc->nge_dev), 2660 device_get_unit(sc->nge_dev), "int_holdoff", &sc->nge_int_holdoff); 2661 if (error == 0) { 2662 if (sc->nge_int_holdoff < NGE_INT_HOLDOFF_MIN || 2663 sc->nge_int_holdoff > NGE_INT_HOLDOFF_MAX ) { 2664 device_printf(sc->nge_dev, 2665 "int_holdoff value out of range; " 2666 "using default: %d(%d us)\n", 2667 NGE_INT_HOLDOFF_DEFAULT, 2668 NGE_INT_HOLDOFF_DEFAULT * 100); 2669 sc->nge_int_holdoff = NGE_INT_HOLDOFF_DEFAULT; 2670 } 2671 } 2672 2673 stats = &sc->nge_stats; 2674 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", 2675 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "NGE statistics"); 2676 parent = SYSCTL_CHILDREN(tree); 2677 2678 /* Rx statistics. */ 2679 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", 2680 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Rx MAC statistics"); 2681 child = SYSCTL_CHILDREN(tree); 2682 NGE_SYSCTL_STAT_ADD32(ctx, child, "pkts_errs", 2683 &stats->rx_pkts_errs, 2684 "Packet errors including both wire errors and FIFO overruns"); 2685 NGE_SYSCTL_STAT_ADD32(ctx, child, "crc_errs", 2686 &stats->rx_crc_errs, "CRC errors"); 2687 NGE_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows", 2688 &stats->rx_fifo_oflows, "FIFO overflows"); 2689 NGE_SYSCTL_STAT_ADD32(ctx, child, "align_errs", 2690 &stats->rx_align_errs, "Frame alignment errors"); 2691 NGE_SYSCTL_STAT_ADD32(ctx, child, "sym_errs", 2692 &stats->rx_sym_errs, "One or more symbol errors"); 2693 NGE_SYSCTL_STAT_ADD32(ctx, child, "pkts_jumbos", 2694 &stats->rx_pkts_jumbos, 2695 "Packets received with length greater than 1518 bytes"); 2696 NGE_SYSCTL_STAT_ADD32(ctx, child, "len_errs", 2697 &stats->rx_len_errs, "In Range Length errors"); 2698 NGE_SYSCTL_STAT_ADD32(ctx, child, "unctl_frames", 2699 &stats->rx_unctl_frames, "Control frames with unsupported opcode"); 2700 NGE_SYSCTL_STAT_ADD32(ctx, child, "pause", 2701 &stats->rx_pause, "Pause frames"); 2702 2703 /* Tx statistics. 
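	 * Both subtrees hang off the device's sysctl node, so the MAC
	 * counters can be read from userland once the driver is
	 * attached, e.g. (unit 0 assumed; leaf names as registered here):
	 *
	 *	sysctl dev.nge.0.stats.rx.crc_errs
	 *	sysctl dev.nge.0.stats.tx.pause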
*/ 2704 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", 2705 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Tx MAC statistics"); 2706 child = SYSCTL_CHILDREN(tree); 2707 NGE_SYSCTL_STAT_ADD32(ctx, child, "pause", 2708 &stats->tx_pause, "Pause frames"); 2709 NGE_SYSCTL_STAT_ADD32(ctx, child, "seq_errs", 2710 &stats->tx_seq_errs, 2711 "Loss of collision heartbeat during transmission"); 2712 } 2713 2714 #undef NGE_SYSCTL_STAT_ADD32 2715 2716 static int 2717 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) 2718 { 2719 int error, value; 2720 2721 if (arg1 == NULL) 2722 return (EINVAL); 2723 value = *(int *)arg1; 2724 error = sysctl_handle_int(oidp, &value, 0, req); 2725 if (error != 0 || req->newptr == NULL) 2726 return (error); 2727 if (value < low || value > high) 2728 return (EINVAL); 2729 *(int *)arg1 = value; 2730 2731 return (0); 2732 } 2733 2734 static int 2735 sysctl_hw_nge_int_holdoff(SYSCTL_HANDLER_ARGS) 2736 { 2737 2738 return (sysctl_int_range(oidp, arg1, arg2, req, NGE_INT_HOLDOFF_MIN, 2739 NGE_INT_HOLDOFF_MAX)); 2740 } 2741
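/*
 * Note: the interrupt holdoff value wired up above is read at attach
 * time as a device hint (via resource_int_value()) and exported as a
 * read/write sysctl.  For example, for unit 0 (illustrative values;
 * units are 100us, so 10 means a 1000us holdoff):
 *
 *	hint.nge.0.int_holdoff="10"	(in loader.conf / device.hints)
 *	sysctl dev.nge.0.int_holdoff=10	(at run time)
 */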