1 /*- 2 * SPDX-License-Identifier: BSD-4-Clause 3 * 4 * Copyright (c) 2001 Wind River Systems 5 * Copyright (c) 1997, 1998, 1999, 2000, 2001 6 * Bill Paul <wpaul@bsdi.com>. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by Bill Paul. 19 * 4. Neither the name of the author nor the names of any co-contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 27 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 30 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 31 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 32 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 33 * THE POSSIBILITY OF SUCH DAMAGE. 34 */ 35 36 #include <sys/cdefs.h> 37 __FBSDID("$FreeBSD$"); 38 39 /* 40 * National Semiconductor DP83820/DP83821 gigabit ethernet driver 41 * for FreeBSD. Datasheets are available from: 42 * 43 * http://www.national.com/ds/DP/DP83820.pdf 44 * http://www.national.com/ds/DP/DP83821.pdf 45 * 46 * These chips are used on several low cost gigabit ethernet NICs 47 * sold by D-Link, Addtron, SMC and Asante. Both parts are 48 * virtually the same, except the 83820 is a 64-bit/32-bit part, 49 * while the 83821 is 32-bit only. 50 * 51 * Many cards also use National gigE transceivers, such as the 52 * DP83891, DP83861 and DP83862 gigPHYTER parts. The DP83861 datasheet 53 * contains a full register description that applies to all of these 54 * components: 55 * 56 * http://www.national.com/ds/DP/DP83861.pdf 57 * 58 * Written by Bill Paul <wpaul@bsdi.com> 59 * BSDi Open Source Solutions 60 */ 61 62 /* 63 * The NatSemi DP83820 and 83821 controllers are enhanced versions 64 * of the NatSemi MacPHYTER 10/100 devices. They support 10, 100 65 * and 1000Mbps speeds with 1000baseX (ten bit interface), MII and GMII 66 * ports. Other features include 8K TX FIFO and 32K RX FIFO, TCP/IP 67 * hardware checksum offload (IPv4 only), VLAN tagging and filtering, 68 * priority TX and RX queues, a 2048 bit multicast hash filter, 4 RX pattern 69 * matching buffers, one perfect address filter buffer and interrupt 70 * moderation. The 83820 supports both 64-bit and 32-bit addressing 71 * and data transfers: the 64-bit support can be toggled on or off 72 * via software. This affects the size of certain fields in the DMA 73 * descriptors. 74 * 75 * There are two bugs/misfeatures in the 83820/83821 that I have 76 * discovered so far: 77 * 78 * - Receive buffers must be aligned on 64-bit boundaries, which means 79 * you must resort to copying data in order to fix up the payload 80 * alignment. 81 * 82 * - In order to transmit jumbo frames larger than 8170 bytes, you have 83 * to turn off transmit checksum offloading, because the chip can't 84 * compute the checksum on an outgoing frame unless it fits entirely 85 * within the TX FIFO, which is only 8192 bytes in size. If you have 86 * TX checksum offload enabled and you transmit attempt to transmit a 87 * frame larger than 8170 bytes, the transmitter will wedge. 88 * 89 * To work around the latter problem, TX checksum offload is disabled 90 * if the user selects an MTU larger than 8152 (8170 - 18). 91 */ 92 93 #ifdef HAVE_KERNEL_OPTION_HEADERS 94 #include "opt_device_polling.h" 95 #endif 96 97 #include <sys/param.h> 98 #include <sys/systm.h> 99 #include <sys/bus.h> 100 #include <sys/endian.h> 101 #include <sys/kernel.h> 102 #include <sys/lock.h> 103 #include <sys/malloc.h> 104 #include <sys/mbuf.h> 105 #include <sys/module.h> 106 #include <sys/mutex.h> 107 #include <sys/rman.h> 108 #include <sys/socket.h> 109 #include <sys/sockio.h> 110 #include <sys/sysctl.h> 111 112 #include <net/bpf.h> 113 #include <net/if.h> 114 #include <net/if_var.h> 115 #include <net/if_arp.h> 116 #include <net/ethernet.h> 117 #include <net/if_dl.h> 118 #include <net/if_media.h> 119 #include <net/if_types.h> 120 #include <net/if_vlan_var.h> 121 122 #include <dev/mii/mii.h> 123 #include <dev/mii/mii_bitbang.h> 124 #include <dev/mii/miivar.h> 125 126 #include <dev/pci/pcireg.h> 127 #include <dev/pci/pcivar.h> 128 129 #include <machine/bus.h> 130 131 #include <dev/nge/if_ngereg.h> 132 133 /* "device miibus" required. See GENERIC if you get errors here. */ 134 #include "miibus_if.h" 135 136 MODULE_DEPEND(nge, pci, 1, 1, 1); 137 MODULE_DEPEND(nge, ether, 1, 1, 1); 138 MODULE_DEPEND(nge, miibus, 1, 1, 1); 139 140 #define NGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 141 142 /* 143 * Various supported device vendors/types and their names. 144 */ 145 static const struct nge_type nge_devs[] = { 146 { NGE_VENDORID, NGE_DEVICEID, 147 "National Semiconductor Gigabit Ethernet" }, 148 { 0, 0, NULL } 149 }; 150 151 static int nge_probe(device_t); 152 static int nge_attach(device_t); 153 static int nge_detach(device_t); 154 static int nge_shutdown(device_t); 155 static int nge_suspend(device_t); 156 static int nge_resume(device_t); 157 158 static __inline void nge_discard_rxbuf(struct nge_softc *, int); 159 static int nge_newbuf(struct nge_softc *, int); 160 static int nge_encap(struct nge_softc *, struct mbuf **); 161 #ifndef __NO_STRICT_ALIGNMENT 162 static __inline void nge_fixup_rx(struct mbuf *); 163 #endif 164 static int nge_rxeof(struct nge_softc *); 165 static void nge_txeof(struct nge_softc *); 166 static void nge_intr(void *); 167 static void nge_tick(void *); 168 static void nge_stats_update(struct nge_softc *); 169 static void nge_start(struct ifnet *); 170 static void nge_start_locked(struct ifnet *); 171 static int nge_ioctl(struct ifnet *, u_long, caddr_t); 172 static void nge_init(void *); 173 static void nge_init_locked(struct nge_softc *); 174 static int nge_stop_mac(struct nge_softc *); 175 static void nge_stop(struct nge_softc *); 176 static void nge_wol(struct nge_softc *); 177 static void nge_watchdog(struct nge_softc *); 178 static int nge_mediachange(struct ifnet *); 179 static void nge_mediastatus(struct ifnet *, struct ifmediareq *); 180 181 static void nge_delay(struct nge_softc *); 182 static void nge_eeprom_idle(struct nge_softc *); 183 static void nge_eeprom_putbyte(struct nge_softc *, int); 184 static void nge_eeprom_getword(struct nge_softc *, int, uint16_t *); 185 static void nge_read_eeprom(struct nge_softc *, caddr_t, int, int); 186 187 static int nge_miibus_readreg(device_t, int, int); 188 static int nge_miibus_writereg(device_t, int, int, int); 189 static void nge_miibus_statchg(device_t); 190 191 static void nge_rxfilter(struct nge_softc *); 192 static void nge_reset(struct nge_softc *); 193 static void nge_dmamap_cb(void *, bus_dma_segment_t *, int, int); 194 static int nge_dma_alloc(struct nge_softc *); 195 static void nge_dma_free(struct nge_softc *); 196 static int nge_list_rx_init(struct nge_softc *); 197 static int nge_list_tx_init(struct nge_softc *); 198 static void nge_sysctl_node(struct nge_softc *); 199 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int); 200 static int sysctl_hw_nge_int_holdoff(SYSCTL_HANDLER_ARGS); 201 202 /* 203 * MII bit-bang glue 204 */ 205 static uint32_t nge_mii_bitbang_read(device_t); 206 static void nge_mii_bitbang_write(device_t, uint32_t); 207 208 static const struct mii_bitbang_ops nge_mii_bitbang_ops = { 209 nge_mii_bitbang_read, 210 nge_mii_bitbang_write, 211 { 212 NGE_MEAR_MII_DATA, /* MII_BIT_MDO */ 213 NGE_MEAR_MII_DATA, /* MII_BIT_MDI */ 214 NGE_MEAR_MII_CLK, /* MII_BIT_MDC */ 215 NGE_MEAR_MII_DIR, /* MII_BIT_DIR_HOST_PHY */ 216 0, /* MII_BIT_DIR_PHY_HOST */ 217 } 218 }; 219 220 static device_method_t nge_methods[] = { 221 /* Device interface */ 222 DEVMETHOD(device_probe, nge_probe), 223 DEVMETHOD(device_attach, nge_attach), 224 DEVMETHOD(device_detach, nge_detach), 225 DEVMETHOD(device_shutdown, nge_shutdown), 226 DEVMETHOD(device_suspend, nge_suspend), 227 DEVMETHOD(device_resume, nge_resume), 228 229 /* MII interface */ 230 DEVMETHOD(miibus_readreg, nge_miibus_readreg), 231 DEVMETHOD(miibus_writereg, nge_miibus_writereg), 232 DEVMETHOD(miibus_statchg, nge_miibus_statchg), 233 234 DEVMETHOD_END 235 }; 236 237 static driver_t nge_driver = { 238 "nge", 239 nge_methods, 240 sizeof(struct nge_softc) 241 }; 242 243 DRIVER_MODULE(nge, pci, nge_driver, 0, 0); 244 DRIVER_MODULE(miibus, nge, miibus_driver, 0, 0); 245 246 #define NGE_SETBIT(sc, reg, x) \ 247 CSR_WRITE_4(sc, reg, \ 248 CSR_READ_4(sc, reg) | (x)) 249 250 #define NGE_CLRBIT(sc, reg, x) \ 251 CSR_WRITE_4(sc, reg, \ 252 CSR_READ_4(sc, reg) & ~(x)) 253 254 #define SIO_SET(x) \ 255 CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) | (x)) 256 257 #define SIO_CLR(x) \ 258 CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) & ~(x)) 259 260 static void 261 nge_delay(struct nge_softc *sc) 262 { 263 int idx; 264 265 for (idx = (300 / 33) + 1; idx > 0; idx--) 266 CSR_READ_4(sc, NGE_CSR); 267 } 268 269 static void 270 nge_eeprom_idle(struct nge_softc *sc) 271 { 272 int i; 273 274 SIO_SET(NGE_MEAR_EE_CSEL); 275 nge_delay(sc); 276 SIO_SET(NGE_MEAR_EE_CLK); 277 nge_delay(sc); 278 279 for (i = 0; i < 25; i++) { 280 SIO_CLR(NGE_MEAR_EE_CLK); 281 nge_delay(sc); 282 SIO_SET(NGE_MEAR_EE_CLK); 283 nge_delay(sc); 284 } 285 286 SIO_CLR(NGE_MEAR_EE_CLK); 287 nge_delay(sc); 288 SIO_CLR(NGE_MEAR_EE_CSEL); 289 nge_delay(sc); 290 CSR_WRITE_4(sc, NGE_MEAR, 0x00000000); 291 } 292 293 /* 294 * Send a read command and address to the EEPROM, check for ACK. 295 */ 296 static void 297 nge_eeprom_putbyte(struct nge_softc *sc, int addr) 298 { 299 int d, i; 300 301 d = addr | NGE_EECMD_READ; 302 303 /* 304 * Feed in each bit and stobe the clock. 305 */ 306 for (i = 0x400; i; i >>= 1) { 307 if (d & i) { 308 SIO_SET(NGE_MEAR_EE_DIN); 309 } else { 310 SIO_CLR(NGE_MEAR_EE_DIN); 311 } 312 nge_delay(sc); 313 SIO_SET(NGE_MEAR_EE_CLK); 314 nge_delay(sc); 315 SIO_CLR(NGE_MEAR_EE_CLK); 316 nge_delay(sc); 317 } 318 } 319 320 /* 321 * Read a word of data stored in the EEPROM at address 'addr.' 322 */ 323 static void 324 nge_eeprom_getword(struct nge_softc *sc, int addr, uint16_t *dest) 325 { 326 int i; 327 uint16_t word = 0; 328 329 /* Force EEPROM to idle state. */ 330 nge_eeprom_idle(sc); 331 332 /* Enter EEPROM access mode. */ 333 nge_delay(sc); 334 SIO_CLR(NGE_MEAR_EE_CLK); 335 nge_delay(sc); 336 SIO_SET(NGE_MEAR_EE_CSEL); 337 nge_delay(sc); 338 339 /* 340 * Send address of word we want to read. 341 */ 342 nge_eeprom_putbyte(sc, addr); 343 344 /* 345 * Start reading bits from EEPROM. 346 */ 347 for (i = 0x8000; i; i >>= 1) { 348 SIO_SET(NGE_MEAR_EE_CLK); 349 nge_delay(sc); 350 if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_EE_DOUT) 351 word |= i; 352 nge_delay(sc); 353 SIO_CLR(NGE_MEAR_EE_CLK); 354 nge_delay(sc); 355 } 356 357 /* Turn off EEPROM access mode. */ 358 nge_eeprom_idle(sc); 359 360 *dest = word; 361 } 362 363 /* 364 * Read a sequence of words from the EEPROM. 365 */ 366 static void 367 nge_read_eeprom(struct nge_softc *sc, caddr_t dest, int off, int cnt) 368 { 369 int i; 370 uint16_t word = 0, *ptr; 371 372 for (i = 0; i < cnt; i++) { 373 nge_eeprom_getword(sc, off + i, &word); 374 ptr = (uint16_t *)(dest + (i * 2)); 375 *ptr = word; 376 } 377 } 378 379 /* 380 * Read the MII serial port for the MII bit-bang module. 381 */ 382 static uint32_t 383 nge_mii_bitbang_read(device_t dev) 384 { 385 struct nge_softc *sc; 386 uint32_t val; 387 388 sc = device_get_softc(dev); 389 390 val = CSR_READ_4(sc, NGE_MEAR); 391 CSR_BARRIER_4(sc, NGE_MEAR, 392 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 393 394 return (val); 395 } 396 397 /* 398 * Write the MII serial port for the MII bit-bang module. 399 */ 400 static void 401 nge_mii_bitbang_write(device_t dev, uint32_t val) 402 { 403 struct nge_softc *sc; 404 405 sc = device_get_softc(dev); 406 407 CSR_WRITE_4(sc, NGE_MEAR, val); 408 CSR_BARRIER_4(sc, NGE_MEAR, 409 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); 410 } 411 412 static int 413 nge_miibus_readreg(device_t dev, int phy, int reg) 414 { 415 struct nge_softc *sc; 416 int rv; 417 418 sc = device_get_softc(dev); 419 if ((sc->nge_flags & NGE_FLAG_TBI) != 0) { 420 /* Pretend PHY is at address 0. */ 421 if (phy != 0) 422 return (0); 423 switch (reg) { 424 case MII_BMCR: 425 reg = NGE_TBI_BMCR; 426 break; 427 case MII_BMSR: 428 /* 83820/83821 has different bit layout for BMSR. */ 429 rv = BMSR_ANEG | BMSR_EXTCAP | BMSR_EXTSTAT; 430 reg = CSR_READ_4(sc, NGE_TBI_BMSR); 431 if ((reg & NGE_TBIBMSR_ANEG_DONE) != 0) 432 rv |= BMSR_ACOMP; 433 if ((reg & NGE_TBIBMSR_LINKSTAT) != 0) 434 rv |= BMSR_LINK; 435 return (rv); 436 case MII_ANAR: 437 reg = NGE_TBI_ANAR; 438 break; 439 case MII_ANLPAR: 440 reg = NGE_TBI_ANLPAR; 441 break; 442 case MII_ANER: 443 reg = NGE_TBI_ANER; 444 break; 445 case MII_EXTSR: 446 reg = NGE_TBI_ESR; 447 break; 448 case MII_PHYIDR1: 449 case MII_PHYIDR2: 450 return (0); 451 default: 452 device_printf(sc->nge_dev, 453 "bad phy register read : %d\n", reg); 454 return (0); 455 } 456 return (CSR_READ_4(sc, reg)); 457 } 458 459 return (mii_bitbang_readreg(dev, &nge_mii_bitbang_ops, phy, reg)); 460 } 461 462 static int 463 nge_miibus_writereg(device_t dev, int phy, int reg, int data) 464 { 465 struct nge_softc *sc; 466 467 sc = device_get_softc(dev); 468 if ((sc->nge_flags & NGE_FLAG_TBI) != 0) { 469 /* Pretend PHY is at address 0. */ 470 if (phy != 0) 471 return (0); 472 switch (reg) { 473 case MII_BMCR: 474 reg = NGE_TBI_BMCR; 475 break; 476 case MII_BMSR: 477 return (0); 478 case MII_ANAR: 479 reg = NGE_TBI_ANAR; 480 break; 481 case MII_ANLPAR: 482 reg = NGE_TBI_ANLPAR; 483 break; 484 case MII_ANER: 485 reg = NGE_TBI_ANER; 486 break; 487 case MII_EXTSR: 488 reg = NGE_TBI_ESR; 489 break; 490 case MII_PHYIDR1: 491 case MII_PHYIDR2: 492 return (0); 493 default: 494 device_printf(sc->nge_dev, 495 "bad phy register write : %d\n", reg); 496 return (0); 497 } 498 CSR_WRITE_4(sc, reg, data); 499 return (0); 500 } 501 502 mii_bitbang_writereg(dev, &nge_mii_bitbang_ops, phy, reg, data); 503 504 return (0); 505 } 506 507 /* 508 * media status/link state change handler. 509 */ 510 static void 511 nge_miibus_statchg(device_t dev) 512 { 513 struct nge_softc *sc; 514 struct mii_data *mii; 515 struct ifnet *ifp; 516 struct nge_txdesc *txd; 517 uint32_t done, reg, status; 518 int i; 519 520 sc = device_get_softc(dev); 521 NGE_LOCK_ASSERT(sc); 522 523 mii = device_get_softc(sc->nge_miibus); 524 ifp = sc->nge_ifp; 525 if (mii == NULL || ifp == NULL || 526 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 527 return; 528 529 sc->nge_flags &= ~NGE_FLAG_LINK; 530 if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) == 531 (IFM_AVALID | IFM_ACTIVE)) { 532 switch (IFM_SUBTYPE(mii->mii_media_active)) { 533 case IFM_10_T: 534 case IFM_100_TX: 535 case IFM_1000_T: 536 case IFM_1000_SX: 537 case IFM_1000_LX: 538 case IFM_1000_CX: 539 sc->nge_flags |= NGE_FLAG_LINK; 540 break; 541 default: 542 break; 543 } 544 } 545 546 /* Stop Tx/Rx MACs. */ 547 if (nge_stop_mac(sc) == ETIMEDOUT) 548 device_printf(sc->nge_dev, 549 "%s: unable to stop Tx/Rx MAC\n", __func__); 550 nge_txeof(sc); 551 nge_rxeof(sc); 552 if (sc->nge_head != NULL) { 553 m_freem(sc->nge_head); 554 sc->nge_head = sc->nge_tail = NULL; 555 } 556 557 /* Release queued frames. */ 558 for (i = 0; i < NGE_TX_RING_CNT; i++) { 559 txd = &sc->nge_cdata.nge_txdesc[i]; 560 if (txd->tx_m != NULL) { 561 bus_dmamap_sync(sc->nge_cdata.nge_tx_tag, 562 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 563 bus_dmamap_unload(sc->nge_cdata.nge_tx_tag, 564 txd->tx_dmamap); 565 m_freem(txd->tx_m); 566 txd->tx_m = NULL; 567 } 568 } 569 570 /* Program MAC with resolved speed/duplex. */ 571 if ((sc->nge_flags & NGE_FLAG_LINK) != 0) { 572 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 573 NGE_SETBIT(sc, NGE_TX_CFG, 574 (NGE_TXCFG_IGN_HBEAT | NGE_TXCFG_IGN_CARR)); 575 NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX); 576 #ifdef notyet 577 /* Enable flow-control. */ 578 if ((IFM_OPTIONS(mii->mii_media_active) & 579 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) != 0) 580 NGE_SETBIT(sc, NGE_PAUSECSR, 581 NGE_PAUSECSR_PAUSE_ENB); 582 #endif 583 } else { 584 NGE_CLRBIT(sc, NGE_TX_CFG, 585 (NGE_TXCFG_IGN_HBEAT | NGE_TXCFG_IGN_CARR)); 586 NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX); 587 NGE_CLRBIT(sc, NGE_PAUSECSR, NGE_PAUSECSR_PAUSE_ENB); 588 } 589 /* If we have a 1000Mbps link, set the mode_1000 bit. */ 590 reg = CSR_READ_4(sc, NGE_CFG); 591 switch (IFM_SUBTYPE(mii->mii_media_active)) { 592 case IFM_1000_SX: 593 case IFM_1000_LX: 594 case IFM_1000_CX: 595 case IFM_1000_T: 596 reg |= NGE_CFG_MODE_1000; 597 break; 598 default: 599 reg &= ~NGE_CFG_MODE_1000; 600 break; 601 } 602 CSR_WRITE_4(sc, NGE_CFG, reg); 603 604 /* Reset Tx/Rx MAC. */ 605 reg = CSR_READ_4(sc, NGE_CSR); 606 reg |= NGE_CSR_TX_RESET | NGE_CSR_RX_RESET; 607 CSR_WRITE_4(sc, NGE_CSR, reg); 608 /* Check the completion of reset. */ 609 done = 0; 610 for (i = 0; i < NGE_TIMEOUT; i++) { 611 DELAY(1); 612 status = CSR_READ_4(sc, NGE_ISR); 613 if ((status & NGE_ISR_RX_RESET_DONE) != 0) 614 done |= NGE_ISR_RX_RESET_DONE; 615 if ((status & NGE_ISR_TX_RESET_DONE) != 0) 616 done |= NGE_ISR_TX_RESET_DONE; 617 if (done == 618 (NGE_ISR_TX_RESET_DONE | NGE_ISR_RX_RESET_DONE)) 619 break; 620 } 621 if (i == NGE_TIMEOUT) 622 device_printf(sc->nge_dev, 623 "%s: unable to reset Tx/Rx MAC\n", __func__); 624 /* Reuse Rx buffer and reset consumer pointer. */ 625 sc->nge_cdata.nge_rx_cons = 0; 626 /* 627 * It seems that resetting Rx/Tx MAC results in 628 * resetting Tx/Rx descriptor pointer registers such 629 * that reloading Tx/Rx lists address are needed. 630 */ 631 CSR_WRITE_4(sc, NGE_RX_LISTPTR_HI, 632 NGE_ADDR_HI(sc->nge_rdata.nge_rx_ring_paddr)); 633 CSR_WRITE_4(sc, NGE_RX_LISTPTR_LO, 634 NGE_ADDR_LO(sc->nge_rdata.nge_rx_ring_paddr)); 635 CSR_WRITE_4(sc, NGE_TX_LISTPTR_HI, 636 NGE_ADDR_HI(sc->nge_rdata.nge_tx_ring_paddr)); 637 CSR_WRITE_4(sc, NGE_TX_LISTPTR_LO, 638 NGE_ADDR_LO(sc->nge_rdata.nge_tx_ring_paddr)); 639 /* Reinitialize Tx buffers. */ 640 nge_list_tx_init(sc); 641 642 /* Restart Rx MAC. */ 643 reg = CSR_READ_4(sc, NGE_CSR); 644 reg |= NGE_CSR_RX_ENABLE; 645 CSR_WRITE_4(sc, NGE_CSR, reg); 646 for (i = 0; i < NGE_TIMEOUT; i++) { 647 if ((CSR_READ_4(sc, NGE_CSR) & NGE_CSR_RX_ENABLE) != 0) 648 break; 649 DELAY(1); 650 } 651 if (i == NGE_TIMEOUT) 652 device_printf(sc->nge_dev, 653 "%s: unable to restart Rx MAC\n", __func__); 654 } 655 656 /* Data LED off for TBI mode */ 657 if ((sc->nge_flags & NGE_FLAG_TBI) != 0) 658 CSR_WRITE_4(sc, NGE_GPIO, 659 CSR_READ_4(sc, NGE_GPIO) & ~NGE_GPIO_GP3_OUT); 660 } 661 662 static u_int 663 nge_write_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) 664 { 665 struct nge_softc *sc = arg; 666 uint32_t h; 667 int bit, index; 668 669 /* 670 * From the 11 bits returned by the crc routine, the top 7 671 * bits represent the 16-bit word in the mcast hash table 672 * that needs to be updated, and the lower 4 bits represent 673 * which bit within that byte needs to be set. 674 */ 675 h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 21; 676 index = (h >> 4) & 0x7F; 677 bit = h & 0xF; 678 CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_MCAST_LO + (index * 2)); 679 NGE_SETBIT(sc, NGE_RXFILT_DATA, (1 << bit)); 680 681 return (1); 682 } 683 684 static void 685 nge_rxfilter(struct nge_softc *sc) 686 { 687 struct ifnet *ifp; 688 uint32_t i, rxfilt; 689 690 NGE_LOCK_ASSERT(sc); 691 ifp = sc->nge_ifp; 692 693 /* Make sure to stop Rx filtering. */ 694 rxfilt = CSR_READ_4(sc, NGE_RXFILT_CTL); 695 rxfilt &= ~NGE_RXFILTCTL_ENABLE; 696 CSR_WRITE_4(sc, NGE_RXFILT_CTL, rxfilt); 697 CSR_BARRIER_4(sc, NGE_RXFILT_CTL, BUS_SPACE_BARRIER_WRITE); 698 699 rxfilt &= ~(NGE_RXFILTCTL_ALLMULTI | NGE_RXFILTCTL_ALLPHYS); 700 rxfilt &= ~NGE_RXFILTCTL_BROAD; 701 /* 702 * We don't want to use the hash table for matching unicast 703 * addresses. 704 */ 705 rxfilt &= ~(NGE_RXFILTCTL_MCHASH | NGE_RXFILTCTL_UCHASH); 706 707 /* 708 * For the NatSemi chip, we have to explicitly enable the 709 * reception of ARP frames, as well as turn on the 'perfect 710 * match' filter where we store the station address, otherwise 711 * we won't receive unicasts meant for this host. 712 */ 713 rxfilt |= NGE_RXFILTCTL_ARP | NGE_RXFILTCTL_PERFECT; 714 715 /* 716 * Set the capture broadcast bit to capture broadcast frames. 717 */ 718 if ((ifp->if_flags & IFF_BROADCAST) != 0) 719 rxfilt |= NGE_RXFILTCTL_BROAD; 720 721 if ((ifp->if_flags & IFF_PROMISC) != 0 || 722 (ifp->if_flags & IFF_ALLMULTI) != 0) { 723 rxfilt |= NGE_RXFILTCTL_ALLMULTI; 724 if ((ifp->if_flags & IFF_PROMISC) != 0) 725 rxfilt |= NGE_RXFILTCTL_ALLPHYS; 726 goto done; 727 } 728 729 /* 730 * We have to explicitly enable the multicast hash table 731 * on the NatSemi chip if we want to use it, which we do. 732 */ 733 rxfilt |= NGE_RXFILTCTL_MCHASH; 734 735 /* first, zot all the existing hash bits */ 736 for (i = 0; i < NGE_MCAST_FILTER_LEN; i += 2) { 737 CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_MCAST_LO + i); 738 CSR_WRITE_4(sc, NGE_RXFILT_DATA, 0); 739 } 740 741 if_foreach_llmaddr(ifp, nge_write_maddr, sc); 742 done: 743 CSR_WRITE_4(sc, NGE_RXFILT_CTL, rxfilt); 744 /* Turn the receive filter on. */ 745 rxfilt |= NGE_RXFILTCTL_ENABLE; 746 CSR_WRITE_4(sc, NGE_RXFILT_CTL, rxfilt); 747 CSR_BARRIER_4(sc, NGE_RXFILT_CTL, BUS_SPACE_BARRIER_WRITE); 748 } 749 750 static void 751 nge_reset(struct nge_softc *sc) 752 { 753 uint32_t v; 754 int i; 755 756 NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RESET); 757 758 for (i = 0; i < NGE_TIMEOUT; i++) { 759 if (!(CSR_READ_4(sc, NGE_CSR) & NGE_CSR_RESET)) 760 break; 761 DELAY(1); 762 } 763 764 if (i == NGE_TIMEOUT) 765 device_printf(sc->nge_dev, "reset never completed\n"); 766 767 /* Wait a little while for the chip to get its brains in order. */ 768 DELAY(1000); 769 770 /* 771 * If this is a NetSemi chip, make sure to clear 772 * PME mode. 773 */ 774 CSR_WRITE_4(sc, NGE_CLKRUN, NGE_CLKRUN_PMESTS); 775 CSR_WRITE_4(sc, NGE_CLKRUN, 0); 776 777 /* Clear WOL events which may interfere normal Rx filter opertaion. */ 778 CSR_WRITE_4(sc, NGE_WOLCSR, 0); 779 780 /* 781 * Only DP83820 supports 64bits addressing/data transfers and 782 * 64bit addressing requires different descriptor structures. 783 * To make it simple, disable 64bit addressing/data transfers. 784 */ 785 v = CSR_READ_4(sc, NGE_CFG); 786 v &= ~(NGE_CFG_64BIT_ADDR_ENB | NGE_CFG_64BIT_DATA_ENB); 787 CSR_WRITE_4(sc, NGE_CFG, v); 788 } 789 790 /* 791 * Probe for a NatSemi chip. Check the PCI vendor and device 792 * IDs against our list and return a device name if we find a match. 793 */ 794 static int 795 nge_probe(device_t dev) 796 { 797 const struct nge_type *t; 798 799 t = nge_devs; 800 801 while (t->nge_name != NULL) { 802 if ((pci_get_vendor(dev) == t->nge_vid) && 803 (pci_get_device(dev) == t->nge_did)) { 804 device_set_desc(dev, t->nge_name); 805 return (BUS_PROBE_DEFAULT); 806 } 807 t++; 808 } 809 810 return (ENXIO); 811 } 812 813 /* 814 * Attach the interface. Allocate softc structures, do ifmedia 815 * setup and ethernet/BPF attach. 816 */ 817 static int 818 nge_attach(device_t dev) 819 { 820 uint8_t eaddr[ETHER_ADDR_LEN]; 821 uint16_t ea[ETHER_ADDR_LEN/2], ea_temp, reg; 822 struct nge_softc *sc; 823 struct ifnet *ifp; 824 int error, i, rid; 825 826 error = 0; 827 sc = device_get_softc(dev); 828 sc->nge_dev = dev; 829 830 NGE_LOCK_INIT(sc, device_get_nameunit(dev)); 831 callout_init_mtx(&sc->nge_stat_ch, &sc->nge_mtx, 0); 832 833 /* 834 * Map control/status registers. 835 */ 836 pci_enable_busmaster(dev); 837 838 #ifdef NGE_USEIOSPACE 839 sc->nge_res_type = SYS_RES_IOPORT; 840 sc->nge_res_id = PCIR_BAR(0); 841 #else 842 sc->nge_res_type = SYS_RES_MEMORY; 843 sc->nge_res_id = PCIR_BAR(1); 844 #endif 845 sc->nge_res = bus_alloc_resource_any(dev, sc->nge_res_type, 846 &sc->nge_res_id, RF_ACTIVE); 847 848 if (sc->nge_res == NULL) { 849 if (sc->nge_res_type == SYS_RES_MEMORY) { 850 sc->nge_res_type = SYS_RES_IOPORT; 851 sc->nge_res_id = PCIR_BAR(0); 852 } else { 853 sc->nge_res_type = SYS_RES_MEMORY; 854 sc->nge_res_id = PCIR_BAR(1); 855 } 856 sc->nge_res = bus_alloc_resource_any(dev, sc->nge_res_type, 857 &sc->nge_res_id, RF_ACTIVE); 858 if (sc->nge_res == NULL) { 859 device_printf(dev, "couldn't allocate %s resources\n", 860 sc->nge_res_type == SYS_RES_MEMORY ? "memory" : 861 "I/O"); 862 NGE_LOCK_DESTROY(sc); 863 return (ENXIO); 864 } 865 } 866 867 /* Allocate interrupt */ 868 rid = 0; 869 sc->nge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 870 RF_SHAREABLE | RF_ACTIVE); 871 872 if (sc->nge_irq == NULL) { 873 device_printf(dev, "couldn't map interrupt\n"); 874 error = ENXIO; 875 goto fail; 876 } 877 878 /* Enable MWI. */ 879 reg = pci_read_config(dev, PCIR_COMMAND, 2); 880 reg |= PCIM_CMD_MWRICEN; 881 pci_write_config(dev, PCIR_COMMAND, reg, 2); 882 883 /* Reset the adapter. */ 884 nge_reset(sc); 885 886 /* 887 * Get station address from the EEPROM. 888 */ 889 nge_read_eeprom(sc, (caddr_t)ea, NGE_EE_NODEADDR, 3); 890 for (i = 0; i < ETHER_ADDR_LEN / 2; i++) 891 ea[i] = le16toh(ea[i]); 892 ea_temp = ea[0]; 893 ea[0] = ea[2]; 894 ea[2] = ea_temp; 895 bcopy(ea, eaddr, sizeof(eaddr)); 896 897 if (nge_dma_alloc(sc) != 0) { 898 error = ENXIO; 899 goto fail; 900 } 901 902 nge_sysctl_node(sc); 903 904 ifp = sc->nge_ifp = if_alloc(IFT_ETHER); 905 if (ifp == NULL) { 906 device_printf(dev, "can not allocate ifnet structure\n"); 907 error = ENOSPC; 908 goto fail; 909 } 910 ifp->if_softc = sc; 911 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 912 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 913 ifp->if_ioctl = nge_ioctl; 914 ifp->if_start = nge_start; 915 ifp->if_init = nge_init; 916 ifp->if_snd.ifq_drv_maxlen = NGE_TX_RING_CNT - 1; 917 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); 918 IFQ_SET_READY(&ifp->if_snd); 919 ifp->if_hwassist = NGE_CSUM_FEATURES; 920 ifp->if_capabilities = IFCAP_HWCSUM; 921 /* 922 * It seems that some hardwares doesn't provide 3.3V auxiliary 923 * supply(3VAUX) to drive PME such that checking PCI power 924 * management capability is necessary. 925 */ 926 if (pci_find_cap(sc->nge_dev, PCIY_PMG, &i) == 0) 927 ifp->if_capabilities |= IFCAP_WOL; 928 ifp->if_capenable = ifp->if_capabilities; 929 930 if ((CSR_READ_4(sc, NGE_CFG) & NGE_CFG_TBI_EN) != 0) { 931 sc->nge_flags |= NGE_FLAG_TBI; 932 device_printf(dev, "Using TBI\n"); 933 /* Configure GPIO. */ 934 CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO) 935 | NGE_GPIO_GP4_OUT 936 | NGE_GPIO_GP1_OUTENB | NGE_GPIO_GP2_OUTENB 937 | NGE_GPIO_GP3_OUTENB 938 | NGE_GPIO_GP3_IN | NGE_GPIO_GP4_IN); 939 } 940 941 /* 942 * Do MII setup. 943 */ 944 error = mii_attach(dev, &sc->nge_miibus, ifp, nge_mediachange, 945 nge_mediastatus, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0); 946 if (error != 0) { 947 device_printf(dev, "attaching PHYs failed\n"); 948 goto fail; 949 } 950 951 /* 952 * Call MI attach routine. 953 */ 954 ether_ifattach(ifp, eaddr); 955 956 /* VLAN capability setup. */ 957 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING; 958 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM; 959 ifp->if_capenable = ifp->if_capabilities; 960 #ifdef DEVICE_POLLING 961 ifp->if_capabilities |= IFCAP_POLLING; 962 #endif 963 /* 964 * Tell the upper layer(s) we support long frames. 965 * Must appear after the call to ether_ifattach() because 966 * ether_ifattach() sets ifi_hdrlen to the default value. 967 */ 968 ifp->if_hdrlen = sizeof(struct ether_vlan_header); 969 970 /* 971 * Hookup IRQ last. 972 */ 973 error = bus_setup_intr(dev, sc->nge_irq, INTR_TYPE_NET | INTR_MPSAFE, 974 NULL, nge_intr, sc, &sc->nge_intrhand); 975 if (error) { 976 device_printf(dev, "couldn't set up irq\n"); 977 goto fail; 978 } 979 980 fail: 981 if (error != 0) 982 nge_detach(dev); 983 return (error); 984 } 985 986 static int 987 nge_detach(device_t dev) 988 { 989 struct nge_softc *sc; 990 struct ifnet *ifp; 991 992 sc = device_get_softc(dev); 993 ifp = sc->nge_ifp; 994 995 #ifdef DEVICE_POLLING 996 if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING) 997 ether_poll_deregister(ifp); 998 #endif 999 1000 if (device_is_attached(dev)) { 1001 NGE_LOCK(sc); 1002 sc->nge_flags |= NGE_FLAG_DETACH; 1003 nge_stop(sc); 1004 NGE_UNLOCK(sc); 1005 callout_drain(&sc->nge_stat_ch); 1006 if (ifp != NULL) 1007 ether_ifdetach(ifp); 1008 } 1009 1010 if (sc->nge_miibus != NULL) { 1011 device_delete_child(dev, sc->nge_miibus); 1012 sc->nge_miibus = NULL; 1013 } 1014 bus_generic_detach(dev); 1015 if (sc->nge_intrhand != NULL) 1016 bus_teardown_intr(dev, sc->nge_irq, sc->nge_intrhand); 1017 if (sc->nge_irq != NULL) 1018 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq); 1019 if (sc->nge_res != NULL) 1020 bus_release_resource(dev, sc->nge_res_type, sc->nge_res_id, 1021 sc->nge_res); 1022 1023 nge_dma_free(sc); 1024 if (ifp != NULL) 1025 if_free(ifp); 1026 1027 NGE_LOCK_DESTROY(sc); 1028 1029 return (0); 1030 } 1031 1032 struct nge_dmamap_arg { 1033 bus_addr_t nge_busaddr; 1034 }; 1035 1036 static void 1037 nge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1038 { 1039 struct nge_dmamap_arg *ctx; 1040 1041 if (error != 0) 1042 return; 1043 ctx = arg; 1044 ctx->nge_busaddr = segs[0].ds_addr; 1045 } 1046 1047 static int 1048 nge_dma_alloc(struct nge_softc *sc) 1049 { 1050 struct nge_dmamap_arg ctx; 1051 struct nge_txdesc *txd; 1052 struct nge_rxdesc *rxd; 1053 int error, i; 1054 1055 /* Create parent DMA tag. */ 1056 error = bus_dma_tag_create( 1057 bus_get_dma_tag(sc->nge_dev), /* parent */ 1058 1, 0, /* alignment, boundary */ 1059 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1060 BUS_SPACE_MAXADDR, /* highaddr */ 1061 NULL, NULL, /* filter, filterarg */ 1062 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 1063 0, /* nsegments */ 1064 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 1065 0, /* flags */ 1066 NULL, NULL, /* lockfunc, lockarg */ 1067 &sc->nge_cdata.nge_parent_tag); 1068 if (error != 0) { 1069 device_printf(sc->nge_dev, "failed to create parent DMA tag\n"); 1070 goto fail; 1071 } 1072 /* Create tag for Tx ring. */ 1073 error = bus_dma_tag_create(sc->nge_cdata.nge_parent_tag,/* parent */ 1074 NGE_RING_ALIGN, 0, /* alignment, boundary */ 1075 BUS_SPACE_MAXADDR, /* lowaddr */ 1076 BUS_SPACE_MAXADDR, /* highaddr */ 1077 NULL, NULL, /* filter, filterarg */ 1078 NGE_TX_RING_SIZE, /* maxsize */ 1079 1, /* nsegments */ 1080 NGE_TX_RING_SIZE, /* maxsegsize */ 1081 0, /* flags */ 1082 NULL, NULL, /* lockfunc, lockarg */ 1083 &sc->nge_cdata.nge_tx_ring_tag); 1084 if (error != 0) { 1085 device_printf(sc->nge_dev, "failed to create Tx ring DMA tag\n"); 1086 goto fail; 1087 } 1088 1089 /* Create tag for Rx ring. */ 1090 error = bus_dma_tag_create(sc->nge_cdata.nge_parent_tag,/* parent */ 1091 NGE_RING_ALIGN, 0, /* alignment, boundary */ 1092 BUS_SPACE_MAXADDR, /* lowaddr */ 1093 BUS_SPACE_MAXADDR, /* highaddr */ 1094 NULL, NULL, /* filter, filterarg */ 1095 NGE_RX_RING_SIZE, /* maxsize */ 1096 1, /* nsegments */ 1097 NGE_RX_RING_SIZE, /* maxsegsize */ 1098 0, /* flags */ 1099 NULL, NULL, /* lockfunc, lockarg */ 1100 &sc->nge_cdata.nge_rx_ring_tag); 1101 if (error != 0) { 1102 device_printf(sc->nge_dev, 1103 "failed to create Rx ring DMA tag\n"); 1104 goto fail; 1105 } 1106 1107 /* Create tag for Tx buffers. */ 1108 error = bus_dma_tag_create(sc->nge_cdata.nge_parent_tag,/* parent */ 1109 1, 0, /* alignment, boundary */ 1110 BUS_SPACE_MAXADDR, /* lowaddr */ 1111 BUS_SPACE_MAXADDR, /* highaddr */ 1112 NULL, NULL, /* filter, filterarg */ 1113 MCLBYTES * NGE_MAXTXSEGS, /* maxsize */ 1114 NGE_MAXTXSEGS, /* nsegments */ 1115 MCLBYTES, /* maxsegsize */ 1116 0, /* flags */ 1117 NULL, NULL, /* lockfunc, lockarg */ 1118 &sc->nge_cdata.nge_tx_tag); 1119 if (error != 0) { 1120 device_printf(sc->nge_dev, "failed to create Tx DMA tag\n"); 1121 goto fail; 1122 } 1123 1124 /* Create tag for Rx buffers. */ 1125 error = bus_dma_tag_create(sc->nge_cdata.nge_parent_tag,/* parent */ 1126 NGE_RX_ALIGN, 0, /* alignment, boundary */ 1127 BUS_SPACE_MAXADDR, /* lowaddr */ 1128 BUS_SPACE_MAXADDR, /* highaddr */ 1129 NULL, NULL, /* filter, filterarg */ 1130 MCLBYTES, /* maxsize */ 1131 1, /* nsegments */ 1132 MCLBYTES, /* maxsegsize */ 1133 0, /* flags */ 1134 NULL, NULL, /* lockfunc, lockarg */ 1135 &sc->nge_cdata.nge_rx_tag); 1136 if (error != 0) { 1137 device_printf(sc->nge_dev, "failed to create Rx DMA tag\n"); 1138 goto fail; 1139 } 1140 1141 /* Allocate DMA'able memory and load the DMA map for Tx ring. */ 1142 error = bus_dmamem_alloc(sc->nge_cdata.nge_tx_ring_tag, 1143 (void **)&sc->nge_rdata.nge_tx_ring, BUS_DMA_WAITOK | 1144 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->nge_cdata.nge_tx_ring_map); 1145 if (error != 0) { 1146 device_printf(sc->nge_dev, 1147 "failed to allocate DMA'able memory for Tx ring\n"); 1148 goto fail; 1149 } 1150 1151 ctx.nge_busaddr = 0; 1152 error = bus_dmamap_load(sc->nge_cdata.nge_tx_ring_tag, 1153 sc->nge_cdata.nge_tx_ring_map, sc->nge_rdata.nge_tx_ring, 1154 NGE_TX_RING_SIZE, nge_dmamap_cb, &ctx, 0); 1155 if (error != 0 || ctx.nge_busaddr == 0) { 1156 device_printf(sc->nge_dev, 1157 "failed to load DMA'able memory for Tx ring\n"); 1158 goto fail; 1159 } 1160 sc->nge_rdata.nge_tx_ring_paddr = ctx.nge_busaddr; 1161 1162 /* Allocate DMA'able memory and load the DMA map for Rx ring. */ 1163 error = bus_dmamem_alloc(sc->nge_cdata.nge_rx_ring_tag, 1164 (void **)&sc->nge_rdata.nge_rx_ring, BUS_DMA_WAITOK | 1165 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->nge_cdata.nge_rx_ring_map); 1166 if (error != 0) { 1167 device_printf(sc->nge_dev, 1168 "failed to allocate DMA'able memory for Rx ring\n"); 1169 goto fail; 1170 } 1171 1172 ctx.nge_busaddr = 0; 1173 error = bus_dmamap_load(sc->nge_cdata.nge_rx_ring_tag, 1174 sc->nge_cdata.nge_rx_ring_map, sc->nge_rdata.nge_rx_ring, 1175 NGE_RX_RING_SIZE, nge_dmamap_cb, &ctx, 0); 1176 if (error != 0 || ctx.nge_busaddr == 0) { 1177 device_printf(sc->nge_dev, 1178 "failed to load DMA'able memory for Rx ring\n"); 1179 goto fail; 1180 } 1181 sc->nge_rdata.nge_rx_ring_paddr = ctx.nge_busaddr; 1182 1183 /* Create DMA maps for Tx buffers. */ 1184 for (i = 0; i < NGE_TX_RING_CNT; i++) { 1185 txd = &sc->nge_cdata.nge_txdesc[i]; 1186 txd->tx_m = NULL; 1187 txd->tx_dmamap = NULL; 1188 error = bus_dmamap_create(sc->nge_cdata.nge_tx_tag, 0, 1189 &txd->tx_dmamap); 1190 if (error != 0) { 1191 device_printf(sc->nge_dev, 1192 "failed to create Tx dmamap\n"); 1193 goto fail; 1194 } 1195 } 1196 /* Create DMA maps for Rx buffers. */ 1197 if ((error = bus_dmamap_create(sc->nge_cdata.nge_rx_tag, 0, 1198 &sc->nge_cdata.nge_rx_sparemap)) != 0) { 1199 device_printf(sc->nge_dev, 1200 "failed to create spare Rx dmamap\n"); 1201 goto fail; 1202 } 1203 for (i = 0; i < NGE_RX_RING_CNT; i++) { 1204 rxd = &sc->nge_cdata.nge_rxdesc[i]; 1205 rxd->rx_m = NULL; 1206 rxd->rx_dmamap = NULL; 1207 error = bus_dmamap_create(sc->nge_cdata.nge_rx_tag, 0, 1208 &rxd->rx_dmamap); 1209 if (error != 0) { 1210 device_printf(sc->nge_dev, 1211 "failed to create Rx dmamap\n"); 1212 goto fail; 1213 } 1214 } 1215 1216 fail: 1217 return (error); 1218 } 1219 1220 static void 1221 nge_dma_free(struct nge_softc *sc) 1222 { 1223 struct nge_txdesc *txd; 1224 struct nge_rxdesc *rxd; 1225 int i; 1226 1227 /* Tx ring. */ 1228 if (sc->nge_cdata.nge_tx_ring_tag) { 1229 if (sc->nge_rdata.nge_tx_ring_paddr) 1230 bus_dmamap_unload(sc->nge_cdata.nge_tx_ring_tag, 1231 sc->nge_cdata.nge_tx_ring_map); 1232 if (sc->nge_rdata.nge_tx_ring) 1233 bus_dmamem_free(sc->nge_cdata.nge_tx_ring_tag, 1234 sc->nge_rdata.nge_tx_ring, 1235 sc->nge_cdata.nge_tx_ring_map); 1236 sc->nge_rdata.nge_tx_ring = NULL; 1237 sc->nge_rdata.nge_tx_ring_paddr = 0; 1238 bus_dma_tag_destroy(sc->nge_cdata.nge_tx_ring_tag); 1239 sc->nge_cdata.nge_tx_ring_tag = NULL; 1240 } 1241 /* Rx ring. */ 1242 if (sc->nge_cdata.nge_rx_ring_tag) { 1243 if (sc->nge_rdata.nge_rx_ring_paddr) 1244 bus_dmamap_unload(sc->nge_cdata.nge_rx_ring_tag, 1245 sc->nge_cdata.nge_rx_ring_map); 1246 if (sc->nge_rdata.nge_rx_ring) 1247 bus_dmamem_free(sc->nge_cdata.nge_rx_ring_tag, 1248 sc->nge_rdata.nge_rx_ring, 1249 sc->nge_cdata.nge_rx_ring_map); 1250 sc->nge_rdata.nge_rx_ring = NULL; 1251 sc->nge_rdata.nge_rx_ring_paddr = 0; 1252 bus_dma_tag_destroy(sc->nge_cdata.nge_rx_ring_tag); 1253 sc->nge_cdata.nge_rx_ring_tag = NULL; 1254 } 1255 /* Tx buffers. */ 1256 if (sc->nge_cdata.nge_tx_tag) { 1257 for (i = 0; i < NGE_TX_RING_CNT; i++) { 1258 txd = &sc->nge_cdata.nge_txdesc[i]; 1259 if (txd->tx_dmamap) { 1260 bus_dmamap_destroy(sc->nge_cdata.nge_tx_tag, 1261 txd->tx_dmamap); 1262 txd->tx_dmamap = NULL; 1263 } 1264 } 1265 bus_dma_tag_destroy(sc->nge_cdata.nge_tx_tag); 1266 sc->nge_cdata.nge_tx_tag = NULL; 1267 } 1268 /* Rx buffers. */ 1269 if (sc->nge_cdata.nge_rx_tag) { 1270 for (i = 0; i < NGE_RX_RING_CNT; i++) { 1271 rxd = &sc->nge_cdata.nge_rxdesc[i]; 1272 if (rxd->rx_dmamap) { 1273 bus_dmamap_destroy(sc->nge_cdata.nge_rx_tag, 1274 rxd->rx_dmamap); 1275 rxd->rx_dmamap = NULL; 1276 } 1277 } 1278 if (sc->nge_cdata.nge_rx_sparemap) { 1279 bus_dmamap_destroy(sc->nge_cdata.nge_rx_tag, 1280 sc->nge_cdata.nge_rx_sparemap); 1281 sc->nge_cdata.nge_rx_sparemap = 0; 1282 } 1283 bus_dma_tag_destroy(sc->nge_cdata.nge_rx_tag); 1284 sc->nge_cdata.nge_rx_tag = NULL; 1285 } 1286 1287 if (sc->nge_cdata.nge_parent_tag) { 1288 bus_dma_tag_destroy(sc->nge_cdata.nge_parent_tag); 1289 sc->nge_cdata.nge_parent_tag = NULL; 1290 } 1291 } 1292 1293 /* 1294 * Initialize the transmit descriptors. 1295 */ 1296 static int 1297 nge_list_tx_init(struct nge_softc *sc) 1298 { 1299 struct nge_ring_data *rd; 1300 struct nge_txdesc *txd; 1301 bus_addr_t addr; 1302 int i; 1303 1304 sc->nge_cdata.nge_tx_prod = 0; 1305 sc->nge_cdata.nge_tx_cons = 0; 1306 sc->nge_cdata.nge_tx_cnt = 0; 1307 1308 rd = &sc->nge_rdata; 1309 bzero(rd->nge_tx_ring, sizeof(struct nge_desc) * NGE_TX_RING_CNT); 1310 for (i = 0; i < NGE_TX_RING_CNT; i++) { 1311 if (i == NGE_TX_RING_CNT - 1) 1312 addr = NGE_TX_RING_ADDR(sc, 0); 1313 else 1314 addr = NGE_TX_RING_ADDR(sc, i + 1); 1315 rd->nge_tx_ring[i].nge_next = htole32(NGE_ADDR_LO(addr)); 1316 txd = &sc->nge_cdata.nge_txdesc[i]; 1317 txd->tx_m = NULL; 1318 } 1319 1320 bus_dmamap_sync(sc->nge_cdata.nge_tx_ring_tag, 1321 sc->nge_cdata.nge_tx_ring_map, 1322 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1323 1324 return (0); 1325 } 1326 1327 /* 1328 * Initialize the RX descriptors and allocate mbufs for them. Note that 1329 * we arrange the descriptors in a closed ring, so that the last descriptor 1330 * points back to the first. 1331 */ 1332 static int 1333 nge_list_rx_init(struct nge_softc *sc) 1334 { 1335 struct nge_ring_data *rd; 1336 bus_addr_t addr; 1337 int i; 1338 1339 sc->nge_cdata.nge_rx_cons = 0; 1340 sc->nge_head = sc->nge_tail = NULL; 1341 1342 rd = &sc->nge_rdata; 1343 bzero(rd->nge_rx_ring, sizeof(struct nge_desc) * NGE_RX_RING_CNT); 1344 for (i = 0; i < NGE_RX_RING_CNT; i++) { 1345 if (nge_newbuf(sc, i) != 0) 1346 return (ENOBUFS); 1347 if (i == NGE_RX_RING_CNT - 1) 1348 addr = NGE_RX_RING_ADDR(sc, 0); 1349 else 1350 addr = NGE_RX_RING_ADDR(sc, i + 1); 1351 rd->nge_rx_ring[i].nge_next = htole32(NGE_ADDR_LO(addr)); 1352 } 1353 1354 bus_dmamap_sync(sc->nge_cdata.nge_rx_ring_tag, 1355 sc->nge_cdata.nge_rx_ring_map, 1356 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1357 1358 return (0); 1359 } 1360 1361 static __inline void 1362 nge_discard_rxbuf(struct nge_softc *sc, int idx) 1363 { 1364 struct nge_desc *desc; 1365 1366 desc = &sc->nge_rdata.nge_rx_ring[idx]; 1367 desc->nge_cmdsts = htole32(MCLBYTES - sizeof(uint64_t)); 1368 desc->nge_extsts = 0; 1369 } 1370 1371 /* 1372 * Initialize an RX descriptor and attach an MBUF cluster. 1373 */ 1374 static int 1375 nge_newbuf(struct nge_softc *sc, int idx) 1376 { 1377 struct nge_desc *desc; 1378 struct nge_rxdesc *rxd; 1379 struct mbuf *m; 1380 bus_dma_segment_t segs[1]; 1381 bus_dmamap_t map; 1382 int nsegs; 1383 1384 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 1385 if (m == NULL) 1386 return (ENOBUFS); 1387 m->m_len = m->m_pkthdr.len = MCLBYTES; 1388 m_adj(m, sizeof(uint64_t)); 1389 1390 if (bus_dmamap_load_mbuf_sg(sc->nge_cdata.nge_rx_tag, 1391 sc->nge_cdata.nge_rx_sparemap, m, segs, &nsegs, 0) != 0) { 1392 m_freem(m); 1393 return (ENOBUFS); 1394 } 1395 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1396 1397 rxd = &sc->nge_cdata.nge_rxdesc[idx]; 1398 if (rxd->rx_m != NULL) { 1399 bus_dmamap_sync(sc->nge_cdata.nge_rx_tag, rxd->rx_dmamap, 1400 BUS_DMASYNC_POSTREAD); 1401 bus_dmamap_unload(sc->nge_cdata.nge_rx_tag, rxd->rx_dmamap); 1402 } 1403 map = rxd->rx_dmamap; 1404 rxd->rx_dmamap = sc->nge_cdata.nge_rx_sparemap; 1405 sc->nge_cdata.nge_rx_sparemap = map; 1406 bus_dmamap_sync(sc->nge_cdata.nge_rx_tag, rxd->rx_dmamap, 1407 BUS_DMASYNC_PREREAD); 1408 rxd->rx_m = m; 1409 desc = &sc->nge_rdata.nge_rx_ring[idx]; 1410 desc->nge_ptr = htole32(NGE_ADDR_LO(segs[0].ds_addr)); 1411 desc->nge_cmdsts = htole32(segs[0].ds_len); 1412 desc->nge_extsts = 0; 1413 1414 return (0); 1415 } 1416 1417 #ifndef __NO_STRICT_ALIGNMENT 1418 static __inline void 1419 nge_fixup_rx(struct mbuf *m) 1420 { 1421 int i; 1422 uint16_t *src, *dst; 1423 1424 src = mtod(m, uint16_t *); 1425 dst = src - 1; 1426 1427 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) 1428 *dst++ = *src++; 1429 1430 m->m_data -= ETHER_ALIGN; 1431 } 1432 #endif 1433 1434 /* 1435 * A frame has been uploaded: pass the resulting mbuf chain up to 1436 * the higher level protocols. 1437 */ 1438 static int 1439 nge_rxeof(struct nge_softc *sc) 1440 { 1441 struct mbuf *m; 1442 struct ifnet *ifp; 1443 struct nge_desc *cur_rx; 1444 struct nge_rxdesc *rxd; 1445 int cons, prog, rx_npkts, total_len; 1446 uint32_t cmdsts, extsts; 1447 1448 NGE_LOCK_ASSERT(sc); 1449 1450 ifp = sc->nge_ifp; 1451 cons = sc->nge_cdata.nge_rx_cons; 1452 rx_npkts = 0; 1453 1454 bus_dmamap_sync(sc->nge_cdata.nge_rx_ring_tag, 1455 sc->nge_cdata.nge_rx_ring_map, 1456 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1457 1458 for (prog = 0; prog < NGE_RX_RING_CNT && 1459 (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0; 1460 NGE_INC(cons, NGE_RX_RING_CNT)) { 1461 #ifdef DEVICE_POLLING 1462 if (ifp->if_capenable & IFCAP_POLLING) { 1463 if (sc->rxcycles <= 0) 1464 break; 1465 sc->rxcycles--; 1466 } 1467 #endif 1468 cur_rx = &sc->nge_rdata.nge_rx_ring[cons]; 1469 cmdsts = le32toh(cur_rx->nge_cmdsts); 1470 extsts = le32toh(cur_rx->nge_extsts); 1471 if ((cmdsts & NGE_CMDSTS_OWN) == 0) 1472 break; 1473 prog++; 1474 rxd = &sc->nge_cdata.nge_rxdesc[cons]; 1475 m = rxd->rx_m; 1476 total_len = cmdsts & NGE_CMDSTS_BUFLEN; 1477 1478 if ((cmdsts & NGE_CMDSTS_MORE) != 0) { 1479 if (nge_newbuf(sc, cons) != 0) { 1480 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 1481 if (sc->nge_head != NULL) { 1482 m_freem(sc->nge_head); 1483 sc->nge_head = sc->nge_tail = NULL; 1484 } 1485 nge_discard_rxbuf(sc, cons); 1486 continue; 1487 } 1488 m->m_len = total_len; 1489 if (sc->nge_head == NULL) { 1490 m->m_pkthdr.len = total_len; 1491 sc->nge_head = sc->nge_tail = m; 1492 } else { 1493 m->m_flags &= ~M_PKTHDR; 1494 sc->nge_head->m_pkthdr.len += total_len; 1495 sc->nge_tail->m_next = m; 1496 sc->nge_tail = m; 1497 } 1498 continue; 1499 } 1500 1501 /* 1502 * If an error occurs, update stats, clear the 1503 * status word and leave the mbuf cluster in place: 1504 * it should simply get re-used next time this descriptor 1505 * comes up in the ring. 1506 */ 1507 if ((cmdsts & NGE_CMDSTS_PKT_OK) == 0) { 1508 if ((cmdsts & NGE_RXSTAT_RUNT) && 1509 total_len >= (ETHER_MIN_LEN - ETHER_CRC_LEN - 4)) { 1510 /* 1511 * Work-around hardware bug, accept runt frames 1512 * if its length is larger than or equal to 56. 1513 */ 1514 } else { 1515 /* 1516 * Input error counters are updated by hardware. 1517 */ 1518 if (sc->nge_head != NULL) { 1519 m_freem(sc->nge_head); 1520 sc->nge_head = sc->nge_tail = NULL; 1521 } 1522 nge_discard_rxbuf(sc, cons); 1523 continue; 1524 } 1525 } 1526 1527 /* Try conjure up a replacement mbuf. */ 1528 1529 if (nge_newbuf(sc, cons) != 0) { 1530 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 1531 if (sc->nge_head != NULL) { 1532 m_freem(sc->nge_head); 1533 sc->nge_head = sc->nge_tail = NULL; 1534 } 1535 nge_discard_rxbuf(sc, cons); 1536 continue; 1537 } 1538 1539 /* Chain received mbufs. */ 1540 if (sc->nge_head != NULL) { 1541 m->m_len = total_len; 1542 m->m_flags &= ~M_PKTHDR; 1543 sc->nge_tail->m_next = m; 1544 m = sc->nge_head; 1545 m->m_pkthdr.len += total_len; 1546 sc->nge_head = sc->nge_tail = NULL; 1547 } else 1548 m->m_pkthdr.len = m->m_len = total_len; 1549 1550 /* 1551 * Ok. NatSemi really screwed up here. This is the 1552 * only gigE chip I know of with alignment constraints 1553 * on receive buffers. RX buffers must be 64-bit aligned. 1554 */ 1555 /* 1556 * By popular demand, ignore the alignment problems 1557 * on the non-strict alignment platform. The performance hit 1558 * incurred due to unaligned accesses is much smaller 1559 * than the hit produced by forcing buffer copies all 1560 * the time, especially with jumbo frames. We still 1561 * need to fix up the alignment everywhere else though. 1562 */ 1563 #ifndef __NO_STRICT_ALIGNMENT 1564 nge_fixup_rx(m); 1565 #endif 1566 m->m_pkthdr.rcvif = ifp; 1567 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 1568 1569 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) { 1570 /* Do IP checksum checking. */ 1571 if ((extsts & NGE_RXEXTSTS_IPPKT) != 0) 1572 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 1573 if ((extsts & NGE_RXEXTSTS_IPCSUMERR) == 0) 1574 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 1575 if ((extsts & NGE_RXEXTSTS_TCPPKT && 1576 !(extsts & NGE_RXEXTSTS_TCPCSUMERR)) || 1577 (extsts & NGE_RXEXTSTS_UDPPKT && 1578 !(extsts & NGE_RXEXTSTS_UDPCSUMERR))) { 1579 m->m_pkthdr.csum_flags |= 1580 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1581 m->m_pkthdr.csum_data = 0xffff; 1582 } 1583 } 1584 1585 /* 1586 * If we received a packet with a vlan tag, pass it 1587 * to vlan_input() instead of ether_input(). 1588 */ 1589 if ((extsts & NGE_RXEXTSTS_VLANPKT) != 0 && 1590 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { 1591 m->m_pkthdr.ether_vtag = 1592 bswap16(extsts & NGE_RXEXTSTS_VTCI); 1593 m->m_flags |= M_VLANTAG; 1594 } 1595 NGE_UNLOCK(sc); 1596 (*ifp->if_input)(ifp, m); 1597 NGE_LOCK(sc); 1598 rx_npkts++; 1599 } 1600 1601 if (prog > 0) { 1602 sc->nge_cdata.nge_rx_cons = cons; 1603 bus_dmamap_sync(sc->nge_cdata.nge_rx_ring_tag, 1604 sc->nge_cdata.nge_rx_ring_map, 1605 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1606 } 1607 return (rx_npkts); 1608 } 1609 1610 /* 1611 * A frame was downloaded to the chip. It's safe for us to clean up 1612 * the list buffers. 1613 */ 1614 static void 1615 nge_txeof(struct nge_softc *sc) 1616 { 1617 struct nge_desc *cur_tx; 1618 struct nge_txdesc *txd; 1619 struct ifnet *ifp; 1620 uint32_t cmdsts; 1621 int cons, prod; 1622 1623 NGE_LOCK_ASSERT(sc); 1624 ifp = sc->nge_ifp; 1625 1626 cons = sc->nge_cdata.nge_tx_cons; 1627 prod = sc->nge_cdata.nge_tx_prod; 1628 if (cons == prod) 1629 return; 1630 1631 bus_dmamap_sync(sc->nge_cdata.nge_tx_ring_tag, 1632 sc->nge_cdata.nge_tx_ring_map, 1633 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1634 1635 /* 1636 * Go through our tx list and free mbufs for those 1637 * frames that have been transmitted. 1638 */ 1639 for (; cons != prod; NGE_INC(cons, NGE_TX_RING_CNT)) { 1640 cur_tx = &sc->nge_rdata.nge_tx_ring[cons]; 1641 cmdsts = le32toh(cur_tx->nge_cmdsts); 1642 if ((cmdsts & NGE_CMDSTS_OWN) != 0) 1643 break; 1644 sc->nge_cdata.nge_tx_cnt--; 1645 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1646 if ((cmdsts & NGE_CMDSTS_MORE) != 0) 1647 continue; 1648 1649 txd = &sc->nge_cdata.nge_txdesc[cons]; 1650 bus_dmamap_sync(sc->nge_cdata.nge_tx_tag, txd->tx_dmamap, 1651 BUS_DMASYNC_POSTWRITE); 1652 bus_dmamap_unload(sc->nge_cdata.nge_tx_tag, txd->tx_dmamap); 1653 if ((cmdsts & NGE_CMDSTS_PKT_OK) == 0) { 1654 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1655 if ((cmdsts & NGE_TXSTAT_EXCESSCOLLS) != 0) 1656 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); 1657 if ((cmdsts & NGE_TXSTAT_OUTOFWINCOLL) != 0) 1658 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); 1659 } else 1660 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); 1661 1662 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (cmdsts & NGE_TXSTAT_COLLCNT) >> 16); 1663 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!\n", 1664 __func__)); 1665 m_freem(txd->tx_m); 1666 txd->tx_m = NULL; 1667 } 1668 1669 sc->nge_cdata.nge_tx_cons = cons; 1670 if (sc->nge_cdata.nge_tx_cnt == 0) 1671 sc->nge_watchdog_timer = 0; 1672 } 1673 1674 static void 1675 nge_tick(void *xsc) 1676 { 1677 struct nge_softc *sc; 1678 struct mii_data *mii; 1679 1680 sc = xsc; 1681 NGE_LOCK_ASSERT(sc); 1682 mii = device_get_softc(sc->nge_miibus); 1683 mii_tick(mii); 1684 /* 1685 * For PHYs that does not reset established link, it is 1686 * necessary to check whether driver still have a valid 1687 * link(e.g link state change callback is not called). 1688 * Otherwise, driver think it lost link because driver 1689 * initialization routine clears link state flag. 1690 */ 1691 if ((sc->nge_flags & NGE_FLAG_LINK) == 0) 1692 nge_miibus_statchg(sc->nge_dev); 1693 nge_stats_update(sc); 1694 nge_watchdog(sc); 1695 callout_reset(&sc->nge_stat_ch, hz, nge_tick, sc); 1696 } 1697 1698 static void 1699 nge_stats_update(struct nge_softc *sc) 1700 { 1701 struct ifnet *ifp; 1702 struct nge_stats now, *stats, *nstats; 1703 1704 NGE_LOCK_ASSERT(sc); 1705 1706 ifp = sc->nge_ifp; 1707 stats = &now; 1708 stats->rx_pkts_errs = 1709 CSR_READ_4(sc, NGE_MIB_RXERRPKT) & 0xFFFF; 1710 stats->rx_crc_errs = 1711 CSR_READ_4(sc, NGE_MIB_RXERRFCS) & 0xFFFF; 1712 stats->rx_fifo_oflows = 1713 CSR_READ_4(sc, NGE_MIB_RXERRMISSEDPKT) & 0xFFFF; 1714 stats->rx_align_errs = 1715 CSR_READ_4(sc, NGE_MIB_RXERRALIGN) & 0xFFFF; 1716 stats->rx_sym_errs = 1717 CSR_READ_4(sc, NGE_MIB_RXERRSYM) & 0xFFFF; 1718 stats->rx_pkts_jumbos = 1719 CSR_READ_4(sc, NGE_MIB_RXERRGIANT) & 0xFFFF; 1720 stats->rx_len_errs = 1721 CSR_READ_4(sc, NGE_MIB_RXERRRANGLEN) & 0xFFFF; 1722 stats->rx_unctl_frames = 1723 CSR_READ_4(sc, NGE_MIB_RXBADOPCODE) & 0xFFFF; 1724 stats->rx_pause = 1725 CSR_READ_4(sc, NGE_MIB_RXPAUSEPKTS) & 0xFFFF; 1726 stats->tx_pause = 1727 CSR_READ_4(sc, NGE_MIB_TXPAUSEPKTS) & 0xFFFF; 1728 stats->tx_seq_errs = 1729 CSR_READ_4(sc, NGE_MIB_TXERRSQE) & 0xFF; 1730 1731 /* 1732 * Since we've accept errored frames exclude Rx length errors. 1733 */ 1734 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1735 stats->rx_pkts_errs + stats->rx_crc_errs + 1736 stats->rx_fifo_oflows + stats->rx_sym_errs); 1737 1738 nstats = &sc->nge_stats; 1739 nstats->rx_pkts_errs += stats->rx_pkts_errs; 1740 nstats->rx_crc_errs += stats->rx_crc_errs; 1741 nstats->rx_fifo_oflows += stats->rx_fifo_oflows; 1742 nstats->rx_align_errs += stats->rx_align_errs; 1743 nstats->rx_sym_errs += stats->rx_sym_errs; 1744 nstats->rx_pkts_jumbos += stats->rx_pkts_jumbos; 1745 nstats->rx_len_errs += stats->rx_len_errs; 1746 nstats->rx_unctl_frames += stats->rx_unctl_frames; 1747 nstats->rx_pause += stats->rx_pause; 1748 nstats->tx_pause += stats->tx_pause; 1749 nstats->tx_seq_errs += stats->tx_seq_errs; 1750 } 1751 1752 #ifdef DEVICE_POLLING 1753 static poll_handler_t nge_poll; 1754 1755 static int 1756 nge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 1757 { 1758 struct nge_softc *sc; 1759 int rx_npkts = 0; 1760 1761 sc = ifp->if_softc; 1762 1763 NGE_LOCK(sc); 1764 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1765 NGE_UNLOCK(sc); 1766 return (rx_npkts); 1767 } 1768 1769 /* 1770 * On the nge, reading the status register also clears it. 1771 * So before returning to intr mode we must make sure that all 1772 * possible pending sources of interrupts have been served. 1773 * In practice this means run to completion the *eof routines, 1774 * and then call the interrupt routine. 1775 */ 1776 sc->rxcycles = count; 1777 rx_npkts = nge_rxeof(sc); 1778 nge_txeof(sc); 1779 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1780 nge_start_locked(ifp); 1781 1782 if (sc->rxcycles > 0 || cmd == POLL_AND_CHECK_STATUS) { 1783 uint32_t status; 1784 1785 /* Reading the ISR register clears all interrupts. */ 1786 status = CSR_READ_4(sc, NGE_ISR); 1787 1788 if ((status & (NGE_ISR_RX_ERR|NGE_ISR_RX_OFLOW)) != 0) 1789 rx_npkts += nge_rxeof(sc); 1790 1791 if ((status & NGE_ISR_RX_IDLE) != 0) 1792 NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE); 1793 1794 if ((status & NGE_ISR_SYSERR) != 0) { 1795 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1796 nge_init_locked(sc); 1797 } 1798 } 1799 NGE_UNLOCK(sc); 1800 return (rx_npkts); 1801 } 1802 #endif /* DEVICE_POLLING */ 1803 1804 static void 1805 nge_intr(void *arg) 1806 { 1807 struct nge_softc *sc; 1808 struct ifnet *ifp; 1809 uint32_t status; 1810 1811 sc = (struct nge_softc *)arg; 1812 ifp = sc->nge_ifp; 1813 1814 NGE_LOCK(sc); 1815 1816 if ((sc->nge_flags & NGE_FLAG_SUSPENDED) != 0) 1817 goto done_locked; 1818 1819 /* Reading the ISR register clears all interrupts. */ 1820 status = CSR_READ_4(sc, NGE_ISR); 1821 if (status == 0xffffffff || (status & NGE_INTRS) == 0) 1822 goto done_locked; 1823 #ifdef DEVICE_POLLING 1824 if ((ifp->if_capenable & IFCAP_POLLING) != 0) 1825 goto done_locked; 1826 #endif 1827 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 1828 goto done_locked; 1829 1830 /* Disable interrupts. */ 1831 CSR_WRITE_4(sc, NGE_IER, 0); 1832 1833 /* Data LED on for TBI mode */ 1834 if ((sc->nge_flags & NGE_FLAG_TBI) != 0) 1835 CSR_WRITE_4(sc, NGE_GPIO, 1836 CSR_READ_4(sc, NGE_GPIO) | NGE_GPIO_GP3_OUT); 1837 1838 for (; (status & NGE_INTRS) != 0;) { 1839 if ((status & (NGE_ISR_TX_DESC_OK | NGE_ISR_TX_ERR | 1840 NGE_ISR_TX_OK | NGE_ISR_TX_IDLE)) != 0) 1841 nge_txeof(sc); 1842 1843 if ((status & (NGE_ISR_RX_DESC_OK | NGE_ISR_RX_ERR | 1844 NGE_ISR_RX_OFLOW | NGE_ISR_RX_FIFO_OFLOW | 1845 NGE_ISR_RX_IDLE | NGE_ISR_RX_OK)) != 0) 1846 nge_rxeof(sc); 1847 1848 if ((status & NGE_ISR_RX_IDLE) != 0) 1849 NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE); 1850 1851 if ((status & NGE_ISR_SYSERR) != 0) { 1852 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1853 nge_init_locked(sc); 1854 } 1855 /* Reading the ISR register clears all interrupts. */ 1856 status = CSR_READ_4(sc, NGE_ISR); 1857 } 1858 1859 /* Re-enable interrupts. */ 1860 CSR_WRITE_4(sc, NGE_IER, 1); 1861 1862 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1863 nge_start_locked(ifp); 1864 1865 /* Data LED off for TBI mode */ 1866 if ((sc->nge_flags & NGE_FLAG_TBI) != 0) 1867 CSR_WRITE_4(sc, NGE_GPIO, 1868 CSR_READ_4(sc, NGE_GPIO) & ~NGE_GPIO_GP3_OUT); 1869 1870 done_locked: 1871 NGE_UNLOCK(sc); 1872 } 1873 1874 /* 1875 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 1876 * pointers to the fragment pointers. 1877 */ 1878 static int 1879 nge_encap(struct nge_softc *sc, struct mbuf **m_head) 1880 { 1881 struct nge_txdesc *txd, *txd_last; 1882 struct nge_desc *desc; 1883 struct mbuf *m; 1884 bus_dmamap_t map; 1885 bus_dma_segment_t txsegs[NGE_MAXTXSEGS]; 1886 int error, i, nsegs, prod, si; 1887 1888 NGE_LOCK_ASSERT(sc); 1889 1890 m = *m_head; 1891 prod = sc->nge_cdata.nge_tx_prod; 1892 txd = &sc->nge_cdata.nge_txdesc[prod]; 1893 txd_last = txd; 1894 map = txd->tx_dmamap; 1895 error = bus_dmamap_load_mbuf_sg(sc->nge_cdata.nge_tx_tag, map, 1896 *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); 1897 if (error == EFBIG) { 1898 m = m_collapse(*m_head, M_NOWAIT, NGE_MAXTXSEGS); 1899 if (m == NULL) { 1900 m_freem(*m_head); 1901 *m_head = NULL; 1902 return (ENOBUFS); 1903 } 1904 *m_head = m; 1905 error = bus_dmamap_load_mbuf_sg(sc->nge_cdata.nge_tx_tag, 1906 map, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); 1907 if (error != 0) { 1908 m_freem(*m_head); 1909 *m_head = NULL; 1910 return (error); 1911 } 1912 } else if (error != 0) 1913 return (error); 1914 if (nsegs == 0) { 1915 m_freem(*m_head); 1916 *m_head = NULL; 1917 return (EIO); 1918 } 1919 1920 /* Check number of available descriptors. */ 1921 if (sc->nge_cdata.nge_tx_cnt + nsegs >= (NGE_TX_RING_CNT - 1)) { 1922 bus_dmamap_unload(sc->nge_cdata.nge_tx_tag, map); 1923 return (ENOBUFS); 1924 } 1925 1926 bus_dmamap_sync(sc->nge_cdata.nge_tx_tag, map, BUS_DMASYNC_PREWRITE); 1927 1928 si = prod; 1929 for (i = 0; i < nsegs; i++) { 1930 desc = &sc->nge_rdata.nge_tx_ring[prod]; 1931 desc->nge_ptr = htole32(NGE_ADDR_LO(txsegs[i].ds_addr)); 1932 if (i == 0) 1933 desc->nge_cmdsts = htole32(txsegs[i].ds_len | 1934 NGE_CMDSTS_MORE); 1935 else 1936 desc->nge_cmdsts = htole32(txsegs[i].ds_len | 1937 NGE_CMDSTS_MORE | NGE_CMDSTS_OWN); 1938 desc->nge_extsts = 0; 1939 sc->nge_cdata.nge_tx_cnt++; 1940 NGE_INC(prod, NGE_TX_RING_CNT); 1941 } 1942 /* Update producer index. */ 1943 sc->nge_cdata.nge_tx_prod = prod; 1944 1945 prod = (prod + NGE_TX_RING_CNT - 1) % NGE_TX_RING_CNT; 1946 desc = &sc->nge_rdata.nge_tx_ring[prod]; 1947 /* Check if we have a VLAN tag to insert. */ 1948 if ((m->m_flags & M_VLANTAG) != 0) 1949 desc->nge_extsts |= htole32(NGE_TXEXTSTS_VLANPKT | 1950 bswap16(m->m_pkthdr.ether_vtag)); 1951 /* Set EOP on the last descriptor. */ 1952 desc->nge_cmdsts &= htole32(~NGE_CMDSTS_MORE); 1953 1954 /* Set checksum offload in the first descriptor. */ 1955 desc = &sc->nge_rdata.nge_tx_ring[si]; 1956 if ((m->m_pkthdr.csum_flags & NGE_CSUM_FEATURES) != 0) { 1957 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) 1958 desc->nge_extsts |= htole32(NGE_TXEXTSTS_IPCSUM); 1959 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0) 1960 desc->nge_extsts |= htole32(NGE_TXEXTSTS_TCPCSUM); 1961 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) 1962 desc->nge_extsts |= htole32(NGE_TXEXTSTS_UDPCSUM); 1963 } 1964 /* Lastly, turn the first descriptor ownership to hardware. */ 1965 desc->nge_cmdsts |= htole32(NGE_CMDSTS_OWN); 1966 1967 txd = &sc->nge_cdata.nge_txdesc[prod]; 1968 map = txd_last->tx_dmamap; 1969 txd_last->tx_dmamap = txd->tx_dmamap; 1970 txd->tx_dmamap = map; 1971 txd->tx_m = m; 1972 1973 return (0); 1974 } 1975 1976 /* 1977 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 1978 * to the mbuf data regions directly in the transmit lists. We also save a 1979 * copy of the pointers since the transmit list fragment pointers are 1980 * physical addresses. 1981 */ 1982 1983 static void 1984 nge_start(struct ifnet *ifp) 1985 { 1986 struct nge_softc *sc; 1987 1988 sc = ifp->if_softc; 1989 NGE_LOCK(sc); 1990 nge_start_locked(ifp); 1991 NGE_UNLOCK(sc); 1992 } 1993 1994 static void 1995 nge_start_locked(struct ifnet *ifp) 1996 { 1997 struct nge_softc *sc; 1998 struct mbuf *m_head; 1999 int enq; 2000 2001 sc = ifp->if_softc; 2002 2003 NGE_LOCK_ASSERT(sc); 2004 2005 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 2006 IFF_DRV_RUNNING || (sc->nge_flags & NGE_FLAG_LINK) == 0) 2007 return; 2008 2009 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && 2010 sc->nge_cdata.nge_tx_cnt < NGE_TX_RING_CNT - 2; ) { 2011 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 2012 if (m_head == NULL) 2013 break; 2014 /* 2015 * Pack the data into the transmit ring. If we 2016 * don't have room, set the OACTIVE flag and wait 2017 * for the NIC to drain the ring. 2018 */ 2019 if (nge_encap(sc, &m_head)) { 2020 if (m_head == NULL) 2021 break; 2022 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 2023 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2024 break; 2025 } 2026 2027 enq++; 2028 /* 2029 * If there's a BPF listener, bounce a copy of this frame 2030 * to him. 2031 */ 2032 ETHER_BPF_MTAP(ifp, m_head); 2033 } 2034 2035 if (enq > 0) { 2036 bus_dmamap_sync(sc->nge_cdata.nge_tx_ring_tag, 2037 sc->nge_cdata.nge_tx_ring_map, 2038 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2039 /* Transmit */ 2040 NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_ENABLE); 2041 2042 /* Set a timeout in case the chip goes out to lunch. */ 2043 sc->nge_watchdog_timer = 5; 2044 } 2045 } 2046 2047 static void 2048 nge_init(void *xsc) 2049 { 2050 struct nge_softc *sc = xsc; 2051 2052 NGE_LOCK(sc); 2053 nge_init_locked(sc); 2054 NGE_UNLOCK(sc); 2055 } 2056 2057 static void 2058 nge_init_locked(struct nge_softc *sc) 2059 { 2060 struct ifnet *ifp = sc->nge_ifp; 2061 struct mii_data *mii; 2062 uint8_t *eaddr; 2063 uint32_t reg; 2064 2065 NGE_LOCK_ASSERT(sc); 2066 2067 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 2068 return; 2069 2070 /* 2071 * Cancel pending I/O and free all RX/TX buffers. 2072 */ 2073 nge_stop(sc); 2074 2075 /* Reset the adapter. */ 2076 nge_reset(sc); 2077 2078 /* Disable Rx filter prior to programming Rx filter. */ 2079 CSR_WRITE_4(sc, NGE_RXFILT_CTL, 0); 2080 CSR_BARRIER_4(sc, NGE_RXFILT_CTL, BUS_SPACE_BARRIER_WRITE); 2081 2082 mii = device_get_softc(sc->nge_miibus); 2083 2084 /* Set MAC address. */ 2085 eaddr = IF_LLADDR(sc->nge_ifp); 2086 CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR0); 2087 CSR_WRITE_4(sc, NGE_RXFILT_DATA, (eaddr[1] << 8) | eaddr[0]); 2088 CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR1); 2089 CSR_WRITE_4(sc, NGE_RXFILT_DATA, (eaddr[3] << 8) | eaddr[2]); 2090 CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR2); 2091 CSR_WRITE_4(sc, NGE_RXFILT_DATA, (eaddr[5] << 8) | eaddr[4]); 2092 2093 /* Init circular RX list. */ 2094 if (nge_list_rx_init(sc) == ENOBUFS) { 2095 device_printf(sc->nge_dev, "initialization failed: no " 2096 "memory for rx buffers\n"); 2097 nge_stop(sc); 2098 return; 2099 } 2100 2101 /* 2102 * Init tx descriptors. 2103 */ 2104 nge_list_tx_init(sc); 2105 2106 /* Set Rx filter. */ 2107 nge_rxfilter(sc); 2108 2109 /* Disable PRIQ ctl. */ 2110 CSR_WRITE_4(sc, NGE_PRIOQCTL, 0); 2111 2112 /* 2113 * Set pause frames parameters. 2114 * Rx stat FIFO hi-threshold : 2 or more packets 2115 * Rx stat FIFO lo-threshold : less than 2 packets 2116 * Rx data FIFO hi-threshold : 2K or more bytes 2117 * Rx data FIFO lo-threshold : less than 2K bytes 2118 * pause time : (512ns * 0xffff) -> 33.55ms 2119 */ 2120 CSR_WRITE_4(sc, NGE_PAUSECSR, 2121 NGE_PAUSECSR_PAUSE_ON_MCAST | 2122 NGE_PAUSECSR_PAUSE_ON_DA | 2123 ((1 << 24) & NGE_PAUSECSR_RX_STATFIFO_THR_HI) | 2124 ((1 << 22) & NGE_PAUSECSR_RX_STATFIFO_THR_LO) | 2125 ((1 << 20) & NGE_PAUSECSR_RX_DATAFIFO_THR_HI) | 2126 ((1 << 18) & NGE_PAUSECSR_RX_DATAFIFO_THR_LO) | 2127 NGE_PAUSECSR_CNT); 2128 2129 /* 2130 * Load the address of the RX and TX lists. 2131 */ 2132 CSR_WRITE_4(sc, NGE_RX_LISTPTR_HI, 2133 NGE_ADDR_HI(sc->nge_rdata.nge_rx_ring_paddr)); 2134 CSR_WRITE_4(sc, NGE_RX_LISTPTR_LO, 2135 NGE_ADDR_LO(sc->nge_rdata.nge_rx_ring_paddr)); 2136 CSR_WRITE_4(sc, NGE_TX_LISTPTR_HI, 2137 NGE_ADDR_HI(sc->nge_rdata.nge_tx_ring_paddr)); 2138 CSR_WRITE_4(sc, NGE_TX_LISTPTR_LO, 2139 NGE_ADDR_LO(sc->nge_rdata.nge_tx_ring_paddr)); 2140 2141 /* Set RX configuration. */ 2142 CSR_WRITE_4(sc, NGE_RX_CFG, NGE_RXCFG); 2143 2144 CSR_WRITE_4(sc, NGE_VLAN_IP_RXCTL, 0); 2145 /* 2146 * Enable hardware checksum validation for all IPv4 2147 * packets, do not reject packets with bad checksums. 2148 */ 2149 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 2150 NGE_SETBIT(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_IPCSUM_ENB); 2151 2152 /* 2153 * Tell the chip to detect and strip VLAN tag info from 2154 * received frames. The tag will be provided in the extsts 2155 * field in the RX descriptors. 2156 */ 2157 NGE_SETBIT(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_TAG_DETECT_ENB); 2158 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 2159 NGE_SETBIT(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_TAG_STRIP_ENB); 2160 2161 /* Set TX configuration. */ 2162 CSR_WRITE_4(sc, NGE_TX_CFG, NGE_TXCFG); 2163 2164 /* 2165 * Enable TX IPv4 checksumming on a per-packet basis. 2166 */ 2167 CSR_WRITE_4(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_CSUM_PER_PKT); 2168 2169 /* 2170 * Tell the chip to insert VLAN tags on a per-packet basis as 2171 * dictated by the code in the frame encapsulation routine. 2172 */ 2173 NGE_SETBIT(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_TAG_PER_PKT); 2174 2175 /* 2176 * Enable the delivery of PHY interrupts based on 2177 * link/speed/duplex status changes. Also enable the 2178 * extsts field in the DMA descriptors (needed for 2179 * TCP/IP checksum offload on transmit). 2180 */ 2181 NGE_SETBIT(sc, NGE_CFG, NGE_CFG_PHYINTR_SPD | 2182 NGE_CFG_PHYINTR_LNK | NGE_CFG_PHYINTR_DUP | NGE_CFG_EXTSTS_ENB); 2183 2184 /* 2185 * Configure interrupt holdoff (moderation). We can 2186 * have the chip delay interrupt delivery for a certain 2187 * period. Units are in 100us, and the max setting 2188 * is 25500us (0xFF x 100us). Default is a 100us holdoff. 2189 */ 2190 CSR_WRITE_4(sc, NGE_IHR, sc->nge_int_holdoff); 2191 2192 /* 2193 * Enable MAC statistics counters and clear. 2194 */ 2195 reg = CSR_READ_4(sc, NGE_MIBCTL); 2196 reg &= ~NGE_MIBCTL_FREEZE_CNT; 2197 reg |= NGE_MIBCTL_CLEAR_CNT; 2198 CSR_WRITE_4(sc, NGE_MIBCTL, reg); 2199 2200 /* 2201 * Enable interrupts. 2202 */ 2203 CSR_WRITE_4(sc, NGE_IMR, NGE_INTRS); 2204 #ifdef DEVICE_POLLING 2205 /* 2206 * ... only enable interrupts if we are not polling, make sure 2207 * they are off otherwise. 2208 */ 2209 if ((ifp->if_capenable & IFCAP_POLLING) != 0) 2210 CSR_WRITE_4(sc, NGE_IER, 0); 2211 else 2212 #endif 2213 CSR_WRITE_4(sc, NGE_IER, 1); 2214 2215 sc->nge_flags &= ~NGE_FLAG_LINK; 2216 mii_mediachg(mii); 2217 2218 sc->nge_watchdog_timer = 0; 2219 callout_reset(&sc->nge_stat_ch, hz, nge_tick, sc); 2220 2221 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2222 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2223 } 2224 2225 /* 2226 * Set media options. 2227 */ 2228 static int 2229 nge_mediachange(struct ifnet *ifp) 2230 { 2231 struct nge_softc *sc; 2232 struct mii_data *mii; 2233 struct mii_softc *miisc; 2234 int error; 2235 2236 sc = ifp->if_softc; 2237 NGE_LOCK(sc); 2238 mii = device_get_softc(sc->nge_miibus); 2239 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 2240 PHY_RESET(miisc); 2241 error = mii_mediachg(mii); 2242 NGE_UNLOCK(sc); 2243 2244 return (error); 2245 } 2246 2247 /* 2248 * Report current media status. 2249 */ 2250 static void 2251 nge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 2252 { 2253 struct nge_softc *sc; 2254 struct mii_data *mii; 2255 2256 sc = ifp->if_softc; 2257 NGE_LOCK(sc); 2258 mii = device_get_softc(sc->nge_miibus); 2259 mii_pollstat(mii); 2260 ifmr->ifm_active = mii->mii_media_active; 2261 ifmr->ifm_status = mii->mii_media_status; 2262 NGE_UNLOCK(sc); 2263 } 2264 2265 static int 2266 nge_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 2267 { 2268 struct nge_softc *sc = ifp->if_softc; 2269 struct ifreq *ifr = (struct ifreq *) data; 2270 struct mii_data *mii; 2271 int error = 0, mask; 2272 2273 switch (command) { 2274 case SIOCSIFMTU: 2275 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NGE_JUMBO_MTU) 2276 error = EINVAL; 2277 else { 2278 NGE_LOCK(sc); 2279 ifp->if_mtu = ifr->ifr_mtu; 2280 /* 2281 * Workaround: if the MTU is larger than 2282 * 8152 (TX FIFO size minus 64 minus 18), turn off 2283 * TX checksum offloading. 2284 */ 2285 if (ifr->ifr_mtu >= 8152) { 2286 ifp->if_capenable &= ~IFCAP_TXCSUM; 2287 ifp->if_hwassist &= ~NGE_CSUM_FEATURES; 2288 } else { 2289 ifp->if_capenable |= IFCAP_TXCSUM; 2290 ifp->if_hwassist |= NGE_CSUM_FEATURES; 2291 } 2292 NGE_UNLOCK(sc); 2293 VLAN_CAPABILITIES(ifp); 2294 } 2295 break; 2296 case SIOCSIFFLAGS: 2297 NGE_LOCK(sc); 2298 if ((ifp->if_flags & IFF_UP) != 0) { 2299 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 2300 if ((ifp->if_flags ^ sc->nge_if_flags) & 2301 (IFF_PROMISC | IFF_ALLMULTI)) 2302 nge_rxfilter(sc); 2303 } else { 2304 if ((sc->nge_flags & NGE_FLAG_DETACH) == 0) 2305 nge_init_locked(sc); 2306 } 2307 } else { 2308 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 2309 nge_stop(sc); 2310 } 2311 sc->nge_if_flags = ifp->if_flags; 2312 NGE_UNLOCK(sc); 2313 error = 0; 2314 break; 2315 case SIOCADDMULTI: 2316 case SIOCDELMULTI: 2317 NGE_LOCK(sc); 2318 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 2319 nge_rxfilter(sc); 2320 NGE_UNLOCK(sc); 2321 break; 2322 case SIOCGIFMEDIA: 2323 case SIOCSIFMEDIA: 2324 mii = device_get_softc(sc->nge_miibus); 2325 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 2326 break; 2327 case SIOCSIFCAP: 2328 NGE_LOCK(sc); 2329 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 2330 #ifdef DEVICE_POLLING 2331 if ((mask & IFCAP_POLLING) != 0 && 2332 (IFCAP_POLLING & ifp->if_capabilities) != 0) { 2333 ifp->if_capenable ^= IFCAP_POLLING; 2334 if ((IFCAP_POLLING & ifp->if_capenable) != 0) { 2335 error = ether_poll_register(nge_poll, ifp); 2336 if (error != 0) { 2337 NGE_UNLOCK(sc); 2338 break; 2339 } 2340 /* Disable interrupts. */ 2341 CSR_WRITE_4(sc, NGE_IER, 0); 2342 } else { 2343 error = ether_poll_deregister(ifp); 2344 /* Enable interrupts. */ 2345 CSR_WRITE_4(sc, NGE_IER, 1); 2346 } 2347 } 2348 #endif /* DEVICE_POLLING */ 2349 if ((mask & IFCAP_TXCSUM) != 0 && 2350 (IFCAP_TXCSUM & ifp->if_capabilities) != 0) { 2351 ifp->if_capenable ^= IFCAP_TXCSUM; 2352 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0) 2353 ifp->if_hwassist |= NGE_CSUM_FEATURES; 2354 else 2355 ifp->if_hwassist &= ~NGE_CSUM_FEATURES; 2356 } 2357 if ((mask & IFCAP_RXCSUM) != 0 && 2358 (IFCAP_RXCSUM & ifp->if_capabilities) != 0) 2359 ifp->if_capenable ^= IFCAP_RXCSUM; 2360 2361 if ((mask & IFCAP_WOL) != 0 && 2362 (ifp->if_capabilities & IFCAP_WOL) != 0) { 2363 if ((mask & IFCAP_WOL_UCAST) != 0) 2364 ifp->if_capenable ^= IFCAP_WOL_UCAST; 2365 if ((mask & IFCAP_WOL_MCAST) != 0) 2366 ifp->if_capenable ^= IFCAP_WOL_MCAST; 2367 if ((mask & IFCAP_WOL_MAGIC) != 0) 2368 ifp->if_capenable ^= IFCAP_WOL_MAGIC; 2369 } 2370 2371 if ((mask & IFCAP_VLAN_HWCSUM) != 0 && 2372 (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0) 2373 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; 2374 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && 2375 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) { 2376 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 2377 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 2378 if ((ifp->if_capenable & 2379 IFCAP_VLAN_HWTAGGING) != 0) 2380 NGE_SETBIT(sc, 2381 NGE_VLAN_IP_RXCTL, 2382 NGE_VIPRXCTL_TAG_STRIP_ENB); 2383 else 2384 NGE_CLRBIT(sc, 2385 NGE_VLAN_IP_RXCTL, 2386 NGE_VIPRXCTL_TAG_STRIP_ENB); 2387 } 2388 } 2389 /* 2390 * Both VLAN hardware tagging and checksum offload is 2391 * required to do checksum offload on VLAN interface. 2392 */ 2393 if ((ifp->if_capenable & IFCAP_TXCSUM) == 0) 2394 ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM; 2395 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) 2396 ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM; 2397 NGE_UNLOCK(sc); 2398 VLAN_CAPABILITIES(ifp); 2399 break; 2400 default: 2401 error = ether_ioctl(ifp, command, data); 2402 break; 2403 } 2404 2405 return (error); 2406 } 2407 2408 static void 2409 nge_watchdog(struct nge_softc *sc) 2410 { 2411 struct ifnet *ifp; 2412 2413 NGE_LOCK_ASSERT(sc); 2414 2415 if (sc->nge_watchdog_timer == 0 || --sc->nge_watchdog_timer) 2416 return; 2417 2418 ifp = sc->nge_ifp; 2419 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2420 if_printf(ifp, "watchdog timeout\n"); 2421 2422 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2423 nge_init_locked(sc); 2424 2425 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2426 nge_start_locked(ifp); 2427 } 2428 2429 static int 2430 nge_stop_mac(struct nge_softc *sc) 2431 { 2432 uint32_t reg; 2433 int i; 2434 2435 NGE_LOCK_ASSERT(sc); 2436 2437 reg = CSR_READ_4(sc, NGE_CSR); 2438 if ((reg & (NGE_CSR_TX_ENABLE | NGE_CSR_RX_ENABLE)) != 0) { 2439 reg &= ~(NGE_CSR_TX_ENABLE | NGE_CSR_RX_ENABLE); 2440 reg |= NGE_CSR_TX_DISABLE | NGE_CSR_RX_DISABLE; 2441 CSR_WRITE_4(sc, NGE_CSR, reg); 2442 for (i = 0; i < NGE_TIMEOUT; i++) { 2443 DELAY(1); 2444 if ((CSR_READ_4(sc, NGE_CSR) & 2445 (NGE_CSR_RX_ENABLE | NGE_CSR_TX_ENABLE)) == 0) 2446 break; 2447 } 2448 if (i == NGE_TIMEOUT) 2449 return (ETIMEDOUT); 2450 } 2451 2452 return (0); 2453 } 2454 2455 /* 2456 * Stop the adapter and free any mbufs allocated to the 2457 * RX and TX lists. 2458 */ 2459 static void 2460 nge_stop(struct nge_softc *sc) 2461 { 2462 struct nge_txdesc *txd; 2463 struct nge_rxdesc *rxd; 2464 int i; 2465 struct ifnet *ifp; 2466 2467 NGE_LOCK_ASSERT(sc); 2468 ifp = sc->nge_ifp; 2469 2470 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 2471 sc->nge_flags &= ~NGE_FLAG_LINK; 2472 callout_stop(&sc->nge_stat_ch); 2473 sc->nge_watchdog_timer = 0; 2474 2475 CSR_WRITE_4(sc, NGE_IER, 0); 2476 CSR_WRITE_4(sc, NGE_IMR, 0); 2477 if (nge_stop_mac(sc) == ETIMEDOUT) 2478 device_printf(sc->nge_dev, 2479 "%s: unable to stop Tx/Rx MAC\n", __func__); 2480 CSR_WRITE_4(sc, NGE_TX_LISTPTR_HI, 0); 2481 CSR_WRITE_4(sc, NGE_TX_LISTPTR_LO, 0); 2482 CSR_WRITE_4(sc, NGE_RX_LISTPTR_HI, 0); 2483 CSR_WRITE_4(sc, NGE_RX_LISTPTR_LO, 0); 2484 nge_stats_update(sc); 2485 if (sc->nge_head != NULL) { 2486 m_freem(sc->nge_head); 2487 sc->nge_head = sc->nge_tail = NULL; 2488 } 2489 2490 /* 2491 * Free RX and TX mbufs still in the queues. 2492 */ 2493 for (i = 0; i < NGE_RX_RING_CNT; i++) { 2494 rxd = &sc->nge_cdata.nge_rxdesc[i]; 2495 if (rxd->rx_m != NULL) { 2496 bus_dmamap_sync(sc->nge_cdata.nge_rx_tag, 2497 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 2498 bus_dmamap_unload(sc->nge_cdata.nge_rx_tag, 2499 rxd->rx_dmamap); 2500 m_freem(rxd->rx_m); 2501 rxd->rx_m = NULL; 2502 } 2503 } 2504 for (i = 0; i < NGE_TX_RING_CNT; i++) { 2505 txd = &sc->nge_cdata.nge_txdesc[i]; 2506 if (txd->tx_m != NULL) { 2507 bus_dmamap_sync(sc->nge_cdata.nge_tx_tag, 2508 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 2509 bus_dmamap_unload(sc->nge_cdata.nge_tx_tag, 2510 txd->tx_dmamap); 2511 m_freem(txd->tx_m); 2512 txd->tx_m = NULL; 2513 } 2514 } 2515 } 2516 2517 /* 2518 * Before setting WOL bits, caller should have stopped Receiver. 2519 */ 2520 static void 2521 nge_wol(struct nge_softc *sc) 2522 { 2523 struct ifnet *ifp; 2524 uint32_t reg; 2525 uint16_t pmstat; 2526 int pmc; 2527 2528 NGE_LOCK_ASSERT(sc); 2529 2530 if (pci_find_cap(sc->nge_dev, PCIY_PMG, &pmc) != 0) 2531 return; 2532 2533 ifp = sc->nge_ifp; 2534 if ((ifp->if_capenable & IFCAP_WOL) == 0) { 2535 /* Disable WOL & disconnect CLKRUN to save power. */ 2536 CSR_WRITE_4(sc, NGE_WOLCSR, 0); 2537 CSR_WRITE_4(sc, NGE_CLKRUN, 0); 2538 } else { 2539 if (nge_stop_mac(sc) == ETIMEDOUT) 2540 device_printf(sc->nge_dev, 2541 "%s: unable to stop Tx/Rx MAC\n", __func__); 2542 /* 2543 * Make sure wake frames will be buffered in the Rx FIFO. 2544 * (i.e. Silent Rx mode.) 2545 */ 2546 CSR_WRITE_4(sc, NGE_RX_LISTPTR_HI, 0); 2547 CSR_BARRIER_4(sc, NGE_RX_LISTPTR_HI, BUS_SPACE_BARRIER_WRITE); 2548 CSR_WRITE_4(sc, NGE_RX_LISTPTR_LO, 0); 2549 CSR_BARRIER_4(sc, NGE_RX_LISTPTR_LO, BUS_SPACE_BARRIER_WRITE); 2550 /* Enable Rx again. */ 2551 NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE); 2552 CSR_BARRIER_4(sc, NGE_CSR, BUS_SPACE_BARRIER_WRITE); 2553 2554 /* Configure WOL events. */ 2555 reg = 0; 2556 if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0) 2557 reg |= NGE_WOLCSR_WAKE_ON_UNICAST; 2558 if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0) 2559 reg |= NGE_WOLCSR_WAKE_ON_MULTICAST; 2560 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) 2561 reg |= NGE_WOLCSR_WAKE_ON_MAGICPKT; 2562 CSR_WRITE_4(sc, NGE_WOLCSR, reg); 2563 2564 /* Activate CLKRUN. */ 2565 reg = CSR_READ_4(sc, NGE_CLKRUN); 2566 reg |= NGE_CLKRUN_PMEENB | NGE_CLNRUN_CLKRUN_ENB; 2567 CSR_WRITE_4(sc, NGE_CLKRUN, reg); 2568 } 2569 2570 /* Request PME. */ 2571 pmstat = pci_read_config(sc->nge_dev, pmc + PCIR_POWER_STATUS, 2); 2572 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 2573 if ((ifp->if_capenable & IFCAP_WOL) != 0) 2574 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 2575 pci_write_config(sc->nge_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); 2576 } 2577 2578 /* 2579 * Stop all chip I/O so that the kernel's probe routines don't 2580 * get confused by errant DMAs when rebooting. 2581 */ 2582 static int 2583 nge_shutdown(device_t dev) 2584 { 2585 2586 return (nge_suspend(dev)); 2587 } 2588 2589 static int 2590 nge_suspend(device_t dev) 2591 { 2592 struct nge_softc *sc; 2593 2594 sc = device_get_softc(dev); 2595 2596 NGE_LOCK(sc); 2597 nge_stop(sc); 2598 nge_wol(sc); 2599 sc->nge_flags |= NGE_FLAG_SUSPENDED; 2600 NGE_UNLOCK(sc); 2601 2602 return (0); 2603 } 2604 2605 static int 2606 nge_resume(device_t dev) 2607 { 2608 struct nge_softc *sc; 2609 struct ifnet *ifp; 2610 uint16_t pmstat; 2611 int pmc; 2612 2613 sc = device_get_softc(dev); 2614 2615 NGE_LOCK(sc); 2616 ifp = sc->nge_ifp; 2617 if (pci_find_cap(sc->nge_dev, PCIY_PMG, &pmc) == 0) { 2618 /* Disable PME and clear PME status. */ 2619 pmstat = pci_read_config(sc->nge_dev, 2620 pmc + PCIR_POWER_STATUS, 2); 2621 if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) { 2622 pmstat &= ~PCIM_PSTAT_PMEENABLE; 2623 pci_write_config(sc->nge_dev, 2624 pmc + PCIR_POWER_STATUS, pmstat, 2); 2625 } 2626 } 2627 if (ifp->if_flags & IFF_UP) { 2628 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2629 nge_init_locked(sc); 2630 } 2631 2632 sc->nge_flags &= ~NGE_FLAG_SUSPENDED; 2633 NGE_UNLOCK(sc); 2634 2635 return (0); 2636 } 2637 2638 #define NGE_SYSCTL_STAT_ADD32(c, h, n, p, d) \ 2639 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) 2640 2641 static void 2642 nge_sysctl_node(struct nge_softc *sc) 2643 { 2644 struct sysctl_ctx_list *ctx; 2645 struct sysctl_oid_list *child, *parent; 2646 struct sysctl_oid *tree; 2647 struct nge_stats *stats; 2648 int error; 2649 2650 ctx = device_get_sysctl_ctx(sc->nge_dev); 2651 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->nge_dev)); 2652 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_holdoff", 2653 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, &sc->nge_int_holdoff, 2654 0, sysctl_hw_nge_int_holdoff, "I", "NGE interrupt moderation"); 2655 /* Pull in device tunables. */ 2656 sc->nge_int_holdoff = NGE_INT_HOLDOFF_DEFAULT; 2657 error = resource_int_value(device_get_name(sc->nge_dev), 2658 device_get_unit(sc->nge_dev), "int_holdoff", &sc->nge_int_holdoff); 2659 if (error == 0) { 2660 if (sc->nge_int_holdoff < NGE_INT_HOLDOFF_MIN || 2661 sc->nge_int_holdoff > NGE_INT_HOLDOFF_MAX ) { 2662 device_printf(sc->nge_dev, 2663 "int_holdoff value out of range; " 2664 "using default: %d(%d us)\n", 2665 NGE_INT_HOLDOFF_DEFAULT, 2666 NGE_INT_HOLDOFF_DEFAULT * 100); 2667 sc->nge_int_holdoff = NGE_INT_HOLDOFF_DEFAULT; 2668 } 2669 } 2670 2671 stats = &sc->nge_stats; 2672 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", 2673 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "NGE statistics"); 2674 parent = SYSCTL_CHILDREN(tree); 2675 2676 /* Rx statistics. */ 2677 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", 2678 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Rx MAC statistics"); 2679 child = SYSCTL_CHILDREN(tree); 2680 NGE_SYSCTL_STAT_ADD32(ctx, child, "pkts_errs", 2681 &stats->rx_pkts_errs, 2682 "Packet errors including both wire errors and FIFO overruns"); 2683 NGE_SYSCTL_STAT_ADD32(ctx, child, "crc_errs", 2684 &stats->rx_crc_errs, "CRC errors"); 2685 NGE_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows", 2686 &stats->rx_fifo_oflows, "FIFO overflows"); 2687 NGE_SYSCTL_STAT_ADD32(ctx, child, "align_errs", 2688 &stats->rx_align_errs, "Frame alignment errors"); 2689 NGE_SYSCTL_STAT_ADD32(ctx, child, "sym_errs", 2690 &stats->rx_sym_errs, "One or more symbol errors"); 2691 NGE_SYSCTL_STAT_ADD32(ctx, child, "pkts_jumbos", 2692 &stats->rx_pkts_jumbos, 2693 "Packets received with length greater than 1518 bytes"); 2694 NGE_SYSCTL_STAT_ADD32(ctx, child, "len_errs", 2695 &stats->rx_len_errs, "In Range Length errors"); 2696 NGE_SYSCTL_STAT_ADD32(ctx, child, "unctl_frames", 2697 &stats->rx_unctl_frames, "Control frames with unsupported opcode"); 2698 NGE_SYSCTL_STAT_ADD32(ctx, child, "pause", 2699 &stats->rx_pause, "Pause frames"); 2700 2701 /* Tx statistics. */ 2702 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", 2703 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Tx MAC statistics"); 2704 child = SYSCTL_CHILDREN(tree); 2705 NGE_SYSCTL_STAT_ADD32(ctx, child, "pause", 2706 &stats->tx_pause, "Pause frames"); 2707 NGE_SYSCTL_STAT_ADD32(ctx, child, "seq_errs", 2708 &stats->tx_seq_errs, 2709 "Loss of collision heartbeat during transmission"); 2710 } 2711 2712 #undef NGE_SYSCTL_STAT_ADD32 2713 2714 static int 2715 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) 2716 { 2717 int error, value; 2718 2719 if (arg1 == NULL) 2720 return (EINVAL); 2721 value = *(int *)arg1; 2722 error = sysctl_handle_int(oidp, &value, 0, req); 2723 if (error != 0 || req->newptr == NULL) 2724 return (error); 2725 if (value < low || value > high) 2726 return (EINVAL); 2727 *(int *)arg1 = value; 2728 2729 return (0); 2730 } 2731 2732 static int 2733 sysctl_hw_nge_int_holdoff(SYSCTL_HANDLER_ARGS) 2734 { 2735 2736 return (sysctl_int_range(oidp, arg1, arg2, req, NGE_INT_HOLDOFF_MIN, 2737 NGE_INT_HOLDOFF_MAX)); 2738 } 2739