/*-
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2000, 2001
 *	Bill Paul <wpaul@bsdi.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * National Semiconductor DP83820/DP83821 gigabit ethernet driver
 * for FreeBSD.  Datasheets are available from:
 *
 * http://www.national.com/ds/DP/DP83820.pdf
 * http://www.national.com/ds/DP/DP83821.pdf
 *
 * These chips are used on several low cost gigabit ethernet NICs
 * sold by D-Link, Addtron, SMC and Asante.  Both parts are
 * virtually the same, except the 83820 is a 64-bit/32-bit part,
 * while the 83821 is 32-bit only.
 *
 * Many cards also use National gigE transceivers, such as the
 * DP83891, DP83861 and DP83862 gigPHYTER parts.  The DP83861 datasheet
 * contains a full register description that applies to all of these
 * components:
 *
 * http://www.national.com/ds/DP/DP83861.pdf
 *
 * Written by Bill Paul <wpaul@bsdi.com>
 * BSDi Open Source Solutions
 */

/*
 * The NatSemi DP83820 and 83821 controllers are enhanced versions
 * of the NatSemi MacPHYTER 10/100 devices.  They support 10, 100
 * and 1000Mbps speeds with 1000baseX (ten bit interface), MII and GMII
 * ports.  Other features include 8K TX FIFO and 32K RX FIFO, TCP/IP
 * hardware checksum offload (IPv4 only), VLAN tagging and filtering,
 * priority TX and RX queues, a 2048 bit multicast hash filter, 4 RX pattern
 * matching buffers, one perfect address filter buffer and interrupt
 * moderation.  The 83820 supports both 64-bit and 32-bit addressing
 * and data transfers: the 64-bit support can be toggled on or off
 * via software.  This affects the size of certain fields in the DMA
 * descriptors.
 *
 * There are two bugs/misfeatures in the 83820/83821 that I have
 * discovered so far:
 *
 * - Receive buffers must be aligned on 64-bit boundaries, which means
 *   you must resort to copying data in order to fix up the payload
 *   alignment.
 *
 * - In order to transmit jumbo frames larger than 8170 bytes, you have
 *   to turn off transmit checksum offloading, because the chip can't
 *   compute the checksum on an outgoing frame unless it fits entirely
 *   within the TX FIFO, which is only 8192 bytes in size.  If you have
 *   TX checksum offload enabled and you attempt to transmit a frame
 *   larger than 8170 bytes, the transmitter will wedge.
 *
 * To work around the latter problem, TX checksum offload is disabled
 * if the user selects an MTU larger than 8152 (8170 - 18).
 */
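/*
 * Illustrative sketch only (not part of the driver): the workaround
 * described above amounts to a check like the following whenever the
 * MTU changes, assuming an ifnet pointer 'ifp' and the
 * NGE_CSUM_FEATURES mask defined below.  The real logic lives in the
 * SIOCSIFMTU case of nge_ioctl(), which is outside this section.
 *
 *	if (ifp->if_mtu > 8152)
 *		ifp->if_hwassist &= ~NGE_CSUM_FEATURES;
 *	else if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
 *		ifp->if_hwassist |= NGE_CSUM_FEATURES;
 */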
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>

#include <dev/nge/if_ngereg.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

MODULE_DEPEND(nge, pci, 1, 1, 1);
MODULE_DEPEND(nge, ether, 1, 1, 1);
MODULE_DEPEND(nge, miibus, 1, 1, 1);

#define NGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

/*
 * Various supported device vendors/types and their names.
 */
static struct nge_type nge_devs[] = {
	{ NGE_VENDORID, NGE_DEVICEID,
	    "National Semiconductor Gigabit Ethernet" },
	{ 0, 0, NULL }
};

static int nge_probe(device_t);
static int nge_attach(device_t);
static int nge_detach(device_t);
static int nge_shutdown(device_t);
static int nge_suspend(device_t);
static int nge_resume(device_t);

static __inline void nge_discard_rxbuf(struct nge_softc *, int);
static int nge_newbuf(struct nge_softc *, int);
static int nge_encap(struct nge_softc *, struct mbuf **);
#ifndef __NO_STRICT_ALIGNMENT
static __inline void nge_fixup_rx(struct mbuf *);
#endif
static int nge_rxeof(struct nge_softc *);
static void nge_txeof(struct nge_softc *);
static void nge_intr(void *);
static void nge_tick(void *);
static void nge_stats_update(struct nge_softc *);
static void nge_start(struct ifnet *);
static void nge_start_locked(struct ifnet *);
static int nge_ioctl(struct ifnet *, u_long, caddr_t);
static void nge_init(void *);
static void nge_init_locked(struct nge_softc *);
static int nge_stop_mac(struct nge_softc *);
static void nge_stop(struct nge_softc *);
static void nge_wol(struct nge_softc *);
static void nge_watchdog(struct nge_softc *);
static int nge_mediachange(struct ifnet *);
static void nge_mediastatus(struct ifnet *, struct ifmediareq *);

static void nge_delay(struct nge_softc *);
static void nge_eeprom_idle(struct nge_softc *);
static void nge_eeprom_putbyte(struct nge_softc *, int);
static void nge_eeprom_getword(struct nge_softc *, int, uint16_t *);
static void nge_read_eeprom(struct nge_softc *, caddr_t, int, int);

static void nge_mii_sync(struct nge_softc *);
static void nge_mii_send(struct nge_softc *, uint32_t, int);
static int nge_mii_readreg(struct nge_softc *, struct nge_mii_frame *);
static int nge_mii_writereg(struct nge_softc *, struct nge_mii_frame *);

static int nge_miibus_readreg(device_t, int, int);
static int nge_miibus_writereg(device_t, int, int, int);
static void nge_miibus_statchg(device_t);

static void nge_rxfilter(struct nge_softc *);
static void nge_reset(struct nge_softc *);
static void nge_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int nge_dma_alloc(struct nge_softc *);
static void nge_dma_free(struct nge_softc *);
static int nge_list_rx_init(struct nge_softc *);
static int nge_list_tx_init(struct nge_softc *);
static void nge_sysctl_node(struct nge_softc *);
static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_nge_int_holdoff(SYSCTL_HANDLER_ARGS);

static device_method_t nge_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		nge_probe),
	DEVMETHOD(device_attach,	nge_attach),
	DEVMETHOD(device_detach,	nge_detach),
	DEVMETHOD(device_shutdown,	nge_shutdown),
	DEVMETHOD(device_suspend,	nge_suspend),
	DEVMETHOD(device_resume,	nge_resume),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	nge_miibus_readreg),
	DEVMETHOD(miibus_writereg,	nge_miibus_writereg),
	DEVMETHOD(miibus_statchg,	nge_miibus_statchg),

	{ NULL, NULL }
};

static driver_t nge_driver = {
	"nge",
	nge_methods,
	sizeof(struct nge_softc)
};
static devclass_t nge_devclass;

DRIVER_MODULE(nge, pci, nge_driver, nge_devclass, 0, 0);
DRIVER_MODULE(miibus, nge, miibus_driver, miibus_devclass, 0, 0);

#define NGE_SETBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) | (x))

#define NGE_CLRBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) & ~(x))

#define SIO_SET(x)					\
	CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) | (x))

#define SIO_CLR(x)					\
	CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) & ~(x))

static void
nge_delay(struct nge_softc *sc)
{
	int idx;

	for (idx = (300 / 33) + 1; idx > 0; idx--)
		CSR_READ_4(sc, NGE_CSR);
}

static void
nge_eeprom_idle(struct nge_softc *sc)
{
	int i;

	SIO_SET(NGE_MEAR_EE_CSEL);
	nge_delay(sc);
	SIO_SET(NGE_MEAR_EE_CLK);
	nge_delay(sc);

	for (i = 0; i < 25; i++) {
		SIO_CLR(NGE_MEAR_EE_CLK);
		nge_delay(sc);
		SIO_SET(NGE_MEAR_EE_CLK);
		nge_delay(sc);
	}

	SIO_CLR(NGE_MEAR_EE_CLK);
	nge_delay(sc);
	SIO_CLR(NGE_MEAR_EE_CSEL);
	nge_delay(sc);
	CSR_WRITE_4(sc, NGE_MEAR, 0x00000000);
}

/*
 * Send a read command and address to the EEPROM, check for ACK.
 */
static void
nge_eeprom_putbyte(struct nge_softc *sc, int addr)
{
	int d, i;

	d = addr | NGE_EECMD_READ;

	/*
	 * Feed in each bit and strobe the clock.
	 */
	for (i = 0x400; i; i >>= 1) {
		if (d & i) {
			SIO_SET(NGE_MEAR_EE_DIN);
		} else {
			SIO_CLR(NGE_MEAR_EE_DIN);
		}
		nge_delay(sc);
		SIO_SET(NGE_MEAR_EE_CLK);
		nge_delay(sc);
		SIO_CLR(NGE_MEAR_EE_CLK);
		nge_delay(sc);
	}
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static void
nge_eeprom_getword(struct nge_softc *sc, int addr, uint16_t *dest)
{
	int i;
	uint16_t word = 0;

	/* Force EEPROM to idle state. */
	nge_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	nge_delay(sc);
	SIO_CLR(NGE_MEAR_EE_CLK);
	nge_delay(sc);
	SIO_SET(NGE_MEAR_EE_CSEL);
	nge_delay(sc);

	/*
	 * Send address of word we want to read.
	 */
	nge_eeprom_putbyte(sc, addr);

	/*
	 * Start reading bits from EEPROM.
	 */
	for (i = 0x8000; i; i >>= 1) {
		SIO_SET(NGE_MEAR_EE_CLK);
		nge_delay(sc);
		if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_EE_DOUT)
			word |= i;
		nge_delay(sc);
		SIO_CLR(NGE_MEAR_EE_CLK);
		nge_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	nge_eeprom_idle(sc);

	*dest = word;
}

/*
 * Read a sequence of words from the EEPROM.
 */
static void
nge_read_eeprom(struct nge_softc *sc, caddr_t dest, int off, int cnt)
{
	int i;
	uint16_t word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		nge_eeprom_getword(sc, off + i, &word);
		ptr = (uint16_t *)(dest + (i * 2));
		*ptr = word;
	}
}

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
static void
nge_mii_sync(struct nge_softc *sc)
{
	int i;

	SIO_SET(NGE_MEAR_MII_DIR|NGE_MEAR_MII_DATA);

	for (i = 0; i < 32; i++) {
		SIO_SET(NGE_MEAR_MII_CLK);
		DELAY(1);
		SIO_CLR(NGE_MEAR_MII_CLK);
		DELAY(1);
	}
}
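/*
 * For reference, the bit-bang routines that follow clock out a
 * standard IEEE 802.3 clause 22 management frame.  A sketch of the
 * layout, assuming the NGE_MII_* constants in if_ngereg.h carry the
 * usual values:
 *
 *	<preamble: 32 one-bits, nge_mii_sync()>
 *	<ST:01><OP: 10=read, 01=write><PHYAD:5><REGAD:5><TA:2><DATA:16>
 */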
/*
 * Clock a series of bits through the MII.
 */
static void
nge_mii_send(struct nge_softc *sc, uint32_t bits, int cnt)
{
	int i;

	SIO_CLR(NGE_MEAR_MII_CLK);

	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			SIO_SET(NGE_MEAR_MII_DATA);
		} else {
			SIO_CLR(NGE_MEAR_MII_DATA);
		}
		DELAY(1);
		SIO_CLR(NGE_MEAR_MII_CLK);
		DELAY(1);
		SIO_SET(NGE_MEAR_MII_CLK);
	}
}

/*
 * Read a PHY register through the MII.
 */
static int
nge_mii_readreg(struct nge_softc *sc, struct nge_mii_frame *frame)
{
	int i, ack;

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = NGE_MII_STARTDELIM;
	frame->mii_opcode = NGE_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	CSR_WRITE_4(sc, NGE_MEAR, 0);

	/*
	 * Turn on data xmit.
	 */
	SIO_SET(NGE_MEAR_MII_DIR);

	nge_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	nge_mii_send(sc, frame->mii_stdelim, 2);
	nge_mii_send(sc, frame->mii_opcode, 2);
	nge_mii_send(sc, frame->mii_phyaddr, 5);
	nge_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	SIO_CLR((NGE_MEAR_MII_CLK|NGE_MEAR_MII_DATA));
	DELAY(1);
	SIO_SET(NGE_MEAR_MII_CLK);
	DELAY(1);

	/* Turn off xmit. */
	SIO_CLR(NGE_MEAR_MII_DIR);
	/* Check for ack */
	SIO_CLR(NGE_MEAR_MII_CLK);
	DELAY(1);
	ack = CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_MII_DATA;
	SIO_SET(NGE_MEAR_MII_CLK);
	DELAY(1);

	/*
	 * Now try reading data bits.  If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for (i = 0; i < 16; i++) {
			SIO_CLR(NGE_MEAR_MII_CLK);
			DELAY(1);
			SIO_SET(NGE_MEAR_MII_CLK);
			DELAY(1);
		}
		goto fail;
	}

	for (i = 0x8000; i; i >>= 1) {
		SIO_CLR(NGE_MEAR_MII_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_MII_DATA)
				frame->mii_data |= i;
			DELAY(1);
		}
		SIO_SET(NGE_MEAR_MII_CLK);
		DELAY(1);
	}

fail:

	SIO_CLR(NGE_MEAR_MII_CLK);
	DELAY(1);
	SIO_SET(NGE_MEAR_MII_CLK);
	DELAY(1);

	if (ack)
		return (1);
	return (0);
}

/*
 * Write to a PHY register through the MII.
 */
static int
nge_mii_writereg(struct nge_softc *sc, struct nge_mii_frame *frame)
{

	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = NGE_MII_STARTDELIM;
	frame->mii_opcode = NGE_MII_WRITEOP;
	frame->mii_turnaround = NGE_MII_TURNAROUND;

	/*
	 * Turn on data output.
	 */
	SIO_SET(NGE_MEAR_MII_DIR);

	nge_mii_sync(sc);

	nge_mii_send(sc, frame->mii_stdelim, 2);
	nge_mii_send(sc, frame->mii_opcode, 2);
	nge_mii_send(sc, frame->mii_phyaddr, 5);
	nge_mii_send(sc, frame->mii_regaddr, 5);
	nge_mii_send(sc, frame->mii_turnaround, 2);
	nge_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	SIO_SET(NGE_MEAR_MII_CLK);
	DELAY(1);
	SIO_CLR(NGE_MEAR_MII_CLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	SIO_CLR(NGE_MEAR_MII_DIR);

	return (0);
}

static int
nge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct nge_softc *sc;
	struct nge_mii_frame frame;
	int rv;

	sc = device_get_softc(dev);
	if ((sc->nge_flags & NGE_FLAG_TBI) != 0) {
		/* Pretend PHY is at address 0. */
		if (phy != 0)
			return (0);
		switch (reg) {
		case MII_BMCR:
			reg = NGE_TBI_BMCR;
			break;
		case MII_BMSR:
			/* 83820/83821 has different bit layout for BMSR. */
			rv = BMSR_ANEG | BMSR_EXTCAP | BMSR_EXTSTAT;
			reg = CSR_READ_4(sc, NGE_TBI_BMSR);
			if ((reg & NGE_TBIBMSR_ANEG_DONE) != 0)
				rv |= BMSR_ACOMP;
			if ((reg & NGE_TBIBMSR_LINKSTAT) != 0)
				rv |= BMSR_LINK;
			return (rv);
		case MII_ANAR:
			reg = NGE_TBI_ANAR;
			break;
		case MII_ANLPAR:
			reg = NGE_TBI_ANLPAR;
			break;
		case MII_ANER:
			reg = NGE_TBI_ANER;
			break;
		case MII_EXTSR:
			reg = NGE_TBI_ESR;
			break;
		case MII_PHYIDR1:
		case MII_PHYIDR2:
			return (0);
		default:
			device_printf(sc->nge_dev,
			    "bad phy register read : %d\n", reg);
			return (0);
		}
		return (CSR_READ_4(sc, reg));
	}

	bzero((char *)&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	nge_mii_readreg(sc, &frame);

	return (frame.mii_data);
}

static int
nge_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct nge_softc *sc;
	struct nge_mii_frame frame;

	sc = device_get_softc(dev);
	if ((sc->nge_flags & NGE_FLAG_TBI) != 0) {
		/* Pretend PHY is at address 0. */
		if (phy != 0)
			return (0);
		switch (reg) {
		case MII_BMCR:
			reg = NGE_TBI_BMCR;
			break;
		case MII_BMSR:
			return (0);
		case MII_ANAR:
			reg = NGE_TBI_ANAR;
			break;
		case MII_ANLPAR:
			reg = NGE_TBI_ANLPAR;
			break;
		case MII_ANER:
			reg = NGE_TBI_ANER;
			break;
		case MII_EXTSR:
			reg = NGE_TBI_ESR;
			break;
		case MII_PHYIDR1:
		case MII_PHYIDR2:
			return (0);
		default:
			device_printf(sc->nge_dev,
			    "bad phy register write : %d\n", reg);
			return (0);
		}
		CSR_WRITE_4(sc, reg, data);
		return (0);
	}

	bzero((char *)&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;
	nge_mii_writereg(sc, &frame);

	return (0);
}

/*
 * media status/link state change handler.
 */
static void
nge_miibus_statchg(device_t dev)
{
	struct nge_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	struct nge_txdesc *txd;
	uint32_t done, reg, status;
	int i;

	sc = device_get_softc(dev);
	NGE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->nge_miibus);
	ifp = sc->nge_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	sc->nge_flags &= ~NGE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
	    (IFM_AVALID | IFM_ACTIVE)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
		case IFM_1000_T:
		case IFM_1000_SX:
		case IFM_1000_LX:
		case IFM_1000_CX:
			sc->nge_flags |= NGE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop Tx/Rx MACs. */
	if (nge_stop_mac(sc) == ETIMEDOUT)
		device_printf(sc->nge_dev,
		    "%s: unable to stop Tx/Rx MAC\n", __func__);
	nge_txeof(sc);
	nge_rxeof(sc);
	if (sc->nge_head != NULL) {
		m_freem(sc->nge_head);
		sc->nge_head = sc->nge_tail = NULL;
	}

	/* Release queued frames. */
	for (i = 0; i < NGE_TX_RING_CNT; i++) {
		txd = &sc->nge_cdata.nge_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->nge_cdata.nge_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->nge_cdata.nge_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}

	/* Program MAC with resolved speed/duplex. */
	if ((sc->nge_flags & NGE_FLAG_LINK) != 0) {
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
			NGE_SETBIT(sc, NGE_TX_CFG,
			    (NGE_TXCFG_IGN_HBEAT | NGE_TXCFG_IGN_CARR));
			NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
#ifdef notyet
			/* Enable flow-control. */
			if ((IFM_OPTIONS(mii->mii_media_active) &
			    (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) != 0)
				NGE_SETBIT(sc, NGE_PAUSECSR,
				    NGE_PAUSECSR_PAUSE_ENB);
#endif
		} else {
			NGE_CLRBIT(sc, NGE_TX_CFG,
			    (NGE_TXCFG_IGN_HBEAT | NGE_TXCFG_IGN_CARR));
			NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
			NGE_CLRBIT(sc, NGE_PAUSECSR, NGE_PAUSECSR_PAUSE_ENB);
		}
		/* If we have a 1000Mbps link, set the mode_1000 bit. */
		reg = CSR_READ_4(sc, NGE_CFG);
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_SX:
		case IFM_1000_LX:
		case IFM_1000_CX:
		case IFM_1000_T:
			reg |= NGE_CFG_MODE_1000;
			break;
		default:
			reg &= ~NGE_CFG_MODE_1000;
			break;
		}
		CSR_WRITE_4(sc, NGE_CFG, reg);

		/* Reset Tx/Rx MAC. */
		reg = CSR_READ_4(sc, NGE_CSR);
		reg |= NGE_CSR_TX_RESET | NGE_CSR_RX_RESET;
		CSR_WRITE_4(sc, NGE_CSR, reg);
		/* Check the completion of reset. */
		done = 0;
		for (i = 0; i < NGE_TIMEOUT; i++) {
			DELAY(1);
			status = CSR_READ_4(sc, NGE_ISR);
			if ((status & NGE_ISR_RX_RESET_DONE) != 0)
				done |= NGE_ISR_RX_RESET_DONE;
			if ((status & NGE_ISR_TX_RESET_DONE) != 0)
				done |= NGE_ISR_TX_RESET_DONE;
			if (done ==
			    (NGE_ISR_TX_RESET_DONE | NGE_ISR_RX_RESET_DONE))
				break;
		}
		if (i == NGE_TIMEOUT)
			device_printf(sc->nge_dev,
			    "%s: unable to reset Tx/Rx MAC\n", __func__);
		/* Reuse Rx buffer and reset consumer pointer. */
		sc->nge_cdata.nge_rx_cons = 0;
		/*
		 * It seems that resetting the Rx/Tx MAC also resets the
		 * Tx/Rx descriptor pointer registers, so the Tx/Rx list
		 * addresses need to be reloaded.
		 */
		CSR_WRITE_4(sc, NGE_RX_LISTPTR_HI,
		    NGE_ADDR_HI(sc->nge_rdata.nge_rx_ring_paddr));
		CSR_WRITE_4(sc, NGE_RX_LISTPTR_LO,
		    NGE_ADDR_LO(sc->nge_rdata.nge_rx_ring_paddr));
		CSR_WRITE_4(sc, NGE_TX_LISTPTR_HI,
		    NGE_ADDR_HI(sc->nge_rdata.nge_tx_ring_paddr));
		CSR_WRITE_4(sc, NGE_TX_LISTPTR_LO,
		    NGE_ADDR_LO(sc->nge_rdata.nge_tx_ring_paddr));
		/* Reinitialize Tx buffers. */
		nge_list_tx_init(sc);

		/* Restart Rx MAC. */
		reg = CSR_READ_4(sc, NGE_CSR);
		reg |= NGE_CSR_RX_ENABLE;
		CSR_WRITE_4(sc, NGE_CSR, reg);
		for (i = 0; i < NGE_TIMEOUT; i++) {
			if ((CSR_READ_4(sc, NGE_CSR) & NGE_CSR_RX_ENABLE) != 0)
				break;
			DELAY(1);
		}
		if (i == NGE_TIMEOUT)
			device_printf(sc->nge_dev,
			    "%s: unable to restart Rx MAC\n", __func__);
	}

	/* Data LED off for TBI mode */
	if ((sc->nge_flags & NGE_FLAG_TBI) != 0)
		CSR_WRITE_4(sc, NGE_GPIO,
		    CSR_READ_4(sc, NGE_GPIO) & ~NGE_GPIO_GP3_OUT);
}

static void
nge_rxfilter(struct nge_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t h, i, rxfilt;
	int bit, index;

	NGE_LOCK_ASSERT(sc);
	ifp = sc->nge_ifp;

	/* Make sure to stop Rx filtering. */
	rxfilt = CSR_READ_4(sc, NGE_RXFILT_CTL);
	rxfilt &= ~NGE_RXFILTCTL_ENABLE;
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, rxfilt);
	CSR_BARRIER_WRITE_4(sc, NGE_RXFILT_CTL);

	rxfilt &= ~(NGE_RXFILTCTL_ALLMULTI | NGE_RXFILTCTL_ALLPHYS);
	rxfilt &= ~NGE_RXFILTCTL_BROAD;
	/*
	 * We don't want to use the hash table for matching unicast
	 * addresses.
	 */
	rxfilt &= ~(NGE_RXFILTCTL_MCHASH | NGE_RXFILTCTL_UCHASH);

	/*
	 * For the NatSemi chip, we have to explicitly enable the
	 * reception of ARP frames, as well as turn on the 'perfect
	 * match' filter where we store the station address, otherwise
	 * we won't receive unicasts meant for this host.
	 */
	rxfilt |= NGE_RXFILTCTL_ARP | NGE_RXFILTCTL_PERFECT;

	/*
	 * Set the capture broadcast bit to capture broadcast frames.
	 */
	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		rxfilt |= NGE_RXFILTCTL_BROAD;

	if ((ifp->if_flags & IFF_PROMISC) != 0 ||
	    (ifp->if_flags & IFF_ALLMULTI) != 0) {
		rxfilt |= NGE_RXFILTCTL_ALLMULTI;
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			rxfilt |= NGE_RXFILTCTL_ALLPHYS;
		goto done;
	}

	/*
	 * We have to explicitly enable the multicast hash table
	 * on the NatSemi chip if we want to use it, which we do.
	 */
	rxfilt |= NGE_RXFILTCTL_MCHASH;

	/* first, zot all the existing hash bits */
	for (i = 0; i < NGE_MCAST_FILTER_LEN; i += 2) {
		CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_MCAST_LO + i);
		CSR_WRITE_4(sc, NGE_RXFILT_DATA, 0);
	}

	/*
	 * From the 11 bits returned by the crc routine, the top 7
	 * bits represent the 16-bit word in the mcast hash table
	 * that needs to be updated, and the lower 4 bits represent
	 * which bit within that word needs to be set.
	 */
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 21;
		index = (h >> 4) & 0x7F;
		bit = h & 0xF;
		CSR_WRITE_4(sc, NGE_RXFILT_CTL,
		    NGE_FILTADDR_MCAST_LO + (index * 2));
		NGE_SETBIT(sc, NGE_RXFILT_DATA, (1 << bit));
	}
	if_maddr_runlock(ifp);

done:
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, rxfilt);
	/* Turn the receive filter on. */
	rxfilt |= NGE_RXFILTCTL_ENABLE;
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, rxfilt);
	CSR_BARRIER_WRITE_4(sc, NGE_RXFILT_CTL);
}
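/*
 * Worked example for the hash computation above, with a hypothetical
 * h = 0x5A3 (11 bits): index = (0x5A3 >> 4) & 0x7F = 0x5A selects the
 * 16-bit filter word at NGE_FILTADDR_MCAST_LO + 0xB4 (index * 2), and
 * bit = 0x5A3 & 0xF = 3 is the bit set within that word.
 */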
static void
nge_reset(struct nge_softc *sc)
{
	uint32_t v;
	int i;

	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RESET);

	for (i = 0; i < NGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, NGE_CSR) & NGE_CSR_RESET))
			break;
		DELAY(1);
	}

	if (i == NGE_TIMEOUT)
		device_printf(sc->nge_dev, "reset never completed\n");

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	/*
	 * If this is a NatSemi chip, make sure to clear
	 * PME mode.
	 */
	CSR_WRITE_4(sc, NGE_CLKRUN, NGE_CLKRUN_PMESTS);
	CSR_WRITE_4(sc, NGE_CLKRUN, 0);

	/*
	 * Clear WOL events which may interfere with normal Rx
	 * filter operation.
	 */
	CSR_WRITE_4(sc, NGE_WOLCSR, 0);

	/*
	 * Only the DP83820 supports 64-bit addressing/data transfers,
	 * and 64-bit addressing requires different descriptor
	 * structures.  To keep it simple, disable 64-bit
	 * addressing/data transfers.
	 */
	v = CSR_READ_4(sc, NGE_CFG);
	v &= ~(NGE_CFG_64BIT_ADDR_ENB | NGE_CFG_64BIT_DATA_ENB);
	CSR_WRITE_4(sc, NGE_CFG, v);
}

/*
 * Probe for a NatSemi chip.  Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
nge_probe(device_t dev)
{
	struct nge_type *t;

	t = nge_devs;

	while (t->nge_name != NULL) {
		if ((pci_get_vendor(dev) == t->nge_vid) &&
		    (pci_get_device(dev) == t->nge_did)) {
			device_set_desc(dev, t->nge_name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return (ENXIO);
}

/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
nge_attach(device_t dev)
{
	uint8_t eaddr[ETHER_ADDR_LEN];
	uint16_t ea[ETHER_ADDR_LEN/2], ea_temp, reg;
	struct nge_softc *sc;
	struct ifnet *ifp;
	int error, i, rid;

	error = 0;
	sc = device_get_softc(dev);
	sc->nge_dev = dev;

	NGE_LOCK_INIT(sc, device_get_nameunit(dev));
	callout_init_mtx(&sc->nge_stat_ch, &sc->nge_mtx, 0);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

#ifdef NGE_USEIOSPACE
	sc->nge_res_type = SYS_RES_IOPORT;
	sc->nge_res_id = PCIR_BAR(0);
#else
	sc->nge_res_type = SYS_RES_MEMORY;
	sc->nge_res_id = PCIR_BAR(1);
#endif
	sc->nge_res = bus_alloc_resource_any(dev, sc->nge_res_type,
	    &sc->nge_res_id, RF_ACTIVE);

	if (sc->nge_res == NULL) {
		if (sc->nge_res_type == SYS_RES_MEMORY) {
			sc->nge_res_type = SYS_RES_IOPORT;
			sc->nge_res_id = PCIR_BAR(0);
		} else {
			sc->nge_res_type = SYS_RES_MEMORY;
			sc->nge_res_id = PCIR_BAR(1);
		}
		sc->nge_res = bus_alloc_resource_any(dev, sc->nge_res_type,
		    &sc->nge_res_id, RF_ACTIVE);
		if (sc->nge_res == NULL) {
			device_printf(dev, "couldn't allocate %s resources\n",
			    sc->nge_res_type == SYS_RES_MEMORY ? "memory" :
			    "I/O");
			NGE_LOCK_DESTROY(sc);
			return (ENXIO);
		}
	}

	/* Allocate interrupt */
	rid = 0;
	sc->nge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->nge_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Enable MWI. */
	reg = pci_read_config(dev, PCIR_COMMAND, 2);
	reg |= PCIM_CMD_MWRICEN;
	pci_write_config(dev, PCIR_COMMAND, reg, 2);

	/* Reset the adapter. */
	nge_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	nge_read_eeprom(sc, (caddr_t)ea, NGE_EE_NODEADDR, 3);
	for (i = 0; i < ETHER_ADDR_LEN / 2; i++)
		ea[i] = le16toh(ea[i]);
	ea_temp = ea[0];
	ea[0] = ea[2];
	ea[2] = ea_temp;
	bcopy(ea, eaddr, sizeof(eaddr));

	if (nge_dma_alloc(sc) != 0) {
		error = ENXIO;
		goto fail;
	}

	nge_sysctl_node(sc);

	ifp = sc->nge_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nge_ioctl;
	ifp->if_start = nge_start;
	ifp->if_init = nge_init;
	ifp->if_snd.ifq_drv_maxlen = NGE_TX_RING_CNT - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_hwassist = NGE_CSUM_FEATURES;
	ifp->if_capabilities = IFCAP_HWCSUM;
	/*
	 * It seems that some hardware doesn't provide the 3.3V auxiliary
	 * supply (3VAUX) needed to drive PME, so checking the PCI power
	 * management capability is necessary.
	 */
	if (pci_find_extcap(sc->nge_dev, PCIY_PMG, &i) == 0)
		ifp->if_capabilities |= IFCAP_WOL;
	ifp->if_capenable = ifp->if_capabilities;

	if ((CSR_READ_4(sc, NGE_CFG) & NGE_CFG_TBI_EN) != 0) {
		sc->nge_flags |= NGE_FLAG_TBI;
		device_printf(dev, "Using TBI\n");
		/* Configure GPIO. */
		CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
		    | NGE_GPIO_GP4_OUT
		    | NGE_GPIO_GP1_OUTENB | NGE_GPIO_GP2_OUTENB
		    | NGE_GPIO_GP3_OUTENB
		    | NGE_GPIO_GP3_IN | NGE_GPIO_GP4_IN);
	}

	/*
	 * Do MII setup.
	 */
	error = mii_attach(dev, &sc->nge_miibus, ifp, nge_mediachange,
	    nge_mediastatus, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* VLAN capability setup. */
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
	ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif
	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/*
	 * Hookup IRQ last.
	 */
	error = bus_setup_intr(dev, sc->nge_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, nge_intr, sc, &sc->nge_intrhand);
	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		goto fail;
	}

fail:
	if (error != 0)
		nge_detach(dev);
	return (error);
}

static int
nge_detach(device_t dev)
{
	struct nge_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->nge_ifp;

#ifdef DEVICE_POLLING
	if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	if (device_is_attached(dev)) {
		NGE_LOCK(sc);
		sc->nge_flags |= NGE_FLAG_DETACH;
		nge_stop(sc);
		NGE_UNLOCK(sc);
		callout_drain(&sc->nge_stat_ch);
		if (ifp != NULL)
			ether_ifdetach(ifp);
	}

	if (sc->nge_miibus != NULL) {
		device_delete_child(dev, sc->nge_miibus);
		sc->nge_miibus = NULL;
	}
	bus_generic_detach(dev);
	if (sc->nge_intrhand != NULL)
		bus_teardown_intr(dev, sc->nge_irq, sc->nge_intrhand);
	if (sc->nge_irq != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq);
	if (sc->nge_res != NULL)
		bus_release_resource(dev, sc->nge_res_type, sc->nge_res_id,
		    sc->nge_res);

	nge_dma_free(sc);
	if (ifp != NULL)
		if_free(ifp);

	NGE_LOCK_DESTROY(sc);

	return (0);
}

struct nge_dmamap_arg {
	bus_addr_t	nge_busaddr;
};

static void
nge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct nge_dmamap_arg *ctx;

	if (error != 0)
		return;
	ctx = arg;
	ctx->nge_busaddr = segs[0].ds_addr;
}

static int
nge_dma_alloc(struct nge_softc *sc)
{
	struct nge_dmamap_arg ctx;
	struct nge_txdesc *txd;
	struct nge_rxdesc *rxd;
	int error, i;

	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->nge_dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->nge_cdata.nge_parent_tag);
	if (error != 0) {
		device_printf(sc->nge_dev, "failed to create parent DMA tag\n");
		goto fail;
	}

	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(sc->nge_cdata.nge_parent_tag,/* parent */
	    NGE_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    NGE_TX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    NGE_TX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->nge_cdata.nge_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->nge_dev, "failed to create Tx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(sc->nge_cdata.nge_parent_tag,/* parent */
	    NGE_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    NGE_RX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    NGE_RX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->nge_cdata.nge_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->nge_dev,
		    "failed to create Rx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(sc->nge_cdata.nge_parent_tag,/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * NGE_MAXTXSEGS,	/* maxsize */
	    NGE_MAXTXSEGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->nge_cdata.nge_tx_tag);
	if (error != 0) {
		device_printf(sc->nge_dev, "failed to create Tx DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(sc->nge_cdata.nge_parent_tag,/* parent */
	    NGE_RX_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->nge_cdata.nge_rx_tag);
	if (error != 0) {
		device_printf(sc->nge_dev, "failed to create Rx DMA tag\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->nge_cdata.nge_tx_ring_tag,
	    (void **)&sc->nge_rdata.nge_tx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->nge_cdata.nge_tx_ring_map);
	if (error != 0) {
		device_printf(sc->nge_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}

	ctx.nge_busaddr = 0;
	error = bus_dmamap_load(sc->nge_cdata.nge_tx_ring_tag,
	    sc->nge_cdata.nge_tx_ring_map, sc->nge_rdata.nge_tx_ring,
	    NGE_TX_RING_SIZE, nge_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.nge_busaddr == 0) {
		device_printf(sc->nge_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	sc->nge_rdata.nge_tx_ring_paddr = ctx.nge_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->nge_cdata.nge_rx_ring_tag,
	    (void **)&sc->nge_rdata.nge_rx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->nge_cdata.nge_rx_ring_map);
	if (error != 0) {
		device_printf(sc->nge_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}

	ctx.nge_busaddr = 0;
	error = bus_dmamap_load(sc->nge_cdata.nge_rx_ring_tag,
	    sc->nge_cdata.nge_rx_ring_map, sc->nge_rdata.nge_rx_ring,
	    NGE_RX_RING_SIZE, nge_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.nge_busaddr == 0) {
		device_printf(sc->nge_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc->nge_rdata.nge_rx_ring_paddr = ctx.nge_busaddr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < NGE_TX_RING_CNT; i++) {
		txd = &sc->nge_cdata.nge_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->nge_cdata.nge_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->nge_dev,
			    "failed to create Tx dmamap\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->nge_cdata.nge_rx_tag, 0,
	    &sc->nge_cdata.nge_rx_sparemap)) != 0) {
		device_printf(sc->nge_dev,
		    "failed to create spare Rx dmamap\n");
		goto fail;
	}
	for (i = 0; i < NGE_RX_RING_CNT; i++) {
		rxd = &sc->nge_cdata.nge_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->nge_cdata.nge_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->nge_dev,
			    "failed to create Rx dmamap\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
nge_dma_free(struct nge_softc *sc)
{
	struct nge_txdesc *txd;
	struct nge_rxdesc *rxd;
	int i;

	/* Tx ring. */
	if (sc->nge_cdata.nge_tx_ring_tag) {
		if (sc->nge_cdata.nge_tx_ring_map)
			bus_dmamap_unload(sc->nge_cdata.nge_tx_ring_tag,
			    sc->nge_cdata.nge_tx_ring_map);
		if (sc->nge_cdata.nge_tx_ring_map &&
		    sc->nge_rdata.nge_tx_ring)
			bus_dmamem_free(sc->nge_cdata.nge_tx_ring_tag,
			    sc->nge_rdata.nge_tx_ring,
			    sc->nge_cdata.nge_tx_ring_map);
		sc->nge_rdata.nge_tx_ring = NULL;
		sc->nge_cdata.nge_tx_ring_map = NULL;
		bus_dma_tag_destroy(sc->nge_cdata.nge_tx_ring_tag);
		sc->nge_cdata.nge_tx_ring_tag = NULL;
	}
	/* Rx ring. */
	if (sc->nge_cdata.nge_rx_ring_tag) {
		if (sc->nge_cdata.nge_rx_ring_map)
			bus_dmamap_unload(sc->nge_cdata.nge_rx_ring_tag,
			    sc->nge_cdata.nge_rx_ring_map);
		if (sc->nge_cdata.nge_rx_ring_map &&
		    sc->nge_rdata.nge_rx_ring)
			bus_dmamem_free(sc->nge_cdata.nge_rx_ring_tag,
			    sc->nge_rdata.nge_rx_ring,
			    sc->nge_cdata.nge_rx_ring_map);
		sc->nge_rdata.nge_rx_ring = NULL;
		sc->nge_cdata.nge_rx_ring_map = NULL;
		bus_dma_tag_destroy(sc->nge_cdata.nge_rx_ring_tag);
		sc->nge_cdata.nge_rx_ring_tag = NULL;
	}
	/* Tx buffers. */
	if (sc->nge_cdata.nge_tx_tag) {
		for (i = 0; i < NGE_TX_RING_CNT; i++) {
			txd = &sc->nge_cdata.nge_txdesc[i];
			if (txd->tx_dmamap) {
				bus_dmamap_destroy(sc->nge_cdata.nge_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->nge_cdata.nge_tx_tag);
		sc->nge_cdata.nge_tx_tag = NULL;
	}
	/* Rx buffers. */
	if (sc->nge_cdata.nge_rx_tag) {
		for (i = 0; i < NGE_RX_RING_CNT; i++) {
			rxd = &sc->nge_cdata.nge_rxdesc[i];
			if (rxd->rx_dmamap) {
				bus_dmamap_destroy(sc->nge_cdata.nge_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->nge_cdata.nge_rx_sparemap) {
			bus_dmamap_destroy(sc->nge_cdata.nge_rx_tag,
			    sc->nge_cdata.nge_rx_sparemap);
			sc->nge_cdata.nge_rx_sparemap = 0;
		}
		bus_dma_tag_destroy(sc->nge_cdata.nge_rx_tag);
		sc->nge_cdata.nge_rx_tag = NULL;
	}

	if (sc->nge_cdata.nge_parent_tag) {
		bus_dma_tag_destroy(sc->nge_cdata.nge_parent_tag);
		sc->nge_cdata.nge_parent_tag = NULL;
	}
}
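/*
 * Layout sketch (illustrative): nge_list_tx_init() and
 * nge_list_rx_init() below chain the descriptors into closed rings
 * through their nge_next fields, e.g. for a 4-entry ring:
 *
 *	desc[0] -> desc[1] -> desc[2] -> desc[3]
 *	   ^___________________________________|
 */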
/*
 * Initialize the transmit descriptors.
 */
static int
nge_list_tx_init(struct nge_softc *sc)
{
	struct nge_ring_data *rd;
	struct nge_txdesc *txd;
	bus_addr_t addr;
	int i;

	sc->nge_cdata.nge_tx_prod = 0;
	sc->nge_cdata.nge_tx_cons = 0;
	sc->nge_cdata.nge_tx_cnt = 0;

	rd = &sc->nge_rdata;
	bzero(rd->nge_tx_ring, sizeof(struct nge_desc) * NGE_TX_RING_CNT);
	for (i = 0; i < NGE_TX_RING_CNT; i++) {
		if (i == NGE_TX_RING_CNT - 1)
			addr = NGE_TX_RING_ADDR(sc, 0);
		else
			addr = NGE_TX_RING_ADDR(sc, i + 1);
		rd->nge_tx_ring[i].nge_next = htole32(NGE_ADDR_LO(addr));
		txd = &sc->nge_cdata.nge_txdesc[i];
		txd->tx_m = NULL;
	}

	bus_dmamap_sync(sc->nge_cdata.nge_tx_ring_tag,
	    sc->nge_cdata.nge_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Initialize the RX descriptors and allocate mbufs for them.  Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int
nge_list_rx_init(struct nge_softc *sc)
{
	struct nge_ring_data *rd;
	bus_addr_t addr;
	int i;

	sc->nge_cdata.nge_rx_cons = 0;
	sc->nge_head = sc->nge_tail = NULL;

	rd = &sc->nge_rdata;
	bzero(rd->nge_rx_ring, sizeof(struct nge_desc) * NGE_RX_RING_CNT);
	for (i = 0; i < NGE_RX_RING_CNT; i++) {
		if (nge_newbuf(sc, i) != 0)
			return (ENOBUFS);
		if (i == NGE_RX_RING_CNT - 1)
			addr = NGE_RX_RING_ADDR(sc, 0);
		else
			addr = NGE_RX_RING_ADDR(sc, i + 1);
		rd->nge_rx_ring[i].nge_next = htole32(NGE_ADDR_LO(addr));
	}

	bus_dmamap_sync(sc->nge_cdata.nge_rx_ring_tag,
	    sc->nge_cdata.nge_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

static __inline void
nge_discard_rxbuf(struct nge_softc *sc, int idx)
{
	struct nge_desc *desc;

	desc = &sc->nge_rdata.nge_rx_ring[idx];
	desc->nge_cmdsts = htole32(MCLBYTES - sizeof(uint64_t));
	desc->nge_extsts = 0;
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
1513 */ 1514 static int 1515 nge_newbuf(struct nge_softc *sc, int idx) 1516 { 1517 struct nge_desc *desc; 1518 struct nge_rxdesc *rxd; 1519 struct mbuf *m; 1520 bus_dma_segment_t segs[1]; 1521 bus_dmamap_t map; 1522 int nsegs; 1523 1524 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1525 if (m == NULL) 1526 return (ENOBUFS); 1527 m->m_len = m->m_pkthdr.len = MCLBYTES; 1528 m_adj(m, sizeof(uint64_t)); 1529 1530 if (bus_dmamap_load_mbuf_sg(sc->nge_cdata.nge_rx_tag, 1531 sc->nge_cdata.nge_rx_sparemap, m, segs, &nsegs, 0) != 0) { 1532 m_freem(m); 1533 return (ENOBUFS); 1534 } 1535 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1536 1537 rxd = &sc->nge_cdata.nge_rxdesc[idx]; 1538 if (rxd->rx_m != NULL) { 1539 bus_dmamap_sync(sc->nge_cdata.nge_rx_tag, rxd->rx_dmamap, 1540 BUS_DMASYNC_POSTREAD); 1541 bus_dmamap_unload(sc->nge_cdata.nge_rx_tag, rxd->rx_dmamap); 1542 } 1543 map = rxd->rx_dmamap; 1544 rxd->rx_dmamap = sc->nge_cdata.nge_rx_sparemap; 1545 sc->nge_cdata.nge_rx_sparemap = map; 1546 bus_dmamap_sync(sc->nge_cdata.nge_rx_tag, rxd->rx_dmamap, 1547 BUS_DMASYNC_PREREAD); 1548 rxd->rx_m = m; 1549 desc = &sc->nge_rdata.nge_rx_ring[idx]; 1550 desc->nge_ptr = htole32(NGE_ADDR_LO(segs[0].ds_addr)); 1551 desc->nge_cmdsts = htole32(segs[0].ds_len); 1552 desc->nge_extsts = 0; 1553 1554 return (0); 1555 } 1556 1557 #ifndef __NO_STRICT_ALIGNMENT 1558 static __inline void 1559 nge_fixup_rx(struct mbuf *m) 1560 { 1561 int i; 1562 uint16_t *src, *dst; 1563 1564 src = mtod(m, uint16_t *); 1565 dst = src - 1; 1566 1567 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) 1568 *dst++ = *src++; 1569 1570 m->m_data -= ETHER_ALIGN; 1571 } 1572 #endif 1573 1574 /* 1575 * A frame has been uploaded: pass the resulting mbuf chain up to 1576 * the higher level protocols. 
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static int
nge_rxeof(struct nge_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct nge_desc *cur_rx;
	struct nge_rxdesc *rxd;
	int cons, prog, rx_npkts, total_len;
	uint32_t cmdsts, extsts;

	NGE_LOCK_ASSERT(sc);

	ifp = sc->nge_ifp;
	cons = sc->nge_cdata.nge_rx_cons;
	rx_npkts = 0;

	bus_dmamap_sync(sc->nge_cdata.nge_rx_ring_tag,
	    sc->nge_cdata.nge_rx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (prog = 0; prog < NGE_RX_RING_CNT &&
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;
	    NGE_INC(cons, NGE_RX_RING_CNT)) {
#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif
		cur_rx = &sc->nge_rdata.nge_rx_ring[cons];
		cmdsts = le32toh(cur_rx->nge_cmdsts);
		extsts = le32toh(cur_rx->nge_extsts);
		if ((cmdsts & NGE_CMDSTS_OWN) == 0)
			break;
		prog++;
		rxd = &sc->nge_cdata.nge_rxdesc[cons];
		m = rxd->rx_m;
		total_len = cmdsts & NGE_CMDSTS_BUFLEN;

		if ((cmdsts & NGE_CMDSTS_MORE) != 0) {
			if (nge_newbuf(sc, cons) != 0) {
				ifp->if_iqdrops++;
				if (sc->nge_head != NULL) {
					m_freem(sc->nge_head);
					sc->nge_head = sc->nge_tail = NULL;
				}
				nge_discard_rxbuf(sc, cons);
				continue;
			}
			m->m_len = total_len;
			if (sc->nge_head == NULL) {
				m->m_pkthdr.len = total_len;
				sc->nge_head = sc->nge_tail = m;
			} else {
				m->m_flags &= ~M_PKTHDR;
				sc->nge_head->m_pkthdr.len += total_len;
				sc->nge_tail->m_next = m;
				sc->nge_tail = m;
			}
			continue;
		}

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if ((cmdsts & NGE_CMDSTS_PKT_OK) == 0) {
			if ((cmdsts & NGE_RXSTAT_RUNT) &&
			    total_len >= (ETHER_MIN_LEN - ETHER_CRC_LEN - 4)) {
				/*
				 * Work around a hardware bug: accept runt
				 * frames whose length is larger than or
				 * equal to 56.
				 */
			} else {
				/*
				 * Input error counters are updated by
				 * hardware.
				 */
				if (sc->nge_head != NULL) {
					m_freem(sc->nge_head);
					sc->nge_head = sc->nge_tail = NULL;
				}
				nge_discard_rxbuf(sc, cons);
				continue;
			}
		}

		/* Try to conjure up a replacement mbuf. */
		if (nge_newbuf(sc, cons) != 0) {
			ifp->if_iqdrops++;
			if (sc->nge_head != NULL) {
				m_freem(sc->nge_head);
				sc->nge_head = sc->nge_tail = NULL;
			}
			nge_discard_rxbuf(sc, cons);
			continue;
		}

		/* Chain received mbufs. */
		if (sc->nge_head != NULL) {
			m->m_len = total_len;
			m->m_flags &= ~M_PKTHDR;
			sc->nge_tail->m_next = m;
			m = sc->nge_head;
			m->m_pkthdr.len += total_len;
			sc->nge_head = sc->nge_tail = NULL;
		} else
			m->m_pkthdr.len = m->m_len = total_len;

		/*
		 * Ok.  NatSemi really screwed up here.  This is the
		 * only gigE chip I know of with alignment constraints
		 * on receive buffers.  RX buffers must be 64-bit aligned.
		 */
		/*
		 * By popular demand, ignore the alignment problems
		 * on the non-strict alignment platform.  The performance
		 * hit incurred due to unaligned accesses is much smaller
		 * than the hit produced by forcing buffer copies all
		 * the time, especially with jumbo frames.  We still
		 * need to fix up the alignment everywhere else though.
		 */
#ifndef __NO_STRICT_ALIGNMENT
		nge_fixup_rx(m);
#endif
		m->m_pkthdr.rcvif = ifp;
		ifp->if_ipackets++;

		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			/* Do IP checksum checking. */
			if ((extsts & NGE_RXEXTSTS_IPPKT) != 0)
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if ((extsts & NGE_RXEXTSTS_IPCSUMERR) == 0)
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			if ((extsts & NGE_RXEXTSTS_TCPPKT &&
			    !(extsts & NGE_RXEXTSTS_TCPCSUMERR)) ||
			    (extsts & NGE_RXEXTSTS_UDPPKT &&
			    !(extsts & NGE_RXEXTSTS_UDPCSUMERR))) {
				m->m_pkthdr.csum_flags |=
				    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		/*
		 * If we received a packet with a vlan tag, pass it
		 * to vlan_input() instead of ether_input().
		 */
		if ((extsts & NGE_RXEXTSTS_VLANPKT) != 0 &&
		    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
			m->m_pkthdr.ether_vtag =
			    bswap16(extsts & NGE_RXEXTSTS_VTCI);
			m->m_flags |= M_VLANTAG;
		}
		NGE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		NGE_LOCK(sc);
		rx_npkts++;
	}

	if (prog > 0) {
		sc->nge_cdata.nge_rx_cons = cons;
		bus_dmamap_sync(sc->nge_cdata.nge_rx_ring_tag,
		    sc->nge_cdata.nge_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	return (rx_npkts);
}

/*
 * A frame was downloaded to the chip.  It's safe for us to clean up
 * the list buffers.
 */
static void
nge_txeof(struct nge_softc *sc)
{
	struct nge_desc *cur_tx;
	struct nge_txdesc *txd;
	struct ifnet *ifp;
	uint32_t cmdsts;
	int cons, prod;

	NGE_LOCK_ASSERT(sc);
	ifp = sc->nge_ifp;

	cons = sc->nge_cdata.nge_tx_cons;
	prod = sc->nge_cdata.nge_tx_prod;
	if (cons == prod)
		return;

	bus_dmamap_sync(sc->nge_cdata.nge_tx_ring_tag,
	    sc->nge_cdata.nge_tx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (; cons != prod; NGE_INC(cons, NGE_TX_RING_CNT)) {
		cur_tx = &sc->nge_rdata.nge_tx_ring[cons];
		cmdsts = le32toh(cur_tx->nge_cmdsts);
		if ((cmdsts & NGE_CMDSTS_OWN) != 0)
			break;
		sc->nge_cdata.nge_tx_cnt--;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		if ((cmdsts & NGE_CMDSTS_MORE) != 0)
			continue;

		txd = &sc->nge_cdata.nge_txdesc[cons];
		bus_dmamap_sync(sc->nge_cdata.nge_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->nge_cdata.nge_tx_tag, txd->tx_dmamap);
		if ((cmdsts & NGE_CMDSTS_PKT_OK) == 0) {
			ifp->if_oerrors++;
			if ((cmdsts & NGE_TXSTAT_EXCESSCOLLS) != 0)
				ifp->if_collisions++;
			if ((cmdsts & NGE_TXSTAT_OUTOFWINCOLL) != 0)
				ifp->if_collisions++;
		} else
			ifp->if_opackets++;

		ifp->if_collisions += (cmdsts & NGE_TXSTAT_COLLCNT) >> 16;
		KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!\n",
		    __func__));
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
	}

	sc->nge_cdata.nge_tx_cons = cons;
	if (sc->nge_cdata.nge_tx_cnt == 0)
		sc->nge_watchdog_timer = 0;
}

static void
nge_tick(void *xsc)
{
	struct nge_softc *sc;
	struct mii_data *mii;

	sc = xsc;
	NGE_LOCK_ASSERT(sc);
	mii = device_get_softc(sc->nge_miibus);
	mii_tick(mii);
	/*
	 * For PHYs that do not reset an established link, it is
	 * necessary to check whether the driver still has a valid
	 * link (e.g. the link state change callback was not called).
	 * Otherwise the driver would think it lost the link, because
	 * the driver initialization routine clears the link state flag.
	 */
	if ((sc->nge_flags & NGE_FLAG_LINK) == 0)
		nge_miibus_statchg(sc->nge_dev);
	nge_stats_update(sc);
	nge_watchdog(sc);
	callout_reset(&sc->nge_stat_ch, hz, nge_tick, sc);
}

static void
nge_stats_update(struct nge_softc *sc)
{
	struct ifnet *ifp;
	struct nge_stats now, *stats, *nstats;

	NGE_LOCK_ASSERT(sc);

	ifp = sc->nge_ifp;
	stats = &now;
	stats->rx_pkts_errs =
	    CSR_READ_4(sc, NGE_MIB_RXERRPKT) & 0xFFFF;
	stats->rx_crc_errs =
	    CSR_READ_4(sc, NGE_MIB_RXERRFCS) & 0xFFFF;
	stats->rx_fifo_oflows =
	    CSR_READ_4(sc, NGE_MIB_RXERRMISSEDPKT) & 0xFFFF;
	stats->rx_align_errs =
	    CSR_READ_4(sc, NGE_MIB_RXERRALIGN) & 0xFFFF;
	stats->rx_sym_errs =
	    CSR_READ_4(sc, NGE_MIB_RXERRSYM) & 0xFFFF;
	stats->rx_pkts_jumbos =
	    CSR_READ_4(sc, NGE_MIB_RXERRGIANT) & 0xFFFF;
	stats->rx_len_errs =
	    CSR_READ_4(sc, NGE_MIB_RXERRRANGLEN) & 0xFFFF;
	stats->rx_unctl_frames =
	    CSR_READ_4(sc, NGE_MIB_RXBADOPCODE) & 0xFFFF;
	stats->rx_pause =
	    CSR_READ_4(sc, NGE_MIB_RXPAUSEPKTS) & 0xFFFF;
	stats->tx_pause =
	    CSR_READ_4(sc, NGE_MIB_TXPAUSEPKTS) & 0xFFFF;
	stats->tx_seq_errs =
	    CSR_READ_4(sc, NGE_MIB_TXERRSQE) & 0xFF;

	/*
	 * Since we accept errored frames, exclude Rx length errors.
	 */
	ifp->if_ierrors += stats->rx_pkts_errs + stats->rx_crc_errs +
	    stats->rx_fifo_oflows + stats->rx_sym_errs;

	nstats = &sc->nge_stats;
	nstats->rx_pkts_errs += stats->rx_pkts_errs;
	nstats->rx_crc_errs += stats->rx_crc_errs;
	nstats->rx_fifo_oflows += stats->rx_fifo_oflows;
	nstats->rx_align_errs += stats->rx_align_errs;
	nstats->rx_sym_errs += stats->rx_sym_errs;
	nstats->rx_pkts_jumbos += stats->rx_pkts_jumbos;
	nstats->rx_len_errs += stats->rx_len_errs;
	nstats->rx_unctl_frames += stats->rx_unctl_frames;
	nstats->rx_pause += stats->rx_pause;
	nstats->tx_pause += stats->tx_pause;
	nstats->tx_seq_errs += stats->tx_seq_errs;
}
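/*
 * Usage note (assumes a kernel built with "options DEVICE_POLLING"):
 * polling is toggled per interface from userland, e.g.
 *
 *	# ifconfig nge0 polling
 *
 * which sets IFCAP_POLLING in if_capenable and routes Rx/Tx
 * processing through nge_poll() below instead of nge_intr().
 */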
#ifdef DEVICE_POLLING
static poll_handler_t nge_poll;

static int
nge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct nge_softc *sc;
	int rx_npkts = 0;

	sc = ifp->if_softc;

	NGE_LOCK(sc);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		NGE_UNLOCK(sc);
		return (rx_npkts);
	}

	/*
	 * On the nge, reading the status register also clears it.
	 * So before returning to intr mode we must make sure that all
	 * possible pending sources of interrupts have been served.
	 * In practice this means running the *eof routines to
	 * completion and then calling the interrupt routine.
	 */
	sc->rxcycles = count;
	rx_npkts = nge_rxeof(sc);
	nge_txeof(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		nge_start_locked(ifp);

	if (sc->rxcycles > 0 || cmd == POLL_AND_CHECK_STATUS) {
		uint32_t status;

		/* Reading the ISR register clears all interrupts. */
		status = CSR_READ_4(sc, NGE_ISR);

		if ((status & (NGE_ISR_RX_ERR|NGE_ISR_RX_OFLOW)) != 0)
			rx_npkts += nge_rxeof(sc);

		if ((status & NGE_ISR_RX_IDLE) != 0)
			NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);

		if ((status & NGE_ISR_SYSERR) != 0) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			nge_init_locked(sc);
		}
	}
	NGE_UNLOCK(sc);
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */

static void
nge_intr(void *arg)
{
	struct nge_softc *sc;
	struct ifnet *ifp;
	uint32_t status;

	sc = (struct nge_softc *)arg;
	ifp = sc->nge_ifp;

	NGE_LOCK(sc);

	if ((sc->nge_flags & NGE_FLAG_SUSPENDED) != 0)
		goto done_locked;

	/* Reading the ISR register clears all interrupts. */
	status = CSR_READ_4(sc, NGE_ISR);
	if (status == 0xffffffff || (status & NGE_INTRS) == 0)
		goto done_locked;
#ifdef DEVICE_POLLING
	if ((ifp->if_capenable & IFCAP_POLLING) != 0)
		goto done_locked;
#endif
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		goto done_locked;

	/* Disable interrupts. */
*/ 1970 CSR_WRITE_4(sc, NGE_IER, 0); 1971 1972 /* Data LED on for TBI mode */ 1973 if ((sc->nge_flags & NGE_FLAG_TBI) != 0) 1974 CSR_WRITE_4(sc, NGE_GPIO, 1975 CSR_READ_4(sc, NGE_GPIO) | NGE_GPIO_GP3_OUT); 1976 1977 for (; (status & NGE_INTRS) != 0;) { 1978 if ((status & (NGE_ISR_TX_DESC_OK | NGE_ISR_TX_ERR | 1979 NGE_ISR_TX_OK | NGE_ISR_TX_IDLE)) != 0) 1980 nge_txeof(sc); 1981 1982 if ((status & (NGE_ISR_RX_DESC_OK | NGE_ISR_RX_ERR | 1983 NGE_ISR_RX_OFLOW | NGE_ISR_RX_FIFO_OFLOW | 1984 NGE_ISR_RX_IDLE | NGE_ISR_RX_OK)) != 0) 1985 nge_rxeof(sc); 1986 1987 if ((status & NGE_ISR_RX_IDLE) != 0) 1988 NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE); 1989 1990 if ((status & NGE_ISR_SYSERR) != 0) { 1991 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1992 nge_init_locked(sc); 1993 } 1994 /* Reading the ISR register clears all interrupts. */ 1995 status = CSR_READ_4(sc, NGE_ISR); 1996 } 1997 1998 /* Re-enable interrupts. */ 1999 CSR_WRITE_4(sc, NGE_IER, 1); 2000 2001 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2002 nge_start_locked(ifp); 2003 2004 /* Data LED off for TBI mode */ 2005 if ((sc->nge_flags & NGE_FLAG_TBI) != 0) 2006 CSR_WRITE_4(sc, NGE_GPIO, 2007 CSR_READ_4(sc, NGE_GPIO) & ~NGE_GPIO_GP3_OUT); 2008 2009 done_locked: 2010 NGE_UNLOCK(sc); 2011 } 2012 2013 /* 2014 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 2015 * pointers to the fragment pointers. 2016 */ 2017 static int 2018 nge_encap(struct nge_softc *sc, struct mbuf **m_head) 2019 { 2020 struct nge_txdesc *txd, *txd_last; 2021 struct nge_desc *desc; 2022 struct mbuf *m; 2023 bus_dmamap_t map; 2024 bus_dma_segment_t txsegs[NGE_MAXTXSEGS]; 2025 int error, i, nsegs, prod, si; 2026 2027 NGE_LOCK_ASSERT(sc); 2028 2029 m = *m_head; 2030 prod = sc->nge_cdata.nge_tx_prod; 2031 txd = &sc->nge_cdata.nge_txdesc[prod]; 2032 txd_last = txd; 2033 map = txd->tx_dmamap; 2034 error = bus_dmamap_load_mbuf_sg(sc->nge_cdata.nge_tx_tag, map, 2035 *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); 2036 if (error == EFBIG) { 2037 m = m_collapse(*m_head, M_DONTWAIT, NGE_MAXTXSEGS); 2038 if (m == NULL) { 2039 m_freem(*m_head); 2040 *m_head = NULL; 2041 return (ENOBUFS); 2042 } 2043 *m_head = m; 2044 error = bus_dmamap_load_mbuf_sg(sc->nge_cdata.nge_tx_tag, 2045 map, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); 2046 if (error != 0) { 2047 m_freem(*m_head); 2048 *m_head = NULL; 2049 return (error); 2050 } 2051 } else if (error != 0) 2052 return (error); 2053 if (nsegs == 0) { 2054 m_freem(*m_head); 2055 *m_head = NULL; 2056 return (EIO); 2057 } 2058 2059 /* Check number of available descriptors. */ 2060 if (sc->nge_cdata.nge_tx_cnt + nsegs >= (NGE_TX_RING_CNT - 1)) { 2061 bus_dmamap_unload(sc->nge_cdata.nge_tx_tag, map); 2062 return (ENOBUFS); 2063 } 2064 2065 bus_dmamap_sync(sc->nge_cdata.nge_tx_tag, map, BUS_DMASYNC_PREWRITE); 2066 2067 si = prod; 2068 for (i = 0; i < nsegs; i++) { 2069 desc = &sc->nge_rdata.nge_tx_ring[prod]; 2070 desc->nge_ptr = htole32(NGE_ADDR_LO(txsegs[i].ds_addr)); 2071 if (i == 0) 2072 desc->nge_cmdsts = htole32(txsegs[i].ds_len | 2073 NGE_CMDSTS_MORE); 2074 else 2075 desc->nge_cmdsts = htole32(txsegs[i].ds_len | 2076 NGE_CMDSTS_MORE | NGE_CMDSTS_OWN); 2077 desc->nge_extsts = 0; 2078 sc->nge_cdata.nge_tx_cnt++; 2079 NGE_INC(prod, NGE_TX_RING_CNT); 2080 } 2081 /* Update producer index. */ 2082 sc->nge_cdata.nge_tx_prod = prod; 2083 2084 prod = (prod + NGE_TX_RING_CNT - 1) % NGE_TX_RING_CNT; 2085 desc = &sc->nge_rdata.nge_tx_ring[prod]; 2086 /* Check if we have a VLAN tag to insert. 
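 * Note the ownership handoff used above: every segment descriptor
 * except the first is queued with NGE_CMDSTS_OWN already set,
 * while the first descriptor stays owned by the host until the
 * VLAN, EOP and checksum bits below have been filled in.  OWN is
 * flipped on the first descriptor last, and nge_start_locked()
 * syncs the ring before kicking the transmitter, so the chip can
 * never walk onto a half-constructed chain.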
*/ 2087 if ((m->m_flags & M_VLANTAG) != 0) 2088 desc->nge_extsts |= htole32(NGE_TXEXTSTS_VLANPKT | 2089 bswap16(m->m_pkthdr.ether_vtag)); 2090 /* Set EOP on the last descriptor. */ 2091 desc->nge_cmdsts &= htole32(~NGE_CMDSTS_MORE); 2092 2093 /* Set checksum offload in the first descriptor. */ 2094 desc = &sc->nge_rdata.nge_tx_ring[si]; 2095 if ((m->m_pkthdr.csum_flags & NGE_CSUM_FEATURES) != 0) { 2096 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) 2097 desc->nge_extsts |= htole32(NGE_TXEXTSTS_IPCSUM); 2098 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0) 2099 desc->nge_extsts |= htole32(NGE_TXEXTSTS_TCPCSUM); 2100 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) 2101 desc->nge_extsts |= htole32(NGE_TXEXTSTS_UDPCSUM); 2102 } 2103 /* Lastly, turn the first descriptor ownership to hardware. */ 2104 desc->nge_cmdsts |= htole32(NGE_CMDSTS_OWN); 2105 2106 txd = &sc->nge_cdata.nge_txdesc[prod]; 2107 map = txd_last->tx_dmamap; 2108 txd_last->tx_dmamap = txd->tx_dmamap; 2109 txd->tx_dmamap = map; 2110 txd->tx_m = m; 2111 2112 return (0); 2113 } 2114 2115 /* 2116 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 2117 * to the mbuf data regions directly in the transmit lists. We also save a 2118 * copy of the pointers since the transmit list fragment pointers are 2119 * physical addresses. 2120 */ 2121 2122 static void 2123 nge_start(struct ifnet *ifp) 2124 { 2125 struct nge_softc *sc; 2126 2127 sc = ifp->if_softc; 2128 NGE_LOCK(sc); 2129 nge_start_locked(ifp); 2130 NGE_UNLOCK(sc); 2131 } 2132 2133 static void 2134 nge_start_locked(struct ifnet *ifp) 2135 { 2136 struct nge_softc *sc; 2137 struct mbuf *m_head; 2138 int enq; 2139 2140 sc = ifp->if_softc; 2141 2142 NGE_LOCK_ASSERT(sc); 2143 2144 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 2145 IFF_DRV_RUNNING || (sc->nge_flags & NGE_FLAG_LINK) == 0) 2146 return; 2147 2148 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && 2149 sc->nge_cdata.nge_tx_cnt < NGE_TX_RING_CNT - 2; ) { 2150 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 2151 if (m_head == NULL) 2152 break; 2153 /* 2154 * Pack the data into the transmit ring. If we 2155 * don't have room, set the OACTIVE flag and wait 2156 * for the NIC to drain the ring. 2157 */ 2158 if (nge_encap(sc, &m_head)) { 2159 if (m_head == NULL) 2160 break; 2161 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 2162 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2163 break; 2164 } 2165 2166 enq++; 2167 /* 2168 * If there's a BPF listener, bounce a copy of this frame 2169 * to him. 2170 */ 2171 ETHER_BPF_MTAP(ifp, m_head); 2172 } 2173 2174 if (enq > 0) { 2175 bus_dmamap_sync(sc->nge_cdata.nge_tx_ring_tag, 2176 sc->nge_cdata.nge_tx_ring_map, 2177 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2178 /* Transmit */ 2179 NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_ENABLE); 2180 2181 /* Set a timeout in case the chip goes out to lunch. */ 2182 sc->nge_watchdog_timer = 5; 2183 } 2184 } 2185 2186 static void 2187 nge_init(void *xsc) 2188 { 2189 struct nge_softc *sc = xsc; 2190 2191 NGE_LOCK(sc); 2192 nge_init_locked(sc); 2193 NGE_UNLOCK(sc); 2194 } 2195 2196 static void 2197 nge_init_locked(struct nge_softc *sc) 2198 { 2199 struct ifnet *ifp = sc->nge_ifp; 2200 struct mii_data *mii; 2201 uint8_t *eaddr; 2202 uint32_t reg; 2203 2204 NGE_LOCK_ASSERT(sc); 2205 2206 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 2207 return; 2208 2209 /* 2210 * Cancel pending I/O and free all RX/TX buffers. 2211 */ 2212 nge_stop(sc); 2213 2214 /* Reset the adapter. */ 2215 nge_reset(sc); 2216 2217 /* Disable Rx filter prior to programming Rx filter.
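 * The receive filter is programmed through an indirect register
 * pair: writing an offset to NGE_RXFILT_CTL selects a filter
 * location and NGE_RXFILT_DATA then accesses 16 bits at that
 * location.  The station address below is loaded into the three
 * 'perfect match' slots PAR0..PAR2 low-order byte first; with an
 * illustrative (made-up) MAC address of 00:a0:cc:12:34:56 the
 * writes would be PAR0 = 0xa000, PAR1 = 0x12cc and PAR2 = 0x5634.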
*/ 2218 CSR_WRITE_4(sc, NGE_RXFILT_CTL, 0); 2219 CSR_BARRIER_WRITE_4(sc, NGE_RXFILT_CTL); 2220 2221 mii = device_get_softc(sc->nge_miibus); 2222 2223 /* Set MAC address. */ 2224 eaddr = IF_LLADDR(sc->nge_ifp); 2225 CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR0); 2226 CSR_WRITE_4(sc, NGE_RXFILT_DATA, (eaddr[1] << 8) | eaddr[0]); 2227 CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR1); 2228 CSR_WRITE_4(sc, NGE_RXFILT_DATA, (eaddr[3] << 8) | eaddr[2]); 2229 CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR2); 2230 CSR_WRITE_4(sc, NGE_RXFILT_DATA, (eaddr[5] << 8) | eaddr[4]); 2231 2232 /* Init circular RX list. */ 2233 if (nge_list_rx_init(sc) == ENOBUFS) { 2234 device_printf(sc->nge_dev, "initialization failed: no " 2235 "memory for rx buffers\n"); 2236 nge_stop(sc); 2237 return; 2238 } 2239 2240 /* 2241 * Init tx descriptors. 2242 */ 2243 nge_list_tx_init(sc); 2244 2245 /* 2246 * For the NatSemi chip, we have to explicitly enable the 2247 * reception of ARP frames, as well as turn on the 'perfect 2248 * match' filter where we store the station address, otherwise 2249 * we won't receive unicasts meant for this host. 2250 */ 2251 NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ARP); 2252 NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_PERFECT); 2253 2254 /* 2255 * Set the capture broadcast bit to capture broadcast frames. 2256 */ 2257 if (ifp->if_flags & IFF_BROADCAST) { 2258 NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD); 2259 } else { 2260 NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD); 2261 } 2262 2263 /* Turn the receive filter on. */ 2264 NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ENABLE); 2265 2266 /* Set Rx filter. */ 2267 nge_rxfilter(sc); 2268 2269 /* Disable PRIQ ctl. */ 2270 CSR_WRITE_4(sc, NGE_PRIOQCTL, 0); 2271 2272 /* 2273 * Set pause frame parameters. 2274 * Rx stat FIFO hi-threshold : 2 or more packets 2275 * Rx stat FIFO lo-threshold : less than 2 packets 2276 * Rx data FIFO hi-threshold : 2K or more bytes 2277 * Rx data FIFO lo-threshold : less than 2K bytes 2278 * pause time : (512ns * 0xffff) -> 33.55ms 2279 */ 2280 CSR_WRITE_4(sc, NGE_PAUSECSR, 2281 NGE_PAUSECSR_PAUSE_ON_MCAST | 2282 NGE_PAUSECSR_PAUSE_ON_DA | 2283 ((1 << 24) & NGE_PAUSECSR_RX_STATFIFO_THR_HI) | 2284 ((1 << 22) & NGE_PAUSECSR_RX_STATFIFO_THR_LO) | 2285 ((1 << 20) & NGE_PAUSECSR_RX_DATAFIFO_THR_HI) | 2286 ((1 << 18) & NGE_PAUSECSR_RX_DATAFIFO_THR_LO) | 2287 NGE_PAUSECSR_CNT); 2288 2289 /* 2290 * Load the address of the RX and TX lists. 2291 */ 2292 CSR_WRITE_4(sc, NGE_RX_LISTPTR_HI, 2293 NGE_ADDR_HI(sc->nge_rdata.nge_rx_ring_paddr)); 2294 CSR_WRITE_4(sc, NGE_RX_LISTPTR_LO, 2295 NGE_ADDR_LO(sc->nge_rdata.nge_rx_ring_paddr)); 2296 CSR_WRITE_4(sc, NGE_TX_LISTPTR_HI, 2297 NGE_ADDR_HI(sc->nge_rdata.nge_tx_ring_paddr)); 2298 CSR_WRITE_4(sc, NGE_TX_LISTPTR_LO, 2299 NGE_ADDR_LO(sc->nge_rdata.nge_tx_ring_paddr)); 2300 2301 /* Set RX configuration. */ 2302 CSR_WRITE_4(sc, NGE_RX_CFG, NGE_RXCFG); 2303 2304 CSR_WRITE_4(sc, NGE_VLAN_IP_RXCTL, 0); 2305 /* 2306 * Enable hardware checksum validation for all IPv4 2307 * packets; do not reject packets with bad checksums. 2308 */ 2309 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 2310 NGE_SETBIT(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_IPCSUM_ENB); 2311 2312 /* 2313 * Tell the chip to detect and strip VLAN tag info from 2314 * received frames. The tag will be provided in the extsts 2315 * field in the RX descriptors.
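 * Tag detection is always enabled so the chip can classify
 * tagged frames; stripping is enabled below only when the
 * administrator has IFCAP_VLAN_HWTAGGING turned on.  The chip
 * presumably handles the tag in on-the-wire (big-endian) order,
 * which is why nge_encap() feeds the host-order ether_vtag
 * through bswap16() on the transmit side.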
2316 */ 2317 NGE_SETBIT(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_TAG_DETECT_ENB); 2318 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 2319 NGE_SETBIT(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_TAG_STRIP_ENB); 2320 2321 /* Set TX configuration. */ 2322 CSR_WRITE_4(sc, NGE_TX_CFG, NGE_TXCFG); 2323 2324 /* 2325 * Enable TX IPv4 checksumming on a per-packet basis. 2326 */ 2327 CSR_WRITE_4(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_CSUM_PER_PKT); 2328 2329 /* 2330 * Tell the chip to insert VLAN tags on a per-packet basis as 2331 * dictated by the code in the frame encapsulation routine. 2332 */ 2333 NGE_SETBIT(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_TAG_PER_PKT); 2334 2335 /* 2336 * Enable the delivery of PHY interrupts based on 2337 * link/speed/duplex status changes. Also enable the 2338 * extsts field in the DMA descriptors (needed for 2339 * TCP/IP checksum offload on transmit). 2340 */ 2341 NGE_SETBIT(sc, NGE_CFG, NGE_CFG_PHYINTR_SPD | 2342 NGE_CFG_PHYINTR_LNK | NGE_CFG_PHYINTR_DUP | NGE_CFG_EXTSTS_ENB); 2343 2344 /* 2345 * Configure interrupt holdoff (moderation). We can 2346 * have the chip delay interrupt delivery for a certain 2347 * period. Units are in 100us, and the max setting 2348 * is 25500us (0xFF x 100us). Default is a 100us holdoff. 2349 */ 2350 CSR_WRITE_4(sc, NGE_IHR, sc->nge_int_holdoff); 2351 2352 /* 2353 * Enable the MAC statistics counters and clear them. 2354 */ 2355 reg = CSR_READ_4(sc, NGE_MIBCTL); 2356 reg &= ~NGE_MIBCTL_FREEZE_CNT; 2357 reg |= NGE_MIBCTL_CLEAR_CNT; 2358 CSR_WRITE_4(sc, NGE_MIBCTL, reg); 2359 2360 /* 2361 * Enable interrupts. 2362 */ 2363 CSR_WRITE_4(sc, NGE_IMR, NGE_INTRS); 2364 #ifdef DEVICE_POLLING 2365 /* 2366 * ... only enable interrupts if we are not polling; make sure 2367 * they are off otherwise. 2368 */ 2369 if ((ifp->if_capenable & IFCAP_POLLING) != 0) 2370 CSR_WRITE_4(sc, NGE_IER, 0); 2371 else 2372 #endif 2373 CSR_WRITE_4(sc, NGE_IER, 1); 2374 2375 sc->nge_flags &= ~NGE_FLAG_LINK; 2376 mii_mediachg(mii); 2377 2378 sc->nge_watchdog_timer = 0; 2379 callout_reset(&sc->nge_stat_ch, hz, nge_tick, sc); 2380 2381 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2382 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2383 } 2384 2385 /* 2386 * Set media options. 2387 */ 2388 static int 2389 nge_mediachange(struct ifnet *ifp) 2390 { 2391 struct nge_softc *sc; 2392 struct mii_data *mii; 2393 struct mii_softc *miisc; 2394 int error; 2395 2396 sc = ifp->if_softc; 2397 NGE_LOCK(sc); 2398 mii = device_get_softc(sc->nge_miibus); 2399 if (mii->mii_instance) { 2400 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 2401 mii_phy_reset(miisc); 2402 } 2403 error = mii_mediachg(mii); 2404 NGE_UNLOCK(sc); 2405 2406 return (error); 2407 } 2408 2409 /* 2410 * Report current media status.
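 * Like nge_mediachange() above, this is a thin shim around the
 * MII layer: mii_pollstat() refreshes mii_media_active and
 * mii_media_status from the PHY, and those fields are then
 * copied into the ifmediareq for SIOCGIFMEDIA consumers such as
 * ifconfig(8).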
2411 */ 2412 static void 2413 nge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 2414 { 2415 struct nge_softc *sc; 2416 struct mii_data *mii; 2417 2418 sc = ifp->if_softc; 2419 NGE_LOCK(sc); 2420 mii = device_get_softc(sc->nge_miibus); 2421 mii_pollstat(mii); 2422 NGE_UNLOCK(sc); 2423 ifmr->ifm_active = mii->mii_media_active; 2424 ifmr->ifm_status = mii->mii_media_status; 2425 } 2426 2427 static int 2428 nge_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 2429 { 2430 struct nge_softc *sc = ifp->if_softc; 2431 struct ifreq *ifr = (struct ifreq *) data; 2432 struct mii_data *mii; 2433 int error = 0, mask; 2434 2435 switch (command) { 2436 case SIOCSIFMTU: 2437 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NGE_JUMBO_MTU) 2438 error = EINVAL; 2439 else { 2440 NGE_LOCK(sc); 2441 ifp->if_mtu = ifr->ifr_mtu; 2442 /* 2443 * Workaround: if the MTU is larger than 8152 (8170, the 2444 * largest frame the 8KB TX FIFO can checksum, minus the 2445 * 18-byte Ethernet header and CRC), turn off TX checksum offloading. 2446 */ 2447 if (ifr->ifr_mtu >= 8152) { 2448 ifp->if_capenable &= ~IFCAP_TXCSUM; 2449 ifp->if_hwassist &= ~NGE_CSUM_FEATURES; 2450 } else { 2451 ifp->if_capenable |= IFCAP_TXCSUM; 2452 ifp->if_hwassist |= NGE_CSUM_FEATURES; 2453 } 2454 NGE_UNLOCK(sc); 2455 VLAN_CAPABILITIES(ifp); 2456 } 2457 break; 2458 case SIOCSIFFLAGS: 2459 NGE_LOCK(sc); 2460 if ((ifp->if_flags & IFF_UP) != 0) { 2461 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 2462 if ((ifp->if_flags ^ sc->nge_if_flags) & 2463 (IFF_PROMISC | IFF_ALLMULTI)) 2464 nge_rxfilter(sc); 2465 } else { 2466 if ((sc->nge_flags & NGE_FLAG_DETACH) == 0) 2467 nge_init_locked(sc); 2468 } 2469 } else { 2470 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 2471 nge_stop(sc); 2472 } 2473 sc->nge_if_flags = ifp->if_flags; 2474 NGE_UNLOCK(sc); 2475 error = 0; 2476 break; 2477 case SIOCADDMULTI: 2478 case SIOCDELMULTI: 2479 NGE_LOCK(sc); 2480 nge_rxfilter(sc); 2481 NGE_UNLOCK(sc); 2482 error = 0; 2483 break; 2484 case SIOCGIFMEDIA: 2485 case SIOCSIFMEDIA: 2486 mii = device_get_softc(sc->nge_miibus); 2487 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 2488 break; 2489 case SIOCSIFCAP: 2490 NGE_LOCK(sc); 2491 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 2492 #ifdef DEVICE_POLLING 2493 if ((mask & IFCAP_POLLING) != 0 && 2494 (IFCAP_POLLING & ifp->if_capabilities) != 0) { 2495 ifp->if_capenable ^= IFCAP_POLLING; 2496 if ((IFCAP_POLLING & ifp->if_capenable) != 0) { 2497 error = ether_poll_register(nge_poll, ifp); 2498 if (error != 0) { 2499 NGE_UNLOCK(sc); 2500 break; 2501 } 2502 /* Disable interrupts. */ 2503 CSR_WRITE_4(sc, NGE_IER, 0); 2504 } else { 2505 error = ether_poll_deregister(ifp); 2506 /* Enable interrupts.
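 * The ordering here mirrors the registration path above:
 * interrupts are gated off only after ether_poll_register()
 * succeeds, and gated back on once ether_poll_deregister() has
 * removed nge_poll, so the device is always serviced by exactly
 * one of the two mechanisms.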
*/ 2507 CSR_WRITE_4(sc, NGE_IER, 1); 2508 } 2509 } 2510 #endif /* DEVICE_POLLING */ 2511 if ((mask & IFCAP_TXCSUM) != 0 && 2512 (IFCAP_TXCSUM & ifp->if_capabilities) != 0) { 2513 ifp->if_capenable ^= IFCAP_TXCSUM; 2514 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0) 2515 ifp->if_hwassist |= NGE_CSUM_FEATURES; 2516 else 2517 ifp->if_hwassist &= ~NGE_CSUM_FEATURES; 2518 } 2519 if ((mask & IFCAP_RXCSUM) != 0 && 2520 (IFCAP_RXCSUM & ifp->if_capabilities) != 0) 2521 ifp->if_capenable ^= IFCAP_RXCSUM; 2522 2523 if ((mask & IFCAP_WOL) != 0 && 2524 (ifp->if_capabilities & IFCAP_WOL) != 0) { 2525 if ((mask & IFCAP_WOL_UCAST) != 0) 2526 ifp->if_capenable ^= IFCAP_WOL_UCAST; 2527 if ((mask & IFCAP_WOL_MCAST) != 0) 2528 ifp->if_capenable ^= IFCAP_WOL_MCAST; 2529 if ((mask & IFCAP_WOL_MAGIC) != 0) 2530 ifp->if_capenable ^= IFCAP_WOL_MAGIC; 2531 } 2532 2533 if ((mask & IFCAP_VLAN_HWCSUM) != 0 && 2534 (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0) 2535 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; 2536 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && 2537 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) { 2538 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 2539 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 2540 if ((ifp->if_capenable & 2541 IFCAP_VLAN_HWTAGGING) != 0) 2542 NGE_SETBIT(sc, 2543 NGE_VLAN_IP_RXCTL, 2544 NGE_VIPRXCTL_TAG_STRIP_ENB); 2545 else 2546 NGE_CLRBIT(sc, 2547 NGE_VLAN_IP_RXCTL, 2548 NGE_VIPRXCTL_TAG_STRIP_ENB); 2549 } 2550 } 2551 /* 2552 * Both VLAN hardware tagging and checksum offloading are 2553 * required to do checksum offloading on a VLAN interface. 2554 */ 2555 if ((ifp->if_capenable & IFCAP_TXCSUM) == 0) 2556 ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM; 2557 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) 2558 ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM; 2559 NGE_UNLOCK(sc); 2560 VLAN_CAPABILITIES(ifp); 2561 break; 2562 default: 2563 error = ether_ioctl(ifp, command, data); 2564 break; 2565 } 2566 2567 return (error); 2568 } 2569 2570 static void 2571 nge_watchdog(struct nge_softc *sc) 2572 { 2573 struct ifnet *ifp; 2574 2575 NGE_LOCK_ASSERT(sc); 2576 2577 if (sc->nge_watchdog_timer == 0 || --sc->nge_watchdog_timer) 2578 return; 2579 2580 ifp = sc->nge_ifp; 2581 ifp->if_oerrors++; 2582 if_printf(ifp, "watchdog timeout\n"); 2583 2584 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2585 nge_init_locked(sc); 2586 2587 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2588 nge_start_locked(ifp); 2589 } 2590 2591 static int 2592 nge_stop_mac(struct nge_softc *sc) 2593 { 2594 uint32_t reg; 2595 int i; 2596 2597 NGE_LOCK_ASSERT(sc); 2598 2599 reg = CSR_READ_4(sc, NGE_CSR); 2600 if ((reg & (NGE_CSR_TX_ENABLE | NGE_CSR_RX_ENABLE)) != 0) { 2601 reg &= ~(NGE_CSR_TX_ENABLE | NGE_CSR_RX_ENABLE); 2602 reg |= NGE_CSR_TX_DISABLE | NGE_CSR_RX_DISABLE; 2603 CSR_WRITE_4(sc, NGE_CSR, reg); 2604 for (i = 0; i < NGE_TIMEOUT; i++) { 2605 DELAY(1); 2606 if ((CSR_READ_4(sc, NGE_CSR) & 2607 (NGE_CSR_RX_ENABLE | NGE_CSR_TX_ENABLE)) == 0) 2608 break; 2609 } 2610 if (i == NGE_TIMEOUT) 2611 return (ETIMEDOUT); 2612 } 2613 2614 return (0); 2615 } 2616 2617 /* 2618 * Stop the adapter and free any mbufs allocated to the 2619 * RX and TX lists.
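 * The teardown order below matters: interrupts are gated off
 * first (IER/IMR), the MAC Tx/Rx engines are halted and polled
 * until idle by nge_stop_mac(), the list pointers are cleared so
 * the chip cannot restart DMA, and only then are the DMA maps
 * unloaded and the mbufs returned to the system.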
2620 */ 2621 static void 2622 nge_stop(struct nge_softc *sc) 2623 { 2624 struct nge_txdesc *txd; 2625 struct nge_rxdesc *rxd; 2626 int i; 2627 struct ifnet *ifp; 2628 2629 NGE_LOCK_ASSERT(sc); 2630 ifp = sc->nge_ifp; 2631 2632 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 2633 sc->nge_flags &= ~NGE_FLAG_LINK; 2634 callout_stop(&sc->nge_stat_ch); 2635 sc->nge_watchdog_timer = 0; 2636 2637 CSR_WRITE_4(sc, NGE_IER, 0); 2638 CSR_WRITE_4(sc, NGE_IMR, 0); 2639 if (nge_stop_mac(sc) == ETIMEDOUT) 2640 device_printf(sc->nge_dev, 2641 "%s: unable to stop Tx/Rx MAC\n", __func__); 2642 CSR_WRITE_4(sc, NGE_TX_LISTPTR_HI, 0); 2643 CSR_WRITE_4(sc, NGE_TX_LISTPTR_LO, 0); 2644 CSR_WRITE_4(sc, NGE_RX_LISTPTR_HI, 0); 2645 CSR_WRITE_4(sc, NGE_RX_LISTPTR_LO, 0); 2646 nge_stats_update(sc); 2647 if (sc->nge_head != NULL) { 2648 m_freem(sc->nge_head); 2649 sc->nge_head = sc->nge_tail = NULL; 2650 } 2651 2652 /* 2653 * Free RX and TX mbufs still in the queues. 2654 */ 2655 for (i = 0; i < NGE_RX_RING_CNT; i++) { 2656 rxd = &sc->nge_cdata.nge_rxdesc[i]; 2657 if (rxd->rx_m != NULL) { 2658 bus_dmamap_sync(sc->nge_cdata.nge_rx_tag, 2659 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 2660 bus_dmamap_unload(sc->nge_cdata.nge_rx_tag, 2661 rxd->rx_dmamap); 2662 m_freem(rxd->rx_m); 2663 rxd->rx_m = NULL; 2664 } 2665 } 2666 for (i = 0; i < NGE_TX_RING_CNT; i++) { 2667 txd = &sc->nge_cdata.nge_txdesc[i]; 2668 if (txd->tx_m != NULL) { 2669 bus_dmamap_sync(sc->nge_cdata.nge_tx_tag, 2670 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 2671 bus_dmamap_unload(sc->nge_cdata.nge_tx_tag, 2672 txd->tx_dmamap); 2673 m_freem(txd->tx_m); 2674 txd->tx_m = NULL; 2675 } 2676 } 2677 } 2678 2679 /* 2680 * Before setting WOL bits, caller should have stopped Receiver. 2681 */ 2682 static void 2683 nge_wol(struct nge_softc *sc) 2684 { 2685 struct ifnet *ifp; 2686 uint32_t reg; 2687 uint16_t pmstat; 2688 int pmc; 2689 2690 NGE_LOCK_ASSERT(sc); 2691 2692 if (pci_find_extcap(sc->nge_dev, PCIY_PMG, &pmc) != 0) 2693 return; 2694 2695 ifp = sc->nge_ifp; 2696 if ((ifp->if_capenable & IFCAP_WOL) == 0) { 2697 /* Disable WOL & disconnect CLKRUN to save power. */ 2698 CSR_WRITE_4(sc, NGE_WOLCSR, 0); 2699 CSR_WRITE_4(sc, NGE_CLKRUN, 0); 2700 } else { 2701 if (nge_stop_mac(sc) == ETIMEDOUT) 2702 device_printf(sc->nge_dev, 2703 "%s: unable to stop Tx/Rx MAC\n", __func__); 2704 /* 2705 * Make sure wake frames will be buffered in the Rx FIFO. 2706 * (i.e. Silent Rx mode.) 2707 */ 2708 CSR_WRITE_4(sc, NGE_RX_LISTPTR_HI, 0); 2709 CSR_BARRIER_WRITE_4(sc, NGE_RX_LISTPTR_HI); 2710 CSR_WRITE_4(sc, NGE_RX_LISTPTR_LO, 0); 2711 CSR_BARRIER_WRITE_4(sc, NGE_RX_LISTPTR_LO); 2712 /* Enable Rx again. */ 2713 NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE); 2714 CSR_BARRIER_WRITE_4(sc, NGE_CSR); 2715 2716 /* Configure WOL events. */ 2717 reg = 0; 2718 if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0) 2719 reg |= NGE_WOLCSR_WAKE_ON_UNICAST; 2720 if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0) 2721 reg |= NGE_WOLCSR_WAKE_ON_MULTICAST; 2722 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) 2723 reg |= NGE_WOLCSR_WAKE_ON_MAGICPKT; 2724 CSR_WRITE_4(sc, NGE_WOLCSR, reg); 2725 2726 /* Activate CLKRUN. */ 2727 reg = CSR_READ_4(sc, NGE_CLKRUN); 2728 reg |= NGE_CLKRUN_PMEENB | NGE_CLNRUN_CLKRUN_ENB; 2729 CSR_WRITE_4(sc, NGE_CLKRUN, reg); 2730 } 2731 2732 /* Request PME. 
*/ 2733 pmstat = pci_read_config(sc->nge_dev, pmc + PCIR_POWER_STATUS, 2); 2734 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 2735 if ((ifp->if_capenable & IFCAP_WOL) != 0) 2736 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 2737 pci_write_config(sc->nge_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); 2738 } 2739 2740 /* 2741 * Stop all chip I/O so that the kernel's probe routines don't 2742 * get confused by errant DMAs when rebooting. 2743 */ 2744 static int 2745 nge_shutdown(device_t dev) 2746 { 2747 2748 return (nge_suspend(dev)); 2749 } 2750 2751 static int 2752 nge_suspend(device_t dev) 2753 { 2754 struct nge_softc *sc; 2755 2756 sc = device_get_softc(dev); 2757 2758 NGE_LOCK(sc); 2759 nge_stop(sc); 2760 nge_wol(sc); 2761 sc->nge_flags |= NGE_FLAG_SUSPENDED; 2762 NGE_UNLOCK(sc); 2763 2764 return (0); 2765 } 2766 2767 static int 2768 nge_resume(device_t dev) 2769 { 2770 struct nge_softc *sc; 2771 struct ifnet *ifp; 2772 uint16_t pmstat; 2773 int pmc; 2774 2775 sc = device_get_softc(dev); 2776 2777 NGE_LOCK(sc); 2778 ifp = sc->nge_ifp; 2779 if (pci_find_extcap(sc->nge_dev, PCIY_PMG, &pmc) == 0) { 2780 /* Disable PME and clear PME status. */ 2781 pmstat = pci_read_config(sc->nge_dev, 2782 pmc + PCIR_POWER_STATUS, 2); 2783 if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) { 2784 pmstat &= ~PCIM_PSTAT_PMEENABLE; 2785 pci_write_config(sc->nge_dev, 2786 pmc + PCIR_POWER_STATUS, pmstat, 2); 2787 } 2788 } 2789 if (ifp->if_flags & IFF_UP) { 2790 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2791 nge_init_locked(sc); 2792 } 2793 2794 sc->nge_flags &= ~NGE_FLAG_SUSPENDED; 2795 NGE_UNLOCK(sc); 2796 2797 return (0); 2798 } 2799 2800 #define NGE_SYSCTL_STAT_ADD32(c, h, n, p, d) \ 2801 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) 2802 2803 static void 2804 nge_sysctl_node(struct nge_softc *sc) 2805 { 2806 struct sysctl_ctx_list *ctx; 2807 struct sysctl_oid_list *child, *parent; 2808 struct sysctl_oid *tree; 2809 struct nge_stats *stats; 2810 int error; 2811 2812 ctx = device_get_sysctl_ctx(sc->nge_dev); 2813 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->nge_dev)); 2814 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_holdoff", 2815 CTLTYPE_INT | CTLFLAG_RW, &sc->nge_int_holdoff, 0, 2816 sysctl_hw_nge_int_holdoff, "I", "NGE interrupt moderation"); 2817 /* Pull in device tunables. */ 2818 sc->nge_int_holdoff = NGE_INT_HOLDOFF_DEFAULT; 2819 error = resource_int_value(device_get_name(sc->nge_dev), 2820 device_get_unit(sc->nge_dev), "int_holdoff", &sc->nge_int_holdoff); 2821 if (error == 0) { 2822 if (sc->nge_int_holdoff < NGE_INT_HOLDOFF_MIN || 2823 sc->nge_int_holdoff > NGE_INT_HOLDOFF_MAX ) { 2824 device_printf(sc->nge_dev, 2825 "int_holdoff value out of range; " 2826 "using default: %d(%d us)\n", 2827 NGE_INT_HOLDOFF_DEFAULT, 2828 NGE_INT_HOLDOFF_DEFAULT * 100); 2829 sc->nge_int_holdoff = NGE_INT_HOLDOFF_DEFAULT; 2830 } 2831 } 2832 2833 stats = &sc->nge_stats; 2834 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, 2835 NULL, "NGE statistics"); 2836 parent = SYSCTL_CHILDREN(tree); 2837 2838 /* Rx statistics. 
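 * The nodes below hang off the device's sysctl tree, so with
 * unit 0 (for example) the counters appear as
 * dev.nge.0.stats.rx.crc_errs, dev.nge.0.stats.tx.pause and so
 * on, and can be inspected with sysctl(8) at runtime.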
*/ 2839 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD, 2840 NULL, "Rx MAC statistics"); 2841 child = SYSCTL_CHILDREN(tree); 2842 NGE_SYSCTL_STAT_ADD32(ctx, child, "pkts_errs", 2843 &stats->rx_pkts_errs, 2844 "Packet errors including both wire errors and FIFO overruns"); 2845 NGE_SYSCTL_STAT_ADD32(ctx, child, "crc_errs", 2846 &stats->rx_crc_errs, "CRC errors"); 2847 NGE_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows", 2848 &stats->rx_fifo_oflows, "FIFO overflows"); 2849 NGE_SYSCTL_STAT_ADD32(ctx, child, "align_errs", 2850 &stats->rx_align_errs, "Frame alignment errors"); 2851 NGE_SYSCTL_STAT_ADD32(ctx, child, "sym_errs", 2852 &stats->rx_sym_errs, "One or more symbol errors"); 2853 NGE_SYSCTL_STAT_ADD32(ctx, child, "pkts_jumbos", 2854 &stats->rx_pkts_jumbos, 2855 "Packets received with length greater than 1518 bytes"); 2856 NGE_SYSCTL_STAT_ADD32(ctx, child, "len_errs", 2857 &stats->rx_len_errs, "In Range Length errors"); 2858 NGE_SYSCTL_STAT_ADD32(ctx, child, "unctl_frames", 2859 &stats->rx_unctl_frames, "Control frames with unsupported opcode"); 2860 NGE_SYSCTL_STAT_ADD32(ctx, child, "pause", 2861 &stats->rx_pause, "Pause frames"); 2862 2863 /* Tx statistics. */ 2864 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD, 2865 NULL, "Tx MAC statistics"); 2866 child = SYSCTL_CHILDREN(tree); 2867 NGE_SYSCTL_STAT_ADD32(ctx, child, "pause", 2868 &stats->tx_pause, "Pause frames"); 2869 NGE_SYSCTL_STAT_ADD32(ctx, child, "seq_errs", 2870 &stats->tx_seq_errs, 2871 "Loss of collision heartbeat during transmission"); 2872 } 2873 2874 #undef NGE_SYSCTL_STAT_ADD32 2875 2876 static int 2877 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) 2878 { 2879 int error, value; 2880 2881 if (arg1 == NULL) 2882 return (EINVAL); 2883 value = *(int *)arg1; 2884 error = sysctl_handle_int(oidp, &value, 0, req); 2885 if (error != 0 || req->newptr == NULL) 2886 return (error); 2887 if (value < low || value > high) 2888 return (EINVAL); 2889 *(int *)arg1 = value; 2890 2891 return (0); 2892 } 2893 2894 static int 2895 sysctl_hw_nge_int_holdoff(SYSCTL_HANDLER_ARGS) 2896 { 2897 2898 return (sysctl_int_range(oidp, arg1, arg2, req, NGE_INT_HOLDOFF_MIN, 2899 NGE_INT_HOLDOFF_MAX)); 2900 } 2901