/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2000, 2001
 *	Bill Paul <wpaul@bsdi.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * National Semiconductor DP83820/DP83821 gigabit ethernet driver
 * for FreeBSD. Datasheets are available from:
 *
 * http://www.national.com/ds/DP/DP83820.pdf
 * http://www.national.com/ds/DP/DP83821.pdf
 *
 * These chips are used on several low cost gigabit ethernet NICs
 * sold by D-Link, Addtron, SMC and Asante. Both parts are
 * virtually the same, except the 83820 is a 64-bit/32-bit part,
 * while the 83821 is 32-bit only.
 *
 * Many cards also use National gigE transceivers, such as the
 * DP83891, DP83861 and DP83862 gigPHYTER parts. The DP83861 datasheet
 * contains a full register description that applies to all of these
 * components:
 *
 * http://www.national.com/ds/DP/DP83861.pdf
 *
 * Written by Bill Paul <wpaul@bsdi.com>
 * BSDi Open Source Solutions
 */

/*
 * The NatSemi DP83820 and 83821 controllers are enhanced versions
 * of the NatSemi MacPHYTER 10/100 devices. They support 10, 100
 * and 1000Mbps speeds with 1000baseX (ten bit interface), MII and GMII
 * ports. Other features include 8K TX FIFO and 32K RX FIFO, TCP/IP
 * hardware checksum offload (IPv4 only), VLAN tagging and filtering,
 * priority TX and RX queues, a 2048 bit multicast hash filter, 4 RX pattern
 * matching buffers, one perfect address filter buffer and interrupt
 * moderation. The 83820 supports both 64-bit and 32-bit addressing
 * and data transfers: the 64-bit support can be toggled on or off
 * via software. This affects the size of certain fields in the DMA
 * descriptors.
 *
 * There are two bugs/misfeatures in the 83820/83821 that I have
 * discovered so far:
 *
 * - Receive buffers must be aligned on 64-bit boundaries, which means
 *   you must resort to copying data in order to fix up the payload
 *   alignment.
 *
 * - In order to transmit jumbo frames larger than 8170 bytes, you have
 *   to turn off transmit checksum offloading, because the chip can't
 *   compute the checksum on an outgoing frame unless it fits entirely
 *   within the TX FIFO, which is only 8192 bytes in size. If you have
 *   TX checksum offload enabled and you attempt to transmit a
 *   frame larger than 8170 bytes, the transmitter will wedge.
 *
 * To work around the latter problem, TX checksum offload is disabled
 * if the user selects an MTU larger than 8152 (8170 - 18).
 */
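/*
 * Illustrative sketch only (not part of this driver): the arithmetic
 * behind the 8152-byte MTU cutoff described above.  A frame on the
 * wire is the MTU plus 18 bytes of Ethernet header and CRC, so TX
 * checksum offload is only usable while mtu + 18 stays within the
 * chip's 8170-byte limit, i.e. up to an MTU of about 8152.  The macro
 * and helper names are invented for this example (the real check
 * lives in nge_ioctl() below), and the block is wrapped in #if 0 so
 * it never affects the build.
 */
#if 0	/* example only */
#define NGE_TX_CSUM_FRAME_MAX	8170	/* largest frame the chip can checksum */
#define NGE_ETHER_OVERHEAD	18	/* Ethernet header (14) + CRC (4) */

static int
nge_mtu_allows_tx_csum(int mtu)
{
	/* Offload can stay enabled only while the whole frame fits. */
	return (mtu + NGE_ETHER_OVERHEAD <= NGE_TX_CSUM_FRAME_MAX);
}
#endif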
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <vm/vm.h>              /* for vtophys */
#include <vm/pmap.h>            /* for vtophys */
#include <machine/clock.h>      /* for DELAY */
#include <machine/bus_pio.h>
#include <machine/bus_memio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <pci/pcireg.h>
#include <pci/pcivar.h>

#define NGE_USEIOSPACE

#include <dev/nge/if_ngereg.h>

MODULE_DEPEND(nge, miibus, 1, 1, 1);

/* "controller miibus0" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#ifndef lint
static const char rcsid[] =
  "$FreeBSD$";
#endif

#define NGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

/*
 * Various supported device vendors/types and their names.
142 */ 143 static struct nge_type nge_devs[] = { 144 { NGE_VENDORID, NGE_DEVICEID, 145 "National Semiconductor Gigabit Ethernet" }, 146 { 0, 0, NULL } 147 }; 148 149 static int nge_probe (device_t); 150 static int nge_attach (device_t); 151 static int nge_detach (device_t); 152 153 static int nge_alloc_jumbo_mem (struct nge_softc *); 154 static void nge_free_jumbo_mem (struct nge_softc *); 155 static void *nge_jalloc (struct nge_softc *); 156 static void nge_jfree (caddr_t, void *); 157 158 static int nge_newbuf (struct nge_softc *, 159 struct nge_desc *, struct mbuf *); 160 static int nge_encap (struct nge_softc *, 161 struct mbuf *, u_int32_t *); 162 static void nge_rxeof (struct nge_softc *); 163 static void nge_txeof (struct nge_softc *); 164 static void nge_intr (void *); 165 static void nge_tick (void *); 166 static void nge_start (struct ifnet *); 167 static int nge_ioctl (struct ifnet *, u_long, caddr_t); 168 static void nge_init (void *); 169 static void nge_stop (struct nge_softc *); 170 static void nge_watchdog (struct ifnet *); 171 static void nge_shutdown (device_t); 172 static int nge_ifmedia_upd (struct ifnet *); 173 static void nge_ifmedia_sts (struct ifnet *, struct ifmediareq *); 174 175 static void nge_delay (struct nge_softc *); 176 static void nge_eeprom_idle (struct nge_softc *); 177 static void nge_eeprom_putbyte (struct nge_softc *, int); 178 static void nge_eeprom_getword (struct nge_softc *, int, u_int16_t *); 179 static void nge_read_eeprom (struct nge_softc *, caddr_t, int, int, int); 180 181 static void nge_mii_sync (struct nge_softc *); 182 static void nge_mii_send (struct nge_softc *, u_int32_t, int); 183 static int nge_mii_readreg (struct nge_softc *, struct nge_mii_frame *); 184 static int nge_mii_writereg (struct nge_softc *, struct nge_mii_frame *); 185 186 static int nge_miibus_readreg (device_t, int, int); 187 static int nge_miibus_writereg (device_t, int, int, int); 188 static void nge_miibus_statchg (device_t); 189 190 static void nge_setmulti (struct nge_softc *); 191 static u_int32_t nge_crc (struct nge_softc *, caddr_t); 192 static void nge_reset (struct nge_softc *); 193 static int nge_list_rx_init (struct nge_softc *); 194 static int nge_list_tx_init (struct nge_softc *); 195 196 #ifdef NGE_USEIOSPACE 197 #define NGE_RES SYS_RES_IOPORT 198 #define NGE_RID NGE_PCI_LOIO 199 #else 200 #define NGE_RES SYS_RES_MEMORY 201 #define NGE_RID NGE_PCI_LOMEM 202 #endif 203 204 static device_method_t nge_methods[] = { 205 /* Device interface */ 206 DEVMETHOD(device_probe, nge_probe), 207 DEVMETHOD(device_attach, nge_attach), 208 DEVMETHOD(device_detach, nge_detach), 209 DEVMETHOD(device_shutdown, nge_shutdown), 210 211 /* bus interface */ 212 DEVMETHOD(bus_print_child, bus_generic_print_child), 213 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 214 215 /* MII interface */ 216 DEVMETHOD(miibus_readreg, nge_miibus_readreg), 217 DEVMETHOD(miibus_writereg, nge_miibus_writereg), 218 DEVMETHOD(miibus_statchg, nge_miibus_statchg), 219 220 { 0, 0 } 221 }; 222 223 static driver_t nge_driver = { 224 "nge", 225 nge_methods, 226 sizeof(struct nge_softc) 227 }; 228 229 static devclass_t nge_devclass; 230 231 DRIVER_MODULE(if_nge, pci, nge_driver, nge_devclass, 0, 0); 232 DRIVER_MODULE(miibus, nge, miibus_driver, miibus_devclass, 0, 0); 233 234 #define NGE_SETBIT(sc, reg, x) \ 235 CSR_WRITE_4(sc, reg, \ 236 CSR_READ_4(sc, reg) | (x)) 237 238 #define NGE_CLRBIT(sc, reg, x) \ 239 CSR_WRITE_4(sc, reg, \ 240 CSR_READ_4(sc, reg) & ~(x)) 241 242 #define SIO_SET(x) \ 243 
CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) | x) 244 245 #define SIO_CLR(x) \ 246 CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) & ~x) 247 248 static void nge_delay(sc) 249 struct nge_softc *sc; 250 { 251 int idx; 252 253 for (idx = (300 / 33) + 1; idx > 0; idx--) 254 CSR_READ_4(sc, NGE_CSR); 255 256 return; 257 } 258 259 static void nge_eeprom_idle(sc) 260 struct nge_softc *sc; 261 { 262 register int i; 263 264 SIO_SET(NGE_MEAR_EE_CSEL); 265 nge_delay(sc); 266 SIO_SET(NGE_MEAR_EE_CLK); 267 nge_delay(sc); 268 269 for (i = 0; i < 25; i++) { 270 SIO_CLR(NGE_MEAR_EE_CLK); 271 nge_delay(sc); 272 SIO_SET(NGE_MEAR_EE_CLK); 273 nge_delay(sc); 274 } 275 276 SIO_CLR(NGE_MEAR_EE_CLK); 277 nge_delay(sc); 278 SIO_CLR(NGE_MEAR_EE_CSEL); 279 nge_delay(sc); 280 CSR_WRITE_4(sc, NGE_MEAR, 0x00000000); 281 282 return; 283 } 284 285 /* 286 * Send a read command and address to the EEPROM, check for ACK. 287 */ 288 static void nge_eeprom_putbyte(sc, addr) 289 struct nge_softc *sc; 290 int addr; 291 { 292 register int d, i; 293 294 d = addr | NGE_EECMD_READ; 295 296 /* 297 * Feed in each bit and stobe the clock. 298 */ 299 for (i = 0x400; i; i >>= 1) { 300 if (d & i) { 301 SIO_SET(NGE_MEAR_EE_DIN); 302 } else { 303 SIO_CLR(NGE_MEAR_EE_DIN); 304 } 305 nge_delay(sc); 306 SIO_SET(NGE_MEAR_EE_CLK); 307 nge_delay(sc); 308 SIO_CLR(NGE_MEAR_EE_CLK); 309 nge_delay(sc); 310 } 311 312 return; 313 } 314 315 /* 316 * Read a word of data stored in the EEPROM at address 'addr.' 317 */ 318 static void nge_eeprom_getword(sc, addr, dest) 319 struct nge_softc *sc; 320 int addr; 321 u_int16_t *dest; 322 { 323 register int i; 324 u_int16_t word = 0; 325 326 /* Force EEPROM to idle state. */ 327 nge_eeprom_idle(sc); 328 329 /* Enter EEPROM access mode. */ 330 nge_delay(sc); 331 SIO_CLR(NGE_MEAR_EE_CLK); 332 nge_delay(sc); 333 SIO_SET(NGE_MEAR_EE_CSEL); 334 nge_delay(sc); 335 336 /* 337 * Send address of word we want to read. 338 */ 339 nge_eeprom_putbyte(sc, addr); 340 341 /* 342 * Start reading bits from EEPROM. 343 */ 344 for (i = 0x8000; i; i >>= 1) { 345 SIO_SET(NGE_MEAR_EE_CLK); 346 nge_delay(sc); 347 if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_EE_DOUT) 348 word |= i; 349 nge_delay(sc); 350 SIO_CLR(NGE_MEAR_EE_CLK); 351 nge_delay(sc); 352 } 353 354 /* Turn off EEPROM access mode. */ 355 nge_eeprom_idle(sc); 356 357 *dest = word; 358 359 return; 360 } 361 362 /* 363 * Read a sequence of words from the EEPROM. 364 */ 365 static void nge_read_eeprom(sc, dest, off, cnt, swap) 366 struct nge_softc *sc; 367 caddr_t dest; 368 int off; 369 int cnt; 370 int swap; 371 { 372 int i; 373 u_int16_t word = 0, *ptr; 374 375 for (i = 0; i < cnt; i++) { 376 nge_eeprom_getword(sc, off + i, &word); 377 ptr = (u_int16_t *)(dest + (i * 2)); 378 if (swap) 379 *ptr = ntohs(word); 380 else 381 *ptr = word; 382 } 383 384 return; 385 } 386 387 /* 388 * Sync the PHYs by setting data bit and strobing the clock 32 times. 389 */ 390 static void nge_mii_sync(sc) 391 struct nge_softc *sc; 392 { 393 register int i; 394 395 SIO_SET(NGE_MEAR_MII_DIR|NGE_MEAR_MII_DATA); 396 397 for (i = 0; i < 32; i++) { 398 SIO_SET(NGE_MEAR_MII_CLK); 399 DELAY(1); 400 SIO_CLR(NGE_MEAR_MII_CLK); 401 DELAY(1); 402 } 403 404 return; 405 } 406 407 /* 408 * Clock a series of bits through the MII. 
409 */ 410 static void nge_mii_send(sc, bits, cnt) 411 struct nge_softc *sc; 412 u_int32_t bits; 413 int cnt; 414 { 415 int i; 416 417 SIO_CLR(NGE_MEAR_MII_CLK); 418 419 for (i = (0x1 << (cnt - 1)); i; i >>= 1) { 420 if (bits & i) { 421 SIO_SET(NGE_MEAR_MII_DATA); 422 } else { 423 SIO_CLR(NGE_MEAR_MII_DATA); 424 } 425 DELAY(1); 426 SIO_CLR(NGE_MEAR_MII_CLK); 427 DELAY(1); 428 SIO_SET(NGE_MEAR_MII_CLK); 429 } 430 } 431 432 /* 433 * Read an PHY register through the MII. 434 */ 435 static int nge_mii_readreg(sc, frame) 436 struct nge_softc *sc; 437 struct nge_mii_frame *frame; 438 439 { 440 int i, ack, s; 441 442 s = splimp(); 443 444 /* 445 * Set up frame for RX. 446 */ 447 frame->mii_stdelim = NGE_MII_STARTDELIM; 448 frame->mii_opcode = NGE_MII_READOP; 449 frame->mii_turnaround = 0; 450 frame->mii_data = 0; 451 452 CSR_WRITE_4(sc, NGE_MEAR, 0); 453 454 /* 455 * Turn on data xmit. 456 */ 457 SIO_SET(NGE_MEAR_MII_DIR); 458 459 nge_mii_sync(sc); 460 461 /* 462 * Send command/address info. 463 */ 464 nge_mii_send(sc, frame->mii_stdelim, 2); 465 nge_mii_send(sc, frame->mii_opcode, 2); 466 nge_mii_send(sc, frame->mii_phyaddr, 5); 467 nge_mii_send(sc, frame->mii_regaddr, 5); 468 469 /* Idle bit */ 470 SIO_CLR((NGE_MEAR_MII_CLK|NGE_MEAR_MII_DATA)); 471 DELAY(1); 472 SIO_SET(NGE_MEAR_MII_CLK); 473 DELAY(1); 474 475 /* Turn off xmit. */ 476 SIO_CLR(NGE_MEAR_MII_DIR); 477 /* Check for ack */ 478 SIO_CLR(NGE_MEAR_MII_CLK); 479 DELAY(1); 480 SIO_SET(NGE_MEAR_MII_CLK); 481 DELAY(1); 482 ack = CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_MII_DATA; 483 484 /* 485 * Now try reading data bits. If the ack failed, we still 486 * need to clock through 16 cycles to keep the PHY(s) in sync. 487 */ 488 if (ack) { 489 for(i = 0; i < 16; i++) { 490 SIO_CLR(NGE_MEAR_MII_CLK); 491 DELAY(1); 492 SIO_SET(NGE_MEAR_MII_CLK); 493 DELAY(1); 494 } 495 goto fail; 496 } 497 498 for (i = 0x8000; i; i >>= 1) { 499 SIO_CLR(NGE_MEAR_MII_CLK); 500 DELAY(1); 501 if (!ack) { 502 if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_MII_DATA) 503 frame->mii_data |= i; 504 DELAY(1); 505 } 506 SIO_SET(NGE_MEAR_MII_CLK); 507 DELAY(1); 508 } 509 510 fail: 511 512 SIO_CLR(NGE_MEAR_MII_CLK); 513 DELAY(1); 514 SIO_SET(NGE_MEAR_MII_CLK); 515 DELAY(1); 516 517 splx(s); 518 519 if (ack) 520 return(1); 521 return(0); 522 } 523 524 /* 525 * Write to a PHY register through the MII. 526 */ 527 static int nge_mii_writereg(sc, frame) 528 struct nge_softc *sc; 529 struct nge_mii_frame *frame; 530 531 { 532 int s; 533 534 s = splimp(); 535 /* 536 * Set up frame for TX. 537 */ 538 539 frame->mii_stdelim = NGE_MII_STARTDELIM; 540 frame->mii_opcode = NGE_MII_WRITEOP; 541 frame->mii_turnaround = NGE_MII_TURNAROUND; 542 543 /* 544 * Turn on data output. 545 */ 546 SIO_SET(NGE_MEAR_MII_DIR); 547 548 nge_mii_sync(sc); 549 550 nge_mii_send(sc, frame->mii_stdelim, 2); 551 nge_mii_send(sc, frame->mii_opcode, 2); 552 nge_mii_send(sc, frame->mii_phyaddr, 5); 553 nge_mii_send(sc, frame->mii_regaddr, 5); 554 nge_mii_send(sc, frame->mii_turnaround, 2); 555 nge_mii_send(sc, frame->mii_data, 16); 556 557 /* Idle bit. */ 558 SIO_SET(NGE_MEAR_MII_CLK); 559 DELAY(1); 560 SIO_CLR(NGE_MEAR_MII_CLK); 561 DELAY(1); 562 563 /* 564 * Turn off xmit. 
565 */ 566 SIO_CLR(NGE_MEAR_MII_DIR); 567 568 splx(s); 569 570 return(0); 571 } 572 573 static int nge_miibus_readreg(dev, phy, reg) 574 device_t dev; 575 int phy, reg; 576 { 577 struct nge_softc *sc; 578 struct nge_mii_frame frame; 579 580 sc = device_get_softc(dev); 581 582 bzero((char *)&frame, sizeof(frame)); 583 584 frame.mii_phyaddr = phy; 585 frame.mii_regaddr = reg; 586 nge_mii_readreg(sc, &frame); 587 588 return(frame.mii_data); 589 } 590 591 static int nge_miibus_writereg(dev, phy, reg, data) 592 device_t dev; 593 int phy, reg, data; 594 { 595 struct nge_softc *sc; 596 struct nge_mii_frame frame; 597 598 sc = device_get_softc(dev); 599 600 bzero((char *)&frame, sizeof(frame)); 601 602 frame.mii_phyaddr = phy; 603 frame.mii_regaddr = reg; 604 frame.mii_data = data; 605 nge_mii_writereg(sc, &frame); 606 607 return(0); 608 } 609 610 static void nge_miibus_statchg(dev) 611 device_t dev; 612 { 613 struct nge_softc *sc; 614 struct mii_data *mii; 615 616 sc = device_get_softc(dev); 617 mii = device_get_softc(sc->nge_miibus); 618 619 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 620 NGE_SETBIT(sc, NGE_TX_CFG, 621 (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR)); 622 NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX); 623 } else { 624 NGE_CLRBIT(sc, NGE_TX_CFG, 625 (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR)); 626 NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX); 627 } 628 629 /* If we have a 1000Mbps link, set the mode_1000 bit. */ 630 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || 631 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) { 632 NGE_SETBIT(sc, NGE_CFG, NGE_CFG_MODE_1000); 633 } else { 634 NGE_CLRBIT(sc, NGE_CFG, NGE_CFG_MODE_1000); 635 } 636 637 return; 638 } 639 640 static u_int32_t nge_crc(sc, addr) 641 struct nge_softc *sc; 642 caddr_t addr; 643 { 644 u_int32_t crc, carry; 645 int i, j; 646 u_int8_t c; 647 648 /* Compute CRC for the address value. */ 649 crc = 0xFFFFFFFF; /* initial value */ 650 651 for (i = 0; i < 6; i++) { 652 c = *(addr + i); 653 for (j = 0; j < 8; j++) { 654 carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01); 655 crc <<= 1; 656 c >>= 1; 657 if (carry) 658 crc = (crc ^ 0x04c11db6) | carry; 659 } 660 } 661 662 /* 663 * return the filter bit position 664 */ 665 666 return((crc >> 21) & 0x00000FFF); 667 } 668 669 static void nge_setmulti(sc) 670 struct nge_softc *sc; 671 { 672 struct ifnet *ifp; 673 struct ifmultiaddr *ifma; 674 u_int32_t h = 0, i, filtsave; 675 int bit, index; 676 677 ifp = &sc->arpcom.ac_if; 678 679 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 680 NGE_CLRBIT(sc, NGE_RXFILT_CTL, 681 NGE_RXFILTCTL_MCHASH|NGE_RXFILTCTL_UCHASH); 682 NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLMULTI); 683 return; 684 } 685 686 /* 687 * We have to explicitly enable the multicast hash table 688 * on the NatSemi chip if we want to use it, which we do. 689 * We also have to tell it that we don't want to use the 690 * hash table for matching unicast addresses. 
 */
	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_MCHASH);
	NGE_CLRBIT(sc, NGE_RXFILT_CTL,
	    NGE_RXFILTCTL_ALLMULTI|NGE_RXFILTCTL_UCHASH);

	filtsave = CSR_READ_4(sc, NGE_RXFILT_CTL);

	/* first, zot all the existing hash bits */
	for (i = 0; i < NGE_MCAST_FILTER_LEN; i += 2) {
		CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_MCAST_LO + i);
		CSR_WRITE_4(sc, NGE_RXFILT_DATA, 0);
	}

	/*
	 * From the 11 bits returned by the crc routine, the top 7
	 * bits represent the 16-bit word in the mcast hash table
	 * that needs to be updated, and the lower 4 bits represent
	 * which bit within that word needs to be set.
	 */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = nge_crc(sc, LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		index = (h >> 4) & 0x7F;
		bit = h & 0xF;
		CSR_WRITE_4(sc, NGE_RXFILT_CTL,
		    NGE_FILTADDR_MCAST_LO + (index * 2));
		NGE_SETBIT(sc, NGE_RXFILT_DATA, (1 << bit));
	}

	CSR_WRITE_4(sc, NGE_RXFILT_CTL, filtsave);

	return;
}
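/*
 * Illustrative sketch only (not part of the driver): how an 11-bit
 * hash value from nge_crc() maps onto the 2048-bit multicast filter,
 * mirroring the arithmetic in nge_setmulti() above.  The helper name
 * and the struct are invented for this example, and the block is
 * wrapped in #if 0 so it never affects the build.
 */
#if 0	/* example only */
struct nge_hash_slot {
	int		word_off;	/* offset added to NGE_FILTADDR_MCAST_LO */
	u_int16_t	bit_mask;	/* bit to set within that 16-bit word */
};

static struct nge_hash_slot
nge_hash_to_slot(u_int32_t h)
{
	struct nge_hash_slot	slot;

	/* The top 7 of the 11 bits pick one of 128 16-bit filter words... */
	slot.word_off = ((h >> 4) & 0x7F) * 2;
	/* ...and the low 4 bits pick the bit inside that word. */
	slot.bit_mask = 1 << (h & 0xF);
	return (slot);
}
#endif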
static void nge_reset(sc)
	struct nge_softc	*sc;
{
	register int		i;

	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RESET);

	for (i = 0; i < NGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, NGE_CSR) & NGE_CSR_RESET))
			break;
	}

	if (i == NGE_TIMEOUT)
		printf("nge%d: reset never completed\n", sc->nge_unit);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	/*
	 * If this is a NatSemi chip, make sure to clear
	 * PME mode.
	 */
	CSR_WRITE_4(sc, NGE_CLKRUN, NGE_CLKRUN_PMESTS);
	CSR_WRITE_4(sc, NGE_CLKRUN, 0);

	return;
}

/*
 * Probe for a NatSemi chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int nge_probe(dev)
	device_t		dev;
{
	struct nge_type		*t;

	t = nge_devs;

	while(t->nge_name != NULL) {
		if ((pci_get_vendor(dev) == t->nge_vid) &&
		    (pci_get_device(dev) == t->nge_did)) {
			device_set_desc(dev, t->nge_name);
			return(0);
		}
		t++;
	}

	return(ENXIO);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int nge_attach(dev)
	device_t		dev;
{
	int			s;
	u_char			eaddr[ETHER_ADDR_LEN];
	u_int32_t		command;
	struct nge_softc	*sc;
	struct ifnet		*ifp;
	int			unit, error = 0, rid;

	s = splimp();

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);
	bzero(sc, sizeof(struct nge_softc));

	mtx_init(&sc->nge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF | MTX_RECURSE);

	/*
	 * Handle power management nonsense.
	 */
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		u_int32_t		iobase, membase, irq;

		/* Save important PCI config data. */
		iobase = pci_read_config(dev, NGE_PCI_LOIO, 4);
		membase = pci_read_config(dev, NGE_PCI_LOMEM, 4);
		irq = pci_read_config(dev, NGE_PCI_INTLINE, 4);

		/* Reset the power state. */
		printf("nge%d: chip is in D%d power mode "
		    "-- setting to D0\n", unit,
		    pci_get_powerstate(dev));
		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		/* Restore PCI config data. */
		pci_write_config(dev, NGE_PCI_LOIO, iobase, 4);
		pci_write_config(dev, NGE_PCI_LOMEM, membase, 4);
		pci_write_config(dev, NGE_PCI_INTLINE, irq, 4);
	}

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);
	pci_enable_io(dev, SYS_RES_IOPORT);
	pci_enable_io(dev, SYS_RES_MEMORY);
	command = pci_read_config(dev, PCIR_COMMAND, 4);

#ifdef NGE_USEIOSPACE
	if (!(command & PCIM_CMD_PORTEN)) {
		printf("nge%d: failed to enable I/O ports!\n", unit);
		error = ENXIO;
		goto fail;
	}
#else
	if (!(command & PCIM_CMD_MEMEN)) {
		printf("nge%d: failed to enable memory mapping!\n", unit);
		error = ENXIO;
		goto fail;
	}
#endif

	rid = NGE_RID;
	sc->nge_res = bus_alloc_resource(dev, NGE_RES, &rid,
	    0, ~0, 1, RF_ACTIVE);

	if (sc->nge_res == NULL) {
		printf("nge%d: couldn't map ports/memory\n", unit);
		error = ENXIO;
		goto fail;
	}

	sc->nge_btag = rman_get_bustag(sc->nge_res);
	sc->nge_bhandle = rman_get_bushandle(sc->nge_res);

	/* Allocate interrupt */
	rid = 0;
	sc->nge_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->nge_irq == NULL) {
		printf("nge%d: couldn't map interrupt\n", unit);
		bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res);
		error = ENXIO;
		goto fail;
	}

	error = bus_setup_intr(dev, sc->nge_irq, INTR_TYPE_NET,
	    nge_intr, sc, &sc->nge_intrhand);

	if (error) {
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq);
		bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res);
		printf("nge%d: couldn't set up irq\n", unit);
		goto fail;
	}

	/* Reset the adapter. */
	nge_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	nge_read_eeprom(sc, (caddr_t)&eaddr[4], NGE_EE_NODEADDR, 1, 0);
	nge_read_eeprom(sc, (caddr_t)&eaddr[2], NGE_EE_NODEADDR + 1, 1, 0);
	nge_read_eeprom(sc, (caddr_t)&eaddr[0], NGE_EE_NODEADDR + 2, 1, 0);

	/*
	 * A NatSemi chip was detected. Inform the world.
	 */
	printf("nge%d: Ethernet address: %6D\n", unit, eaddr, ":");

	sc->nge_unit = unit;
	bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);

	sc->nge_ldata = contigmalloc(sizeof(struct nge_list_data), M_DEVBUF,
	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->nge_ldata == NULL) {
		printf("nge%d: no memory for list buffers!\n", unit);
		bus_teardown_intr(dev, sc->nge_irq, sc->nge_intrhand);
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq);
		bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res);
		error = ENXIO;
		goto fail;
	}
	bzero(sc->nge_ldata, sizeof(struct nge_list_data));

	/* Try to allocate memory for jumbo buffers.
*/ 912 if (nge_alloc_jumbo_mem(sc)) { 913 printf("nge%d: jumbo buffer allocation failed\n", 914 sc->nge_unit); 915 contigfree(sc->nge_ldata, 916 sizeof(struct nge_list_data), M_DEVBUF); 917 bus_teardown_intr(dev, sc->nge_irq, sc->nge_intrhand); 918 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq); 919 bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res); 920 error = ENXIO; 921 goto fail; 922 } 923 924 ifp = &sc->arpcom.ac_if; 925 ifp->if_softc = sc; 926 ifp->if_unit = unit; 927 ifp->if_name = "nge"; 928 ifp->if_mtu = ETHERMTU; 929 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 930 ifp->if_ioctl = nge_ioctl; 931 ifp->if_output = ether_output; 932 ifp->if_start = nge_start; 933 ifp->if_watchdog = nge_watchdog; 934 ifp->if_init = nge_init; 935 ifp->if_baudrate = 1000000000; 936 ifp->if_snd.ifq_maxlen = NGE_TX_LIST_CNT - 1; 937 ifp->if_hwassist = NGE_CSUM_FEATURES; 938 ifp->if_capabilities = IFCAP_HWCSUM; 939 ifp->if_capenable = ifp->if_capabilities; 940 941 /* 942 * Do MII setup. 943 */ 944 if (mii_phy_probe(dev, &sc->nge_miibus, 945 nge_ifmedia_upd, nge_ifmedia_sts)) { 946 printf("nge%d: MII without any PHY!\n", sc->nge_unit); 947 nge_free_jumbo_mem(sc); 948 bus_teardown_intr(dev, sc->nge_irq, sc->nge_intrhand); 949 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq); 950 bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res); 951 error = ENXIO; 952 goto fail; 953 } 954 955 /* 956 * Call MI attach routine. 957 */ 958 ether_ifattach(ifp, ETHER_BPF_SUPPORTED); 959 callout_handle_init(&sc->nge_stat_ch); 960 961 fail: 962 splx(s); 963 mtx_destroy(&sc->nge_mtx); 964 return(error); 965 } 966 967 static int nge_detach(dev) 968 device_t dev; 969 { 970 struct nge_softc *sc; 971 struct ifnet *ifp; 972 int s; 973 974 s = splimp(); 975 976 sc = device_get_softc(dev); 977 ifp = &sc->arpcom.ac_if; 978 979 nge_reset(sc); 980 nge_stop(sc); 981 ether_ifdetach(ifp, ETHER_BPF_SUPPORTED); 982 983 bus_generic_detach(dev); 984 device_delete_child(dev, sc->nge_miibus); 985 986 bus_teardown_intr(dev, sc->nge_irq, sc->nge_intrhand); 987 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq); 988 bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res); 989 990 contigfree(sc->nge_ldata, sizeof(struct nge_list_data), M_DEVBUF); 991 nge_free_jumbo_mem(sc); 992 993 splx(s); 994 mtx_destroy(&sc->nge_mtx); 995 996 return(0); 997 } 998 999 /* 1000 * Initialize the transmit descriptors. 1001 */ 1002 static int nge_list_tx_init(sc) 1003 struct nge_softc *sc; 1004 { 1005 struct nge_list_data *ld; 1006 struct nge_ring_data *cd; 1007 int i; 1008 1009 cd = &sc->nge_cdata; 1010 ld = sc->nge_ldata; 1011 1012 for (i = 0; i < NGE_TX_LIST_CNT; i++) { 1013 if (i == (NGE_TX_LIST_CNT - 1)) { 1014 ld->nge_tx_list[i].nge_nextdesc = 1015 &ld->nge_tx_list[0]; 1016 ld->nge_tx_list[i].nge_next = 1017 vtophys(&ld->nge_tx_list[0]); 1018 } else { 1019 ld->nge_tx_list[i].nge_nextdesc = 1020 &ld->nge_tx_list[i + 1]; 1021 ld->nge_tx_list[i].nge_next = 1022 vtophys(&ld->nge_tx_list[i + 1]); 1023 } 1024 ld->nge_tx_list[i].nge_mbuf = NULL; 1025 ld->nge_tx_list[i].nge_ptr = 0; 1026 ld->nge_tx_list[i].nge_ctl = 0; 1027 } 1028 1029 cd->nge_tx_prod = cd->nge_tx_cons = cd->nge_tx_cnt = 0; 1030 1031 return(0); 1032 } 1033 1034 1035 /* 1036 * Initialize the RX descriptors and allocate mbufs for them. Note that 1037 * we arrange the descriptors in a closed ring, so that the last descriptor 1038 * points back to the first. 
1039 */ 1040 static int nge_list_rx_init(sc) 1041 struct nge_softc *sc; 1042 { 1043 struct nge_list_data *ld; 1044 struct nge_ring_data *cd; 1045 int i; 1046 1047 ld = sc->nge_ldata; 1048 cd = &sc->nge_cdata; 1049 1050 for (i = 0; i < NGE_RX_LIST_CNT; i++) { 1051 if (nge_newbuf(sc, &ld->nge_rx_list[i], NULL) == ENOBUFS) 1052 return(ENOBUFS); 1053 if (i == (NGE_RX_LIST_CNT - 1)) { 1054 ld->nge_rx_list[i].nge_nextdesc = 1055 &ld->nge_rx_list[0]; 1056 ld->nge_rx_list[i].nge_next = 1057 vtophys(&ld->nge_rx_list[0]); 1058 } else { 1059 ld->nge_rx_list[i].nge_nextdesc = 1060 &ld->nge_rx_list[i + 1]; 1061 ld->nge_rx_list[i].nge_next = 1062 vtophys(&ld->nge_rx_list[i + 1]); 1063 } 1064 } 1065 1066 cd->nge_rx_prod = 0; 1067 1068 return(0); 1069 } 1070 1071 /* 1072 * Initialize an RX descriptor and attach an MBUF cluster. 1073 */ 1074 static int nge_newbuf(sc, c, m) 1075 struct nge_softc *sc; 1076 struct nge_desc *c; 1077 struct mbuf *m; 1078 { 1079 struct mbuf *m_new = NULL; 1080 caddr_t *buf = NULL; 1081 1082 if (m == NULL) { 1083 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 1084 if (m_new == NULL) { 1085 printf("nge%d: no memory for rx list " 1086 "-- packet dropped!\n", sc->nge_unit); 1087 return(ENOBUFS); 1088 } 1089 1090 /* Allocate the jumbo buffer */ 1091 buf = nge_jalloc(sc); 1092 if (buf == NULL) { 1093 #ifdef NGE_VERBOSE 1094 printf("nge%d: jumbo allocation failed " 1095 "-- packet dropped!\n", sc->nge_unit); 1096 #endif 1097 m_freem(m_new); 1098 return(ENOBUFS); 1099 } 1100 /* Attach the buffer to the mbuf */ 1101 m_new->m_data = (void *)buf; 1102 m_new->m_len = m_new->m_pkthdr.len = NGE_JUMBO_FRAMELEN; 1103 MEXTADD(m_new, buf, NGE_JUMBO_FRAMELEN, nge_jfree, 1104 (struct nge_softc *)sc, 0, EXT_NET_DRV); 1105 } else { 1106 m_new = m; 1107 m_new->m_len = m_new->m_pkthdr.len = NGE_JUMBO_FRAMELEN; 1108 m_new->m_data = m_new->m_ext.ext_buf; 1109 } 1110 1111 m_adj(m_new, sizeof(u_int64_t)); 1112 1113 c->nge_mbuf = m_new; 1114 c->nge_ptr = vtophys(mtod(m_new, caddr_t)); 1115 c->nge_ctl = m_new->m_len; 1116 c->nge_extsts = 0; 1117 1118 return(0); 1119 } 1120 1121 static int nge_alloc_jumbo_mem(sc) 1122 struct nge_softc *sc; 1123 { 1124 caddr_t ptr; 1125 register int i; 1126 struct nge_jpool_entry *entry; 1127 1128 /* Grab a big chunk o' storage. */ 1129 sc->nge_cdata.nge_jumbo_buf = contigmalloc(NGE_JMEM, M_DEVBUF, 1130 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); 1131 1132 if (sc->nge_cdata.nge_jumbo_buf == NULL) { 1133 printf("nge%d: no memory for jumbo buffers!\n", sc->nge_unit); 1134 return(ENOBUFS); 1135 } 1136 1137 SLIST_INIT(&sc->nge_jfree_listhead); 1138 SLIST_INIT(&sc->nge_jinuse_listhead); 1139 1140 /* 1141 * Now divide it up into 9K pieces and save the addresses 1142 * in an array. 
1143 */ 1144 ptr = sc->nge_cdata.nge_jumbo_buf; 1145 for (i = 0; i < NGE_JSLOTS; i++) { 1146 sc->nge_cdata.nge_jslots[i] = ptr; 1147 ptr += NGE_JLEN; 1148 entry = malloc(sizeof(struct nge_jpool_entry), 1149 M_DEVBUF, M_NOWAIT); 1150 if (entry == NULL) { 1151 printf("nge%d: no memory for jumbo " 1152 "buffer queue!\n", sc->nge_unit); 1153 return(ENOBUFS); 1154 } 1155 entry->slot = i; 1156 SLIST_INSERT_HEAD(&sc->nge_jfree_listhead, 1157 entry, jpool_entries); 1158 } 1159 1160 return(0); 1161 } 1162 1163 static void nge_free_jumbo_mem(sc) 1164 struct nge_softc *sc; 1165 { 1166 register int i; 1167 struct nge_jpool_entry *entry; 1168 1169 for (i = 0; i < NGE_JSLOTS; i++) { 1170 entry = SLIST_FIRST(&sc->nge_jfree_listhead); 1171 SLIST_REMOVE_HEAD(&sc->nge_jfree_listhead, jpool_entries); 1172 free(entry, M_DEVBUF); 1173 } 1174 1175 contigfree(sc->nge_cdata.nge_jumbo_buf, NGE_JMEM, M_DEVBUF); 1176 1177 return; 1178 } 1179 1180 /* 1181 * Allocate a jumbo buffer. 1182 */ 1183 static void *nge_jalloc(sc) 1184 struct nge_softc *sc; 1185 { 1186 struct nge_jpool_entry *entry; 1187 1188 entry = SLIST_FIRST(&sc->nge_jfree_listhead); 1189 1190 if (entry == NULL) { 1191 #ifdef NGE_VERBOSE 1192 printf("nge%d: no free jumbo buffers\n", sc->nge_unit); 1193 #endif 1194 return(NULL); 1195 } 1196 1197 SLIST_REMOVE_HEAD(&sc->nge_jfree_listhead, jpool_entries); 1198 SLIST_INSERT_HEAD(&sc->nge_jinuse_listhead, entry, jpool_entries); 1199 return(sc->nge_cdata.nge_jslots[entry->slot]); 1200 } 1201 1202 /* 1203 * Release a jumbo buffer. 1204 */ 1205 static void nge_jfree(buf, args) 1206 caddr_t buf; 1207 void *args; 1208 { 1209 struct nge_softc *sc; 1210 int i; 1211 struct nge_jpool_entry *entry; 1212 1213 /* Extract the softc struct pointer. */ 1214 sc = args; 1215 1216 if (sc == NULL) 1217 panic("nge_jfree: can't find softc pointer!"); 1218 1219 /* calculate the slot this buffer belongs to */ 1220 i = ((vm_offset_t)buf 1221 - (vm_offset_t)sc->nge_cdata.nge_jumbo_buf) / NGE_JLEN; 1222 1223 if ((i < 0) || (i >= NGE_JSLOTS)) 1224 panic("nge_jfree: asked to free buffer that we don't manage!"); 1225 1226 entry = SLIST_FIRST(&sc->nge_jinuse_listhead); 1227 if (entry == NULL) 1228 panic("nge_jfree: buffer not in use!"); 1229 entry->slot = i; 1230 SLIST_REMOVE_HEAD(&sc->nge_jinuse_listhead, jpool_entries); 1231 SLIST_INSERT_HEAD(&sc->nge_jfree_listhead, entry, jpool_entries); 1232 1233 return; 1234 } 1235 /* 1236 * A frame has been uploaded: pass the resulting mbuf chain up to 1237 * the higher level protocols. 1238 */ 1239 static void nge_rxeof(sc) 1240 struct nge_softc *sc; 1241 { 1242 struct ether_header *eh; 1243 struct mbuf *m; 1244 struct ifnet *ifp; 1245 struct nge_desc *cur_rx; 1246 int i, total_len = 0; 1247 u_int32_t rxstat; 1248 1249 ifp = &sc->arpcom.ac_if; 1250 i = sc->nge_cdata.nge_rx_prod; 1251 1252 while(NGE_OWNDESC(&sc->nge_ldata->nge_rx_list[i])) { 1253 struct mbuf *m0 = NULL; 1254 u_int32_t extsts; 1255 1256 cur_rx = &sc->nge_ldata->nge_rx_list[i]; 1257 rxstat = cur_rx->nge_rxstat; 1258 extsts = cur_rx->nge_extsts; 1259 m = cur_rx->nge_mbuf; 1260 cur_rx->nge_mbuf = NULL; 1261 total_len = NGE_RXBYTES(cur_rx); 1262 NGE_INC(i, NGE_RX_LIST_CNT); 1263 1264 /* 1265 * If an error occurs, update stats, clear the 1266 * status word and leave the mbuf cluster in place: 1267 * it should simply get re-used next time this descriptor 1268 * comes up in the ring. 1269 */ 1270 if (!(rxstat & NGE_CMDSTS_PKT_OK)) { 1271 ifp->if_ierrors++; 1272 nge_newbuf(sc, cur_rx, m); 1273 continue; 1274 } 1275 1276 1277 /* 1278 * Ok. 
NatSemi really screwed up here. This is the 1279 * only gigE chip I know of with alignment constraints 1280 * on receive buffers. RX buffers must be 64-bit aligned. 1281 */ 1282 #ifdef __i386__ 1283 /* 1284 * By popular demand, ignore the alignment problems 1285 * on the Intel x86 platform. The performance hit 1286 * incurred due to unaligned accesses is much smaller 1287 * than the hit produced by forcing buffer copies all 1288 * the time, especially with jumbo frames. We still 1289 * need to fix up the alignment everywhere else though. 1290 */ 1291 if (nge_newbuf(sc, cur_rx, NULL) == ENOBUFS) { 1292 #endif 1293 m0 = m_devget(mtod(m, char *), total_len, 1294 ETHER_ALIGN, ifp, NULL); 1295 nge_newbuf(sc, cur_rx, m); 1296 if (m0 == NULL) { 1297 printf("nge%d: no receive buffers " 1298 "available -- packet dropped!\n", 1299 sc->nge_unit); 1300 ifp->if_ierrors++; 1301 continue; 1302 } 1303 m = m0; 1304 #ifdef __i386__ 1305 } else { 1306 m->m_pkthdr.rcvif = ifp; 1307 m->m_pkthdr.len = m->m_len = total_len; 1308 } 1309 #endif 1310 1311 ifp->if_ipackets++; 1312 eh = mtod(m, struct ether_header *); 1313 1314 /* Remove header from mbuf and pass it on. */ 1315 m_adj(m, sizeof(struct ether_header)); 1316 1317 /* Do IP checksum checking. */ 1318 if (extsts & NGE_RXEXTSTS_IPPKT) 1319 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 1320 if (!(extsts & NGE_RXEXTSTS_IPCSUMERR)) 1321 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 1322 if ((extsts & NGE_RXEXTSTS_TCPPKT && 1323 !(extsts & NGE_RXEXTSTS_TCPCSUMERR)) || 1324 (extsts & NGE_RXEXTSTS_UDPPKT && 1325 !(extsts & NGE_RXEXTSTS_UDPCSUMERR))) { 1326 m->m_pkthdr.csum_flags |= 1327 CSUM_DATA_VALID|CSUM_PSEUDO_HDR; 1328 m->m_pkthdr.csum_data = 0xffff; 1329 } 1330 1331 /* 1332 * If we received a packet with a vlan tag, pass it 1333 * to vlan_input() instead of ether_input(). 1334 */ 1335 if (extsts & NGE_RXEXTSTS_VLANPKT) { 1336 VLAN_INPUT_TAG(eh, m, extsts & NGE_RXEXTSTS_VTCI); 1337 continue; 1338 } 1339 1340 ether_input(ifp, eh, m); 1341 } 1342 1343 sc->nge_cdata.nge_rx_prod = i; 1344 1345 return; 1346 } 1347 1348 /* 1349 * A frame was downloaded to the chip. It's safe for us to clean up 1350 * the list buffers. 1351 */ 1352 1353 static void nge_txeof(sc) 1354 struct nge_softc *sc; 1355 { 1356 struct nge_desc *cur_tx = NULL; 1357 struct ifnet *ifp; 1358 u_int32_t idx; 1359 1360 ifp = &sc->arpcom.ac_if; 1361 1362 /* Clear the timeout timer. */ 1363 ifp->if_timer = 0; 1364 1365 /* 1366 * Go through our tx list and free mbufs for those 1367 * frames that have been transmitted. 
1368 */ 1369 idx = sc->nge_cdata.nge_tx_cons; 1370 while (idx != sc->nge_cdata.nge_tx_prod) { 1371 cur_tx = &sc->nge_ldata->nge_tx_list[idx]; 1372 1373 if (NGE_OWNDESC(cur_tx)) 1374 break; 1375 1376 if (cur_tx->nge_ctl & NGE_CMDSTS_MORE) { 1377 sc->nge_cdata.nge_tx_cnt--; 1378 NGE_INC(idx, NGE_TX_LIST_CNT); 1379 continue; 1380 } 1381 1382 if (!(cur_tx->nge_ctl & NGE_CMDSTS_PKT_OK)) { 1383 ifp->if_oerrors++; 1384 if (cur_tx->nge_txstat & NGE_TXSTAT_EXCESSCOLLS) 1385 ifp->if_collisions++; 1386 if (cur_tx->nge_txstat & NGE_TXSTAT_OUTOFWINCOLL) 1387 ifp->if_collisions++; 1388 } 1389 1390 ifp->if_collisions += 1391 (cur_tx->nge_txstat & NGE_TXSTAT_COLLCNT) >> 16; 1392 1393 ifp->if_opackets++; 1394 if (cur_tx->nge_mbuf != NULL) { 1395 m_freem(cur_tx->nge_mbuf); 1396 cur_tx->nge_mbuf = NULL; 1397 } 1398 1399 sc->nge_cdata.nge_tx_cnt--; 1400 NGE_INC(idx, NGE_TX_LIST_CNT); 1401 ifp->if_timer = 0; 1402 } 1403 1404 sc->nge_cdata.nge_tx_cons = idx; 1405 1406 if (cur_tx != NULL) 1407 ifp->if_flags &= ~IFF_OACTIVE; 1408 1409 return; 1410 } 1411 1412 static void nge_tick(xsc) 1413 void *xsc; 1414 { 1415 struct nge_softc *sc; 1416 struct mii_data *mii; 1417 struct ifnet *ifp; 1418 int s; 1419 1420 s = splimp(); 1421 1422 sc = xsc; 1423 ifp = &sc->arpcom.ac_if; 1424 1425 mii = device_get_softc(sc->nge_miibus); 1426 mii_tick(mii); 1427 1428 if (!sc->nge_link) { 1429 if (mii->mii_media_status & IFM_ACTIVE && 1430 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 1431 sc->nge_link++; 1432 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) 1433 printf("nge%d: gigabit link up\n", 1434 sc->nge_unit); 1435 if (ifp->if_snd.ifq_head != NULL) 1436 nge_start(ifp); 1437 } 1438 } 1439 sc->nge_stat_ch = timeout(nge_tick, sc, hz); 1440 1441 splx(s); 1442 1443 return; 1444 } 1445 1446 static void nge_intr(arg) 1447 void *arg; 1448 { 1449 struct nge_softc *sc; 1450 struct ifnet *ifp; 1451 u_int32_t status; 1452 1453 sc = arg; 1454 ifp = &sc->arpcom.ac_if; 1455 1456 /* Supress unwanted interrupts */ 1457 if (!(ifp->if_flags & IFF_UP)) { 1458 nge_stop(sc); 1459 return; 1460 } 1461 1462 /* Disable interrupts. */ 1463 CSR_WRITE_4(sc, NGE_IER, 0); 1464 1465 for (;;) { 1466 /* Reading the ISR register clears all interrupts. */ 1467 status = CSR_READ_4(sc, NGE_ISR); 1468 1469 if ((status & NGE_INTRS) == 0) 1470 break; 1471 1472 if ((status & NGE_ISR_TX_DESC_OK) || 1473 (status & NGE_ISR_TX_ERR) || 1474 (status & NGE_ISR_TX_OK) || 1475 (status & NGE_ISR_TX_IDLE)) 1476 nge_txeof(sc); 1477 1478 if ((status & NGE_ISR_RX_DESC_OK) || 1479 (status & NGE_ISR_RX_ERR) || 1480 (status & NGE_ISR_RX_OFLOW) || 1481 (status & NGE_ISR_RX_FIFO_OFLOW) || 1482 (status & NGE_ISR_RX_IDLE) || 1483 (status & NGE_ISR_RX_OK)) 1484 nge_rxeof(sc); 1485 1486 if ((status & NGE_ISR_RX_IDLE)) 1487 NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE); 1488 1489 if (status & NGE_ISR_SYSERR) { 1490 nge_reset(sc); 1491 ifp->if_flags &= ~IFF_RUNNING; 1492 nge_init(sc); 1493 } 1494 1495 #if 0 1496 /* 1497 * XXX: nge_tick() is not ready to be called this way 1498 * it screws up the aneg timeout because mii_tick() is 1499 * only to be called once per second. 1500 */ 1501 if (status & NGE_IMR_PHY_INTR) { 1502 sc->nge_link = 0; 1503 nge_tick(sc); 1504 } 1505 #endif 1506 } 1507 1508 /* Re-enable interrupts. */ 1509 CSR_WRITE_4(sc, NGE_IER, 1); 1510 1511 if (ifp->if_snd.ifq_head != NULL) 1512 nge_start(ifp); 1513 1514 return; 1515 } 1516 1517 /* 1518 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 1519 * pointers to the fragment pointers. 
1520 */ 1521 static int nge_encap(sc, m_head, txidx) 1522 struct nge_softc *sc; 1523 struct mbuf *m_head; 1524 u_int32_t *txidx; 1525 { 1526 struct nge_desc *f = NULL; 1527 struct mbuf *m; 1528 int frag, cur, cnt = 0; 1529 struct ifvlan *ifv = NULL; 1530 1531 if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) && 1532 m_head->m_pkthdr.rcvif != NULL && 1533 m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN) 1534 ifv = m_head->m_pkthdr.rcvif->if_softc; 1535 1536 /* 1537 * Start packing the mbufs in this chain into 1538 * the fragment pointers. Stop when we run out 1539 * of fragments or hit the end of the mbuf chain. 1540 */ 1541 m = m_head; 1542 cur = frag = *txidx; 1543 1544 for (m = m_head; m != NULL; m = m->m_next) { 1545 if (m->m_len != 0) { 1546 if ((NGE_TX_LIST_CNT - 1547 (sc->nge_cdata.nge_tx_cnt + cnt)) < 2) 1548 return(ENOBUFS); 1549 f = &sc->nge_ldata->nge_tx_list[frag]; 1550 f->nge_ctl = NGE_CMDSTS_MORE | m->m_len; 1551 f->nge_ptr = vtophys(mtod(m, vm_offset_t)); 1552 if (cnt != 0) 1553 f->nge_ctl |= NGE_CMDSTS_OWN; 1554 cur = frag; 1555 NGE_INC(frag, NGE_TX_LIST_CNT); 1556 cnt++; 1557 } 1558 } 1559 1560 if (m != NULL) 1561 return(ENOBUFS); 1562 1563 sc->nge_ldata->nge_tx_list[*txidx].nge_extsts = 0; 1564 if (m_head->m_pkthdr.csum_flags) { 1565 if (m_head->m_pkthdr.csum_flags & CSUM_IP) 1566 sc->nge_ldata->nge_tx_list[*txidx].nge_extsts |= 1567 NGE_TXEXTSTS_IPCSUM; 1568 if (m_head->m_pkthdr.csum_flags & CSUM_TCP) 1569 sc->nge_ldata->nge_tx_list[*txidx].nge_extsts |= 1570 NGE_TXEXTSTS_TCPCSUM; 1571 if (m_head->m_pkthdr.csum_flags & CSUM_UDP) 1572 sc->nge_ldata->nge_tx_list[*txidx].nge_extsts |= 1573 NGE_TXEXTSTS_UDPCSUM; 1574 } 1575 1576 if (ifv != NULL) { 1577 sc->nge_ldata->nge_tx_list[cur].nge_extsts |= 1578 (NGE_TXEXTSTS_VLANPKT|ifv->ifv_tag); 1579 } 1580 1581 sc->nge_ldata->nge_tx_list[cur].nge_mbuf = m_head; 1582 sc->nge_ldata->nge_tx_list[cur].nge_ctl &= ~NGE_CMDSTS_MORE; 1583 sc->nge_ldata->nge_tx_list[*txidx].nge_ctl |= NGE_CMDSTS_OWN; 1584 sc->nge_cdata.nge_tx_cnt += cnt; 1585 *txidx = frag; 1586 1587 return(0); 1588 } 1589 1590 /* 1591 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 1592 * to the mbuf data regions directly in the transmit lists. We also save a 1593 * copy of the pointers since the transmit list fragment pointers are 1594 * physical addresses. 1595 */ 1596 1597 static void nge_start(ifp) 1598 struct ifnet *ifp; 1599 { 1600 struct nge_softc *sc; 1601 struct mbuf *m_head = NULL; 1602 u_int32_t idx; 1603 1604 sc = ifp->if_softc; 1605 1606 if (!sc->nge_link) 1607 return; 1608 1609 idx = sc->nge_cdata.nge_tx_prod; 1610 1611 if (ifp->if_flags & IFF_OACTIVE) 1612 return; 1613 1614 while(sc->nge_ldata->nge_tx_list[idx].nge_mbuf == NULL) { 1615 IF_DEQUEUE(&ifp->if_snd, m_head); 1616 if (m_head == NULL) 1617 break; 1618 1619 if (nge_encap(sc, m_head, &idx)) { 1620 IF_PREPEND(&ifp->if_snd, m_head); 1621 ifp->if_flags |= IFF_OACTIVE; 1622 break; 1623 } 1624 1625 /* 1626 * If there's a BPF listener, bounce a copy of this frame 1627 * to him. 1628 */ 1629 if (ifp->if_bpf) 1630 bpf_mtap(ifp, m_head); 1631 1632 } 1633 1634 /* Transmit */ 1635 sc->nge_cdata.nge_tx_prod = idx; 1636 NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_ENABLE); 1637 1638 /* 1639 * Set a timeout in case the chip goes out to lunch. 
1640 */ 1641 ifp->if_timer = 5; 1642 1643 return; 1644 } 1645 1646 static void nge_init(xsc) 1647 void *xsc; 1648 { 1649 struct nge_softc *sc = xsc; 1650 struct ifnet *ifp = &sc->arpcom.ac_if; 1651 struct mii_data *mii; 1652 int s; 1653 1654 if (ifp->if_flags & IFF_RUNNING) 1655 return; 1656 1657 s = splimp(); 1658 1659 /* 1660 * Cancel pending I/O and free all RX/TX buffers. 1661 */ 1662 nge_stop(sc); 1663 1664 mii = device_get_softc(sc->nge_miibus); 1665 1666 /* Set MAC address */ 1667 CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR0); 1668 CSR_WRITE_4(sc, NGE_RXFILT_DATA, 1669 ((u_int16_t *)sc->arpcom.ac_enaddr)[0]); 1670 CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR1); 1671 CSR_WRITE_4(sc, NGE_RXFILT_DATA, 1672 ((u_int16_t *)sc->arpcom.ac_enaddr)[1]); 1673 CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR2); 1674 CSR_WRITE_4(sc, NGE_RXFILT_DATA, 1675 ((u_int16_t *)sc->arpcom.ac_enaddr)[2]); 1676 1677 /* Init circular RX list. */ 1678 if (nge_list_rx_init(sc) == ENOBUFS) { 1679 printf("nge%d: initialization failed: no " 1680 "memory for rx buffers\n", sc->nge_unit); 1681 nge_stop(sc); 1682 (void)splx(s); 1683 return; 1684 } 1685 1686 /* 1687 * Init tx descriptors. 1688 */ 1689 nge_list_tx_init(sc); 1690 1691 /* 1692 * For the NatSemi chip, we have to explicitly enable the 1693 * reception of ARP frames, as well as turn on the 'perfect 1694 * match' filter where we store the station address, otherwise 1695 * we won't receive unicasts meant for this host. 1696 */ 1697 NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ARP); 1698 NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_PERFECT); 1699 1700 /* If we want promiscuous mode, set the allframes bit. */ 1701 if (ifp->if_flags & IFF_PROMISC) { 1702 NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLPHYS); 1703 } else { 1704 NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLPHYS); 1705 } 1706 1707 /* 1708 * Set the capture broadcast bit to capture broadcast frames. 1709 */ 1710 if (ifp->if_flags & IFF_BROADCAST) { 1711 NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD); 1712 } else { 1713 NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD); 1714 } 1715 1716 /* 1717 * Load the multicast filter. 1718 */ 1719 nge_setmulti(sc); 1720 1721 /* Turn the receive filter on */ 1722 NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ENABLE); 1723 1724 /* 1725 * Load the address of the RX and TX lists. 1726 */ 1727 CSR_WRITE_4(sc, NGE_RX_LISTPTR, 1728 vtophys(&sc->nge_ldata->nge_rx_list[0])); 1729 CSR_WRITE_4(sc, NGE_TX_LISTPTR, 1730 vtophys(&sc->nge_ldata->nge_tx_list[0])); 1731 1732 /* Set RX configuration */ 1733 CSR_WRITE_4(sc, NGE_RX_CFG, NGE_RXCFG); 1734 /* 1735 * Enable hardware checksum validation for all IPv4 1736 * packets, do not reject packets with bad checksums. 1737 */ 1738 CSR_WRITE_4(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_IPCSUM_ENB); 1739 1740 /* 1741 * Tell the chip to detect and strip VLAN tag info from 1742 * received frames. The tag will be provided in the extsts 1743 * field in the RX descriptors. 1744 */ 1745 NGE_SETBIT(sc, NGE_VLAN_IP_RXCTL, 1746 NGE_VIPRXCTL_TAG_DETECT_ENB|NGE_VIPRXCTL_TAG_STRIP_ENB); 1747 1748 /* Set TX configuration */ 1749 CSR_WRITE_4(sc, NGE_TX_CFG, NGE_TXCFG); 1750 1751 /* 1752 * Enable TX IPv4 checksumming on a per-packet basis. 1753 */ 1754 CSR_WRITE_4(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_CSUM_PER_PKT); 1755 1756 /* 1757 * Tell the chip to insert VLAN tags on a per-packet basis as 1758 * dictated by the code in the frame encapsulation routine. 
1759 */ 1760 NGE_SETBIT(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_TAG_PER_PKT); 1761 1762 /* Set full/half duplex mode. */ 1763 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 1764 NGE_SETBIT(sc, NGE_TX_CFG, 1765 (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR)); 1766 NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX); 1767 } else { 1768 NGE_CLRBIT(sc, NGE_TX_CFG, 1769 (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR)); 1770 NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX); 1771 } 1772 1773 nge_tick(sc); 1774 1775 /* 1776 * Enable the delivery of PHY interrupts based on 1777 * link/speed/duplex status changes. Also enable the 1778 * extsts field in the DMA descriptors (needed for 1779 * TCP/IP checksum offload on transmit). 1780 */ 1781 NGE_SETBIT(sc, NGE_CFG, NGE_CFG_PHYINTR_SPD| 1782 NGE_CFG_PHYINTR_LNK|NGE_CFG_PHYINTR_DUP|NGE_CFG_EXTSTS_ENB); 1783 1784 /* 1785 * Configure interrupt holdoff (moderation). We can 1786 * have the chip delay interrupt delivery for a certain 1787 * period. Units are in 100us, and the max setting 1788 * is 25500us (0xFF x 100us). Default is a 100us holdoff. 1789 */ 1790 CSR_WRITE_4(sc, NGE_IHR, 0x01); 1791 1792 /* 1793 * Enable interrupts. 1794 */ 1795 CSR_WRITE_4(sc, NGE_IMR, NGE_INTRS); 1796 CSR_WRITE_4(sc, NGE_IER, 1); 1797 1798 /* Enable receiver and transmitter. */ 1799 NGE_CLRBIT(sc, NGE_CSR, NGE_CSR_TX_DISABLE|NGE_CSR_RX_DISABLE); 1800 NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE); 1801 1802 nge_ifmedia_upd(ifp); 1803 1804 ifp->if_flags |= IFF_RUNNING; 1805 ifp->if_flags &= ~IFF_OACTIVE; 1806 1807 (void)splx(s); 1808 1809 return; 1810 } 1811 1812 /* 1813 * Set media options. 1814 */ 1815 static int nge_ifmedia_upd(ifp) 1816 struct ifnet *ifp; 1817 { 1818 struct nge_softc *sc; 1819 struct mii_data *mii; 1820 1821 sc = ifp->if_softc; 1822 1823 mii = device_get_softc(sc->nge_miibus); 1824 sc->nge_link = 0; 1825 if (mii->mii_instance) { 1826 struct mii_softc *miisc; 1827 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL; 1828 miisc = LIST_NEXT(miisc, mii_list)) 1829 mii_phy_reset(miisc); 1830 } 1831 mii_mediachg(mii); 1832 1833 return(0); 1834 } 1835 1836 /* 1837 * Report current media status. 1838 */ 1839 static void nge_ifmedia_sts(ifp, ifmr) 1840 struct ifnet *ifp; 1841 struct ifmediareq *ifmr; 1842 { 1843 struct nge_softc *sc; 1844 struct mii_data *mii; 1845 1846 sc = ifp->if_softc; 1847 1848 mii = device_get_softc(sc->nge_miibus); 1849 mii_pollstat(mii); 1850 ifmr->ifm_active = mii->mii_media_active; 1851 ifmr->ifm_status = mii->mii_media_status; 1852 1853 return; 1854 } 1855 1856 static int nge_ioctl(ifp, command, data) 1857 struct ifnet *ifp; 1858 u_long command; 1859 caddr_t data; 1860 { 1861 struct nge_softc *sc = ifp->if_softc; 1862 struct ifreq *ifr = (struct ifreq *) data; 1863 struct mii_data *mii; 1864 int s, error = 0; 1865 1866 s = splimp(); 1867 1868 switch(command) { 1869 case SIOCSIFADDR: 1870 case SIOCGIFADDR: 1871 error = ether_ioctl(ifp, command, data); 1872 break; 1873 case SIOCSIFMTU: 1874 if (ifr->ifr_mtu > NGE_JUMBO_MTU) 1875 error = EINVAL; 1876 else { 1877 ifp->if_mtu = ifr->ifr_mtu; 1878 /* 1879 * Workaround: if the MTU is larger than 1880 * 8152 (TX FIFO size minus 64 minus 18), turn off 1881 * TX checksum offloading. 
1882 */ 1883 if (ifr->ifr_mtu >= 8152) 1884 ifp->if_hwassist = 0; 1885 else 1886 ifp->if_hwassist = NGE_CSUM_FEATURES; 1887 } 1888 break; 1889 case SIOCSIFFLAGS: 1890 if (ifp->if_flags & IFF_UP) { 1891 if (ifp->if_flags & IFF_RUNNING && 1892 ifp->if_flags & IFF_PROMISC && 1893 !(sc->nge_if_flags & IFF_PROMISC)) { 1894 NGE_SETBIT(sc, NGE_RXFILT_CTL, 1895 NGE_RXFILTCTL_ALLPHYS| 1896 NGE_RXFILTCTL_ALLMULTI); 1897 } else if (ifp->if_flags & IFF_RUNNING && 1898 !(ifp->if_flags & IFF_PROMISC) && 1899 sc->nge_if_flags & IFF_PROMISC) { 1900 NGE_CLRBIT(sc, NGE_RXFILT_CTL, 1901 NGE_RXFILTCTL_ALLPHYS); 1902 if (!(ifp->if_flags & IFF_ALLMULTI)) 1903 NGE_CLRBIT(sc, NGE_RXFILT_CTL, 1904 NGE_RXFILTCTL_ALLMULTI); 1905 } else { 1906 ifp->if_flags &= ~IFF_RUNNING; 1907 nge_init(sc); 1908 } 1909 } else { 1910 if (ifp->if_flags & IFF_RUNNING) 1911 nge_stop(sc); 1912 } 1913 sc->nge_if_flags = ifp->if_flags; 1914 error = 0; 1915 break; 1916 case SIOCADDMULTI: 1917 case SIOCDELMULTI: 1918 nge_setmulti(sc); 1919 error = 0; 1920 break; 1921 case SIOCGIFMEDIA: 1922 case SIOCSIFMEDIA: 1923 mii = device_get_softc(sc->nge_miibus); 1924 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 1925 break; 1926 default: 1927 error = EINVAL; 1928 break; 1929 } 1930 1931 (void)splx(s); 1932 1933 return(error); 1934 } 1935 1936 static void nge_watchdog(ifp) 1937 struct ifnet *ifp; 1938 { 1939 struct nge_softc *sc; 1940 1941 sc = ifp->if_softc; 1942 1943 ifp->if_oerrors++; 1944 printf("nge%d: watchdog timeout\n", sc->nge_unit); 1945 1946 nge_stop(sc); 1947 nge_reset(sc); 1948 ifp->if_flags &= ~IFF_RUNNING; 1949 nge_init(sc); 1950 1951 if (ifp->if_snd.ifq_head != NULL) 1952 nge_start(ifp); 1953 1954 return; 1955 } 1956 1957 /* 1958 * Stop the adapter and free any mbufs allocated to the 1959 * RX and TX lists. 1960 */ 1961 static void nge_stop(sc) 1962 struct nge_softc *sc; 1963 { 1964 register int i; 1965 struct ifnet *ifp; 1966 struct mii_data *mii; 1967 1968 ifp = &sc->arpcom.ac_if; 1969 ifp->if_timer = 0; 1970 mii = device_get_softc(sc->nge_miibus); 1971 1972 untimeout(nge_tick, sc, sc->nge_stat_ch); 1973 CSR_WRITE_4(sc, NGE_IER, 0); 1974 CSR_WRITE_4(sc, NGE_IMR, 0); 1975 NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_DISABLE|NGE_CSR_RX_DISABLE); 1976 DELAY(1000); 1977 CSR_WRITE_4(sc, NGE_TX_LISTPTR, 0); 1978 CSR_WRITE_4(sc, NGE_RX_LISTPTR, 0); 1979 1980 mii_down(mii); 1981 1982 sc->nge_link = 0; 1983 1984 /* 1985 * Free data in the RX lists. 1986 */ 1987 for (i = 0; i < NGE_RX_LIST_CNT; i++) { 1988 if (sc->nge_ldata->nge_rx_list[i].nge_mbuf != NULL) { 1989 m_freem(sc->nge_ldata->nge_rx_list[i].nge_mbuf); 1990 sc->nge_ldata->nge_rx_list[i].nge_mbuf = NULL; 1991 } 1992 } 1993 bzero((char *)&sc->nge_ldata->nge_rx_list, 1994 sizeof(sc->nge_ldata->nge_rx_list)); 1995 1996 /* 1997 * Free the TX list buffers. 1998 */ 1999 for (i = 0; i < NGE_TX_LIST_CNT; i++) { 2000 if (sc->nge_ldata->nge_tx_list[i].nge_mbuf != NULL) { 2001 m_freem(sc->nge_ldata->nge_tx_list[i].nge_mbuf); 2002 sc->nge_ldata->nge_tx_list[i].nge_mbuf = NULL; 2003 } 2004 } 2005 2006 bzero((char *)&sc->nge_ldata->nge_tx_list, 2007 sizeof(sc->nge_ldata->nge_tx_list)); 2008 2009 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 2010 2011 return; 2012 } 2013 2014 /* 2015 * Stop all chip I/O so that the kernel's probe routines don't 2016 * get confused by errant DMAs when rebooting. 
 */
static void nge_shutdown(dev)
	device_t		dev;
{
	struct nge_softc	*sc;

	sc = device_get_softc(dev);

	nge_reset(sc);
	nge_stop(sc);

	return;
}
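/*
 * Illustrative sketch only (not part of the driver): converting a
 * desired interrupt holdoff time into the value nge_init() writes to
 * NGE_IHR.  Per the comment in nge_init(), the register counts in
 * 100us units with a maximum of 0xFF (25500us), and the driver's
 * default of 0x01 corresponds to a 100us holdoff.  The helper name is
 * invented for this example, and the block is wrapped in #if 0 so it
 * never affects the build.
 */
#if 0	/* example only */
static u_int32_t
nge_us_to_ihr(int usecs)
{
	int			ticks;

	ticks = usecs / 100;		/* one holdoff tick = 100us */
	if (ticks < 0)
		ticks = 0;
	if (ticks > 0xFF)		/* largest value the field accepts */
		ticks = 0xFF;
	return (ticks);
}
/* Usage sketch: CSR_WRITE_4(sc, NGE_IHR, nge_us_to_ihr(100)); */
#endif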