/*
 * Copyright (c) 2001 Wind River Systems
 * Copyright (c) 1997, 1998, 1999, 2000, 2001
 *	Bill Paul <wpaul@bsdi.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * National Semiconductor DP83820/DP83821 gigabit ethernet driver
 * for FreeBSD. Datasheets are available from:
 *
 * http://www.national.com/ds/DP/DP83820.pdf
 * http://www.national.com/ds/DP/DP83821.pdf
 *
 * These chips are used on several low cost gigabit ethernet NICs
 * sold by D-Link, Addtron, SMC and Asante. Both parts are
 * virtually the same, except the 83820 is a 64-bit/32-bit part,
 * while the 83821 is 32-bit only.
 *
 * Many cards also use National gigE transceivers, such as the
 * DP83891, DP83861 and DP83862 gigPHYTER parts. The DP83861 datasheet
 * contains a full register description that applies to all of these
 * components:
 *
 * http://www.national.com/ds/DP/DP83861.pdf
 *
 * Written by Bill Paul <wpaul@bsdi.com>
 * BSDi Open Source Solutions
 */

/*
 * The NatSemi DP83820 and 83821 controllers are enhanced versions
 * of the NatSemi MacPHYTER 10/100 devices. They support 10, 100
 * and 1000Mbps speeds with 1000baseX (ten bit interface), MII and GMII
 * ports. Other features include 8K TX FIFO and 32K RX FIFO, TCP/IP
 * hardware checksum offload (IPv4 only), VLAN tagging and filtering,
 * priority TX and RX queues, a 2048 bit multicast hash filter, 4 RX pattern
 * matching buffers, one perfect address filter buffer and interrupt
 * moderation. The 83820 supports both 64-bit and 32-bit addressing
 * and data transfers: the 64-bit support can be toggled on or off
 * via software. This affects the size of certain fields in the DMA
 * descriptors.
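 *
 * As a rough summary of how the descriptors are used in this file
 * (see nge_list_rx_init(), nge_list_tx_init() and nge_encap() below):
 * nge_next holds the physical address of the next descriptor in the
 * ring, nge_ptr the physical address of the packet buffer, nge_ctl
 * the buffer length plus command/status bits such as NGE_CMDSTS_OWN
 * and NGE_CMDSTS_MORE, and nge_extsts the checksum/VLAN information.
 * The nge_mbuf and nge_nextdesc fields are software-only bookkeeping.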
 *
 * There are two bugs/misfeatures in the 83820/83821 that I have
 * discovered so far:
 *
 * - Receive buffers must be aligned on 64-bit boundaries, which means
 *   you must resort to copying data in order to fix up the payload
 *   alignment.
 *
 * - In order to transmit jumbo frames larger than 8170 bytes, you have
 *   to turn off transmit checksum offloading, because the chip can't
 *   compute the checksum on an outgoing frame unless it fits entirely
 *   within the TX FIFO, which is only 8192 bytes in size. If you have
 *   TX checksum offload enabled and you attempt to transmit a
 *   frame larger than 8170 bytes, the transmitter will wedge.
 *
 * To work around the latter problem, TX checksum offload is disabled
 * if the user selects an MTU larger than 8152 (8170 - 18).
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <net/bpf.h>

#include <vm/vm.h>		/* for vtophys */
#include <vm/pmap.h>		/* for vtophys */
#include <machine/clock.h>	/* for DELAY */
#include <machine/bus_pio.h>
#include <machine/bus_memio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <pci/pcireg.h>
#include <pci/pcivar.h>

#define NGE_USEIOSPACE

#include <dev/nge/if_ngereg.h>

MODULE_DEPEND(nge, miibus, 1, 1, 1);

/* "controller miibus0" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#ifndef lint
static const char rcsid[] =
  "$FreeBSD$";
#endif

#define NGE_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

/*
 * Various supported device vendors/types and their names.
142 */ 143 static struct nge_type nge_devs[] = { 144 { NGE_VENDORID, NGE_DEVICEID, 145 "National Semiconductor Gigabit Ethernet" }, 146 { 0, 0, NULL } 147 }; 148 149 static int nge_probe __P((device_t)); 150 static int nge_attach __P((device_t)); 151 static int nge_detach __P((device_t)); 152 153 static int nge_alloc_jumbo_mem __P((struct nge_softc *)); 154 static void nge_free_jumbo_mem __P((struct nge_softc *)); 155 static void *nge_jalloc __P((struct nge_softc *)); 156 static void nge_jfree __P((caddr_t, void *)); 157 158 static int nge_newbuf __P((struct nge_softc *, 159 struct nge_desc *, 160 struct mbuf *)); 161 static int nge_encap __P((struct nge_softc *, 162 struct mbuf *, u_int32_t *)); 163 static void nge_rxeof __P((struct nge_softc *)); 164 static void nge_rxeoc __P((struct nge_softc *)); 165 static void nge_txeof __P((struct nge_softc *)); 166 static void nge_intr __P((void *)); 167 static void nge_tick __P((void *)); 168 static void nge_start __P((struct ifnet *)); 169 static int nge_ioctl __P((struct ifnet *, u_long, caddr_t)); 170 static void nge_init __P((void *)); 171 static void nge_stop __P((struct nge_softc *)); 172 static void nge_watchdog __P((struct ifnet *)); 173 static void nge_shutdown __P((device_t)); 174 static int nge_ifmedia_upd __P((struct ifnet *)); 175 static void nge_ifmedia_sts __P((struct ifnet *, struct ifmediareq *)); 176 177 static void nge_delay __P((struct nge_softc *)); 178 static void nge_eeprom_idle __P((struct nge_softc *)); 179 static void nge_eeprom_putbyte __P((struct nge_softc *, int)); 180 static void nge_eeprom_getword __P((struct nge_softc *, int, u_int16_t *)); 181 static void nge_read_eeprom __P((struct nge_softc *, caddr_t, int, 182 int, int)); 183 184 static void nge_mii_sync __P((struct nge_softc *)); 185 static void nge_mii_send __P((struct nge_softc *, u_int32_t, int)); 186 static int nge_mii_readreg __P((struct nge_softc *, 187 struct nge_mii_frame *)); 188 static int nge_mii_writereg __P((struct nge_softc *, 189 struct nge_mii_frame *)); 190 191 static int nge_miibus_readreg __P((device_t, int, int)); 192 static int nge_miibus_writereg __P((device_t, int, int, int)); 193 static void nge_miibus_statchg __P((device_t)); 194 195 static void nge_setmulti __P((struct nge_softc *)); 196 static u_int32_t nge_crc __P((struct nge_softc *, caddr_t)); 197 static void nge_reset __P((struct nge_softc *)); 198 static int nge_list_rx_init __P((struct nge_softc *)); 199 static int nge_list_tx_init __P((struct nge_softc *)); 200 201 #ifdef NGE_USEIOSPACE 202 #define NGE_RES SYS_RES_IOPORT 203 #define NGE_RID NGE_PCI_LOIO 204 #else 205 #define NGE_RES SYS_RES_MEMORY 206 #define NGE_RID NGE_PCI_LOMEM 207 #endif 208 209 static device_method_t nge_methods[] = { 210 /* Device interface */ 211 DEVMETHOD(device_probe, nge_probe), 212 DEVMETHOD(device_attach, nge_attach), 213 DEVMETHOD(device_detach, nge_detach), 214 DEVMETHOD(device_shutdown, nge_shutdown), 215 216 /* bus interface */ 217 DEVMETHOD(bus_print_child, bus_generic_print_child), 218 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 219 220 /* MII interface */ 221 DEVMETHOD(miibus_readreg, nge_miibus_readreg), 222 DEVMETHOD(miibus_writereg, nge_miibus_writereg), 223 DEVMETHOD(miibus_statchg, nge_miibus_statchg), 224 225 { 0, 0 } 226 }; 227 228 static driver_t nge_driver = { 229 "nge", 230 nge_methods, 231 sizeof(struct nge_softc) 232 }; 233 234 static devclass_t nge_devclass; 235 236 DRIVER_MODULE(if_nge, pci, nge_driver, nge_devclass, 0, 0); 237 DRIVER_MODULE(miibus, nge, 
miibus_driver, miibus_devclass, 0, 0); 238 239 #define NGE_SETBIT(sc, reg, x) \ 240 CSR_WRITE_4(sc, reg, \ 241 CSR_READ_4(sc, reg) | (x)) 242 243 #define NGE_CLRBIT(sc, reg, x) \ 244 CSR_WRITE_4(sc, reg, \ 245 CSR_READ_4(sc, reg) & ~(x)) 246 247 #define SIO_SET(x) \ 248 CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) | x) 249 250 #define SIO_CLR(x) \ 251 CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) & ~x) 252 253 static void nge_delay(sc) 254 struct nge_softc *sc; 255 { 256 int idx; 257 258 for (idx = (300 / 33) + 1; idx > 0; idx--) 259 CSR_READ_4(sc, NGE_CSR); 260 261 return; 262 } 263 264 static void nge_eeprom_idle(sc) 265 struct nge_softc *sc; 266 { 267 register int i; 268 269 SIO_SET(NGE_MEAR_EE_CSEL); 270 nge_delay(sc); 271 SIO_SET(NGE_MEAR_EE_CLK); 272 nge_delay(sc); 273 274 for (i = 0; i < 25; i++) { 275 SIO_CLR(NGE_MEAR_EE_CLK); 276 nge_delay(sc); 277 SIO_SET(NGE_MEAR_EE_CLK); 278 nge_delay(sc); 279 } 280 281 SIO_CLR(NGE_MEAR_EE_CLK); 282 nge_delay(sc); 283 SIO_CLR(NGE_MEAR_EE_CSEL); 284 nge_delay(sc); 285 CSR_WRITE_4(sc, NGE_MEAR, 0x00000000); 286 287 return; 288 } 289 290 /* 291 * Send a read command and address to the EEPROM, check for ACK. 292 */ 293 static void nge_eeprom_putbyte(sc, addr) 294 struct nge_softc *sc; 295 int addr; 296 { 297 register int d, i; 298 299 d = addr | NGE_EECMD_READ; 300 301 /* 302 * Feed in each bit and stobe the clock. 303 */ 304 for (i = 0x400; i; i >>= 1) { 305 if (d & i) { 306 SIO_SET(NGE_MEAR_EE_DIN); 307 } else { 308 SIO_CLR(NGE_MEAR_EE_DIN); 309 } 310 nge_delay(sc); 311 SIO_SET(NGE_MEAR_EE_CLK); 312 nge_delay(sc); 313 SIO_CLR(NGE_MEAR_EE_CLK); 314 nge_delay(sc); 315 } 316 317 return; 318 } 319 320 /* 321 * Read a word of data stored in the EEPROM at address 'addr.' 322 */ 323 static void nge_eeprom_getword(sc, addr, dest) 324 struct nge_softc *sc; 325 int addr; 326 u_int16_t *dest; 327 { 328 register int i; 329 u_int16_t word = 0; 330 331 /* Force EEPROM to idle state. */ 332 nge_eeprom_idle(sc); 333 334 /* Enter EEPROM access mode. */ 335 nge_delay(sc); 336 SIO_CLR(NGE_MEAR_EE_CLK); 337 nge_delay(sc); 338 SIO_SET(NGE_MEAR_EE_CSEL); 339 nge_delay(sc); 340 341 /* 342 * Send address of word we want to read. 343 */ 344 nge_eeprom_putbyte(sc, addr); 345 346 /* 347 * Start reading bits from EEPROM. 348 */ 349 for (i = 0x8000; i; i >>= 1) { 350 SIO_SET(NGE_MEAR_EE_CLK); 351 nge_delay(sc); 352 if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_EE_DOUT) 353 word |= i; 354 nge_delay(sc); 355 SIO_CLR(NGE_MEAR_EE_CLK); 356 nge_delay(sc); 357 } 358 359 /* Turn off EEPROM access mode. */ 360 nge_eeprom_idle(sc); 361 362 *dest = word; 363 364 return; 365 } 366 367 /* 368 * Read a sequence of words from the EEPROM. 369 */ 370 static void nge_read_eeprom(sc, dest, off, cnt, swap) 371 struct nge_softc *sc; 372 caddr_t dest; 373 int off; 374 int cnt; 375 int swap; 376 { 377 int i; 378 u_int16_t word = 0, *ptr; 379 380 for (i = 0; i < cnt; i++) { 381 nge_eeprom_getword(sc, off + i, &word); 382 ptr = (u_int16_t *)(dest + (i * 2)); 383 if (swap) 384 *ptr = ntohs(word); 385 else 386 *ptr = word; 387 } 388 389 return; 390 } 391 392 /* 393 * Sync the PHYs by setting data bit and strobing the clock 32 times. 394 */ 395 static void nge_mii_sync(sc) 396 struct nge_softc *sc; 397 { 398 register int i; 399 400 SIO_SET(NGE_MEAR_MII_DIR|NGE_MEAR_MII_DATA); 401 402 for (i = 0; i < 32; i++) { 403 SIO_SET(NGE_MEAR_MII_CLK); 404 DELAY(1); 405 SIO_CLR(NGE_MEAR_MII_CLK); 406 DELAY(1); 407 } 408 409 return; 410 } 411 412 /* 413 * Clock a series of bits through the MII. 
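 *
 * For reference, the management frames driven through nge_mii_send()
 * by nge_mii_readreg() and nge_mii_writereg() below follow the usual
 * MII layout: a 32-bit sync pattern (nge_mii_sync()), a 2-bit start
 * delimiter, a 2-bit opcode, a 5-bit PHY address, a 5-bit register
 * address, a 2-bit turnaround and 16 data bits.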
414 */ 415 static void nge_mii_send(sc, bits, cnt) 416 struct nge_softc *sc; 417 u_int32_t bits; 418 int cnt; 419 { 420 int i; 421 422 SIO_CLR(NGE_MEAR_MII_CLK); 423 424 for (i = (0x1 << (cnt - 1)); i; i >>= 1) { 425 if (bits & i) { 426 SIO_SET(NGE_MEAR_MII_DATA); 427 } else { 428 SIO_CLR(NGE_MEAR_MII_DATA); 429 } 430 DELAY(1); 431 SIO_CLR(NGE_MEAR_MII_CLK); 432 DELAY(1); 433 SIO_SET(NGE_MEAR_MII_CLK); 434 } 435 } 436 437 /* 438 * Read an PHY register through the MII. 439 */ 440 static int nge_mii_readreg(sc, frame) 441 struct nge_softc *sc; 442 struct nge_mii_frame *frame; 443 444 { 445 int i, ack, s; 446 447 s = splimp(); 448 449 /* 450 * Set up frame for RX. 451 */ 452 frame->mii_stdelim = NGE_MII_STARTDELIM; 453 frame->mii_opcode = NGE_MII_READOP; 454 frame->mii_turnaround = 0; 455 frame->mii_data = 0; 456 457 CSR_WRITE_4(sc, NGE_MEAR, 0); 458 459 /* 460 * Turn on data xmit. 461 */ 462 SIO_SET(NGE_MEAR_MII_DIR); 463 464 nge_mii_sync(sc); 465 466 /* 467 * Send command/address info. 468 */ 469 nge_mii_send(sc, frame->mii_stdelim, 2); 470 nge_mii_send(sc, frame->mii_opcode, 2); 471 nge_mii_send(sc, frame->mii_phyaddr, 5); 472 nge_mii_send(sc, frame->mii_regaddr, 5); 473 474 /* Idle bit */ 475 SIO_CLR((NGE_MEAR_MII_CLK|NGE_MEAR_MII_DATA)); 476 DELAY(1); 477 SIO_SET(NGE_MEAR_MII_CLK); 478 DELAY(1); 479 480 /* Turn off xmit. */ 481 SIO_CLR(NGE_MEAR_MII_DIR); 482 /* Check for ack */ 483 SIO_CLR(NGE_MEAR_MII_CLK); 484 DELAY(1); 485 SIO_SET(NGE_MEAR_MII_CLK); 486 DELAY(1); 487 ack = CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_MII_DATA; 488 489 /* 490 * Now try reading data bits. If the ack failed, we still 491 * need to clock through 16 cycles to keep the PHY(s) in sync. 492 */ 493 if (ack) { 494 for(i = 0; i < 16; i++) { 495 SIO_CLR(NGE_MEAR_MII_CLK); 496 DELAY(1); 497 SIO_SET(NGE_MEAR_MII_CLK); 498 DELAY(1); 499 } 500 goto fail; 501 } 502 503 for (i = 0x8000; i; i >>= 1) { 504 SIO_CLR(NGE_MEAR_MII_CLK); 505 DELAY(1); 506 if (!ack) { 507 if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_MII_DATA) 508 frame->mii_data |= i; 509 DELAY(1); 510 } 511 SIO_SET(NGE_MEAR_MII_CLK); 512 DELAY(1); 513 } 514 515 fail: 516 517 SIO_CLR(NGE_MEAR_MII_CLK); 518 DELAY(1); 519 SIO_SET(NGE_MEAR_MII_CLK); 520 DELAY(1); 521 522 splx(s); 523 524 if (ack) 525 return(1); 526 return(0); 527 } 528 529 /* 530 * Write to a PHY register through the MII. 531 */ 532 static int nge_mii_writereg(sc, frame) 533 struct nge_softc *sc; 534 struct nge_mii_frame *frame; 535 536 { 537 int s; 538 539 s = splimp(); 540 /* 541 * Set up frame for TX. 542 */ 543 544 frame->mii_stdelim = NGE_MII_STARTDELIM; 545 frame->mii_opcode = NGE_MII_WRITEOP; 546 frame->mii_turnaround = NGE_MII_TURNAROUND; 547 548 /* 549 * Turn on data output. 550 */ 551 SIO_SET(NGE_MEAR_MII_DIR); 552 553 nge_mii_sync(sc); 554 555 nge_mii_send(sc, frame->mii_stdelim, 2); 556 nge_mii_send(sc, frame->mii_opcode, 2); 557 nge_mii_send(sc, frame->mii_phyaddr, 5); 558 nge_mii_send(sc, frame->mii_regaddr, 5); 559 nge_mii_send(sc, frame->mii_turnaround, 2); 560 nge_mii_send(sc, frame->mii_data, 16); 561 562 /* Idle bit. */ 563 SIO_SET(NGE_MEAR_MII_CLK); 564 DELAY(1); 565 SIO_CLR(NGE_MEAR_MII_CLK); 566 DELAY(1); 567 568 /* 569 * Turn off xmit. 
570 */ 571 SIO_CLR(NGE_MEAR_MII_DIR); 572 573 splx(s); 574 575 return(0); 576 } 577 578 static int nge_miibus_readreg(dev, phy, reg) 579 device_t dev; 580 int phy, reg; 581 { 582 struct nge_softc *sc; 583 struct nge_mii_frame frame; 584 585 sc = device_get_softc(dev); 586 587 bzero((char *)&frame, sizeof(frame)); 588 589 frame.mii_phyaddr = phy; 590 frame.mii_regaddr = reg; 591 nge_mii_readreg(sc, &frame); 592 593 return(frame.mii_data); 594 } 595 596 static int nge_miibus_writereg(dev, phy, reg, data) 597 device_t dev; 598 int phy, reg, data; 599 { 600 struct nge_softc *sc; 601 struct nge_mii_frame frame; 602 603 sc = device_get_softc(dev); 604 605 bzero((char *)&frame, sizeof(frame)); 606 607 frame.mii_phyaddr = phy; 608 frame.mii_regaddr = reg; 609 frame.mii_data = data; 610 nge_mii_writereg(sc, &frame); 611 612 return(0); 613 } 614 615 static void nge_miibus_statchg(dev) 616 device_t dev; 617 { 618 struct nge_softc *sc; 619 struct mii_data *mii; 620 621 sc = device_get_softc(dev); 622 mii = device_get_softc(sc->nge_miibus); 623 624 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 625 NGE_SETBIT(sc, NGE_TX_CFG, 626 (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR)); 627 NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX); 628 } else { 629 NGE_CLRBIT(sc, NGE_TX_CFG, 630 (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR)); 631 NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX); 632 } 633 634 /* If we have a 1000Mbps link, set the mode_1000 bit. */ 635 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_TX || 636 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) { 637 NGE_SETBIT(sc, NGE_CFG, NGE_CFG_MODE_1000); 638 } else { 639 NGE_CLRBIT(sc, NGE_CFG, NGE_CFG_MODE_1000); 640 } 641 642 return; 643 } 644 645 static u_int32_t nge_crc(sc, addr) 646 struct nge_softc *sc; 647 caddr_t addr; 648 { 649 u_int32_t crc, carry; 650 int i, j; 651 u_int8_t c; 652 653 /* Compute CRC for the address value. */ 654 crc = 0xFFFFFFFF; /* initial value */ 655 656 for (i = 0; i < 6; i++) { 657 c = *(addr + i); 658 for (j = 0; j < 8; j++) { 659 carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01); 660 crc <<= 1; 661 c >>= 1; 662 if (carry) 663 crc = (crc ^ 0x04c11db6) | carry; 664 } 665 } 666 667 /* 668 * return the filter bit position 669 */ 670 671 return((crc >> 21) & 0x00000FFF); 672 } 673 674 static void nge_setmulti(sc) 675 struct nge_softc *sc; 676 { 677 struct ifnet *ifp; 678 struct ifmultiaddr *ifma; 679 u_int32_t h = 0, i, filtsave; 680 int bit, index; 681 682 ifp = &sc->arpcom.ac_if; 683 684 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 685 NGE_CLRBIT(sc, NGE_RXFILT_CTL, 686 NGE_RXFILTCTL_MCHASH|NGE_RXFILTCTL_UCHASH); 687 NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLMULTI); 688 return; 689 } 690 691 /* 692 * We have to explicitly enable the multicast hash table 693 * on the NatSemi chip if we want to use it, which we do. 694 * We also have to tell it that we don't want to use the 695 * hash table for matching unicast addresses. 
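	 *
	 * For illustration: given the hash value h returned by nge_crc()
	 * above, the loop below uses (h >> 4) & 0x7F to pick one of the
	 * 128 16-bit filter words and h & 0xF to pick the bit within
	 * that word.  For example, h = 0x3A5 selects word 0x3A, bit 5.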
	 */
	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_MCHASH);
	NGE_CLRBIT(sc, NGE_RXFILT_CTL,
	    NGE_RXFILTCTL_ALLMULTI|NGE_RXFILTCTL_UCHASH);

	filtsave = CSR_READ_4(sc, NGE_RXFILT_CTL);

	/* first, zot all the existing hash bits */
	for (i = 0; i < NGE_MCAST_FILTER_LEN; i += 2) {
		CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_MCAST_LO + i);
		CSR_WRITE_4(sc, NGE_RXFILT_DATA, 0);
	}

	/*
	 * From the 11 bits returned by the crc routine, the top 7
	 * bits represent the 16-bit word in the mcast hash table
	 * that needs to be updated, and the lower 4 bits represent
	 * which bit within that word needs to be set.
	 */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = nge_crc(sc, LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		index = (h >> 4) & 0x7F;
		bit = h & 0xF;
		CSR_WRITE_4(sc, NGE_RXFILT_CTL,
		    NGE_FILTADDR_MCAST_LO + (index * 2));
		NGE_SETBIT(sc, NGE_RXFILT_DATA, (1 << bit));
	}

	CSR_WRITE_4(sc, NGE_RXFILT_CTL, filtsave);

	return;
}

static void nge_reset(sc)
	struct nge_softc	*sc;
{
	register int		i;

	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RESET);

	for (i = 0; i < NGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, NGE_CSR) & NGE_CSR_RESET))
			break;
	}

	if (i == NGE_TIMEOUT)
		printf("nge%d: reset never completed\n", sc->nge_unit);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	/*
	 * If this is a NatSemi chip, make sure to clear
	 * PME mode.
	 */
	CSR_WRITE_4(sc, NGE_CLKRUN, NGE_CLKRUN_PMESTS);
	CSR_WRITE_4(sc, NGE_CLKRUN, 0);

	return;
}

/*
 * Probe for a NatSemi chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int nge_probe(dev)
	device_t	dev;
{
	struct nge_type	*t;

	t = nge_devs;

	while(t->nge_name != NULL) {
		if ((pci_get_vendor(dev) == t->nge_vid) &&
		    (pci_get_device(dev) == t->nge_did)) {
			device_set_desc(dev, t->nge_name);
			return(0);
		}
		t++;
	}

	return(ENXIO);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int nge_attach(dev)
	device_t	dev;
{
	int			s;
	u_char			eaddr[ETHER_ADDR_LEN];
	u_int32_t		command;
	struct nge_softc	*sc;
	struct ifnet		*ifp;
	int			unit, error = 0, rid;

	s = splimp();

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);
	bzero(sc, sizeof(struct nge_softc));

	mtx_init(&sc->nge_mtx, device_get_nameunit(dev), MTX_DEF|MTX_RECURSE);

	/*
	 * Handle power management nonsense.
	 */
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		u_int32_t		iobase, membase, irq;

		/* Save important PCI config data. */
		iobase = pci_read_config(dev, NGE_PCI_LOIO, 4);
		membase = pci_read_config(dev, NGE_PCI_LOMEM, 4);
		irq = pci_read_config(dev, NGE_PCI_INTLINE, 4);

		/* Reset the power state. */
		printf("nge%d: chip is in D%d power mode "
		    "-- setting to D0\n", unit,
		    pci_get_powerstate(dev));
		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		/* Restore PCI config data. */
		pci_write_config(dev, NGE_PCI_LOIO, iobase, 4);
		pci_write_config(dev, NGE_PCI_LOMEM, membase, 4);
		pci_write_config(dev, NGE_PCI_INTLINE, irq, 4);
	}

	/*
	 * Map control/status registers.
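	 * Whether these land in PCI I/O space or memory space is chosen
	 * at compile time by NGE_USEIOSPACE (defined above), which sets
	 * NGE_RES and NGE_RID accordingly.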
	 */
	pci_enable_busmaster(dev);
	pci_enable_io(dev, SYS_RES_IOPORT);
	pci_enable_io(dev, SYS_RES_MEMORY);
	command = pci_read_config(dev, PCIR_COMMAND, 4);

#ifdef NGE_USEIOSPACE
	if (!(command & PCIM_CMD_PORTEN)) {
		printf("nge%d: failed to enable I/O ports!\n", unit);
		error = ENXIO;
		goto fail;
	}
#else
	if (!(command & PCIM_CMD_MEMEN)) {
		printf("nge%d: failed to enable memory mapping!\n", unit);
		error = ENXIO;
		goto fail;
	}
#endif

	rid = NGE_RID;
	sc->nge_res = bus_alloc_resource(dev, NGE_RES, &rid,
	    0, ~0, 1, RF_ACTIVE);

	if (sc->nge_res == NULL) {
		printf("nge%d: couldn't map ports/memory\n", unit);
		error = ENXIO;
		goto fail;
	}

	sc->nge_btag = rman_get_bustag(sc->nge_res);
	sc->nge_bhandle = rman_get_bushandle(sc->nge_res);

	/* Allocate interrupt */
	rid = 0;
	sc->nge_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->nge_irq == NULL) {
		printf("nge%d: couldn't map interrupt\n", unit);
		bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res);
		error = ENXIO;
		goto fail;
	}

	error = bus_setup_intr(dev, sc->nge_irq, INTR_TYPE_NET,
	    nge_intr, sc, &sc->nge_intrhand);

	if (error) {
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq);
		bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res);
		printf("nge%d: couldn't set up irq\n", unit);
		goto fail;
	}

	/* Reset the adapter. */
	nge_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	nge_read_eeprom(sc, (caddr_t)&eaddr[4], NGE_EE_NODEADDR, 1, 0);
	nge_read_eeprom(sc, (caddr_t)&eaddr[2], NGE_EE_NODEADDR + 1, 1, 0);
	nge_read_eeprom(sc, (caddr_t)&eaddr[0], NGE_EE_NODEADDR + 2, 1, 0);

	/*
	 * A NatSemi chip was detected. Inform the world.
	 */
	printf("nge%d: Ethernet address: %6D\n", unit, eaddr, ":");

	sc->nge_unit = unit;
	bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);

	sc->nge_ldata = contigmalloc(sizeof(struct nge_list_data), M_DEVBUF,
	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->nge_ldata == NULL) {
		printf("nge%d: no memory for list buffers!\n", unit);
		bus_teardown_intr(dev, sc->nge_irq, sc->nge_intrhand);
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq);
		bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res);
		error = ENXIO;
		goto fail;
	}
	bzero(sc->nge_ldata, sizeof(struct nge_list_data));

	/* Try to allocate memory for jumbo buffers. */
	if (nge_alloc_jumbo_mem(sc)) {
		printf("nge%d: jumbo buffer allocation failed\n",
		    sc->nge_unit);
		contigfree(sc->nge_ldata,
		    sizeof(struct nge_list_data), M_DEVBUF);
		bus_teardown_intr(dev, sc->nge_irq, sc->nge_intrhand);
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq);
		bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res);
		error = ENXIO;
		goto fail;
	}

	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_unit = unit;
	ifp->if_name = "nge";
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = nge_ioctl;
	ifp->if_output = ether_output;
	ifp->if_start = nge_start;
	ifp->if_watchdog = nge_watchdog;
	ifp->if_init = nge_init;
	ifp->if_baudrate = 1000000000;
	ifp->if_snd.ifq_maxlen = NGE_TX_LIST_CNT - 1;
	ifp->if_hwassist = NGE_CSUM_FEATURES;

	/*
	 * Do MII setup.
945 */ 946 if (mii_phy_probe(dev, &sc->nge_miibus, 947 nge_ifmedia_upd, nge_ifmedia_sts)) { 948 printf("nge%d: MII without any PHY!\n", sc->nge_unit); 949 nge_free_jumbo_mem(sc); 950 bus_teardown_intr(dev, sc->nge_irq, sc->nge_intrhand); 951 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq); 952 bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res); 953 error = ENXIO; 954 goto fail; 955 } 956 957 /* 958 * Call MI attach routine. 959 */ 960 ether_ifattach(ifp, ETHER_BPF_SUPPORTED); 961 callout_handle_init(&sc->nge_stat_ch); 962 963 fail: 964 splx(s); 965 mtx_destroy(&sc->nge_mtx); 966 return(error); 967 } 968 969 static int nge_detach(dev) 970 device_t dev; 971 { 972 struct nge_softc *sc; 973 struct ifnet *ifp; 974 int s; 975 976 s = splimp(); 977 978 sc = device_get_softc(dev); 979 ifp = &sc->arpcom.ac_if; 980 981 nge_reset(sc); 982 nge_stop(sc); 983 ether_ifdetach(ifp, ETHER_BPF_SUPPORTED); 984 985 bus_generic_detach(dev); 986 device_delete_child(dev, sc->nge_miibus); 987 988 bus_teardown_intr(dev, sc->nge_irq, sc->nge_intrhand); 989 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq); 990 bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res); 991 992 contigfree(sc->nge_ldata, sizeof(struct nge_list_data), M_DEVBUF); 993 nge_free_jumbo_mem(sc); 994 995 splx(s); 996 mtx_destroy(&sc->nge_mtx); 997 998 return(0); 999 } 1000 1001 /* 1002 * Initialize the transmit descriptors. 1003 */ 1004 static int nge_list_tx_init(sc) 1005 struct nge_softc *sc; 1006 { 1007 struct nge_list_data *ld; 1008 struct nge_ring_data *cd; 1009 int i; 1010 1011 cd = &sc->nge_cdata; 1012 ld = sc->nge_ldata; 1013 1014 for (i = 0; i < NGE_TX_LIST_CNT; i++) { 1015 if (i == (NGE_TX_LIST_CNT - 1)) { 1016 ld->nge_tx_list[i].nge_nextdesc = 1017 &ld->nge_tx_list[0]; 1018 ld->nge_tx_list[i].nge_next = 1019 vtophys(&ld->nge_tx_list[0]); 1020 } else { 1021 ld->nge_tx_list[i].nge_nextdesc = 1022 &ld->nge_tx_list[i + 1]; 1023 ld->nge_tx_list[i].nge_next = 1024 vtophys(&ld->nge_tx_list[i + 1]); 1025 } 1026 ld->nge_tx_list[i].nge_mbuf = NULL; 1027 ld->nge_tx_list[i].nge_ptr = 0; 1028 ld->nge_tx_list[i].nge_ctl = 0; 1029 } 1030 1031 cd->nge_tx_prod = cd->nge_tx_cons = cd->nge_tx_cnt = 0; 1032 1033 return(0); 1034 } 1035 1036 1037 /* 1038 * Initialize the RX descriptors and allocate mbufs for them. Note that 1039 * we arrange the descriptors in a closed ring, so that the last descriptor 1040 * points back to the first. 1041 */ 1042 static int nge_list_rx_init(sc) 1043 struct nge_softc *sc; 1044 { 1045 struct nge_list_data *ld; 1046 struct nge_ring_data *cd; 1047 int i; 1048 1049 ld = sc->nge_ldata; 1050 cd = &sc->nge_cdata; 1051 1052 for (i = 0; i < NGE_RX_LIST_CNT; i++) { 1053 if (nge_newbuf(sc, &ld->nge_rx_list[i], NULL) == ENOBUFS) 1054 return(ENOBUFS); 1055 if (i == (NGE_RX_LIST_CNT - 1)) { 1056 ld->nge_rx_list[i].nge_nextdesc = 1057 &ld->nge_rx_list[0]; 1058 ld->nge_rx_list[i].nge_next = 1059 vtophys(&ld->nge_rx_list[0]); 1060 } else { 1061 ld->nge_rx_list[i].nge_nextdesc = 1062 &ld->nge_rx_list[i + 1]; 1063 ld->nge_rx_list[i].nge_next = 1064 vtophys(&ld->nge_rx_list[i + 1]); 1065 } 1066 } 1067 1068 cd->nge_rx_prod = 0; 1069 1070 return(0); 1071 } 1072 1073 /* 1074 * Initialize an RX descriptor and attach an MBUF cluster. 
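 *
 * Because the chip requires receive buffers to be 64-bit aligned (see
 * the note at the top of this file), we cannot simply offset the payload
 * by two bytes here to align the IP header; instead the alignment is
 * fixed up by copying in nge_rxeof() (except on i386, where unaligned
 * accesses are tolerated).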
1075 */ 1076 static int nge_newbuf(sc, c, m) 1077 struct nge_softc *sc; 1078 struct nge_desc *c; 1079 struct mbuf *m; 1080 { 1081 struct mbuf *m_new = NULL; 1082 caddr_t *buf = NULL; 1083 1084 if (m == NULL) { 1085 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 1086 if (m_new == NULL) { 1087 printf("nge%d: no memory for rx list " 1088 "-- packet dropped!\n", sc->nge_unit); 1089 return(ENOBUFS); 1090 } 1091 1092 /* Allocate the jumbo buffer */ 1093 buf = nge_jalloc(sc); 1094 if (buf == NULL) { 1095 #ifdef NGE_VERBOSE 1096 printf("nge%d: jumbo allocation failed " 1097 "-- packet dropped!\n", sc->nge_unit); 1098 #endif 1099 m_freem(m_new); 1100 return(ENOBUFS); 1101 } 1102 /* Attach the buffer to the mbuf */ 1103 m_new->m_data = (void *)buf; 1104 m_new->m_len = m_new->m_pkthdr.len = NGE_JUMBO_FRAMELEN; 1105 MEXTADD(m_new, buf, NGE_JUMBO_FRAMELEN, nge_jfree, 1106 (struct nge_softc *)sc, 0, EXT_NET_DRV); 1107 } else { 1108 m_new = m; 1109 m_new->m_len = m_new->m_pkthdr.len = NGE_JUMBO_FRAMELEN; 1110 m_new->m_data = m_new->m_ext.ext_buf; 1111 } 1112 1113 m_adj(m_new, sizeof(u_int64_t)); 1114 1115 c->nge_mbuf = m_new; 1116 c->nge_ptr = vtophys(mtod(m_new, caddr_t)); 1117 c->nge_ctl = m_new->m_len; 1118 c->nge_extsts = 0; 1119 1120 return(0); 1121 } 1122 1123 static int nge_alloc_jumbo_mem(sc) 1124 struct nge_softc *sc; 1125 { 1126 caddr_t ptr; 1127 register int i; 1128 struct nge_jpool_entry *entry; 1129 1130 /* Grab a big chunk o' storage. */ 1131 sc->nge_cdata.nge_jumbo_buf = contigmalloc(NGE_JMEM, M_DEVBUF, 1132 M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); 1133 1134 if (sc->nge_cdata.nge_jumbo_buf == NULL) { 1135 printf("nge%d: no memory for jumbo buffers!\n", sc->nge_unit); 1136 return(ENOBUFS); 1137 } 1138 1139 SLIST_INIT(&sc->nge_jfree_listhead); 1140 SLIST_INIT(&sc->nge_jinuse_listhead); 1141 1142 /* 1143 * Now divide it up into 9K pieces and save the addresses 1144 * in an array. 1145 */ 1146 ptr = sc->nge_cdata.nge_jumbo_buf; 1147 for (i = 0; i < NGE_JSLOTS; i++) { 1148 sc->nge_cdata.nge_jslots[i] = ptr; 1149 ptr += NGE_JLEN; 1150 entry = malloc(sizeof(struct nge_jpool_entry), 1151 M_DEVBUF, M_NOWAIT); 1152 if (entry == NULL) { 1153 printf("nge%d: no memory for jumbo " 1154 "buffer queue!\n", sc->nge_unit); 1155 return(ENOBUFS); 1156 } 1157 entry->slot = i; 1158 SLIST_INSERT_HEAD(&sc->nge_jfree_listhead, 1159 entry, jpool_entries); 1160 } 1161 1162 return(0); 1163 } 1164 1165 static void nge_free_jumbo_mem(sc) 1166 struct nge_softc *sc; 1167 { 1168 register int i; 1169 struct nge_jpool_entry *entry; 1170 1171 for (i = 0; i < NGE_JSLOTS; i++) { 1172 entry = SLIST_FIRST(&sc->nge_jfree_listhead); 1173 SLIST_REMOVE_HEAD(&sc->nge_jfree_listhead, jpool_entries); 1174 free(entry, M_DEVBUF); 1175 } 1176 1177 contigfree(sc->nge_cdata.nge_jumbo_buf, NGE_JMEM, M_DEVBUF); 1178 1179 return; 1180 } 1181 1182 /* 1183 * Allocate a jumbo buffer. 1184 */ 1185 static void *nge_jalloc(sc) 1186 struct nge_softc *sc; 1187 { 1188 struct nge_jpool_entry *entry; 1189 1190 entry = SLIST_FIRST(&sc->nge_jfree_listhead); 1191 1192 if (entry == NULL) { 1193 #ifdef NGE_VERBOSE 1194 printf("nge%d: no free jumbo buffers\n", sc->nge_unit); 1195 #endif 1196 return(NULL); 1197 } 1198 1199 SLIST_REMOVE_HEAD(&sc->nge_jfree_listhead, jpool_entries); 1200 SLIST_INSERT_HEAD(&sc->nge_jinuse_listhead, entry, jpool_entries); 1201 return(sc->nge_cdata.nge_jslots[entry->slot]); 1202 } 1203 1204 /* 1205 * Release a jumbo buffer. 
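 *
 * The buffer's slot in the jumbo pool is recovered from its address:
 * slot = (buf - nge_jumbo_buf) / NGE_JLEN.  For example, a buffer that
 * starts NGE_JLEN bytes into the pool belongs to slot 1.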
1206 */ 1207 static void nge_jfree(buf, args) 1208 caddr_t buf; 1209 void *args; 1210 { 1211 struct nge_softc *sc; 1212 int i; 1213 struct nge_jpool_entry *entry; 1214 1215 /* Extract the softc struct pointer. */ 1216 sc = args; 1217 1218 if (sc == NULL) 1219 panic("nge_jfree: can't find softc pointer!"); 1220 1221 /* calculate the slot this buffer belongs to */ 1222 i = ((vm_offset_t)buf 1223 - (vm_offset_t)sc->nge_cdata.nge_jumbo_buf) / NGE_JLEN; 1224 1225 if ((i < 0) || (i >= NGE_JSLOTS)) 1226 panic("nge_jfree: asked to free buffer that we don't manage!"); 1227 1228 entry = SLIST_FIRST(&sc->nge_jinuse_listhead); 1229 if (entry == NULL) 1230 panic("nge_jfree: buffer not in use!"); 1231 entry->slot = i; 1232 SLIST_REMOVE_HEAD(&sc->nge_jinuse_listhead, jpool_entries); 1233 SLIST_INSERT_HEAD(&sc->nge_jfree_listhead, entry, jpool_entries); 1234 1235 return; 1236 } 1237 /* 1238 * A frame has been uploaded: pass the resulting mbuf chain up to 1239 * the higher level protocols. 1240 */ 1241 static void nge_rxeof(sc) 1242 struct nge_softc *sc; 1243 { 1244 struct ether_header *eh; 1245 struct mbuf *m; 1246 struct ifnet *ifp; 1247 struct nge_desc *cur_rx; 1248 int i, total_len = 0; 1249 u_int32_t rxstat; 1250 1251 ifp = &sc->arpcom.ac_if; 1252 i = sc->nge_cdata.nge_rx_prod; 1253 1254 while(NGE_OWNDESC(&sc->nge_ldata->nge_rx_list[i])) { 1255 struct mbuf *m0 = NULL; 1256 u_int32_t extsts; 1257 1258 cur_rx = &sc->nge_ldata->nge_rx_list[i]; 1259 rxstat = cur_rx->nge_rxstat; 1260 extsts = cur_rx->nge_extsts; 1261 m = cur_rx->nge_mbuf; 1262 cur_rx->nge_mbuf = NULL; 1263 total_len = NGE_RXBYTES(cur_rx); 1264 NGE_INC(i, NGE_RX_LIST_CNT); 1265 1266 /* 1267 * If an error occurs, update stats, clear the 1268 * status word and leave the mbuf cluster in place: 1269 * it should simply get re-used next time this descriptor 1270 * comes up in the ring. 1271 */ 1272 if (!(rxstat & NGE_CMDSTS_PKT_OK)) { 1273 ifp->if_ierrors++; 1274 nge_newbuf(sc, cur_rx, m); 1275 continue; 1276 } 1277 1278 1279 /* 1280 * Ok. NatSemi really screwed up here. This is the 1281 * only gigE chip I know of with alignment constraints 1282 * on receive buffers. RX buffers must be 64-bit aligned. 1283 */ 1284 #ifdef __i386__ 1285 /* 1286 * By popular demand, ignore the alignment problems 1287 * on the Intel x86 platform. The performance hit 1288 * incurred due to unaligned accesses is much smaller 1289 * than the hit produced by forcing buffer copies all 1290 * the time, especially with jumbo frames. We still 1291 * need to fix up the alignment everywhere else though. 1292 */ 1293 if (nge_newbuf(sc, cur_rx, NULL) == ENOBUFS) { 1294 #endif 1295 m0 = m_devget(mtod(m, char *), total_len, 1296 ETHER_ALIGN, ifp, NULL); 1297 nge_newbuf(sc, cur_rx, m); 1298 if (m0 == NULL) { 1299 printf("nge%d: no receive buffers " 1300 "available -- packet dropped!\n", 1301 sc->nge_unit); 1302 ifp->if_ierrors++; 1303 continue; 1304 } 1305 m = m0; 1306 #ifdef __i386__ 1307 } else { 1308 m->m_pkthdr.rcvif = ifp; 1309 m->m_pkthdr.len = m->m_len = total_len; 1310 } 1311 #endif 1312 1313 ifp->if_ipackets++; 1314 eh = mtod(m, struct ether_header *); 1315 1316 /* Remove header from mbuf and pass it on. */ 1317 m_adj(m, sizeof(struct ether_header)); 1318 1319 /* Do IP checksum checking. 
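		 *
		 * The chip reports per-packet IP/TCP/UDP checksum results
		 * in the descriptor's extsts word; the code below simply
		 * translates those bits into mbuf csum_flags so the stack
		 * can skip its own software checksum.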
		 */
		if (extsts & NGE_RXEXTSTS_IPPKT)
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
		if (!(extsts & NGE_RXEXTSTS_IPCSUMERR))
			m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
		if ((extsts & NGE_RXEXTSTS_TCPPKT &&
		    !(extsts & NGE_RXEXTSTS_TCPCSUMERR)) ||
		    (extsts & NGE_RXEXTSTS_UDPPKT &&
		    !(extsts & NGE_RXEXTSTS_UDPCSUMERR))) {
			m->m_pkthdr.csum_flags |=
			    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xffff;
		}

		/*
		 * If we received a packet with a vlan tag, pass it
		 * to vlan_input() instead of ether_input().
		 */
		if (extsts & NGE_RXEXTSTS_VLANPKT) {
			VLAN_INPUT_TAG(ifp, eh, m, extsts & NGE_RXEXTSTS_VTCI);
			continue;
		}

		ether_input(ifp, eh, m);
	}

	sc->nge_cdata.nge_rx_prod = i;

	return;
}

static void nge_rxeoc(sc)
	struct nge_softc	*sc;
{
	struct ifnet		*ifp;

	ifp = &sc->arpcom.ac_if;
	nge_rxeof(sc);
	ifp->if_flags &= ~IFF_RUNNING;
	nge_init(sc);
	return;
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */

static void nge_txeof(sc)
	struct nge_softc	*sc;
{
	struct nge_desc		*cur_tx = NULL;
	struct ifnet		*ifp;
	u_int32_t		idx;

	ifp = &sc->arpcom.ac_if;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	idx = sc->nge_cdata.nge_tx_cons;
	while (idx != sc->nge_cdata.nge_tx_prod) {
		cur_tx = &sc->nge_ldata->nge_tx_list[idx];

		if (NGE_OWNDESC(cur_tx))
			break;

		if (cur_tx->nge_ctl & NGE_CMDSTS_MORE) {
			sc->nge_cdata.nge_tx_cnt--;
			NGE_INC(idx, NGE_TX_LIST_CNT);
			continue;
		}

		if (!(cur_tx->nge_ctl & NGE_CMDSTS_PKT_OK)) {
			ifp->if_oerrors++;
			if (cur_tx->nge_txstat & NGE_TXSTAT_EXCESSCOLLS)
				ifp->if_collisions++;
			if (cur_tx->nge_txstat & NGE_TXSTAT_OUTOFWINCOLL)
				ifp->if_collisions++;
		}

		ifp->if_collisions +=
		    (cur_tx->nge_txstat & NGE_TXSTAT_COLLCNT) >> 16;

		ifp->if_opackets++;
		if (cur_tx->nge_mbuf != NULL) {
			m_freem(cur_tx->nge_mbuf);
			cur_tx->nge_mbuf = NULL;
		}

		sc->nge_cdata.nge_tx_cnt--;
		NGE_INC(idx, NGE_TX_LIST_CNT);
		ifp->if_timer = 0;
	}

	sc->nge_cdata.nge_tx_cons = idx;

	if (cur_tx != NULL)
		ifp->if_flags &= ~IFF_OACTIVE;

	return;
}

static void nge_tick(xsc)
	void			*xsc;
{
	struct nge_softc	*sc;
	struct mii_data		*mii;
	struct ifnet		*ifp;
	int			s;

	s = splimp();

	sc = xsc;
	ifp = &sc->arpcom.ac_if;

	mii = device_get_softc(sc->nge_miibus);
	mii_tick(mii);

	if (!sc->nge_link) {
		mii_pollstat(mii);
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->nge_link++;
			if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_TX)
				printf("nge%d: gigabit link up\n",
				    sc->nge_unit);
			if (ifp->if_snd.ifq_head != NULL)
				nge_start(ifp);
		} else
			sc->nge_stat_ch = timeout(nge_tick, sc, hz);
	}

	splx(s);

	return;
}

static void nge_intr(arg)
	void			*arg;
{
	struct nge_softc	*sc;
	struct ifnet		*ifp;
	u_int32_t		status;

	sc = arg;
	ifp = &sc->arpcom.ac_if;

	/* Suppress unwanted interrupts
*/ 1473 if (!(ifp->if_flags & IFF_UP)) { 1474 nge_stop(sc); 1475 return; 1476 } 1477 1478 /* Disable interrupts. */ 1479 CSR_WRITE_4(sc, NGE_IER, 0); 1480 1481 for (;;) { 1482 /* Reading the ISR register clears all interrupts. */ 1483 status = CSR_READ_4(sc, NGE_ISR); 1484 1485 if ((status & NGE_INTRS) == 0) 1486 break; 1487 1488 if ((status & NGE_ISR_TX_DESC_OK) || 1489 (status & NGE_ISR_TX_ERR) || 1490 (status & NGE_ISR_TX_OK) || 1491 (status & NGE_ISR_TX_IDLE)) 1492 nge_txeof(sc); 1493 1494 if ((status & NGE_ISR_RX_DESC_OK) || 1495 (status & NGE_ISR_RX_ERR) || 1496 (status & NGE_ISR_RX_OK)) 1497 nge_rxeof(sc); 1498 1499 if ((status & NGE_ISR_RX_OFLOW)) 1500 nge_rxeoc(sc); 1501 1502 if (status & NGE_ISR_SYSERR) { 1503 nge_reset(sc); 1504 ifp->if_flags &= ~IFF_RUNNING; 1505 nge_init(sc); 1506 } 1507 1508 if (status & NGE_IMR_PHY_INTR) { 1509 sc->nge_link = 0; 1510 nge_tick(sc); 1511 } 1512 } 1513 1514 /* Re-enable interrupts. */ 1515 CSR_WRITE_4(sc, NGE_IER, 1); 1516 1517 if (ifp->if_snd.ifq_head != NULL) 1518 nge_start(ifp); 1519 1520 return; 1521 } 1522 1523 /* 1524 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 1525 * pointers to the fragment pointers. 1526 */ 1527 static int nge_encap(sc, m_head, txidx) 1528 struct nge_softc *sc; 1529 struct mbuf *m_head; 1530 u_int32_t *txidx; 1531 { 1532 struct nge_desc *f = NULL; 1533 struct mbuf *m; 1534 int frag, cur, cnt = 0; 1535 struct ifvlan *ifv = NULL; 1536 1537 if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) && 1538 m_head->m_pkthdr.rcvif != NULL && 1539 m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN) 1540 ifv = m_head->m_pkthdr.rcvif->if_softc; 1541 1542 /* 1543 * Start packing the mbufs in this chain into 1544 * the fragment pointers. Stop when we run out 1545 * of fragments or hit the end of the mbuf chain. 1546 */ 1547 m = m_head; 1548 cur = frag = *txidx; 1549 1550 for (m = m_head; m != NULL; m = m->m_next) { 1551 if (m->m_len != 0) { 1552 if ((NGE_TX_LIST_CNT - 1553 (sc->nge_cdata.nge_tx_cnt + cnt)) < 2) 1554 return(ENOBUFS); 1555 f = &sc->nge_ldata->nge_tx_list[frag]; 1556 f->nge_ctl = NGE_CMDSTS_MORE | m->m_len; 1557 f->nge_ptr = vtophys(mtod(m, vm_offset_t)); 1558 if (cnt != 0) 1559 f->nge_ctl |= NGE_CMDSTS_OWN; 1560 cur = frag; 1561 NGE_INC(frag, NGE_TX_LIST_CNT); 1562 cnt++; 1563 } 1564 } 1565 1566 if (m != NULL) 1567 return(ENOBUFS); 1568 1569 sc->nge_ldata->nge_tx_list[*txidx].nge_extsts = 0; 1570 if (m_head->m_pkthdr.csum_flags) { 1571 if (m_head->m_pkthdr.csum_flags & CSUM_IP) 1572 sc->nge_ldata->nge_tx_list[*txidx].nge_extsts |= 1573 NGE_TXEXTSTS_IPCSUM; 1574 if (m_head->m_pkthdr.csum_flags & CSUM_TCP) 1575 sc->nge_ldata->nge_tx_list[*txidx].nge_extsts |= 1576 NGE_TXEXTSTS_TCPCSUM; 1577 if (m_head->m_pkthdr.csum_flags & CSUM_UDP) 1578 sc->nge_ldata->nge_tx_list[*txidx].nge_extsts |= 1579 NGE_TXEXTSTS_UDPCSUM; 1580 } 1581 1582 if (ifv != NULL) { 1583 sc->nge_ldata->nge_tx_list[cur].nge_extsts |= 1584 (NGE_TXEXTSTS_VLANPKT|ifv->ifv_tag); 1585 } 1586 1587 sc->nge_ldata->nge_tx_list[cur].nge_mbuf = m_head; 1588 sc->nge_ldata->nge_tx_list[cur].nge_ctl &= ~NGE_CMDSTS_MORE; 1589 sc->nge_ldata->nge_tx_list[*txidx].nge_ctl |= NGE_CMDSTS_OWN; 1590 sc->nge_cdata.nge_tx_cnt += cnt; 1591 *txidx = frag; 1592 1593 return(0); 1594 } 1595 1596 /* 1597 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 1598 * to the mbuf data regions directly in the transmit lists. 
We also save a 1599 * copy of the pointers since the transmit list fragment pointers are 1600 * physical addresses. 1601 */ 1602 1603 static void nge_start(ifp) 1604 struct ifnet *ifp; 1605 { 1606 struct nge_softc *sc; 1607 struct mbuf *m_head = NULL; 1608 u_int32_t idx; 1609 1610 sc = ifp->if_softc; 1611 1612 if (!sc->nge_link) 1613 return; 1614 1615 idx = sc->nge_cdata.nge_tx_prod; 1616 1617 if (ifp->if_flags & IFF_OACTIVE) 1618 return; 1619 1620 while(sc->nge_ldata->nge_tx_list[idx].nge_mbuf == NULL) { 1621 IF_DEQUEUE(&ifp->if_snd, m_head); 1622 if (m_head == NULL) 1623 break; 1624 1625 if (nge_encap(sc, m_head, &idx)) { 1626 IF_PREPEND(&ifp->if_snd, m_head); 1627 ifp->if_flags |= IFF_OACTIVE; 1628 break; 1629 } 1630 1631 /* 1632 * If there's a BPF listener, bounce a copy of this frame 1633 * to him. 1634 */ 1635 if (ifp->if_bpf) 1636 bpf_mtap(ifp, m_head); 1637 1638 } 1639 1640 /* Transmit */ 1641 sc->nge_cdata.nge_tx_prod = idx; 1642 NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_ENABLE); 1643 1644 /* 1645 * Set a timeout in case the chip goes out to lunch. 1646 */ 1647 ifp->if_timer = 5; 1648 1649 return; 1650 } 1651 1652 static void nge_init(xsc) 1653 void *xsc; 1654 { 1655 struct nge_softc *sc = xsc; 1656 struct ifnet *ifp = &sc->arpcom.ac_if; 1657 struct mii_data *mii; 1658 int s; 1659 1660 if (ifp->if_flags & IFF_RUNNING) 1661 return; 1662 1663 s = splimp(); 1664 1665 /* 1666 * Cancel pending I/O and free all RX/TX buffers. 1667 */ 1668 nge_stop(sc); 1669 1670 mii = device_get_softc(sc->nge_miibus); 1671 1672 /* Set MAC address */ 1673 CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR0); 1674 CSR_WRITE_4(sc, NGE_RXFILT_DATA, 1675 ((u_int16_t *)sc->arpcom.ac_enaddr)[0]); 1676 CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR1); 1677 CSR_WRITE_4(sc, NGE_RXFILT_DATA, 1678 ((u_int16_t *)sc->arpcom.ac_enaddr)[1]); 1679 CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR2); 1680 CSR_WRITE_4(sc, NGE_RXFILT_DATA, 1681 ((u_int16_t *)sc->arpcom.ac_enaddr)[2]); 1682 1683 /* Init circular RX list. */ 1684 if (nge_list_rx_init(sc) == ENOBUFS) { 1685 printf("nge%d: initialization failed: no " 1686 "memory for rx buffers\n", sc->nge_unit); 1687 nge_stop(sc); 1688 (void)splx(s); 1689 return; 1690 } 1691 1692 /* 1693 * Init tx descriptors. 1694 */ 1695 nge_list_tx_init(sc); 1696 1697 /* 1698 * For the NatSemi chip, we have to explicitly enable the 1699 * reception of ARP frames, as well as turn on the 'perfect 1700 * match' filter where we store the station address, otherwise 1701 * we won't receive unicasts meant for this host. 1702 */ 1703 NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ARP); 1704 NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_PERFECT); 1705 1706 /* If we want promiscuous mode, set the allframes bit. */ 1707 if (ifp->if_flags & IFF_PROMISC) { 1708 NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLPHYS); 1709 } else { 1710 NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLPHYS); 1711 } 1712 1713 /* 1714 * Set the capture broadcast bit to capture broadcast frames. 1715 */ 1716 if (ifp->if_flags & IFF_BROADCAST) { 1717 NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD); 1718 } else { 1719 NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD); 1720 } 1721 1722 /* 1723 * Load the multicast filter. 1724 */ 1725 nge_setmulti(sc); 1726 1727 /* Turn the receive filter on */ 1728 NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ENABLE); 1729 1730 /* 1731 * Load the address of the RX and TX lists. 
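	 * The chip is given the physical address (via vtophys()) of the
	 * first descriptor in each ring set up by nge_list_rx_init() and
	 * nge_list_tx_init() above.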
1732 */ 1733 CSR_WRITE_4(sc, NGE_RX_LISTPTR, 1734 vtophys(&sc->nge_ldata->nge_rx_list[0])); 1735 CSR_WRITE_4(sc, NGE_TX_LISTPTR, 1736 vtophys(&sc->nge_ldata->nge_tx_list[0])); 1737 1738 /* Set RX configuration */ 1739 CSR_WRITE_4(sc, NGE_RX_CFG, NGE_RXCFG); 1740 /* 1741 * Enable hardware checksum validation for all IPv4 1742 * packets, do not reject packets with bad checksums. 1743 */ 1744 CSR_WRITE_4(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_IPCSUM_ENB); 1745 1746 /* 1747 * Tell the chip to detect and strip VLAN tag info from 1748 * received frames. The tag will be provided in the extsts 1749 * field in the RX descriptors. 1750 */ 1751 NGE_SETBIT(sc, NGE_VLAN_IP_RXCTL, 1752 NGE_VIPRXCTL_TAG_DETECT_ENB|NGE_VIPRXCTL_TAG_STRIP_ENB); 1753 1754 /* Set TX configuration */ 1755 CSR_WRITE_4(sc, NGE_TX_CFG, NGE_TXCFG); 1756 1757 /* 1758 * Enable TX IPv4 checksumming on a per-packet basis. 1759 */ 1760 CSR_WRITE_4(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_CSUM_PER_PKT); 1761 1762 /* 1763 * Tell the chip to insert VLAN tags on a per-packet basis as 1764 * dictated by the code in the frame encapsulation routine. 1765 */ 1766 NGE_SETBIT(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_TAG_PER_PKT); 1767 1768 /* Set full/half duplex mode. */ 1769 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 1770 NGE_SETBIT(sc, NGE_TX_CFG, 1771 (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR)); 1772 NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX); 1773 } else { 1774 NGE_CLRBIT(sc, NGE_TX_CFG, 1775 (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR)); 1776 NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX); 1777 } 1778 1779 /* 1780 * Enable the delivery of PHY interrupts based on 1781 * link/speed/duplex status changes. Also enable the 1782 * extsts field in the DMA descriptors (needed for 1783 * TCP/IP checksum offload on transmit). 1784 */ 1785 NGE_SETBIT(sc, NGE_CFG, NGE_CFG_PHYINTR_SPD| 1786 NGE_CFG_PHYINTR_LNK|NGE_CFG_PHYINTR_DUP|NGE_CFG_EXTSTS_ENB); 1787 1788 /* 1789 * Configure interrupt holdoff (moderation). We can 1790 * have the chip delay interrupt delivery for a certain 1791 * period. Units are in 100us, and the max setting 1792 * is 25500us (0xFF x 100us). Default is a 100us holdoff. 1793 */ 1794 CSR_WRITE_4(sc, NGE_IHR, 0x01); 1795 1796 /* 1797 * Enable interrupts. 1798 */ 1799 CSR_WRITE_4(sc, NGE_IMR, NGE_INTRS); 1800 CSR_WRITE_4(sc, NGE_IER, 1); 1801 1802 /* Enable receiver and transmitter. */ 1803 NGE_CLRBIT(sc, NGE_CSR, NGE_CSR_TX_DISABLE|NGE_CSR_RX_DISABLE); 1804 NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE); 1805 1806 nge_ifmedia_upd(ifp); 1807 1808 ifp->if_flags |= IFF_RUNNING; 1809 ifp->if_flags &= ~IFF_OACTIVE; 1810 1811 (void)splx(s); 1812 1813 return; 1814 } 1815 1816 /* 1817 * Set media options. 1818 */ 1819 static int nge_ifmedia_upd(ifp) 1820 struct ifnet *ifp; 1821 { 1822 struct nge_softc *sc; 1823 struct mii_data *mii; 1824 1825 sc = ifp->if_softc; 1826 1827 mii = device_get_softc(sc->nge_miibus); 1828 sc->nge_link = 0; 1829 if (mii->mii_instance) { 1830 struct mii_softc *miisc; 1831 for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL; 1832 miisc = LIST_NEXT(miisc, mii_list)) 1833 mii_phy_reset(miisc); 1834 } 1835 mii_mediachg(mii); 1836 1837 return(0); 1838 } 1839 1840 /* 1841 * Report current media status. 
1842 */ 1843 static void nge_ifmedia_sts(ifp, ifmr) 1844 struct ifnet *ifp; 1845 struct ifmediareq *ifmr; 1846 { 1847 struct nge_softc *sc; 1848 struct mii_data *mii; 1849 1850 sc = ifp->if_softc; 1851 1852 mii = device_get_softc(sc->nge_miibus); 1853 mii_pollstat(mii); 1854 ifmr->ifm_active = mii->mii_media_active; 1855 ifmr->ifm_status = mii->mii_media_status; 1856 1857 return; 1858 } 1859 1860 static int nge_ioctl(ifp, command, data) 1861 struct ifnet *ifp; 1862 u_long command; 1863 caddr_t data; 1864 { 1865 struct nge_softc *sc = ifp->if_softc; 1866 struct ifreq *ifr = (struct ifreq *) data; 1867 struct mii_data *mii; 1868 int s, error = 0; 1869 1870 s = splimp(); 1871 1872 switch(command) { 1873 case SIOCSIFADDR: 1874 case SIOCGIFADDR: 1875 error = ether_ioctl(ifp, command, data); 1876 break; 1877 case SIOCSIFMTU: 1878 if (ifr->ifr_mtu > NGE_JUMBO_MTU) 1879 error = EINVAL; 1880 else { 1881 ifp->if_mtu = ifr->ifr_mtu; 1882 /* 1883 * Workaround: if the MTU is larger than 1884 * 8152 (TX FIFO size minus 64 minus 18), turn off 1885 * TX checksum offloading. 1886 */ 1887 if (ifr->ifr_mtu >= 8152) 1888 ifp->if_hwassist = 0; 1889 else 1890 ifp->if_hwassist = NGE_CSUM_FEATURES; 1891 } 1892 break; 1893 case SIOCSIFFLAGS: 1894 if (ifp->if_flags & IFF_UP) { 1895 if (ifp->if_flags & IFF_RUNNING && 1896 ifp->if_flags & IFF_PROMISC && 1897 !(sc->nge_if_flags & IFF_PROMISC)) { 1898 NGE_SETBIT(sc, NGE_RXFILT_CTL, 1899 NGE_RXFILTCTL_ALLPHYS| 1900 NGE_RXFILTCTL_ALLMULTI); 1901 } else if (ifp->if_flags & IFF_RUNNING && 1902 !(ifp->if_flags & IFF_PROMISC) && 1903 sc->nge_if_flags & IFF_PROMISC) { 1904 NGE_CLRBIT(sc, NGE_RXFILT_CTL, 1905 NGE_RXFILTCTL_ALLPHYS); 1906 if (!(ifp->if_flags & IFF_ALLMULTI)) 1907 NGE_CLRBIT(sc, NGE_RXFILT_CTL, 1908 NGE_RXFILTCTL_ALLMULTI); 1909 } else { 1910 ifp->if_flags &= ~IFF_RUNNING; 1911 nge_init(sc); 1912 } 1913 } else { 1914 if (ifp->if_flags & IFF_RUNNING) 1915 nge_stop(sc); 1916 } 1917 sc->nge_if_flags = ifp->if_flags; 1918 error = 0; 1919 break; 1920 case SIOCADDMULTI: 1921 case SIOCDELMULTI: 1922 nge_setmulti(sc); 1923 error = 0; 1924 break; 1925 case SIOCGIFMEDIA: 1926 case SIOCSIFMEDIA: 1927 mii = device_get_softc(sc->nge_miibus); 1928 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 1929 break; 1930 default: 1931 error = EINVAL; 1932 break; 1933 } 1934 1935 (void)splx(s); 1936 1937 return(error); 1938 } 1939 1940 static void nge_watchdog(ifp) 1941 struct ifnet *ifp; 1942 { 1943 struct nge_softc *sc; 1944 1945 sc = ifp->if_softc; 1946 1947 ifp->if_oerrors++; 1948 printf("nge%d: watchdog timeout\n", sc->nge_unit); 1949 1950 nge_stop(sc); 1951 nge_reset(sc); 1952 ifp->if_flags &= ~IFF_RUNNING; 1953 nge_init(sc); 1954 1955 if (ifp->if_snd.ifq_head != NULL) 1956 nge_start(ifp); 1957 1958 return; 1959 } 1960 1961 /* 1962 * Stop the adapter and free any mbufs allocated to the 1963 * RX and TX lists. 
1964 */ 1965 static void nge_stop(sc) 1966 struct nge_softc *sc; 1967 { 1968 register int i; 1969 struct ifnet *ifp; 1970 struct ifmedia_entry *ifm; 1971 struct mii_data *mii; 1972 int mtmp, itmp; 1973 1974 ifp = &sc->arpcom.ac_if; 1975 ifp->if_timer = 0; 1976 mii = device_get_softc(sc->nge_miibus); 1977 1978 untimeout(nge_tick, sc, sc->nge_stat_ch); 1979 CSR_WRITE_4(sc, NGE_IER, 0); 1980 CSR_WRITE_4(sc, NGE_IMR, 0); 1981 NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_DISABLE|NGE_CSR_RX_DISABLE); 1982 DELAY(1000); 1983 CSR_WRITE_4(sc, NGE_TX_LISTPTR, 0); 1984 CSR_WRITE_4(sc, NGE_RX_LISTPTR, 0); 1985 1986 /* 1987 * Isolate/power down the PHY, but leave the media selection 1988 * unchanged so that things will be put back to normal when 1989 * we bring the interface back up. 1990 */ 1991 itmp = ifp->if_flags; 1992 ifp->if_flags |= IFF_UP; 1993 ifm = mii->mii_media.ifm_cur; 1994 mtmp = ifm->ifm_media; 1995 ifm->ifm_media = IFM_ETHER|IFM_NONE; 1996 mii_mediachg(mii); 1997 ifm->ifm_media = mtmp; 1998 ifp->if_flags = itmp; 1999 2000 sc->nge_link = 0; 2001 2002 /* 2003 * Free data in the RX lists. 2004 */ 2005 for (i = 0; i < NGE_RX_LIST_CNT; i++) { 2006 if (sc->nge_ldata->nge_rx_list[i].nge_mbuf != NULL) { 2007 m_freem(sc->nge_ldata->nge_rx_list[i].nge_mbuf); 2008 sc->nge_ldata->nge_rx_list[i].nge_mbuf = NULL; 2009 } 2010 } 2011 bzero((char *)&sc->nge_ldata->nge_rx_list, 2012 sizeof(sc->nge_ldata->nge_rx_list)); 2013 2014 /* 2015 * Free the TX list buffers. 2016 */ 2017 for (i = 0; i < NGE_TX_LIST_CNT; i++) { 2018 if (sc->nge_ldata->nge_tx_list[i].nge_mbuf != NULL) { 2019 m_freem(sc->nge_ldata->nge_tx_list[i].nge_mbuf); 2020 sc->nge_ldata->nge_tx_list[i].nge_mbuf = NULL; 2021 } 2022 } 2023 2024 bzero((char *)&sc->nge_ldata->nge_tx_list, 2025 sizeof(sc->nge_ldata->nge_tx_list)); 2026 2027 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 2028 2029 return; 2030 } 2031 2032 /* 2033 * Stop all chip I/O so that the kernel's probe routines don't 2034 * get confused by errant DMAs when rebooting. 2035 */ 2036 static void nge_shutdown(dev) 2037 device_t dev; 2038 { 2039 struct nge_softc *sc; 2040 2041 sc = device_get_softc(dev); 2042 2043 nge_reset(sc); 2044 nge_stop(sc); 2045 2046 return; 2047 } 2048