1 /* 2 * Copyright (c) 1997, 1998 3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Bill Paul. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 
 *
 * $Id: if_vr.c,v 1.4 1998/12/14 06:32:56 dillon Exp $
 */

/*
 * VIA Rhine fast ethernet PCI NIC driver
 *
 * Supports various network adapters based on the VIA Rhine
 * and Rhine II PCI controllers, including the D-Link DFE530TX.
 * Datasheets are available at http://www.via.com.tw.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The VIA Rhine controllers are similar in some respects to the
 * DEC tulip chips, except less complicated. The controller
 * uses an MII bus and an external physical layer interface. The
 * receiver has a one entry perfect filter and a 64-bit hash table
 * multicast filter. Transmit and receive descriptors are similar
 * to the tulip.
 *
 * The Rhine has a serious flaw in its transmit DMA mechanism:
 * transmit buffers must be longword aligned. Unfortunately,
 * FreeBSD doesn't guarantee that mbufs will be filled in starting
 * at longword boundaries, so we have to do a buffer copy before
 * transmission.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <vm/vm.h>              /* for vtophys */
#include <vm/pmap.h>            /* for vtophys */
#include <machine/clock.h>      /* for DELAY */
#include <machine/bus_pio.h>
#include <machine/bus_memio.h>
#include <machine/bus.h>

#include <pci/pcireg.h>
#include <pci/pcivar.h>

/* Map chip registers via I/O space rather than memory space. */
#define VR_USEIOSPACE

/* #define VR_BACKGROUND_AUTONEG */

#include <pci/if_vrreg.h>

#ifndef lint
static const char rcsid[] =
        "$Id: if_vr.c,v 1.4 1998/12/14 06:32:56 dillon Exp $";
#endif

/*
 * Various supported device vendors/types and their names.
 */
static struct vr_type vr_devs[] = {
        { VIA_VENDORID, VIA_DEVICEID_RHINE,
                "VIA VT3043 Rhine I 10/100BaseTX" },
        { VIA_VENDORID, VIA_DEVICEID_RHINE_II,
                "VIA VT86C100A Rhine II 10/100BaseTX" },
        { 0, 0, NULL }
};

/*
 * Various supported PHY vendors/types and their names. Note that
 * this driver will work with pretty much any MII-compliant PHY,
 * so failure to positively identify the chip is not a fatal error.
 */
static struct vr_type vr_phys[] = {
        { TI_PHY_VENDORID, TI_PHY_10BT, "<TI ThunderLAN 10BT (internal)>" },
        { TI_PHY_VENDORID, TI_PHY_100VGPMI, "<TI TNETE211 100VG Any-LAN>" },
        { NS_PHY_VENDORID, NS_PHY_83840A, "<National Semiconductor DP83840A>"},
        { LEVEL1_PHY_VENDORID, LEVEL1_PHY_LXT970, "<Level 1 LXT970>" },
        { INTEL_PHY_VENDORID, INTEL_PHY_82555, "<Intel 82555>" },
        { SEEQ_PHY_VENDORID, SEEQ_PHY_80220, "<SEEQ 80220>" },
        { 0, 0, "<MII-compliant physical interface>" }
};

static unsigned long vr_count = 0;
static const char *vr_probe     __P((pcici_t, pcidi_t));
static void vr_attach           __P((pcici_t, int));

static int vr_newbuf            __P((struct vr_softc *,
                                        struct vr_chain_onefrag *));
static int vr_encap             __P((struct vr_softc *, struct vr_chain *,
                                                struct mbuf * ));

static void vr_rxeof            __P((struct vr_softc *));
static void vr_rxeoc            __P((struct vr_softc *));
static void vr_txeof            __P((struct vr_softc *));
static void vr_txeoc            __P((struct vr_softc *));
static void vr_intr             __P((void *));
static void vr_start            __P((struct ifnet *));
static int vr_ioctl             __P((struct ifnet *, u_long, caddr_t));
static void vr_init             __P((void *));
static void vr_stop             __P((struct vr_softc *));
static void vr_watchdog         __P((struct ifnet *));
static void vr_shutdown         __P((int, void *));
static int vr_ifmedia_upd       __P((struct ifnet *));
static void vr_ifmedia_sts      __P((struct ifnet *, struct ifmediareq *));

static void vr_mii_sync         __P((struct vr_softc *));
static void vr_mii_send         __P((struct vr_softc *, u_int32_t, int));
static int vr_mii_readreg       __P((struct vr_softc *, struct vr_mii_frame *));
static int vr_mii_writereg      __P((struct vr_softc *, struct vr_mii_frame *));
static u_int16_t vr_phy_readreg __P((struct vr_softc *, int));
static void vr_phy_writereg     __P((struct vr_softc *, u_int16_t, u_int16_t));

static void vr_autoneg_xmit     __P((struct vr_softc *));
static void vr_autoneg_mii      __P((struct vr_softc *, int, int));
static void vr_setmode_mii      __P((struct vr_softc *, int));
static void vr_getmode_mii      __P((struct vr_softc *));
static void vr_setcfg           __P((struct vr_softc *, u_int16_t));
static u_int8_t vr_calchash     __P((u_int8_t *));
static void vr_setmulti         __P((struct vr_softc *));
static void vr_reset            __P((struct vr_softc *));
static int vr_list_rx_init      __P((struct vr_softc *));
static int vr_list_tx_init      __P((struct vr_softc *));

/* Read-modify-write helpers for 8-, 16- and 32-bit CSR accesses. */
#define VR_SETBIT(sc, reg, x)                           \
        CSR_WRITE_1(sc, reg,                            \
                CSR_READ_1(sc, reg) | x)

#define VR_CLRBIT(sc, reg, x)                           \
        CSR_WRITE_1(sc, reg,                            \
                CSR_READ_1(sc, reg) & ~x)

#define VR_SETBIT16(sc, reg, x)                         \
        CSR_WRITE_2(sc, reg,                            \
                CSR_READ_2(sc, reg) | x)

#define VR_CLRBIT16(sc, reg, x)                         \
        CSR_WRITE_2(sc, reg,                            \
                CSR_READ_2(sc, reg) & ~x)

#define VR_SETBIT32(sc, reg, x)                         \
        CSR_WRITE_4(sc, reg,                            \
                CSR_READ_4(sc, reg) | x)

#define VR_CLRBIT32(sc, reg, x)                         \
        CSR_WRITE_4(sc, reg,                            \
                CSR_READ_4(sc, reg) & ~x)

/*
 * MII serial bit-bang helpers. NOTE: these expect a variable named
 * 'sc' to be in scope at the point of use.
 */
#define SIO_SET(x)                                      \
        CSR_WRITE_1(sc, VR_MIICMD,                      \
                CSR_READ_1(sc, VR_MIICMD) | x)

#define SIO_CLR(x)                                      \
        CSR_WRITE_1(sc, VR_MIICMD,                      \
                CSR_READ_1(sc, VR_MIICMD) & ~x)

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
static void vr_mii_sync(sc)
        struct vr_softc         *sc;
{
        register int            i;

        SIO_SET(VR_MIICMD_DIR|VR_MIICMD_DATAIN);

        for (i = 0; i < 32; i++) {
                SIO_SET(VR_MIICMD_CLK);
                DELAY(1);
                SIO_CLR(VR_MIICMD_CLK);
                DELAY(1);
        }

        return;
}

/*
 * Clock a series of bits through the MII.
 */
static void vr_mii_send(sc, bits, cnt)
        struct vr_softc         *sc;
        u_int32_t               bits;
        int                     cnt;
{
        int                     i;

        SIO_CLR(VR_MIICMD_CLK);

        /* Shift bits out MSB first. */
        for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
                if (bits & i) {
                        SIO_SET(VR_MIICMD_DATAIN);
                } else {
                        SIO_CLR(VR_MIICMD_DATAIN);
                }
                DELAY(1);
                SIO_CLR(VR_MIICMD_CLK);
                DELAY(1);
                SIO_SET(VR_MIICMD_CLK);
        }
}

/*
 * Read a PHY register through the MII.
 * Returns 0 on success, 1 if the PHY failed to ack the transaction.
 */
static int vr_mii_readreg(sc, frame)
        struct vr_softc         *sc;
        struct vr_mii_frame     *frame;

{
        int                     i, ack, s;

        s = splimp();

        /*
         * Set up frame for RX.
         */
        frame->mii_stdelim = VR_MII_STARTDELIM;
        frame->mii_opcode = VR_MII_READOP;
        frame->mii_turnaround = 0;
        frame->mii_data = 0;

        CSR_WRITE_1(sc, VR_MIICMD, 0);
        VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);

        /*
         * Turn on data xmit.
         */
        SIO_SET(VR_MIICMD_DIR);

        vr_mii_sync(sc);

        /*
         * Send command/address info.
         */
        vr_mii_send(sc, frame->mii_stdelim, 2);
        vr_mii_send(sc, frame->mii_opcode, 2);
        vr_mii_send(sc, frame->mii_phyaddr, 5);
        vr_mii_send(sc, frame->mii_regaddr, 5);

        /* Idle bit */
        SIO_CLR((VR_MIICMD_CLK|VR_MIICMD_DATAIN));
        DELAY(1);
        SIO_SET(VR_MIICMD_CLK);
        DELAY(1);

        /* Turn off xmit. */
        SIO_CLR(VR_MIICMD_DIR);

        /* Check for ack */
        SIO_CLR(VR_MIICMD_CLK);
        DELAY(1);
        SIO_SET(VR_MIICMD_CLK);
        DELAY(1);
        ack = CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT;

        /*
         * Now try reading data bits. If the ack failed, we still
         * need to clock through 16 cycles to keep the PHY(s) in sync.
         */
        if (ack) {
                for(i = 0; i < 16; i++) {
                        SIO_CLR(VR_MIICMD_CLK);
                        DELAY(1);
                        SIO_SET(VR_MIICMD_CLK);
                        DELAY(1);
                }
                goto fail;
        }

        /* Clock in the 16 data bits, MSB first. */
        for (i = 0x8000; i; i >>= 1) {
                SIO_CLR(VR_MIICMD_CLK);
                DELAY(1);
                if (!ack) {
                        if (CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT)
                                frame->mii_data |= i;
                        DELAY(1);
                }
                SIO_SET(VR_MIICMD_CLK);
                DELAY(1);
        }

fail:

        SIO_CLR(VR_MIICMD_CLK);
        DELAY(1);
        SIO_SET(VR_MIICMD_CLK);
        DELAY(1);

        splx(s);

        if (ack)
                return(1);
        return(0);
}

/*
 * Write to a PHY register through the MII.
 * Always returns 0.
 */
static int vr_mii_writereg(sc, frame)
        struct vr_softc         *sc;
        struct vr_mii_frame     *frame;

{
        int                     s;

        s = splimp();

        CSR_WRITE_1(sc, VR_MIICMD, 0);
        VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);

        /*
         * Set up frame for TX.
         */

        frame->mii_stdelim = VR_MII_STARTDELIM;
        frame->mii_opcode = VR_MII_WRITEOP;
        frame->mii_turnaround = VR_MII_TURNAROUND;

        /*
         * Turn on data output.
         */
        SIO_SET(VR_MIICMD_DIR);

        vr_mii_sync(sc);

        vr_mii_send(sc, frame->mii_stdelim, 2);
        vr_mii_send(sc, frame->mii_opcode, 2);
        vr_mii_send(sc, frame->mii_phyaddr, 5);
        vr_mii_send(sc, frame->mii_regaddr, 5);
        vr_mii_send(sc, frame->mii_turnaround, 2);
        vr_mii_send(sc, frame->mii_data, 16);

        /* Idle bit. */
        SIO_SET(VR_MIICMD_CLK);
        DELAY(1);
        SIO_CLR(VR_MIICMD_CLK);
        DELAY(1);

        /*
         * Turn off xmit.
         */
        SIO_CLR(VR_MIICMD_DIR);

        splx(s);

        return(0);
}

/*
 * Convenience wrapper: read register 'reg' from the PHY at the
 * address cached in sc->vr_phy_addr.
 */
static u_int16_t vr_phy_readreg(sc, reg)
        struct vr_softc         *sc;
        int                     reg;
{
        struct vr_mii_frame     frame;

        bzero((char *)&frame, sizeof(frame));

        frame.mii_phyaddr = sc->vr_phy_addr;
        frame.mii_regaddr = reg;
        vr_mii_readreg(sc, &frame);

        return(frame.mii_data);
}

/*
 * Convenience wrapper: write 'data' to register 'reg' of the PHY at
 * the address cached in sc->vr_phy_addr.
 */
static void vr_phy_writereg(sc, reg, data)
        struct vr_softc         *sc;
        u_int16_t               reg;
        u_int16_t               data;
{
        struct vr_mii_frame     frame;

        bzero((char *)&frame, sizeof(frame));

        frame.mii_phyaddr = sc->vr_phy_addr;
        frame.mii_regaddr = reg;
        frame.mii_data = data;

        vr_mii_writereg(sc, &frame);

        return;
}

/*
 * Calculate CRC of a multicast group address, return the lower 6 bits.
 */
static u_int8_t vr_calchash(addr)
        u_int8_t                *addr;
{
        u_int32_t               crc, carry;
        int                     i, j;
        u_int8_t                c;

        /* Compute CRC for the address value. */
        crc = 0xFFFFFFFF;       /* initial value */

        /* Feed in the 6 address bytes, LSB of each byte first. */
        for (i = 0; i < 6; i++) {
                c = *(addr + i);
                for (j = 0; j < 8; j++) {
                        carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
                        crc <<= 1;
                        c >>= 1;
                        /*
                         * 0x04c11db6 | carry == 0x04c11db7, the standard
                         * ethernet CRC-32 polynomial.
                         */
                        if (carry)
                                crc = (crc ^ 0x04c11db6) | carry;
                }
        }

        /* return the filter bit position */
        return((crc >> 26) & 0x0000003F);
}

/*
 * Program the 64-bit multicast hash filter.
 */
static void vr_setmulti(sc)
        struct vr_softc         *sc;
{
        struct ifnet            *ifp;
        int                     h = 0;
        u_int32_t               hashes[2] = { 0, 0 };
        struct ifmultiaddr      *ifma;
        u_int8_t                rxfilt;
        int                     mcnt = 0;

        ifp = &sc->arpcom.ac_if;

        rxfilt = CSR_READ_1(sc, VR_RXCFG);

        /* Promiscuous or allmulti: accept all multicast frames. */
        if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
                rxfilt |= VR_RXCFG_RX_MULTI;
                CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
                CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
                CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
                return;
        }

        /* first, zot all the existing hash bits */
        CSR_WRITE_4(sc, VR_MAR0, 0);
        CSR_WRITE_4(sc, VR_MAR1, 0);

        /* now program new ones */
        for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
                                ifma = ifma->ifma_link.le_next) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;
                h = vr_calchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
                if (h < 32)
                        hashes[0] |= (1 << h);
                else
                        hashes[1] |= (1 << (h - 32));
                mcnt++;
        }

        /* Only enable multicast RX if at least one group is programmed. */
        if (mcnt)
                rxfilt |= VR_RXCFG_RX_MULTI;
        else
                rxfilt &= ~VR_RXCFG_RX_MULTI;

        CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
        CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
        CSR_WRITE_1(sc, VR_RXCFG, rxfilt);

        return;
}

/*
 * Initiate an autonegotiation session.
 */
static void vr_autoneg_xmit(sc)
        struct vr_softc         *sc;
{
        u_int16_t               phy_sts;

        /* Reset the PHY and spin until the reset bit self-clears. */
        vr_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET);
        DELAY(500);
        while(vr_phy_readreg(sc, PHY_BMCR)
                        & PHY_BMCR_RESET);

        /* Enable and restart autonegotiation. */
        phy_sts = vr_phy_readreg(sc, PHY_BMCR);
        phy_sts |= PHY_BMCR_AUTONEGENBL|PHY_BMCR_AUTONEGRSTR;
        vr_phy_writereg(sc, PHY_BMCR, phy_sts);

        return;
}

/*
 * Invoke autonegotiation on a PHY.
532 */ 533 static void vr_autoneg_mii(sc, flag, verbose) 534 struct vr_softc *sc; 535 int flag; 536 int verbose; 537 { 538 u_int16_t phy_sts = 0, media, advert, ability; 539 struct ifnet *ifp; 540 struct ifmedia *ifm; 541 542 ifm = &sc->ifmedia; 543 ifp = &sc->arpcom.ac_if; 544 545 ifm->ifm_media = IFM_ETHER | IFM_AUTO; 546 547 /* 548 * The 100baseT4 PHY on the 3c905-T4 has the 'autoneg supported' 549 * bit cleared in the status register, but has the 'autoneg enabled' 550 * bit set in the control register. This is a contradiction, and 551 * I'm not sure how to handle it. If you want to force an attempt 552 * to autoneg for 100baseT4 PHYs, #define FORCE_AUTONEG_TFOUR 553 * and see what happens. 554 */ 555 #ifndef FORCE_AUTONEG_TFOUR 556 /* 557 * First, see if autoneg is supported. If not, there's 558 * no point in continuing. 559 */ 560 phy_sts = vr_phy_readreg(sc, PHY_BMSR); 561 if (!(phy_sts & PHY_BMSR_CANAUTONEG)) { 562 if (verbose) 563 printf("vr%d: autonegotiation not supported\n", 564 sc->vr_unit); 565 ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX; 566 return; 567 } 568 #endif 569 570 switch (flag) { 571 case VR_FLAG_FORCEDELAY: 572 /* 573 * XXX Never use this option anywhere but in the probe 574 * routine: making the kernel stop dead in its tracks 575 * for three whole seconds after we've gone multi-user 576 * is really bad manners. 577 */ 578 vr_autoneg_xmit(sc); 579 DELAY(5000000); 580 break; 581 case VR_FLAG_SCHEDDELAY: 582 /* 583 * Wait for the transmitter to go idle before starting 584 * an autoneg session, otherwise vr_start() may clobber 585 * our timeout, and we don't want to allow transmission 586 * during an autoneg session since that can screw it up. 
587 */ 588 if (sc->vr_cdata.vr_tx_head != NULL) { 589 sc->vr_want_auto = 1; 590 return; 591 } 592 vr_autoneg_xmit(sc); 593 ifp->if_timer = 5; 594 sc->vr_autoneg = 1; 595 sc->vr_want_auto = 0; 596 return; 597 break; 598 case VR_FLAG_DELAYTIMEO: 599 ifp->if_timer = 0; 600 sc->vr_autoneg = 0; 601 break; 602 default: 603 printf("vr%d: invalid autoneg flag: %d\n", sc->vr_unit, flag); 604 return; 605 } 606 607 if (vr_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_AUTONEGCOMP) { 608 if (verbose) 609 printf("vr%d: autoneg complete, ", sc->vr_unit); 610 phy_sts = vr_phy_readreg(sc, PHY_BMSR); 611 } else { 612 if (verbose) 613 printf("vr%d: autoneg not complete, ", sc->vr_unit); 614 } 615 616 media = vr_phy_readreg(sc, PHY_BMCR); 617 618 /* Link is good. Report modes and set duplex mode. */ 619 if (vr_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT) { 620 if (verbose) 621 printf("link status good "); 622 advert = vr_phy_readreg(sc, PHY_ANAR); 623 ability = vr_phy_readreg(sc, PHY_LPAR); 624 625 if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4) { 626 ifm->ifm_media = IFM_ETHER|IFM_100_T4; 627 media |= PHY_BMCR_SPEEDSEL; 628 media &= ~PHY_BMCR_DUPLEX; 629 printf("(100baseT4)\n"); 630 } else if (advert & PHY_ANAR_100BTXFULL && 631 ability & PHY_ANAR_100BTXFULL) { 632 ifm->ifm_media = IFM_ETHER|IFM_100_TX|IFM_FDX; 633 media |= PHY_BMCR_SPEEDSEL; 634 media |= PHY_BMCR_DUPLEX; 635 printf("(full-duplex, 100Mbps)\n"); 636 } else if (advert & PHY_ANAR_100BTXHALF && 637 ability & PHY_ANAR_100BTXHALF) { 638 ifm->ifm_media = IFM_ETHER|IFM_100_TX|IFM_HDX; 639 media |= PHY_BMCR_SPEEDSEL; 640 media &= ~PHY_BMCR_DUPLEX; 641 printf("(half-duplex, 100Mbps)\n"); 642 } else if (advert & PHY_ANAR_10BTFULL && 643 ability & PHY_ANAR_10BTFULL) { 644 ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_FDX; 645 media &= ~PHY_BMCR_SPEEDSEL; 646 media |= PHY_BMCR_DUPLEX; 647 printf("(full-duplex, 10Mbps)\n"); 648 } else { 649 ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX; 650 media &= ~PHY_BMCR_SPEEDSEL; 651 media &= 
~PHY_BMCR_DUPLEX; 652 printf("(half-duplex, 10Mbps)\n"); 653 } 654 655 media &= ~PHY_BMCR_AUTONEGENBL; 656 657 /* Set ASIC's duplex mode to match the PHY. */ 658 vr_setcfg(sc, media); 659 vr_phy_writereg(sc, PHY_BMCR, media); 660 } else { 661 if (verbose) 662 printf("no carrier\n"); 663 } 664 665 vr_init(sc); 666 667 if (sc->vr_tx_pend) { 668 sc->vr_autoneg = 0; 669 sc->vr_tx_pend = 0; 670 vr_start(ifp); 671 } 672 673 return; 674 } 675 676 static void vr_getmode_mii(sc) 677 struct vr_softc *sc; 678 { 679 u_int16_t bmsr; 680 struct ifnet *ifp; 681 682 ifp = &sc->arpcom.ac_if; 683 684 bmsr = vr_phy_readreg(sc, PHY_BMSR); 685 if (bootverbose) 686 printf("vr%d: PHY status word: %x\n", sc->vr_unit, bmsr); 687 688 /* fallback */ 689 sc->ifmedia.ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX; 690 691 if (bmsr & PHY_BMSR_10BTHALF) { 692 if (bootverbose) 693 printf("vr%d: 10Mbps half-duplex mode supported\n", 694 sc->vr_unit); 695 ifmedia_add(&sc->ifmedia, 696 IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL); 697 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL); 698 } 699 700 if (bmsr & PHY_BMSR_10BTFULL) { 701 if (bootverbose) 702 printf("vr%d: 10Mbps full-duplex mode supported\n", 703 sc->vr_unit); 704 ifmedia_add(&sc->ifmedia, 705 IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); 706 sc->ifmedia.ifm_media = IFM_ETHER|IFM_10_T|IFM_FDX; 707 } 708 709 if (bmsr & PHY_BMSR_100BTXHALF) { 710 if (bootverbose) 711 printf("vr%d: 100Mbps half-duplex mode supported\n", 712 sc->vr_unit); 713 ifp->if_baudrate = 100000000; 714 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL); 715 ifmedia_add(&sc->ifmedia, 716 IFM_ETHER|IFM_100_TX|IFM_HDX, 0, NULL); 717 sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_TX|IFM_HDX; 718 } 719 720 if (bmsr & PHY_BMSR_100BTXFULL) { 721 if (bootverbose) 722 printf("vr%d: 100Mbps full-duplex mode supported\n", 723 sc->vr_unit); 724 ifp->if_baudrate = 100000000; 725 ifmedia_add(&sc->ifmedia, 726 IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL); 727 sc->ifmedia.ifm_media = 
IFM_ETHER|IFM_100_TX|IFM_FDX; 728 } 729 730 /* Some also support 100BaseT4. */ 731 if (bmsr & PHY_BMSR_100BT4) { 732 if (bootverbose) 733 printf("vr%d: 100baseT4 mode supported\n", sc->vr_unit); 734 ifp->if_baudrate = 100000000; 735 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_T4, 0, NULL); 736 sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_T4; 737 #ifdef FORCE_AUTONEG_TFOUR 738 if (bootverbose) 739 printf("vr%d: forcing on autoneg support for BT4\n", 740 sc->vr_unit); 741 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0 NULL): 742 sc->ifmedia.ifm_media = IFM_ETHER|IFM_AUTO; 743 #endif 744 } 745 746 if (bmsr & PHY_BMSR_CANAUTONEG) { 747 if (bootverbose) 748 printf("vr%d: autoneg supported\n", sc->vr_unit); 749 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); 750 sc->ifmedia.ifm_media = IFM_ETHER|IFM_AUTO; 751 } 752 753 return; 754 } 755 756 /* 757 * Set speed and duplex mode. 758 */ 759 static void vr_setmode_mii(sc, media) 760 struct vr_softc *sc; 761 int media; 762 { 763 u_int16_t bmcr; 764 struct ifnet *ifp; 765 766 ifp = &sc->arpcom.ac_if; 767 768 /* 769 * If an autoneg session is in progress, stop it. 
770 */ 771 if (sc->vr_autoneg) { 772 printf("vr%d: canceling autoneg session\n", sc->vr_unit); 773 ifp->if_timer = sc->vr_autoneg = sc->vr_want_auto = 0; 774 bmcr = vr_phy_readreg(sc, PHY_BMCR); 775 bmcr &= ~PHY_BMCR_AUTONEGENBL; 776 vr_phy_writereg(sc, PHY_BMCR, bmcr); 777 } 778 779 printf("vr%d: selecting MII, ", sc->vr_unit); 780 781 bmcr = vr_phy_readreg(sc, PHY_BMCR); 782 783 bmcr &= ~(PHY_BMCR_AUTONEGENBL|PHY_BMCR_SPEEDSEL| 784 PHY_BMCR_DUPLEX|PHY_BMCR_LOOPBK); 785 786 if (IFM_SUBTYPE(media) == IFM_100_T4) { 787 printf("100Mbps/T4, half-duplex\n"); 788 bmcr |= PHY_BMCR_SPEEDSEL; 789 bmcr &= ~PHY_BMCR_DUPLEX; 790 } 791 792 if (IFM_SUBTYPE(media) == IFM_100_TX) { 793 printf("100Mbps, "); 794 bmcr |= PHY_BMCR_SPEEDSEL; 795 } 796 797 if (IFM_SUBTYPE(media) == IFM_10_T) { 798 printf("10Mbps, "); 799 bmcr &= ~PHY_BMCR_SPEEDSEL; 800 } 801 802 if ((media & IFM_GMASK) == IFM_FDX) { 803 printf("full duplex\n"); 804 bmcr |= PHY_BMCR_DUPLEX; 805 } else { 806 printf("half duplex\n"); 807 bmcr &= ~PHY_BMCR_DUPLEX; 808 } 809 810 vr_setcfg(sc, bmcr); 811 vr_phy_writereg(sc, PHY_BMCR, bmcr); 812 813 return; 814 } 815 816 /* 817 * In order to fiddle with the 818 * 'full-duplex' and '100Mbps' bits in the netconfig register, we 819 * first have to put the transmit and/or receive logic in the idle state. 
 */
static void vr_setcfg(sc, bmcr)
        struct vr_softc         *sc;
        u_int16_t               bmcr;
{
        int                     restart = 0;

        /* Idle the TX/RX engines before touching the duplex bit. */
        if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) {
                restart = 1;
                VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
        }

        /* Mirror the PHY's duplex setting into the chip's command reg. */
        if (bmcr & PHY_BMCR_DUPLEX)
                VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
        else
                VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);

        if (restart)
                VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);

        return;
}

/*
 * Issue a software reset and poll until the chip reports completion.
 */
static void vr_reset(sc)
        struct vr_softc         *sc;
{
        register int            i;

        VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);

        for (i = 0; i < VR_TIMEOUT; i++) {
                DELAY(10);
                if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
                        break;
        }
        if (i == VR_TIMEOUT)
                printf("vr%d: reset never completed!\n", sc->vr_unit);

        /* Wait a little while for the chip to get its brains in order. */
        DELAY(1000);

        return;
}

/*
 * Probe for a VIA Rhine chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static const char *
vr_probe(config_id, device_id)
        pcici_t                 config_id;
        pcidi_t                 device_id;
{
        struct vr_type          *t;

        t = vr_devs;

        /* device_id packs vendor in the low word, device in the high. */
        while(t->vr_name != NULL) {
                if ((device_id & 0xFFFF) == t->vr_vid &&
                    ((device_id >> 16) & 0xFFFF) == t->vr_did) {
                        return(t->vr_name);
                }
                t++;
        }

        return(NULL);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
891 */ 892 static void 893 vr_attach(config_id, unit) 894 pcici_t config_id; 895 int unit; 896 { 897 int s, i; 898 #ifndef VR_USEIOSPACE 899 vm_offset_t pbase, vbase; 900 #endif 901 u_char eaddr[ETHER_ADDR_LEN]; 902 u_int32_t command; 903 struct vr_softc *sc; 904 struct ifnet *ifp; 905 int media = IFM_ETHER|IFM_100_TX|IFM_FDX; 906 unsigned int round; 907 caddr_t roundptr; 908 struct vr_type *p; 909 u_int16_t phy_vid, phy_did, phy_sts; 910 911 s = splimp(); 912 913 sc = malloc(sizeof(struct vr_softc), M_DEVBUF, M_NOWAIT); 914 if (sc == NULL) { 915 printf("vr%d: no memory for softc struct!\n", unit); 916 return; 917 } 918 bzero(sc, sizeof(struct vr_softc)); 919 920 /* 921 * Handle power management nonsense. 922 */ 923 924 command = pci_conf_read(config_id, VR_PCI_CAPID) & 0x000000FF; 925 if (command == 0x01) { 926 927 command = pci_conf_read(config_id, VR_PCI_PWRMGMTCTRL); 928 if (command & VR_PSTATE_MASK) { 929 u_int32_t iobase, membase, irq; 930 931 /* Save important PCI config data. */ 932 iobase = pci_conf_read(config_id, VR_PCI_LOIO); 933 membase = pci_conf_read(config_id, VR_PCI_LOMEM); 934 irq = pci_conf_read(config_id, VR_PCI_INTLINE); 935 936 /* Reset the power state. */ 937 printf("vr%d: chip is in D%d power mode " 938 "-- setting to D0\n", unit, command & VR_PSTATE_MASK); 939 command &= 0xFFFFFFFC; 940 pci_conf_write(config_id, VR_PCI_PWRMGMTCTRL, command); 941 942 /* Restore PCI config data. */ 943 pci_conf_write(config_id, VR_PCI_LOIO, iobase); 944 pci_conf_write(config_id, VR_PCI_LOMEM, membase); 945 pci_conf_write(config_id, VR_PCI_INTLINE, irq); 946 } 947 } 948 949 /* 950 * Map control/status registers. 
951 */ 952 command = pci_conf_read(config_id, PCI_COMMAND_STATUS_REG); 953 command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN); 954 pci_conf_write(config_id, PCI_COMMAND_STATUS_REG, command); 955 command = pci_conf_read(config_id, PCI_COMMAND_STATUS_REG); 956 957 #ifdef VR_USEIOSPACE 958 if (!(command & PCIM_CMD_PORTEN)) { 959 printf("vr%d: failed to enable I/O ports!\n", unit); 960 free(sc, M_DEVBUF); 961 goto fail; 962 } 963 964 if (!pci_map_port(config_id, VR_PCI_LOIO, 965 (u_int16_t *)(&sc->vr_bhandle))) { 966 printf ("vr%d: couldn't map ports\n", unit); 967 goto fail; 968 } 969 sc->vr_btag = I386_BUS_SPACE_IO; 970 #else 971 if (!(command & PCIM_CMD_MEMEN)) { 972 printf("vr%d: failed to enable memory mapping!\n", unit); 973 goto fail; 974 } 975 976 if (!pci_map_mem(config_id, VR_PCI_LOMEM, &vbase, &pbase)) { 977 printf ("vr%d: couldn't map memory\n", unit); 978 goto fail; 979 } 980 981 sc->vr_bhandle = vbase; 982 sc->vr_btag = I386_BUS_SPACE_MEM; 983 #endif 984 985 /* Allocate interrupt */ 986 if (!pci_map_int(config_id, vr_intr, sc, &net_imask)) { 987 printf("vr%d: couldn't map interrupt\n", unit); 988 goto fail; 989 } 990 991 /* Reset the adapter. */ 992 vr_reset(sc); 993 994 /* 995 * Get station address. The way the Rhine chips work, 996 * you're not allowed to directly access the EEPROM once 997 * they've been programmed a special way. Consequently, 998 * we need to read the node address from the PAR0 and PAR1 999 * registers. 1000 */ 1001 VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD); 1002 DELAY(200); 1003 for (i = 0; i < ETHER_ADDR_LEN; i++) 1004 eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i); 1005 1006 /* 1007 * A Rhine chip was detected. Inform the world. 
1008 */ 1009 printf("vr%d: Ethernet address: %6D\n", unit, eaddr, ":"); 1010 1011 sc->vr_unit = unit; 1012 bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN); 1013 1014 sc->vr_ldata_ptr = malloc(sizeof(struct vr_list_data) + 8, 1015 M_DEVBUF, M_NOWAIT); 1016 if (sc->vr_ldata_ptr == NULL) { 1017 free(sc, M_DEVBUF); 1018 printf("vr%d: no memory for list buffers!\n", unit); 1019 return; 1020 } 1021 1022 sc->vr_ldata = (struct vr_list_data *)sc->vr_ldata_ptr; 1023 round = (unsigned int)sc->vr_ldata_ptr & 0xF; 1024 roundptr = sc->vr_ldata_ptr; 1025 for (i = 0; i < 8; i++) { 1026 if (round % 8) { 1027 round++; 1028 roundptr++; 1029 } else 1030 break; 1031 } 1032 sc->vr_ldata = (struct vr_list_data *)roundptr; 1033 bzero(sc->vr_ldata, sizeof(struct vr_list_data)); 1034 1035 ifp = &sc->arpcom.ac_if; 1036 ifp->if_softc = sc; 1037 ifp->if_unit = unit; 1038 ifp->if_name = "vr"; 1039 ifp->if_mtu = ETHERMTU; 1040 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1041 ifp->if_ioctl = vr_ioctl; 1042 ifp->if_output = ether_output; 1043 ifp->if_start = vr_start; 1044 ifp->if_watchdog = vr_watchdog; 1045 ifp->if_init = vr_init; 1046 ifp->if_baudrate = 10000000; 1047 1048 if (bootverbose) 1049 printf("vr%d: probing for a PHY\n", sc->vr_unit); 1050 for (i = VR_PHYADDR_MIN; i < VR_PHYADDR_MAX + 1; i++) { 1051 if (bootverbose) 1052 printf("vr%d: checking address: %d\n", 1053 sc->vr_unit, i); 1054 sc->vr_phy_addr = i; 1055 vr_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET); 1056 DELAY(500); 1057 while(vr_phy_readreg(sc, PHY_BMCR) 1058 & PHY_BMCR_RESET); 1059 if ((phy_sts = vr_phy_readreg(sc, PHY_BMSR))) 1060 break; 1061 } 1062 if (phy_sts) { 1063 phy_vid = vr_phy_readreg(sc, PHY_VENID); 1064 phy_did = vr_phy_readreg(sc, PHY_DEVID); 1065 if (bootverbose) 1066 printf("vr%d: found PHY at address %d, ", 1067 sc->vr_unit, sc->vr_phy_addr); 1068 if (bootverbose) 1069 printf("vendor id: %x device id: %x\n", 1070 phy_vid, phy_did); 1071 p = vr_phys; 1072 while(p->vr_vid) { 1073 if 
(phy_vid == p->vr_vid && 1074 (phy_did | 0x000F) == p->vr_did) { 1075 sc->vr_pinfo = p; 1076 break; 1077 } 1078 p++; 1079 } 1080 if (sc->vr_pinfo == NULL) 1081 sc->vr_pinfo = &vr_phys[PHY_UNKNOWN]; 1082 if (bootverbose) 1083 printf("vr%d: PHY type: %s\n", 1084 sc->vr_unit, sc->vr_pinfo->vr_name); 1085 } else { 1086 printf("vr%d: MII without any phy!\n", sc->vr_unit); 1087 goto fail; 1088 } 1089 1090 /* 1091 * Do ifmedia setup. 1092 */ 1093 ifmedia_init(&sc->ifmedia, 0, vr_ifmedia_upd, vr_ifmedia_sts); 1094 1095 vr_getmode_mii(sc); 1096 vr_autoneg_mii(sc, VR_FLAG_FORCEDELAY, 1); 1097 media = sc->ifmedia.ifm_media; 1098 vr_stop(sc); 1099 1100 ifmedia_set(&sc->ifmedia, media); 1101 1102 /* 1103 * Call MI attach routines. 1104 */ 1105 if_attach(ifp); 1106 ether_ifattach(ifp); 1107 1108 #if NBPFILTER > 0 1109 bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header)); 1110 #endif 1111 1112 at_shutdown(vr_shutdown, sc, SHUTDOWN_POST_SYNC); 1113 1114 fail: 1115 splx(s); 1116 return; 1117 } 1118 1119 /* 1120 * Initialize the transmit descriptors. 1121 */ 1122 static int vr_list_tx_init(sc) 1123 struct vr_softc *sc; 1124 { 1125 struct vr_chain_data *cd; 1126 struct vr_list_data *ld; 1127 int i; 1128 1129 cd = &sc->vr_cdata; 1130 ld = sc->vr_ldata; 1131 for (i = 0; i < VR_TX_LIST_CNT; i++) { 1132 cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i]; 1133 if (i == (VR_TX_LIST_CNT - 1)) 1134 cd->vr_tx_chain[i].vr_nextdesc = 1135 &cd->vr_tx_chain[0]; 1136 else 1137 cd->vr_tx_chain[i].vr_nextdesc = 1138 &cd->vr_tx_chain[i + 1]; 1139 } 1140 1141 cd->vr_tx_free = &cd->vr_tx_chain[0]; 1142 cd->vr_tx_tail = cd->vr_tx_head = NULL; 1143 1144 return(0); 1145 } 1146 1147 1148 /* 1149 * Initialize the RX descriptors and allocate mbufs for them. Note that 1150 * we arrange the descriptors in a closed ring, so that the last descriptor 1151 * points back to the first. 
 */
static int vr_list_rx_init(sc)
        struct vr_softc         *sc;
{
        struct vr_chain_data    *cd;
        struct vr_list_data     *ld;
        int                     i;

        cd = &sc->vr_cdata;
        ld = sc->vr_ldata;

        for (i = 0; i < VR_RX_LIST_CNT; i++) {
                cd->vr_rx_chain[i].vr_ptr =
                        (struct vr_desc *)&ld->vr_rx_list[i];
                if (vr_newbuf(sc, &cd->vr_rx_chain[i]) == ENOBUFS)
                        return(ENOBUFS);
                /* Close the ring: last descriptor points back to first. */
                if (i == (VR_RX_LIST_CNT - 1)) {
                        cd->vr_rx_chain[i].vr_nextdesc =
                                        &cd->vr_rx_chain[0];
                        ld->vr_rx_list[i].vr_next =
                                        vtophys(&ld->vr_rx_list[0]);
                } else {
                        cd->vr_rx_chain[i].vr_nextdesc =
                                        &cd->vr_rx_chain[i + 1];
                        ld->vr_rx_list[i].vr_next =
                                        vtophys(&ld->vr_rx_list[i + 1]);
                }
        }

        cd->vr_rx_head = &cd->vr_rx_chain[0];

        return(0);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 * Note: the length fields are only 11 bits wide, which means the
 * largest size we can specify is 2047. This is important because
 * MCLBYTES is 2048, so we have to subtract one otherwise we'll
 * overflow the field and make a mess.
 */
static int vr_newbuf(sc, c)
	struct vr_softc		*sc;
	struct vr_chain_onefrag	*c;
{
	struct mbuf		*m_new = NULL;

	/* Grab an mbuf header; on failure the descriptor is untouched. */
	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
	if (m_new == NULL) {
		printf("vr%d: no memory for rx list -- packet dropped!\n",
			sc->vr_unit);
		return(ENOBUFS);
	}

	/* Attach a cluster so a full-sized frame fits in one buffer. */
	MCLGET(m_new, M_DONTWAIT);
	if (!(m_new->m_flags & M_EXT)) {
		printf("vr%d: no memory for rx list -- packet dropped!\n",
			sc->vr_unit);
		m_freem(m_new);
		return(ENOBUFS);
	}

	/*
	 * Point the descriptor at the cluster and mark it ready for the
	 * chip.  MCLBYTES - 1 keeps the length within the 11-bit buffer
	 * length field (see the comment above this function).
	 */
	c->vr_mbuf = m_new;
	c->vr_ptr->vr_status = VR_RXSTAT;
	c->vr_ptr->vr_data = vtophys(mtod(m_new, caddr_t));
	c->vr_ptr->vr_ctl = VR_RXCTL_CHAIN | (MCLBYTES - 1);

	return(0);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static void vr_rxeof(sc)
	struct vr_softc		*sc;
{
	struct ether_header	*eh;
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct vr_chain_onefrag	*cur_rx;
	int			total_len = 0;
	u_int32_t		rxstat;

	ifp = &sc->arpcom.ac_if;

	/* Walk the ring until we hit a descriptor still owned by the chip. */
	while(!((rxstat = sc->vr_cdata.vr_rx_head->vr_ptr->vr_status) &
			VR_RXSTAT_OWN)) {
		cur_rx = sc->vr_cdata.vr_rx_head;
		sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & VR_RXSTAT_RXERR) {
			ifp->if_ierrors++;
			printf("vr%d: rx error: ", sc->vr_unit);
			/* The low byte of the status word encodes the cause. */
			switch(rxstat & 0x000000FF) {
			case VR_RXSTAT_CRCERR:
				printf("crc error\n");
				break;
			case VR_RXSTAT_FRAMEALIGNERR:
				printf("frame alignment error\n");
				break;
			case VR_RXSTAT_FIFOOFLOW:
				printf("FIFO overflow\n");
				break;
			case VR_RXSTAT_GIANT:
				printf("received giant packet\n");
				break;
			case VR_RXSTAT_RUNT:
				printf("received runt packet\n");
				break;
			case VR_RXSTAT_BUSERR:
				printf("system bus error\n");
				break;
			case VR_RXSTAT_BUFFERR:
				printf("rx buffer error\n");
				break;
			default:
				printf("unknown rx error\n");
				break;
			}
			/* Re-arm the descriptor with its existing cluster. */
			cur_rx->vr_ptr->vr_status = VR_RXSTAT;
			cur_rx->vr_ptr->vr_ctl =
				VR_RXCTL_CHAIN | (MCLBYTES - 1);
			continue;
		}

		/* No errors; receive the packet. */
		m = cur_rx->vr_mbuf;
		total_len = VR_RXBYTES(cur_rx->vr_ptr->vr_status);

		/*
		 * XXX The VIA Rhine chip includes the CRC with every
		 * received frame, and there's no way to turn this
		 * behavior off (at least, I can't find anything in
		 * the manual that explains how to do it) so we have
		 * to trim off the CRC manually.
		 */
		total_len -= ETHER_CRC_LEN;

		/*
		 * Try to conjure up a new mbuf cluster. If that
		 * fails, it means we have an out of memory condition and
		 * should leave the buffer in place and continue. This will
		 * result in a lost packet, but there's little else we
		 * can do in this situation.
		 */
		if (vr_newbuf(sc, cur_rx) == ENOBUFS) {
			ifp->if_ierrors++;
			/* Hand the old cluster back to the chip instead. */
			cur_rx->vr_ptr->vr_status =
				VR_RXSTAT_FIRSTFRAG|VR_RXSTAT_LASTFRAG;
			cur_rx->vr_ptr->vr_ctl =
				VR_RXCTL_CHAIN | (MCLBYTES - 1);
			continue;
		}

		ifp->if_ipackets++;
		eh = mtod(m, struct ether_header *);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;
#if NBPFILTER > 0
		/*
		 * Handle BPF listeners. Let the BPF user see the packet, but
		 * don't pass it up to the ether_input() layer unless it's
		 * a broadcast packet, multicast packet, matches our ethernet
		 * address or the interface is in promiscuous mode.
		 */
		if (ifp->if_bpf) {
			bpf_mtap(ifp, m);
			if (ifp->if_flags & IFF_PROMISC &&
				(bcmp(eh->ether_dhost, sc->arpcom.ac_enaddr,
						ETHER_ADDR_LEN) &&
					(eh->ether_dhost[0] & 1) == 0)) {
				m_freem(m);
				continue;
			}
		}
#endif
		/* Remove header from mbuf and pass it on. */
		m_adj(m, sizeof(struct ether_header));
		ether_input(ifp, eh, m);
	}

	return;
}

/*
 * RX 'end of channel': drain any completed frames, then restart the
 * receiver at the current ring head.
 */
void vr_rxeoc(sc)
	struct vr_softc		*sc;
{

	vr_rxeof(sc);
	/* Stop RX, repoint the chip at the ring head, start it again. */
	VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);

	return;
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */

static void vr_txeof(sc)
	struct vr_softc		*sc;
{
	struct vr_chain		*cur_tx;
	struct ifnet		*ifp;
	register struct mbuf	*n;

	ifp = &sc->arpcom.ac_if;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	/* Sanity check. */
	if (sc->vr_cdata.vr_tx_head == NULL)
		return;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	while(sc->vr_cdata.vr_tx_head->vr_mbuf != NULL) {
		u_int32_t		txstat;

		cur_tx = sc->vr_cdata.vr_tx_head;
		txstat = cur_tx->vr_ptr->vr_status;

		/*
		 * Stop at the first frame still owned by the chip or
		 * not yet handed to it (VR_UNSENT).
		 */
		if ((txstat & VR_TXSTAT_OWN) || txstat == VR_UNSENT)
			break;

		if (txstat & VR_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & VR_TXSTAT_DEFER)
				ifp->if_collisions++;
			if (txstat & VR_TXSTAT_LATECOLL)
				ifp->if_collisions++;
		}

		/* The collision count lives above bit 3 of the status word. */
		ifp->if_collisions +=(txstat & VR_TXSTAT_COLLCNT) >> 3;

		ifp->if_opackets++;
		/*
		 * vr_encap() copies every frame into a single mbuf, so
		 * MFREE of the head frees the whole thing.
		 * NOTE(review): confirm no multi-mbuf chain is ever queued.
		 */
		MFREE(cur_tx->vr_mbuf, n);
		cur_tx->vr_mbuf = NULL;

		if (sc->vr_cdata.vr_tx_head == sc->vr_cdata.vr_tx_tail) {
			/* Drained the whole queue. */
			sc->vr_cdata.vr_tx_head = NULL;
			sc->vr_cdata.vr_tx_tail = NULL;
			break;
		}

		sc->vr_cdata.vr_tx_head = cur_tx->vr_nextdesc;
	}

	return;
}

/*
 * TX 'end of channel' interrupt handler.
 */
static void vr_txeoc(sc)
	struct vr_softc		*sc;
{
	struct ifnet		*ifp;

	ifp = &sc->arpcom.ac_if;

	ifp->if_timer = 0;

	if (sc->vr_cdata.vr_tx_head == NULL) {
		/*
		 * Queue fully drained: allow new transmissions and run
		 * any autonegotiation deferred while TX was busy.
		 */
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->vr_cdata.vr_tx_tail = NULL;
		if (sc->vr_want_auto)
			vr_autoneg_mii(sc, VR_FLAG_SCHEDDELAY, 1);
	} else {
		/*
		 * Frames queued while the chip was busy were left marked
		 * VR_UNSENT by vr_start(); hand them to the hardware now.
		 */
		if (VR_TXOWN(sc->vr_cdata.vr_tx_head) == VR_UNSENT) {
			VR_TXOWN(sc->vr_cdata.vr_tx_head) = VR_TXSTAT_OWN;
			ifp->if_timer = 5;
			VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_TX_GO);
		}
	}

	return;
}

/*
 * Interrupt handler: service RX/TX events until the chip reports no
 * more pending interrupt causes.
 */
static void vr_intr(arg)
	void			*arg;
{
	struct vr_softc		*sc;
	struct ifnet		*ifp;
	u_int16_t		status;

	sc = arg;
	ifp = &sc->arpcom.ac_if;

	/* Suppress unwanted interrupts. */
	if (!(ifp->if_flags & IFF_UP)) {
		vr_stop(sc);
		return;
	}

	/* Disable interrupts.
*/ 1461 CSR_WRITE_2(sc, VR_IMR, 0x0000); 1462 1463 for (;;) { 1464 1465 status = CSR_READ_2(sc, VR_ISR); 1466 if (status) 1467 CSR_WRITE_2(sc, VR_ISR, status); 1468 1469 if ((status & VR_INTRS) == 0) 1470 break; 1471 1472 if (status & VR_ISR_RX_OK) 1473 vr_rxeof(sc); 1474 1475 if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) || 1476 (status & VR_ISR_RX_NOBUF) || (status & VR_ISR_RX_OFLOW) || 1477 (status & VR_ISR_RX_DROPPED)) { 1478 vr_rxeof(sc); 1479 vr_rxeoc(sc); 1480 } 1481 1482 if (status & VR_ISR_TX_OK) { 1483 vr_txeof(sc); 1484 vr_txeoc(sc); 1485 } 1486 1487 if ((status & VR_ISR_TX_UNDERRUN)||(status & VR_ISR_TX_ABRT)){ 1488 ifp->if_oerrors++; 1489 vr_txeof(sc); 1490 if (sc->vr_cdata.vr_tx_head != NULL) { 1491 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON); 1492 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO); 1493 } 1494 } 1495 1496 if (status & VR_ISR_BUSERR) { 1497 vr_reset(sc); 1498 vr_init(sc); 1499 } 1500 } 1501 1502 /* Re-enable interrupts. */ 1503 CSR_WRITE_2(sc, VR_IMR, VR_INTRS); 1504 1505 if (ifp->if_snd.ifq_head != NULL) { 1506 vr_start(ifp); 1507 } 1508 1509 return; 1510 } 1511 1512 /* 1513 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 1514 * pointers to the fragment pointers. 1515 */ 1516 static int vr_encap(sc, c, m_head) 1517 struct vr_softc *sc; 1518 struct vr_chain *c; 1519 struct mbuf *m_head; 1520 { 1521 int frag = 0; 1522 struct vr_desc *f = NULL; 1523 int total_len; 1524 struct mbuf *m; 1525 1526 m = m_head; 1527 total_len = 0; 1528 1529 /* 1530 * The VIA Rhine wants packet buffers to be longword 1531 * aligned, but very often our mbufs aren't. Rather than 1532 * waste time trying to decide when to copy and when not 1533 * to copy, just do it all the time. 
	 */
	if (m != NULL) {
		struct mbuf		*m_new = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			printf("vr%d: no memory for tx list", sc->vr_unit);
			return(1);
		}
		/* Need a cluster when the frame won't fit in a header mbuf. */
		if (m_head->m_pkthdr.len > MHLEN) {
			MCLGET(m_new, M_DONTWAIT);
			if (!(m_new->m_flags & M_EXT)) {
				m_freem(m_new);
				printf("vr%d: no memory for tx list",
					sc->vr_unit);
				return(1);
			}
		}
		/* Flatten the whole chain into the single aligned mbuf. */
		m_copydata(m_head, 0, m_head->m_pkthdr.len,
			mtod(m_new, caddr_t));
		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
		m_freem(m_head);
		m_head = m_new;
		/*
		 * The Rhine chip doesn't auto-pad, so we have to make
		 * sure to pad short frames out to the minimum frame length
		 * ourselves.
		 * NOTE(review): the pad bytes are whatever happens to be in
		 * the fresh mbuf -- they are not zeroed here.
		 */
		if (m_head->m_len < VR_MIN_FRAMELEN) {
			m_new->m_pkthdr.len += VR_MIN_FRAMELEN - m_new->m_len;
			m_new->m_len = m_new->m_pkthdr.len;
		}
		/* One fragment describes the whole (copied) frame. */
		f = c->vr_ptr;
		f->vr_data = vtophys(mtod(m_new, caddr_t));
		f->vr_ctl = total_len = m_new->m_len;
		f->vr_ctl |= VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG;
		f->vr_status = 0;
		frag = 1;
	}

	c->vr_mbuf = m_head;
	c->vr_ptr->vr_ctl |= VR_TXCTL_LASTFRAG;
	c->vr_ptr->vr_next = vtophys(c->vr_nextdesc->vr_ptr);

	return(0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */

static void vr_start(ifp)
	struct ifnet		*ifp;
{
	struct vr_softc		*sc;
	struct mbuf		*m_head = NULL;
	struct vr_chain		*cur_tx = NULL, *start_tx;

	sc = ifp->if_softc;

	/* While autonegotiating, just remember that TX is pending. */
	if (sc->vr_autoneg) {
		sc->vr_tx_pend = 1;
		return;
	}

	/*
	 * Check for an available queue slot. If there are none,
	 * punt.
	 */
	if (sc->vr_cdata.vr_tx_free->vr_mbuf != NULL) {
		ifp->if_flags |= IFF_OACTIVE;
		return;
	}

	start_tx = sc->vr_cdata.vr_tx_free;

	while(sc->vr_cdata.vr_tx_free->vr_mbuf == NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Pick a descriptor off the free list. */
		cur_tx = sc->vr_cdata.vr_tx_free;
		sc->vr_cdata.vr_tx_free = cur_tx->vr_nextdesc;

		/* Pack the data into the descriptor. */
		/*
		 * NOTE(review): vr_encap() can fail (returns 1) without
		 * consuming m_head, but its return value is ignored here,
		 * so an allocation failure would leak the dequeued mbuf
		 * and queue a stale descriptor -- worth confirming/fixing.
		 */
		vr_encap(sc, cur_tx, m_head);

		/*
		 * Only descriptors after the first are handed to the chip
		 * here; the first is flipped at the end (presumably so the
		 * chip can't start mid-build -- see below).
		 */
		if (cur_tx != start_tx)
			VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;

#if NBPFILTER > 0
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp, cur_tx->vr_mbuf);
#endif
	}

	/*
	 * If there are no frames queued, bail.
	 */
	if (cur_tx == NULL)
		return;

	/*
	 * Place the request for the upload interrupt
	 * in the last descriptor in the chain. This way, if
	 * we're chaining several packets at once, we'll only
	 * get an interrupt once for the whole chain rather than
	 * once for each packet.
	 */
	cur_tx->vr_ptr->vr_ctl |= VR_TXCTL_FINT;
	sc->vr_cdata.vr_tx_tail = cur_tx;

	if (sc->vr_cdata.vr_tx_head == NULL) {
		/* TX idle: start the chip on this chain right away. */
		sc->vr_cdata.vr_tx_head = start_tx;
		VR_TXOWN(start_tx) = VR_TXSTAT_OWN;
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_TX_GO);
	} else {
		/* TX busy: mark the chain for vr_txeoc() to kick later. */
		VR_TXOWN(start_tx) = VR_UNSENT;
	}

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	return;
}

/*
 * (Re)initialize the chip: reset, rebuild the RX/TX rings, program
 * the RX filter, start the receiver/transmitter and enable interrupts.
 */
static void vr_init(xsc)
	void			*xsc;
{
	struct vr_softc		*sc = xsc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	u_int16_t		phy_bmcr = 0;
	int			s;

	/* Autonegotiation in progress; don't disturb it. */
	if (sc->vr_autoneg)
		return;

	s = splimp();

	/* Save PHY control state so the reset below doesn't lose it. */
	if (sc->vr_pinfo != NULL)
		phy_bmcr = vr_phy_readreg(sc, PHY_BMCR);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	vr_stop(sc);
	vr_reset(sc);

	/* Use store-and-forward thresholds on both RX and TX. */
	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_STORENFWD);

	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);

	/* Init circular RX list. */
	if (vr_list_rx_init(sc) == ENOBUFS) {
		printf("vr%d: initialization failed: no "
			"memory for rx buffers\n", sc->vr_unit);
		vr_stop(sc);
		(void)splx(s);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	vr_list_tx_init(sc);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);

	/*
	 * Program the multicast filter, if necessary.
	 */
	vr_setmulti(sc);

	/*
	 * Load the address of the RX list.
	 */
	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));

	/* Enable receiver and transmitter.
 */
	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
				    VR_CMD_TX_ON|VR_CMD_RX_ON|
				    VR_CMD_RX_GO);

	vr_setcfg(sc, vr_phy_readreg(sc, PHY_BMCR));

	CSR_WRITE_4(sc, VR_TXADDR, vtophys(&sc->vr_ldata->vr_tx_list[0]));

	/*
	 * Enable interrupts.
	 */
	/* Clear any stale interrupt causes first. */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	/* Restore state of BMCR */
	if (sc->vr_pinfo != NULL)
		vr_phy_writereg(sc, PHY_BMCR, phy_bmcr);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	(void)splx(s);

	return;
}

/*
 * Set media options.
 *
 * IFM_AUTO schedules an autonegotiation cycle; any other ethernet
 * media word is forced onto the PHY directly.  Returns EINVAL for
 * non-ethernet media, otherwise 0.
 */
static int vr_ifmedia_upd(ifp)
	struct ifnet		*ifp;
{
	struct vr_softc		*sc;
	struct ifmedia		*ifm;

	sc = ifp->if_softc;
	ifm = &sc->ifmedia;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return(EINVAL);

	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO)
		vr_autoneg_mii(sc, VR_FLAG_SCHEDDELAY, 1);
	else
		vr_setmode_mii(sc, ifm->ifm_media);

	return(0);
}

/*
 * Report current media status.
 */
static void vr_ifmedia_sts(ifp, ifmr)
	struct ifnet		*ifp;
	struct ifmediareq	*ifmr;
{
	struct vr_softc		*sc;
	u_int16_t		advert = 0, ability = 0;

	sc = ifp->if_softc;

	ifmr->ifm_active = IFM_ETHER;

	/* Autoneg disabled: report the manually forced PHY settings. */
	if (!(vr_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_AUTONEGENBL)) {
		if (vr_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_SPEEDSEL)
			ifmr->ifm_active = IFM_ETHER|IFM_100_TX;
		else
			ifmr->ifm_active = IFM_ETHER|IFM_10_T;
		if (vr_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
		return;
	}

	/*
	 * Autoneg enabled: intersect our advertised abilities with the
	 * link partner's and report the best common mode, checking from
	 * fastest to slowest.
	 */
	ability = vr_phy_readreg(sc, PHY_LPAR);
	advert = vr_phy_readreg(sc, PHY_ANAR);
	if (advert & PHY_ANAR_100BT4 &&
		ability & PHY_ANAR_100BT4) {
		ifmr->ifm_active = IFM_ETHER|IFM_100_T4;
	} else if (advert & PHY_ANAR_100BTXFULL &&
		ability & PHY_ANAR_100BTXFULL) {
		ifmr->ifm_active = IFM_ETHER|IFM_100_TX|IFM_FDX;
	} else if (advert & PHY_ANAR_100BTXHALF &&
		ability & PHY_ANAR_100BTXHALF) {
		ifmr->ifm_active = IFM_ETHER|IFM_100_TX|IFM_HDX;
	} else if (advert & PHY_ANAR_10BTFULL &&
		ability & PHY_ANAR_10BTFULL) {
		ifmr->ifm_active = IFM_ETHER|IFM_10_T|IFM_FDX;
	} else if (advert & PHY_ANAR_10BTHALF &&
		ability & PHY_ANAR_10BTHALF) {
		ifmr->ifm_active = IFM_ETHER|IFM_10_T|IFM_HDX;
	}

	return;
}

/*
 * Handle socket ioctls.  Runs at splimp for the duration.
 */
static int vr_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct vr_softc		*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	int			s, error = 0;

	s = splimp();

	switch(command) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
	case SIOCSIFMTU:
		error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFFLAGS:
		/* Bring the interface up or down to match IFF_UP. */
		if (ifp->if_flags & IFF_UP) {
			vr_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vr_stop(sc);
		}

		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Multicast list changed: reprogram the hash filter. */
		vr_setmulti(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
		break;
	default:
		error = EINVAL;
		break;
	}

	(void)splx(s);

	return(error);
}

/*
 * Transmit watchdog: a queued frame was never reported complete.
 * Reset and reinitialize the chip, then restart any pending output.
 */
static void vr_watchdog(ifp)
	struct ifnet		*ifp;
{
	struct vr_softc		*sc;

	sc = ifp->if_softc;

	/* Autonegotiation reuses the watchdog as its delay timer. */
	if (sc->vr_autoneg) {
		vr_autoneg_mii(sc, VR_FLAG_DELAYTIMEO, 1);
		return;
	}

	ifp->if_oerrors++;
	printf("vr%d: watchdog timeout\n", sc->vr_unit);

	if (!(vr_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT))
		printf("vr%d: no carrier - transceiver cable problem?\n",
			sc->vr_unit);

	vr_stop(sc);
	vr_reset(sc);
	vr_init(sc);

	if (ifp->if_snd.ifq_head != NULL)
		vr_start(ifp);

	return;
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void vr_stop(sc)
	struct vr_softc		*sc;
{
	register int		i;
	struct ifnet		*ifp;

	ifp = &sc->arpcom.ac_if;
	ifp->if_timer = 0;

	/* Halt the chip and detach it from the descriptor rings. */
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
	CSR_WRITE_2(sc, VR_IMR, 0x0000);
	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < VR_RX_LIST_CNT; i++) {
		if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) {
			m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf);
			sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL;
		}
	}
	bzero((char *)&sc->vr_ldata->vr_rx_list,
		sizeof(sc->vr_ldata->vr_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < VR_TX_LIST_CNT; i++) {
		if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) {
			m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf);
			sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL;
		}
	}

	bzero((char *)&sc->vr_ldata->vr_tx_list,
		sizeof(sc->vr_ldata->vr_tx_list));

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	return;
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void vr_shutdown(howto, arg)
	int			howto;
	void			*arg;
{
	struct vr_softc		*sc = (struct vr_softc *)arg;

	vr_stop(sc);

	return;
}

/* PCI driver glue: probe/attach entry points for the vr(4) device. */
static struct pci_device vr_device = {
	"vr",
	vr_probe,
	vr_attach,
	&vr_count,
	NULL
};
DATA_SET(pcidevice_set, vr_device);