1 /* 2 * Copyright (c) 1997, 1998 3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Bill Paul. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 
31 * 32 * $Id: if_vr.c,v 1.10 1999/04/24 20:14:01 peter Exp $ 33 */ 34 35 /* 36 * VIA Rhine fast ethernet PCI NIC driver 37 * 38 * Supports various network adapters based on the VIA Rhine 39 * and Rhine II PCI controllers, including the D-Link DFE530TX. 40 * Datasheets are available at http://www.via.com.tw. 41 * 42 * Written by Bill Paul <wpaul@ctr.columbia.edu> 43 * Electrical Engineering Department 44 * Columbia University, New York City 45 */ 46 47 /* 48 * The VIA Rhine controllers are similar in some respects to the 49 * the DEC tulip chips, except less complicated. The controller 50 * uses an MII bus and an external physical layer interface. The 51 * receiver has a one entry perfect filter and a 64-bit hash table 52 * multicast filter. Transmit and receive descriptors are similar 53 * to the tulip. 54 * 55 * The Rhine has a serious flaw in its transmit DMA mechanism: 56 * transmit buffers must be longword aligned. Unfortunately, 57 * FreeBSD doesn't guarantee that mbufs will be filled in starting 58 * at longword boundaries, so we have to do a buffer copy before 59 * transmission. 
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <vm/vm.h>		/* for vtophys */
#include <vm/pmap.h>		/* for vtophys */
#include <machine/clock.h>	/* for DELAY */
#include <machine/bus_pio.h>
#include <machine/bus_memio.h>
#include <machine/bus.h>

#include <pci/pcireg.h>
#include <pci/pcivar.h>

/* Access chip registers via I/O space rather than memory-mapped space. */
#define VR_USEIOSPACE

/* #define VR_BACKGROUND_AUTONEG */

#include <pci/if_vrreg.h>

#ifndef lint
static const char rcsid[] =
	"$Id: if_vr.c,v 1.10 1999/04/24 20:14:01 peter Exp $";
#endif

/*
 * Various supported device vendors/types and their names.
 */
static struct vr_type vr_devs[] = {
	{ VIA_VENDORID, VIA_DEVICEID_RHINE,
		"VIA VT3043 Rhine I 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II,
		"VIA VT86C100A Rhine II 10/100BaseTX" },
	{ DELTA_VENDORID, DELTA_DEVICEID_RHINE_II,
		"Delta Electronics Rhine II 10/100BaseTX" },
	{ ADDTRON_VENDORID, ADDTRON_DEVICEID_RHINE_II,
		"Addtron Technology Rhine II 10/100BaseTX" },
	{ 0, 0, NULL }		/* end-of-table sentinel */
};

/*
 * Various supported PHY vendors/types and their names. Note that
 * this driver will work with pretty much any MII-compliant PHY,
 * so failure to positively identify the chip is not a fatal error.
 */

static struct vr_type vr_phys[] = {
	{ TI_PHY_VENDORID, TI_PHY_10BT, "<TI ThunderLAN 10BT (internal)>" },
	{ TI_PHY_VENDORID, TI_PHY_100VGPMI, "<TI TNETE211 100VG Any-LAN>" },
	{ NS_PHY_VENDORID, NS_PHY_83840A, "<National Semiconductor DP83840A>"},
	{ LEVEL1_PHY_VENDORID, LEVEL1_PHY_LXT970, "<Level 1 LXT970>" },
	{ INTEL_PHY_VENDORID, INTEL_PHY_82555, "<Intel 82555>" },
	{ SEEQ_PHY_VENDORID, SEEQ_PHY_80220, "<SEEQ 80220>" },
	/* Catch-all entry used when no vendor/device ID matches. */
	{ 0, 0, "<MII-compliant physical interface>" }
};

/* NOTE(review): vr_count is not referenced in this part of the file. */
static unsigned long vr_count = 0;
static const char *vr_probe	__P((pcici_t, pcidi_t));
static void vr_attach		__P((pcici_t, int));

static int vr_newbuf		__P((struct vr_softc *,
					struct vr_chain_onefrag *));
static int vr_encap		__P((struct vr_softc *, struct vr_chain *,
					struct mbuf * ));

static void vr_rxeof		__P((struct vr_softc *));
static void vr_rxeoc		__P((struct vr_softc *));
static void vr_txeof		__P((struct vr_softc *));
static void vr_txeoc		__P((struct vr_softc *));
static void vr_intr		__P((void *));
static void vr_start		__P((struct ifnet *));
static int vr_ioctl		__P((struct ifnet *, u_long, caddr_t));
static void vr_init		__P((void *));
static void vr_stop		__P((struct vr_softc *));
static void vr_watchdog		__P((struct ifnet *));
static void vr_shutdown		__P((int, void *));
static int vr_ifmedia_upd	__P((struct ifnet *));
static void vr_ifmedia_sts	__P((struct ifnet *, struct ifmediareq *));

static void vr_mii_sync		__P((struct vr_softc *));
static void vr_mii_send		__P((struct vr_softc *, u_int32_t, int));
static int vr_mii_readreg	__P((struct vr_softc *, struct vr_mii_frame *));
static int vr_mii_writereg	__P((struct vr_softc *, struct vr_mii_frame *));
static u_int16_t vr_phy_readreg	__P((struct vr_softc *, int));
static void vr_phy_writereg	__P((struct vr_softc *, u_int16_t, u_int16_t));

static void vr_autoneg_xmit	__P((struct vr_softc *));
static void vr_autoneg_mii	__P((struct vr_softc *, int, int));
static void vr_setmode_mii	__P((struct vr_softc *, int));
static void vr_getmode_mii	__P((struct vr_softc *));
static void vr_setcfg		__P((struct vr_softc *, u_int16_t));
static u_int8_t vr_calchash	__P((u_int8_t *));
static void vr_setmulti		__P((struct vr_softc *));
static void vr_reset		__P((struct vr_softc *));
static int vr_list_rx_init	__P((struct vr_softc *));
static int vr_list_tx_init	__P((struct vr_softc *));

/* Read-modify-write helpers for 8/16/32-bit chip registers. */
#define VR_SETBIT(sc, reg, x)				\
	CSR_WRITE_1(sc, reg,				\
		CSR_READ_1(sc, reg) | x)

#define VR_CLRBIT(sc, reg, x)				\
	CSR_WRITE_1(sc, reg,				\
		CSR_READ_1(sc, reg) & ~x)

#define VR_SETBIT16(sc, reg, x)				\
	CSR_WRITE_2(sc, reg,				\
		CSR_READ_2(sc, reg) | x)

#define VR_CLRBIT16(sc, reg, x)				\
	CSR_WRITE_2(sc, reg,				\
		CSR_READ_2(sc, reg) & ~x)

#define VR_SETBIT32(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) | x)

#define VR_CLRBIT32(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) & ~x)

/*
 * Set/clear bits in the MII command register; used by the bit-bang
 * MII code below. These expect a local `sc' to be in scope.
 */
#define SIO_SET(x)					\
	CSR_WRITE_1(sc, VR_MIICMD,			\
		CSR_READ_1(sc, VR_MIICMD) | x)

#define SIO_CLR(x)					\
	CSR_WRITE_1(sc, VR_MIICMD,			\
		CSR_READ_1(sc, VR_MIICMD) & ~x)

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
static void vr_mii_sync(sc)
	struct vr_softc *sc;
{
	register int i;

	/* Drive the data line high, then pulse the clock 32 times. */
	SIO_SET(VR_MIICMD_DIR|VR_MIICMD_DATAIN);

	for (i = 0; i < 32; i++) {
		SIO_SET(VR_MIICMD_CLK);
		DELAY(1);
		SIO_CLR(VR_MIICMD_CLK);
		DELAY(1);
	}

	return;
}

/*
 * Clock a series of bits through the MII.
 */
static void vr_mii_send(sc, bits, cnt)
	struct vr_softc *sc;
	u_int32_t bits;
	int cnt;
{
	int i;

	SIO_CLR(VR_MIICMD_CLK);

	/* Shift out `cnt' bits, MSB first; the data bit is presented
	 * while the clock is low and latched on the rising edge. */
	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			SIO_SET(VR_MIICMD_DATAIN);
		} else {
			SIO_CLR(VR_MIICMD_DATAIN);
		}
		DELAY(1);
		SIO_CLR(VR_MIICMD_CLK);
		DELAY(1);
		SIO_SET(VR_MIICMD_CLK);
	}
}

/*
 * Read a PHY register through the MII.
 */
static int vr_mii_readreg(sc, frame)
	struct vr_softc *sc;
	struct vr_mii_frame *frame;

{
	int i, ack, s;

	s = splimp();

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = VR_MII_STARTDELIM;
	frame->mii_opcode = VR_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/* Put the MII interface into direct-programming (bit-bang) mode. */
	CSR_WRITE_1(sc, VR_MIICMD, 0);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);

	/*
	 * Turn on data xmit.
	 */
	SIO_SET(VR_MIICMD_DIR);

	vr_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	vr_mii_send(sc, frame->mii_stdelim, 2);
	vr_mii_send(sc, frame->mii_opcode, 2);
	vr_mii_send(sc, frame->mii_phyaddr, 5);
	vr_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	SIO_CLR((VR_MIICMD_CLK|VR_MIICMD_DATAIN));
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);

	/* Turn off xmit. */
	SIO_CLR(VR_MIICMD_DIR);

	/* Check for ack: the PHY pulls the data line low to acknowledge. */
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);
	ack = CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT;

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			SIO_CLR(VR_MIICMD_CLK);
			DELAY(1);
			SIO_SET(VR_MIICMD_CLK);
			DELAY(1);
		}
		goto fail;
	}

	/* Shift in 16 data bits, MSB first. */
	for (i = 0x8000; i; i >>= 1) {
		SIO_CLR(VR_MIICMD_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT)
				frame->mii_data |= i;
			DELAY(1);
		}
		SIO_SET(VR_MIICMD_CLK);
		DELAY(1);
	}

fail:

	/* One final clock pulse to leave the bus in a known state. */
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);

	splx(s);

	/* Returns 1 if the PHY failed to ack, 0 on success. */
	if (ack)
		return(1);
	return(0);
}

/*
 * Write to a PHY register through the MII.
 */
static int vr_mii_writereg(sc, frame)
	struct vr_softc *sc;
	struct vr_mii_frame *frame;

{
	int s;

	s = splimp();

	/* Put the MII interface into direct-programming (bit-bang) mode. */
	CSR_WRITE_1(sc, VR_MIICMD, 0);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);

	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = VR_MII_STARTDELIM;
	frame->mii_opcode = VR_MII_WRITEOP;
	frame->mii_turnaround = VR_MII_TURNAROUND;

	/*
	 * Turn on data output.
	 */
	SIO_SET(VR_MIICMD_DIR);

	vr_mii_sync(sc);

	/* Start delimiter, opcode, PHY address, register address,
	 * turnaround, then the 16 data bits. */
	vr_mii_send(sc, frame->mii_stdelim, 2);
	vr_mii_send(sc, frame->mii_opcode, 2);
	vr_mii_send(sc, frame->mii_phyaddr, 5);
	vr_mii_send(sc, frame->mii_regaddr, 5);
	vr_mii_send(sc, frame->mii_turnaround, 2);
	vr_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	SIO_CLR(VR_MIICMD_DIR);

	splx(s);

	/* Always reports success. */
	return(0);
}

/*
 * Read a register from the PHY at sc->vr_phy_addr.
 * Returns 0 if the PHY did not respond (vr_mii_readreg leaves
 * mii_data zeroed on a failed ack).
 */
static u_int16_t vr_phy_readreg(sc, reg)
	struct vr_softc *sc;
	int reg;
{
	struct vr_mii_frame frame;

	bzero((char *)&frame, sizeof(frame));

	frame.mii_phyaddr = sc->vr_phy_addr;
	frame.mii_regaddr = reg;
	vr_mii_readreg(sc, &frame);

	return(frame.mii_data);
}

/*
 * Write a value to a register of the PHY at sc->vr_phy_addr.
 */
static void vr_phy_writereg(sc, reg, data)
	struct vr_softc *sc;
	u_int16_t reg;
	u_int16_t data;
{
	struct vr_mii_frame frame;

	bzero((char *)&frame, sizeof(frame));

	frame.mii_phyaddr = sc->vr_phy_addr;
	frame.mii_regaddr = reg;
	frame.mii_data = data;

	vr_mii_writereg(sc, &frame);

	return;
}

/*
 * Calculate CRC of a multicast group address, return the lower 6 bits.
 */
static u_int8_t vr_calchash(addr)
	u_int8_t *addr;
{
	u_int32_t crc, carry;
	int i, j;
	u_int8_t c;

	/* Compute CRC for the address value. */
	crc = 0xFFFFFFFF;	/* initial value */

	/* Process the 6 address bytes, LSB of each byte first. */
	for (i = 0; i < 6; i++) {
		c = *(addr + i);
		for (j = 0; j < 8; j++) {
			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
			crc <<= 1;
			c >>= 1;
			if (carry)
				/*
				 * After `crc <<= 1' bit 0 is zero and carry
				 * is 1 here, so OR-ing in carry is the same
				 * as XOR-ing with 1: the net effect is an
				 * XOR with the standard Ethernet CRC-32
				 * polynomial 0x04C11DB7.
				 */
				crc = (crc ^ 0x04c11db6) | carry;
		}
	}

	/* return the filter bit position */
	return((crc >> 26) & 0x0000003F);
}

/*
 * Program the 64-bit multicast hash filter.
 */
static void vr_setmulti(sc)
	struct vr_softc *sc;
{
	struct ifnet *ifp;
	int h = 0;
	u_int32_t hashes[2] = { 0, 0 };
	struct ifmultiaddr *ifma;
	u_int8_t rxfilt;
	int mcnt = 0;

	ifp = &sc->arpcom.ac_if;

	rxfilt = CSR_READ_1(sc, VR_RXCFG);

	/* ALLMULTI/PROMISC: accept everything by setting all hash bits. */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		rxfilt |= VR_RXCFG_RX_MULTI;
		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, VR_MAR0, 0);
	CSR_WRITE_4(sc, VR_MAR1, 0);

	/* now program new ones */
	for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
				ifma = ifma->ifma_link.le_next) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		/* vr_calchash returns a 6-bit index into the 64-bit table. */
		h = vr_calchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		mcnt++;
	}

	/* Only enable multicast reception if at least one group is set. */
	if (mcnt)
		rxfilt |= VR_RXCFG_RX_MULTI;
	else
		rxfilt &= ~VR_RXCFG_RX_MULTI;

	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);

	return;
}

/*
 * Initiate an autonegotiation session.
 */
static void vr_autoneg_xmit(sc)
	struct vr_softc *sc;
{
	u_int16_t phy_sts;

	/* Reset the PHY and busy-wait for the reset bit to self-clear. */
	vr_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET);
	DELAY(500);
	while(vr_phy_readreg(sc, PHY_BMCR)
			& PHY_BMCR_RESET);

	/* Enable autoneg and restart the negotiation process. */
	phy_sts = vr_phy_readreg(sc, PHY_BMCR);
	phy_sts |= PHY_BMCR_AUTONEGENBL|PHY_BMCR_AUTONEGRSTR;
	vr_phy_writereg(sc, PHY_BMCR, phy_sts);

	return;
}

/*
 * Invoke autonegotiation on a PHY.
536 */ 537 static void vr_autoneg_mii(sc, flag, verbose) 538 struct vr_softc *sc; 539 int flag; 540 int verbose; 541 { 542 u_int16_t phy_sts = 0, media, advert, ability; 543 struct ifnet *ifp; 544 struct ifmedia *ifm; 545 546 ifm = &sc->ifmedia; 547 ifp = &sc->arpcom.ac_if; 548 549 ifm->ifm_media = IFM_ETHER | IFM_AUTO; 550 551 /* 552 * The 100baseT4 PHY on the 3c905-T4 has the 'autoneg supported' 553 * bit cleared in the status register, but has the 'autoneg enabled' 554 * bit set in the control register. This is a contradiction, and 555 * I'm not sure how to handle it. If you want to force an attempt 556 * to autoneg for 100baseT4 PHYs, #define FORCE_AUTONEG_TFOUR 557 * and see what happens. 558 */ 559 #ifndef FORCE_AUTONEG_TFOUR 560 /* 561 * First, see if autoneg is supported. If not, there's 562 * no point in continuing. 563 */ 564 phy_sts = vr_phy_readreg(sc, PHY_BMSR); 565 if (!(phy_sts & PHY_BMSR_CANAUTONEG)) { 566 if (verbose) 567 printf("vr%d: autonegotiation not supported\n", 568 sc->vr_unit); 569 ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX; 570 return; 571 } 572 #endif 573 574 switch (flag) { 575 case VR_FLAG_FORCEDELAY: 576 /* 577 * XXX Never use this option anywhere but in the probe 578 * routine: making the kernel stop dead in its tracks 579 * for three whole seconds after we've gone multi-user 580 * is really bad manners. 581 */ 582 vr_autoneg_xmit(sc); 583 DELAY(5000000); 584 break; 585 case VR_FLAG_SCHEDDELAY: 586 /* 587 * Wait for the transmitter to go idle before starting 588 * an autoneg session, otherwise vr_start() may clobber 589 * our timeout, and we don't want to allow transmission 590 * during an autoneg session since that can screw it up. 
591 */ 592 if (sc->vr_cdata.vr_tx_head != NULL) { 593 sc->vr_want_auto = 1; 594 return; 595 } 596 vr_autoneg_xmit(sc); 597 ifp->if_timer = 5; 598 sc->vr_autoneg = 1; 599 sc->vr_want_auto = 0; 600 return; 601 break; 602 case VR_FLAG_DELAYTIMEO: 603 ifp->if_timer = 0; 604 sc->vr_autoneg = 0; 605 break; 606 default: 607 printf("vr%d: invalid autoneg flag: %d\n", sc->vr_unit, flag); 608 return; 609 } 610 611 if (vr_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_AUTONEGCOMP) { 612 if (verbose) 613 printf("vr%d: autoneg complete, ", sc->vr_unit); 614 phy_sts = vr_phy_readreg(sc, PHY_BMSR); 615 } else { 616 if (verbose) 617 printf("vr%d: autoneg not complete, ", sc->vr_unit); 618 } 619 620 media = vr_phy_readreg(sc, PHY_BMCR); 621 622 /* Link is good. Report modes and set duplex mode. */ 623 if (vr_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT) { 624 if (verbose) 625 printf("link status good "); 626 advert = vr_phy_readreg(sc, PHY_ANAR); 627 ability = vr_phy_readreg(sc, PHY_LPAR); 628 629 if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4) { 630 ifm->ifm_media = IFM_ETHER|IFM_100_T4; 631 media |= PHY_BMCR_SPEEDSEL; 632 media &= ~PHY_BMCR_DUPLEX; 633 printf("(100baseT4)\n"); 634 } else if (advert & PHY_ANAR_100BTXFULL && 635 ability & PHY_ANAR_100BTXFULL) { 636 ifm->ifm_media = IFM_ETHER|IFM_100_TX|IFM_FDX; 637 media |= PHY_BMCR_SPEEDSEL; 638 media |= PHY_BMCR_DUPLEX; 639 printf("(full-duplex, 100Mbps)\n"); 640 } else if (advert & PHY_ANAR_100BTXHALF && 641 ability & PHY_ANAR_100BTXHALF) { 642 ifm->ifm_media = IFM_ETHER|IFM_100_TX|IFM_HDX; 643 media |= PHY_BMCR_SPEEDSEL; 644 media &= ~PHY_BMCR_DUPLEX; 645 printf("(half-duplex, 100Mbps)\n"); 646 } else if (advert & PHY_ANAR_10BTFULL && 647 ability & PHY_ANAR_10BTFULL) { 648 ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_FDX; 649 media &= ~PHY_BMCR_SPEEDSEL; 650 media |= PHY_BMCR_DUPLEX; 651 printf("(full-duplex, 10Mbps)\n"); 652 } else { 653 ifm->ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX; 654 media &= ~PHY_BMCR_SPEEDSEL; 655 media &= 
~PHY_BMCR_DUPLEX; 656 printf("(half-duplex, 10Mbps)\n"); 657 } 658 659 media &= ~PHY_BMCR_AUTONEGENBL; 660 661 /* Set ASIC's duplex mode to match the PHY. */ 662 vr_setcfg(sc, media); 663 vr_phy_writereg(sc, PHY_BMCR, media); 664 } else { 665 if (verbose) 666 printf("no carrier\n"); 667 } 668 669 vr_init(sc); 670 671 if (sc->vr_tx_pend) { 672 sc->vr_autoneg = 0; 673 sc->vr_tx_pend = 0; 674 vr_start(ifp); 675 } 676 677 return; 678 } 679 680 static void vr_getmode_mii(sc) 681 struct vr_softc *sc; 682 { 683 u_int16_t bmsr; 684 struct ifnet *ifp; 685 686 ifp = &sc->arpcom.ac_if; 687 688 bmsr = vr_phy_readreg(sc, PHY_BMSR); 689 if (bootverbose) 690 printf("vr%d: PHY status word: %x\n", sc->vr_unit, bmsr); 691 692 /* fallback */ 693 sc->ifmedia.ifm_media = IFM_ETHER|IFM_10_T|IFM_HDX; 694 695 if (bmsr & PHY_BMSR_10BTHALF) { 696 if (bootverbose) 697 printf("vr%d: 10Mbps half-duplex mode supported\n", 698 sc->vr_unit); 699 ifmedia_add(&sc->ifmedia, 700 IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL); 701 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL); 702 } 703 704 if (bmsr & PHY_BMSR_10BTFULL) { 705 if (bootverbose) 706 printf("vr%d: 10Mbps full-duplex mode supported\n", 707 sc->vr_unit); 708 ifmedia_add(&sc->ifmedia, 709 IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); 710 sc->ifmedia.ifm_media = IFM_ETHER|IFM_10_T|IFM_FDX; 711 } 712 713 if (bmsr & PHY_BMSR_100BTXHALF) { 714 if (bootverbose) 715 printf("vr%d: 100Mbps half-duplex mode supported\n", 716 sc->vr_unit); 717 ifp->if_baudrate = 100000000; 718 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL); 719 ifmedia_add(&sc->ifmedia, 720 IFM_ETHER|IFM_100_TX|IFM_HDX, 0, NULL); 721 sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_TX|IFM_HDX; 722 } 723 724 if (bmsr & PHY_BMSR_100BTXFULL) { 725 if (bootverbose) 726 printf("vr%d: 100Mbps full-duplex mode supported\n", 727 sc->vr_unit); 728 ifp->if_baudrate = 100000000; 729 ifmedia_add(&sc->ifmedia, 730 IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL); 731 sc->ifmedia.ifm_media = 
IFM_ETHER|IFM_100_TX|IFM_FDX; 732 } 733 734 /* Some also support 100BaseT4. */ 735 if (bmsr & PHY_BMSR_100BT4) { 736 if (bootverbose) 737 printf("vr%d: 100baseT4 mode supported\n", sc->vr_unit); 738 ifp->if_baudrate = 100000000; 739 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_T4, 0, NULL); 740 sc->ifmedia.ifm_media = IFM_ETHER|IFM_100_T4; 741 #ifdef FORCE_AUTONEG_TFOUR 742 if (bootverbose) 743 printf("vr%d: forcing on autoneg support for BT4\n", 744 sc->vr_unit); 745 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0 NULL): 746 sc->ifmedia.ifm_media = IFM_ETHER|IFM_AUTO; 747 #endif 748 } 749 750 if (bmsr & PHY_BMSR_CANAUTONEG) { 751 if (bootverbose) 752 printf("vr%d: autoneg supported\n", sc->vr_unit); 753 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); 754 sc->ifmedia.ifm_media = IFM_ETHER|IFM_AUTO; 755 } 756 757 return; 758 } 759 760 /* 761 * Set speed and duplex mode. 762 */ 763 static void vr_setmode_mii(sc, media) 764 struct vr_softc *sc; 765 int media; 766 { 767 u_int16_t bmcr; 768 struct ifnet *ifp; 769 770 ifp = &sc->arpcom.ac_if; 771 772 /* 773 * If an autoneg session is in progress, stop it. 
774 */ 775 if (sc->vr_autoneg) { 776 printf("vr%d: canceling autoneg session\n", sc->vr_unit); 777 ifp->if_timer = sc->vr_autoneg = sc->vr_want_auto = 0; 778 bmcr = vr_phy_readreg(sc, PHY_BMCR); 779 bmcr &= ~PHY_BMCR_AUTONEGENBL; 780 vr_phy_writereg(sc, PHY_BMCR, bmcr); 781 } 782 783 printf("vr%d: selecting MII, ", sc->vr_unit); 784 785 bmcr = vr_phy_readreg(sc, PHY_BMCR); 786 787 bmcr &= ~(PHY_BMCR_AUTONEGENBL|PHY_BMCR_SPEEDSEL| 788 PHY_BMCR_DUPLEX|PHY_BMCR_LOOPBK); 789 790 if (IFM_SUBTYPE(media) == IFM_100_T4) { 791 printf("100Mbps/T4, half-duplex\n"); 792 bmcr |= PHY_BMCR_SPEEDSEL; 793 bmcr &= ~PHY_BMCR_DUPLEX; 794 } 795 796 if (IFM_SUBTYPE(media) == IFM_100_TX) { 797 printf("100Mbps, "); 798 bmcr |= PHY_BMCR_SPEEDSEL; 799 } 800 801 if (IFM_SUBTYPE(media) == IFM_10_T) { 802 printf("10Mbps, "); 803 bmcr &= ~PHY_BMCR_SPEEDSEL; 804 } 805 806 if ((media & IFM_GMASK) == IFM_FDX) { 807 printf("full duplex\n"); 808 bmcr |= PHY_BMCR_DUPLEX; 809 } else { 810 printf("half duplex\n"); 811 bmcr &= ~PHY_BMCR_DUPLEX; 812 } 813 814 vr_setcfg(sc, bmcr); 815 vr_phy_writereg(sc, PHY_BMCR, bmcr); 816 817 return; 818 } 819 820 /* 821 * In order to fiddle with the 822 * 'full-duplex' and '100Mbps' bits in the netconfig register, we 823 * first have to put the transmit and/or receive logic in the idle state. 
 */
static void vr_setcfg(sc, bmcr)
	struct vr_softc *sc;
	u_int16_t bmcr;
{
	int restart = 0;

	/* If the chip is currently transmitting/receiving, idle it first. */
	if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) {
		restart = 1;
		VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
	}

	/* Mirror the PHY's duplex bit into the MAC command register. */
	if (bmcr & PHY_BMCR_DUPLEX)
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
	else
		VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);

	if (restart)
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);

	return;
}

/*
 * Issue a software reset and poll until the chip reports the reset
 * bit has self-cleared (or we time out).
 */
static void vr_reset(sc)
	struct vr_softc *sc;
{
	register int i;

	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);

	for (i = 0; i < VR_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
			break;
	}
	if (i == VR_TIMEOUT)
		printf("vr%d: reset never completed!\n", sc->vr_unit);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	return;
}

/*
 * Probe for a VIA Rhine chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static const char *
vr_probe(config_id, device_id)
	pcici_t config_id;
	pcidi_t device_id;
{
	struct vr_type *t;

	t = vr_devs;

	/* device_id packs vendor in the low 16 bits, device in the high. */
	while(t->vr_name != NULL) {
		if ((device_id & 0xFFFF) == t->vr_vid &&
		   ((device_id >> 16) & 0xFFFF) == t->vr_did) {
			return(t->vr_name);
		}
		t++;
	}

	return(NULL);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static void
vr_attach(config_id, unit)
	pcici_t config_id;
	int unit;
{
	int s, i;
#ifndef VR_USEIOSPACE
	vm_offset_t pbase, vbase;
#endif
	u_char eaddr[ETHER_ADDR_LEN];
	u_int32_t command;
	struct vr_softc *sc;
	struct ifnet *ifp;
	int media = IFM_ETHER|IFM_100_TX|IFM_FDX;
	unsigned int round;
	caddr_t roundptr;
	struct vr_type *p;
	u_int16_t phy_vid, phy_did, phy_sts;

	s = splimp();

	sc = malloc(sizeof(struct vr_softc), M_DEVBUF, M_NOWAIT);
	if (sc == NULL) {
		printf("vr%d: no memory for softc struct!\n", unit);
		/* NOTE(review): returns without splx(s) -- verify intent. */
		return;
	}
	bzero(sc, sizeof(struct vr_softc));

	/*
	 * Handle power management nonsense.
	 */

	/* Capability ID 0x01 is PCI power management. */
	command = pci_conf_read(config_id, VR_PCI_CAPID) & 0x000000FF;
	if (command == 0x01) {

		command = pci_conf_read(config_id, VR_PCI_PWRMGMTCTRL);
		if (command & VR_PSTATE_MASK) {
			u_int32_t		iobase, membase, irq;

			/* Save important PCI config data. */
			iobase = pci_conf_read(config_id, VR_PCI_LOIO);
			membase = pci_conf_read(config_id, VR_PCI_LOMEM);
			irq = pci_conf_read(config_id, VR_PCI_INTLINE);

			/* Reset the power state. */
			printf("vr%d: chip is in D%d power mode "
			"-- setting to D0\n", unit, command & VR_PSTATE_MASK);
			command &= 0xFFFFFFFC;
			pci_conf_write(config_id, VR_PCI_PWRMGMTCTRL, command);

			/* Restore PCI config data. */
			pci_conf_write(config_id, VR_PCI_LOIO, iobase);
			pci_conf_write(config_id, VR_PCI_LOMEM, membase);
			pci_conf_write(config_id, VR_PCI_INTLINE, irq);
		}
	}

	/*
	 * Map control/status registers.
	 */
	command = pci_conf_read(config_id, PCI_COMMAND_STATUS_REG);
	command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
	pci_conf_write(config_id, PCI_COMMAND_STATUS_REG, command);
	command = pci_conf_read(config_id, PCI_COMMAND_STATUS_REG);

#ifdef VR_USEIOSPACE
	if (!(command & PCIM_CMD_PORTEN)) {
		printf("vr%d: failed to enable I/O ports!\n", unit);
		free(sc, M_DEVBUF);
		goto fail;
	}

	if (!pci_map_port(config_id, VR_PCI_LOIO,
	   (u_int16_t *)(&sc->vr_bhandle))) {
		printf ("vr%d: couldn't map ports\n", unit);
		/* NOTE(review): sc is not freed on this path (leak?). */
		goto fail;
	}
	sc->vr_btag = I386_BUS_SPACE_IO;
#else
	if (!(command & PCIM_CMD_MEMEN)) {
		printf("vr%d: failed to enable memory mapping!\n", unit);
		goto fail;
	}

	if (!pci_map_mem(config_id, VR_PCI_LOMEM, &vbase, &pbase)) {
		printf ("vr%d: couldn't map memory\n", unit);
		goto fail;
	}

	sc->vr_bhandle = vbase;
	sc->vr_btag = I386_BUS_SPACE_MEM;
#endif

	/* Allocate interrupt */
	if (!pci_map_int(config_id, vr_intr, sc, &net_imask)) {
		printf("vr%d: couldn't map interrupt\n", unit);
		goto fail;
	}

	/* Reset the adapter. */
	vr_reset(sc);

	/*
	 * Get station address. The way the Rhine chips work,
	 * you're not allowed to directly access the EEPROM once
	 * they've been programmed a special way. Consequently,
	 * we need to read the node address from the PAR0 and PAR1
	 * registers.
	 */
	VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
	DELAY(200);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);

	/*
	 * A Rhine chip was detected. Inform the world.
	 */
	printf("vr%d: Ethernet address: %6D\n", unit, eaddr, ":");

	sc->vr_unit = unit;
	bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);

	/* Over-allocate by 8 so the descriptor lists can be aligned below. */
	sc->vr_ldata_ptr = malloc(sizeof(struct vr_list_data) + 8,
				M_DEVBUF, M_NOWAIT);
	if (sc->vr_ldata_ptr == NULL) {
		free(sc, M_DEVBUF);
		printf("vr%d: no memory for list buffers!\n", unit);
		/* NOTE(review): returns without splx(s) -- verify intent. */
		return;
	}

	/* Round the list-data pointer up to the next 8-byte boundary. */
	sc->vr_ldata = (struct vr_list_data *)sc->vr_ldata_ptr;
	round = (unsigned int)sc->vr_ldata_ptr & 0xF;
	roundptr = sc->vr_ldata_ptr;
	for (i = 0; i < 8; i++) {
		if (round % 8) {
			round++;
			roundptr++;
		} else
			break;
	}
	sc->vr_ldata = (struct vr_list_data *)roundptr;
	bzero(sc->vr_ldata, sizeof(struct vr_list_data));

	/* Fill in the generic network-interface structure. */
	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_unit = unit;
	ifp->if_name = "vr";
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vr_ioctl;
	ifp->if_output = ether_output;
	ifp->if_start = vr_start;
	ifp->if_watchdog = vr_watchdog;
	ifp->if_init = vr_init;
	ifp->if_baudrate = 10000000;
	ifp->if_snd.ifq_maxlen = VR_TX_LIST_CNT - 1;

	/*
	 * Scan the MII address range; the first address whose BMSR reads
	 * non-zero after a reset is taken to be our PHY.
	 */
	if (bootverbose)
		printf("vr%d: probing for a PHY\n", sc->vr_unit);
	for (i = VR_PHYADDR_MIN; i < VR_PHYADDR_MAX + 1; i++) {
		if (bootverbose)
			printf("vr%d: checking address: %d\n",
						sc->vr_unit, i);
		sc->vr_phy_addr = i;
		vr_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET);
		DELAY(500);
		while(vr_phy_readreg(sc, PHY_BMCR)
				& PHY_BMCR_RESET);
		if ((phy_sts = vr_phy_readreg(sc, PHY_BMSR)))
			break;
	}
	if (phy_sts) {
		phy_vid = vr_phy_readreg(sc, PHY_VENID);
		phy_did = vr_phy_readreg(sc, PHY_DEVID);
		if (bootverbose)
			printf("vr%d: found PHY at address %d, ",
					sc->vr_unit, sc->vr_phy_addr);
		if (bootverbose)
			printf("vendor id: %x device id: %x\n",
				phy_vid, phy_did);
		/* Match against the known-PHY table; low 4 bits of the
		 * device ID (revision) are masked in for the compare. */
		p = vr_phys;
		while(p->vr_vid) {
			if (phy_vid == p->vr_vid &&
			   (phy_did | 0x000F) == p->vr_did) {
				sc->vr_pinfo = p;
				break;
			}
			p++;
		}
		if (sc->vr_pinfo == NULL)
			sc->vr_pinfo = &vr_phys[PHY_UNKNOWN];
		if (bootverbose)
			printf("vr%d: PHY type: %s\n",
				sc->vr_unit, sc->vr_pinfo->vr_name);
	} else {
		printf("vr%d: MII without any phy!\n", sc->vr_unit);
		goto fail;
	}

	/*
	 * Do ifmedia setup.
	 */
	ifmedia_init(&sc->ifmedia, 0, vr_ifmedia_upd, vr_ifmedia_sts);

	vr_getmode_mii(sc);
	vr_autoneg_mii(sc, VR_FLAG_FORCEDELAY, 1);
	media = sc->ifmedia.ifm_media;
	vr_stop(sc);

	ifmedia_set(&sc->ifmedia, media);

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

#if NBPFILTER > 0
	bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header));
#endif

	/* Make sure vr_shutdown() runs at system shutdown time. */
	at_shutdown(vr_shutdown, sc, SHUTDOWN_POST_SYNC);

fail:
	splx(s);
	return;
}

/*
 * Initialize the transmit descriptors.
 */
static int vr_list_tx_init(sc)
	struct vr_softc *sc;
{
	struct vr_chain_data *cd;
	struct vr_list_data *ld;
	int i;

	cd = &sc->vr_cdata;
	ld = sc->vr_ldata;
	/* Link the software descriptors into a circular list. */
	for (i = 0; i < VR_TX_LIST_CNT; i++) {
		cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i];
		if (i == (VR_TX_LIST_CNT - 1))
			cd->vr_tx_chain[i].vr_nextdesc =
				&cd->vr_tx_chain[0];
		else
			cd->vr_tx_chain[i].vr_nextdesc =
				&cd->vr_tx_chain[i + 1];
	}

	cd->vr_tx_free = &cd->vr_tx_chain[0];
	cd->vr_tx_tail = cd->vr_tx_head = NULL;

	return(0);
}


/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int vr_list_rx_init(sc)
	struct vr_softc *sc;
{
	struct vr_chain_data *cd;
	struct vr_list_data *ld;
	int i;

	cd = &sc->vr_cdata;
	ld = sc->vr_ldata;

	/*
	 * Attach an mbuf cluster to each descriptor and link both the
	 * software chain and the hardware descriptors (which hold
	 * physical addresses, hence vtophys) into closed rings.
	 */
	for (i = 0; i < VR_RX_LIST_CNT; i++) {
		cd->vr_rx_chain[i].vr_ptr =
			(struct vr_desc *)&ld->vr_rx_list[i];
		if (vr_newbuf(sc, &cd->vr_rx_chain[i]) == ENOBUFS)
			return(ENOBUFS);
		if (i == (VR_RX_LIST_CNT - 1)) {
			cd->vr_rx_chain[i].vr_nextdesc =
					&cd->vr_rx_chain[0];
			ld->vr_rx_list[i].vr_next =
					vtophys(&ld->vr_rx_list[0]);
		} else {
			cd->vr_rx_chain[i].vr_nextdesc =
					&cd->vr_rx_chain[i + 1];
			ld->vr_rx_list[i].vr_next =
					vtophys(&ld->vr_rx_list[i + 1]);
		}
	}

	cd->vr_rx_head = &cd->vr_rx_chain[0];

	return(0);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 * Note: the length fields are only 11 bits wide, which means the
 * largest size we can specify is 2047. This is important because
 * MCLBYTES is 2048, so we have to subtract one otherwise we'll
 * overflow the field and make a mess.
 */
static int vr_newbuf(sc, c)
	struct vr_softc *sc;
	struct vr_chain_onefrag *c;
{
	struct mbuf *m_new = NULL;

	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
	if (m_new == NULL) {
		printf("vr%d: no memory for rx list -- packet dropped!\n",
								sc->vr_unit);
		return(ENOBUFS);
	}

	/* Attach a cluster; without one the mbuf is useless here. */
	MCLGET(m_new, M_DONTWAIT);
	if (!(m_new->m_flags & M_EXT)) {
		printf("vr%d: no memory for rx list -- packet dropped!\n",
								sc->vr_unit);
		m_freem(m_new);
		return(ENOBUFS);
	}

	/* Hand ownership of the descriptor (and buffer) to the chip. */
	c->vr_mbuf = m_new;
	c->vr_ptr->vr_status = VR_RXSTAT;
	c->vr_ptr->vr_data = vtophys(mtod(m_new, caddr_t));
	c->vr_ptr->vr_ctl = VR_RXCTL | VR_RXLEN;

	return(0);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
1230 */ 1231 static void vr_rxeof(sc) 1232 struct vr_softc *sc; 1233 { 1234 struct ether_header *eh; 1235 struct mbuf *m; 1236 struct ifnet *ifp; 1237 struct vr_chain_onefrag *cur_rx; 1238 int total_len = 0; 1239 u_int32_t rxstat; 1240 1241 ifp = &sc->arpcom.ac_if; 1242 1243 while(!((rxstat = sc->vr_cdata.vr_rx_head->vr_ptr->vr_status) & 1244 VR_RXSTAT_OWN)) { 1245 cur_rx = sc->vr_cdata.vr_rx_head; 1246 sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc; 1247 1248 /* 1249 * If an error occurs, update stats, clear the 1250 * status word and leave the mbuf cluster in place: 1251 * it should simply get re-used next time this descriptor 1252 * comes up in the ring. 1253 */ 1254 if (rxstat & VR_RXSTAT_RXERR) { 1255 ifp->if_ierrors++; 1256 printf("vr%d: rx error: ", sc->vr_unit); 1257 switch(rxstat & 0x000000FF) { 1258 case VR_RXSTAT_CRCERR: 1259 printf("crc error\n"); 1260 break; 1261 case VR_RXSTAT_FRAMEALIGNERR: 1262 printf("frame alignment error\n"); 1263 break; 1264 case VR_RXSTAT_FIFOOFLOW: 1265 printf("FIFO overflow\n"); 1266 break; 1267 case VR_RXSTAT_GIANT: 1268 printf("received giant packet\n"); 1269 break; 1270 case VR_RXSTAT_RUNT: 1271 printf("received runt packet\n"); 1272 break; 1273 case VR_RXSTAT_BUSERR: 1274 printf("system bus error\n"); 1275 break; 1276 case VR_RXSTAT_BUFFERR: 1277 printf("rx buffer error\n"); 1278 break; 1279 default: 1280 printf("unknown rx error\n"); 1281 break; 1282 } 1283 cur_rx->vr_ptr->vr_status = VR_RXSTAT; 1284 cur_rx->vr_ptr->vr_ctl = VR_RXCTL|VR_RXLEN; 1285 continue; 1286 } 1287 1288 /* No errors; receive the packet. */ 1289 m = cur_rx->vr_mbuf; 1290 total_len = VR_RXBYTES(cur_rx->vr_ptr->vr_status); 1291 1292 /* 1293 * XXX The VIA Rhine chip includes the CRC with every 1294 * received frame, and there's no way to turn this 1295 * behavior off (at least, I can't find anything in 1296 * the manual that explains how to do it) so we have 1297 * to trim off the CRC manually. 
1298 */ 1299 total_len -= ETHER_CRC_LEN; 1300 1301 /* 1302 * Try to conjure up a new mbuf cluster. If that 1303 * fails, it means we have an out of memory condition and 1304 * should leave the buffer in place and continue. This will 1305 * result in a lost packet, but there's little else we 1306 * can do in this situation. 1307 */ 1308 if (vr_newbuf(sc, cur_rx) == ENOBUFS) { 1309 ifp->if_ierrors++; 1310 cur_rx->vr_ptr->vr_status = VR_RXSTAT; 1311 cur_rx->vr_ptr->vr_ctl = VR_RXCTL|VR_RXLEN; 1312 continue; 1313 } 1314 1315 ifp->if_ipackets++; 1316 eh = mtod(m, struct ether_header *); 1317 m->m_pkthdr.rcvif = ifp; 1318 m->m_pkthdr.len = m->m_len = total_len; 1319 #if NBPFILTER > 0 1320 /* 1321 * Handle BPF listeners. Let the BPF user see the packet, but 1322 * don't pass it up to the ether_input() layer unless it's 1323 * a broadcast packet, multicast packet, matches our ethernet 1324 * address or the interface is in promiscuous mode. 1325 */ 1326 if (ifp->if_bpf) { 1327 bpf_mtap(ifp, m); 1328 if (ifp->if_flags & IFF_PROMISC && 1329 (bcmp(eh->ether_dhost, sc->arpcom.ac_enaddr, 1330 ETHER_ADDR_LEN) && 1331 (eh->ether_dhost[0] & 1) == 0)) { 1332 m_freem(m); 1333 continue; 1334 } 1335 } 1336 #endif 1337 /* Remove header from mbuf and pass it on. */ 1338 m_adj(m, sizeof(struct ether_header)); 1339 ether_input(ifp, eh, m); 1340 } 1341 1342 return; 1343 } 1344 1345 void vr_rxeoc(sc) 1346 struct vr_softc *sc; 1347 { 1348 1349 vr_rxeof(sc); 1350 VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON); 1351 CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr)); 1352 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON); 1353 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO); 1354 1355 return; 1356 } 1357 1358 /* 1359 * A frame was downloaded to the chip. It's safe for us to clean up 1360 * the list buffers. 
1361 */ 1362 1363 static void vr_txeof(sc) 1364 struct vr_softc *sc; 1365 { 1366 struct vr_chain *cur_tx; 1367 struct ifnet *ifp; 1368 register struct mbuf *n; 1369 1370 ifp = &sc->arpcom.ac_if; 1371 1372 /* Clear the timeout timer. */ 1373 ifp->if_timer = 0; 1374 1375 /* Sanity check. */ 1376 if (sc->vr_cdata.vr_tx_head == NULL) 1377 return; 1378 1379 /* 1380 * Go through our tx list and free mbufs for those 1381 * frames that have been transmitted. 1382 */ 1383 while(sc->vr_cdata.vr_tx_head->vr_mbuf != NULL) { 1384 u_int32_t txstat; 1385 1386 cur_tx = sc->vr_cdata.vr_tx_head; 1387 txstat = cur_tx->vr_ptr->vr_status; 1388 1389 if (txstat & VR_TXSTAT_OWN) 1390 break; 1391 1392 if (txstat & VR_TXSTAT_ERRSUM) { 1393 ifp->if_oerrors++; 1394 if (txstat & VR_TXSTAT_DEFER) 1395 ifp->if_collisions++; 1396 if (txstat & VR_TXSTAT_LATECOLL) 1397 ifp->if_collisions++; 1398 } 1399 1400 ifp->if_collisions +=(txstat & VR_TXSTAT_COLLCNT) >> 3; 1401 1402 ifp->if_opackets++; 1403 MFREE(cur_tx->vr_mbuf, n); 1404 cur_tx->vr_mbuf = NULL; 1405 1406 if (sc->vr_cdata.vr_tx_head == sc->vr_cdata.vr_tx_tail) { 1407 sc->vr_cdata.vr_tx_head = NULL; 1408 sc->vr_cdata.vr_tx_tail = NULL; 1409 break; 1410 } 1411 1412 sc->vr_cdata.vr_tx_head = cur_tx->vr_nextdesc; 1413 } 1414 1415 return; 1416 } 1417 1418 /* 1419 * TX 'end of channel' interrupt handler. 1420 */ 1421 static void vr_txeoc(sc) 1422 struct vr_softc *sc; 1423 { 1424 struct ifnet *ifp; 1425 1426 ifp = &sc->arpcom.ac_if; 1427 1428 ifp->if_timer = 0; 1429 1430 if (sc->vr_cdata.vr_tx_head == NULL) { 1431 ifp->if_flags &= ~IFF_OACTIVE; 1432 sc->vr_cdata.vr_tx_tail = NULL; 1433 if (sc->vr_want_auto) 1434 vr_autoneg_mii(sc, VR_FLAG_SCHEDDELAY, 1); 1435 } 1436 1437 return; 1438 } 1439 1440 static void vr_intr(arg) 1441 void *arg; 1442 { 1443 struct vr_softc *sc; 1444 struct ifnet *ifp; 1445 u_int16_t status; 1446 1447 sc = arg; 1448 ifp = &sc->arpcom.ac_if; 1449 1450 /* Supress unwanted interrupts. 
*/ 1451 if (!(ifp->if_flags & IFF_UP)) { 1452 vr_stop(sc); 1453 return; 1454 } 1455 1456 /* Disable interrupts. */ 1457 CSR_WRITE_2(sc, VR_IMR, 0x0000); 1458 1459 for (;;) { 1460 1461 status = CSR_READ_2(sc, VR_ISR); 1462 if (status) 1463 CSR_WRITE_2(sc, VR_ISR, status); 1464 1465 if ((status & VR_INTRS) == 0) 1466 break; 1467 1468 if (status & VR_ISR_RX_OK) 1469 vr_rxeof(sc); 1470 1471 if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) || 1472 (status & VR_ISR_RX_NOBUF) || (status & VR_ISR_RX_OFLOW) || 1473 (status & VR_ISR_RX_DROPPED)) { 1474 vr_rxeof(sc); 1475 vr_rxeoc(sc); 1476 } 1477 1478 if (status & VR_ISR_TX_OK) { 1479 vr_txeof(sc); 1480 vr_txeoc(sc); 1481 } 1482 1483 if ((status & VR_ISR_TX_UNDERRUN)||(status & VR_ISR_TX_ABRT)){ 1484 ifp->if_oerrors++; 1485 vr_txeof(sc); 1486 if (sc->vr_cdata.vr_tx_head != NULL) { 1487 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON); 1488 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO); 1489 } 1490 } 1491 1492 if (status & VR_ISR_BUSERR) { 1493 vr_reset(sc); 1494 vr_init(sc); 1495 } 1496 } 1497 1498 /* Re-enable interrupts. */ 1499 CSR_WRITE_2(sc, VR_IMR, VR_INTRS); 1500 1501 if (ifp->if_snd.ifq_head != NULL) { 1502 vr_start(ifp); 1503 } 1504 1505 return; 1506 } 1507 1508 /* 1509 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 1510 * pointers to the fragment pointers. 1511 */ 1512 static int vr_encap(sc, c, m_head) 1513 struct vr_softc *sc; 1514 struct vr_chain *c; 1515 struct mbuf *m_head; 1516 { 1517 int frag = 0; 1518 struct vr_desc *f = NULL; 1519 int total_len; 1520 struct mbuf *m; 1521 1522 m = m_head; 1523 total_len = 0; 1524 1525 /* 1526 * The VIA Rhine wants packet buffers to be longword 1527 * aligned, but very often our mbufs aren't. Rather than 1528 * waste time trying to decide when to copy and when not 1529 * to copy, just do it all the time. 
1530 */ 1531 if (m != NULL) { 1532 struct mbuf *m_new = NULL; 1533 1534 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 1535 if (m_new == NULL) { 1536 printf("vr%d: no memory for tx list", sc->vr_unit); 1537 return(1); 1538 } 1539 if (m_head->m_pkthdr.len > MHLEN) { 1540 MCLGET(m_new, M_DONTWAIT); 1541 if (!(m_new->m_flags & M_EXT)) { 1542 m_freem(m_new); 1543 printf("vr%d: no memory for tx list", 1544 sc->vr_unit); 1545 return(1); 1546 } 1547 } 1548 m_copydata(m_head, 0, m_head->m_pkthdr.len, 1549 mtod(m_new, caddr_t)); 1550 m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len; 1551 m_freem(m_head); 1552 m_head = m_new; 1553 /* 1554 * The Rhine chip doesn't auto-pad, so we have to make 1555 * sure to pad short frames out to the minimum frame length 1556 * ourselves. 1557 */ 1558 if (m_head->m_len < VR_MIN_FRAMELEN) { 1559 m_new->m_pkthdr.len += VR_MIN_FRAMELEN - m_new->m_len; 1560 m_new->m_len = m_new->m_pkthdr.len; 1561 } 1562 f = c->vr_ptr; 1563 f->vr_data = vtophys(mtod(m_new, caddr_t)); 1564 f->vr_ctl = total_len = m_new->m_len; 1565 f->vr_ctl |= VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG; 1566 f->vr_status = 0; 1567 frag = 1; 1568 } 1569 1570 c->vr_mbuf = m_head; 1571 c->vr_ptr->vr_ctl |= VR_TXCTL_LASTFRAG|VR_TXCTL_FINT; 1572 c->vr_ptr->vr_next = vtophys(c->vr_nextdesc->vr_ptr); 1573 1574 return(0); 1575 } 1576 1577 /* 1578 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 1579 * to the mbuf data regions directly in the transmit lists. We also save a 1580 * copy of the pointers since the transmit list fragment pointers are 1581 * physical addresses. 1582 */ 1583 1584 static void vr_start(ifp) 1585 struct ifnet *ifp; 1586 { 1587 struct vr_softc *sc; 1588 struct mbuf *m_head = NULL; 1589 struct vr_chain *cur_tx = NULL, *start_tx; 1590 1591 sc = ifp->if_softc; 1592 1593 if (sc->vr_autoneg) { 1594 sc->vr_tx_pend = 1; 1595 return; 1596 } 1597 1598 /* 1599 * Check for an available queue slot. If there are none, 1600 * punt. 
1601 */ 1602 if (sc->vr_cdata.vr_tx_free->vr_mbuf != NULL) { 1603 ifp->if_flags |= IFF_OACTIVE; 1604 return; 1605 } 1606 1607 start_tx = sc->vr_cdata.vr_tx_free; 1608 1609 while(sc->vr_cdata.vr_tx_free->vr_mbuf == NULL) { 1610 IF_DEQUEUE(&ifp->if_snd, m_head); 1611 if (m_head == NULL) 1612 break; 1613 1614 /* Pick a descriptor off the free list. */ 1615 cur_tx = sc->vr_cdata.vr_tx_free; 1616 sc->vr_cdata.vr_tx_free = cur_tx->vr_nextdesc; 1617 1618 /* Pack the data into the descriptor. */ 1619 vr_encap(sc, cur_tx, m_head); 1620 1621 if (cur_tx != start_tx) 1622 VR_TXOWN(cur_tx) = VR_TXSTAT_OWN; 1623 1624 #if NBPFILTER > 0 1625 /* 1626 * If there's a BPF listener, bounce a copy of this frame 1627 * to him. 1628 */ 1629 if (ifp->if_bpf) 1630 bpf_mtap(ifp, cur_tx->vr_mbuf); 1631 #endif 1632 VR_TXOWN(cur_tx) = VR_TXSTAT_OWN; 1633 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_TX_GO); 1634 } 1635 1636 /* 1637 * If there are no frames queued, bail. 1638 */ 1639 if (cur_tx == NULL) 1640 return; 1641 1642 sc->vr_cdata.vr_tx_tail = cur_tx; 1643 1644 if (sc->vr_cdata.vr_tx_head == NULL) 1645 sc->vr_cdata.vr_tx_head = start_tx; 1646 1647 /* 1648 * Set a timeout in case the chip goes out to lunch. 1649 */ 1650 ifp->if_timer = 5; 1651 1652 return; 1653 } 1654 1655 static void vr_init(xsc) 1656 void *xsc; 1657 { 1658 struct vr_softc *sc = xsc; 1659 struct ifnet *ifp = &sc->arpcom.ac_if; 1660 u_int16_t phy_bmcr = 0; 1661 int s; 1662 1663 if (sc->vr_autoneg) 1664 return; 1665 1666 s = splimp(); 1667 1668 if (sc->vr_pinfo != NULL) 1669 phy_bmcr = vr_phy_readreg(sc, PHY_BMCR); 1670 1671 /* 1672 * Cancel pending I/O and free all RX/TX buffers. 1673 */ 1674 vr_stop(sc); 1675 vr_reset(sc); 1676 1677 VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH); 1678 VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_STORENFWD); 1679 1680 VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH); 1681 VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD); 1682 1683 /* Init circular RX list. 
*/ 1684 if (vr_list_rx_init(sc) == ENOBUFS) { 1685 printf("vr%d: initialization failed: no " 1686 "memory for rx buffers\n", sc->vr_unit); 1687 vr_stop(sc); 1688 (void)splx(s); 1689 return; 1690 } 1691 1692 /* 1693 * Init tx descriptors. 1694 */ 1695 vr_list_tx_init(sc); 1696 1697 /* If we want promiscuous mode, set the allframes bit. */ 1698 if (ifp->if_flags & IFF_PROMISC) 1699 VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC); 1700 else 1701 VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC); 1702 1703 /* Set capture broadcast bit to capture broadcast frames. */ 1704 if (ifp->if_flags & IFF_BROADCAST) 1705 VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD); 1706 else 1707 VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD); 1708 1709 /* 1710 * Program the multicast filter, if necessary. 1711 */ 1712 vr_setmulti(sc); 1713 1714 /* 1715 * Load the address of the RX list. 1716 */ 1717 CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr)); 1718 1719 /* Enable receiver and transmitter. */ 1720 CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START| 1721 VR_CMD_TX_ON|VR_CMD_RX_ON| 1722 VR_CMD_RX_GO); 1723 1724 vr_setcfg(sc, vr_phy_readreg(sc, PHY_BMCR)); 1725 1726 CSR_WRITE_4(sc, VR_TXADDR, vtophys(&sc->vr_ldata->vr_tx_list[0])); 1727 1728 /* 1729 * Enable interrupts. 1730 */ 1731 CSR_WRITE_2(sc, VR_ISR, 0xFFFF); 1732 CSR_WRITE_2(sc, VR_IMR, VR_INTRS); 1733 1734 /* Restore state of BMCR */ 1735 if (sc->vr_pinfo != NULL) 1736 vr_phy_writereg(sc, PHY_BMCR, phy_bmcr); 1737 1738 ifp->if_flags |= IFF_RUNNING; 1739 ifp->if_flags &= ~IFF_OACTIVE; 1740 1741 (void)splx(s); 1742 1743 return; 1744 } 1745 1746 /* 1747 * Set media options. 
1748 */ 1749 static int vr_ifmedia_upd(ifp) 1750 struct ifnet *ifp; 1751 { 1752 struct vr_softc *sc; 1753 struct ifmedia *ifm; 1754 1755 sc = ifp->if_softc; 1756 ifm = &sc->ifmedia; 1757 1758 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1759 return(EINVAL); 1760 1761 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) 1762 vr_autoneg_mii(sc, VR_FLAG_SCHEDDELAY, 1); 1763 else 1764 vr_setmode_mii(sc, ifm->ifm_media); 1765 1766 return(0); 1767 } 1768 1769 /* 1770 * Report current media status. 1771 */ 1772 static void vr_ifmedia_sts(ifp, ifmr) 1773 struct ifnet *ifp; 1774 struct ifmediareq *ifmr; 1775 { 1776 struct vr_softc *sc; 1777 u_int16_t advert = 0, ability = 0; 1778 1779 sc = ifp->if_softc; 1780 1781 ifmr->ifm_active = IFM_ETHER; 1782 1783 if (!(vr_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_AUTONEGENBL)) { 1784 if (vr_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_SPEEDSEL) 1785 ifmr->ifm_active = IFM_ETHER|IFM_100_TX; 1786 else 1787 ifmr->ifm_active = IFM_ETHER|IFM_10_T; 1788 if (vr_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_DUPLEX) 1789 ifmr->ifm_active |= IFM_FDX; 1790 else 1791 ifmr->ifm_active |= IFM_HDX; 1792 return; 1793 } 1794 1795 ability = vr_phy_readreg(sc, PHY_LPAR); 1796 advert = vr_phy_readreg(sc, PHY_ANAR); 1797 if (advert & PHY_ANAR_100BT4 && 1798 ability & PHY_ANAR_100BT4) { 1799 ifmr->ifm_active = IFM_ETHER|IFM_100_T4; 1800 } else if (advert & PHY_ANAR_100BTXFULL && 1801 ability & PHY_ANAR_100BTXFULL) { 1802 ifmr->ifm_active = IFM_ETHER|IFM_100_TX|IFM_FDX; 1803 } else if (advert & PHY_ANAR_100BTXHALF && 1804 ability & PHY_ANAR_100BTXHALF) { 1805 ifmr->ifm_active = IFM_ETHER|IFM_100_TX|IFM_HDX; 1806 } else if (advert & PHY_ANAR_10BTFULL && 1807 ability & PHY_ANAR_10BTFULL) { 1808 ifmr->ifm_active = IFM_ETHER|IFM_10_T|IFM_FDX; 1809 } else if (advert & PHY_ANAR_10BTHALF && 1810 ability & PHY_ANAR_10BTHALF) { 1811 ifmr->ifm_active = IFM_ETHER|IFM_10_T|IFM_HDX; 1812 } 1813 1814 return; 1815 } 1816 1817 static int vr_ioctl(ifp, command, data) 1818 struct ifnet *ifp; 1819 u_long 
command; 1820 caddr_t data; 1821 { 1822 struct vr_softc *sc = ifp->if_softc; 1823 struct ifreq *ifr = (struct ifreq *) data; 1824 int s, error = 0; 1825 1826 s = splimp(); 1827 1828 switch(command) { 1829 case SIOCSIFADDR: 1830 case SIOCGIFADDR: 1831 case SIOCSIFMTU: 1832 error = ether_ioctl(ifp, command, data); 1833 break; 1834 case SIOCSIFFLAGS: 1835 if (ifp->if_flags & IFF_UP) { 1836 vr_init(sc); 1837 } else { 1838 if (ifp->if_flags & IFF_RUNNING) 1839 vr_stop(sc); 1840 } 1841 error = 0; 1842 break; 1843 case SIOCADDMULTI: 1844 case SIOCDELMULTI: 1845 vr_setmulti(sc); 1846 error = 0; 1847 break; 1848 case SIOCGIFMEDIA: 1849 case SIOCSIFMEDIA: 1850 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command); 1851 break; 1852 default: 1853 error = EINVAL; 1854 break; 1855 } 1856 1857 (void)splx(s); 1858 1859 return(error); 1860 } 1861 1862 static void vr_watchdog(ifp) 1863 struct ifnet *ifp; 1864 { 1865 struct vr_softc *sc; 1866 1867 sc = ifp->if_softc; 1868 1869 if (sc->vr_autoneg) { 1870 vr_autoneg_mii(sc, VR_FLAG_DELAYTIMEO, 1); 1871 return; 1872 } 1873 1874 ifp->if_oerrors++; 1875 printf("vr%d: watchdog timeout\n", sc->vr_unit); 1876 1877 if (!(vr_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT)) 1878 printf("vr%d: no carrier - transceiver cable problem?\n", 1879 sc->vr_unit); 1880 1881 vr_stop(sc); 1882 vr_reset(sc); 1883 vr_init(sc); 1884 1885 if (ifp->if_snd.ifq_head != NULL) 1886 vr_start(ifp); 1887 1888 return; 1889 } 1890 1891 /* 1892 * Stop the adapter and free any mbufs allocated to the 1893 * RX and TX lists. 
1894 */ 1895 static void vr_stop(sc) 1896 struct vr_softc *sc; 1897 { 1898 register int i; 1899 struct ifnet *ifp; 1900 1901 ifp = &sc->arpcom.ac_if; 1902 ifp->if_timer = 0; 1903 1904 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP); 1905 VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON)); 1906 CSR_WRITE_2(sc, VR_IMR, 0x0000); 1907 CSR_WRITE_4(sc, VR_TXADDR, 0x00000000); 1908 CSR_WRITE_4(sc, VR_RXADDR, 0x00000000); 1909 1910 /* 1911 * Free data in the RX lists. 1912 */ 1913 for (i = 0; i < VR_RX_LIST_CNT; i++) { 1914 if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) { 1915 m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf); 1916 sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL; 1917 } 1918 } 1919 bzero((char *)&sc->vr_ldata->vr_rx_list, 1920 sizeof(sc->vr_ldata->vr_rx_list)); 1921 1922 /* 1923 * Free the TX list buffers. 1924 */ 1925 for (i = 0; i < VR_TX_LIST_CNT; i++) { 1926 if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) { 1927 m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf); 1928 sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL; 1929 } 1930 } 1931 1932 bzero((char *)&sc->vr_ldata->vr_tx_list, 1933 sizeof(sc->vr_ldata->vr_tx_list)); 1934 1935 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1936 1937 return; 1938 } 1939 1940 /* 1941 * Stop all chip I/O so that the kernel's probe routines don't 1942 * get confused by errant DMAs when rebooting. 1943 */ 1944 static void vr_shutdown(howto, arg) 1945 int howto; 1946 void *arg; 1947 { 1948 struct vr_softc *sc = (struct vr_softc *)arg; 1949 1950 vr_stop(sc); 1951 1952 return; 1953 } 1954 1955 static struct pci_device vr_device = { 1956 "vr", 1957 vr_probe, 1958 vr_attach, 1959 &vr_count, 1960 NULL 1961 }; 1962 COMPAT_PCI_DRIVER(vr, vr_device); 1963