/*
 * Copyright (c) 1997, 1998
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * VIA Rhine fast ethernet PCI NIC driver
 *
 * Supports various network adapters based on the VIA Rhine
 * and Rhine II PCI controllers, including the D-Link DFE530TX.
 * Datasheets are available at http://www.via.com.tw.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The VIA Rhine controllers are similar in some respects to the
 * DEC tulip chips, except less complicated. The controller
 * uses an MII bus and an external physical layer interface. The
 * receiver has a one entry perfect filter and a 64-bit hash table
 * multicast filter. Transmit and receive descriptors are similar
 * to the tulip.
 *
 * The Rhine has a serious flaw in its transmit DMA mechanism:
 * transmit buffers must be longword aligned. Unfortunately,
 * FreeBSD doesn't guarantee that mbufs will be filled in starting
 * at longword boundaries, so we have to do a buffer copy before
 * transmission.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <vm/vm.h>		/* for vtophys */
#include <vm/pmap.h>		/* for vtophys */
#include <machine/bus_pio.h>
#include <machine/bus_memio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#define VR_USEIOSPACE

#include <pci/if_vrreg.h>

MODULE_DEPEND(vr, pci, 1, 1, 1);
MODULE_DEPEND(vr, ether, 1, 1, 1);
MODULE_DEPEND(vr, miibus, 1, 1, 1);

/* "controller miibus0" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#undef VR_USESWSHIFT

/*
 * Various supported device vendors/types and their names.
 */
static struct vr_type vr_devs[] = {
	{ VIA_VENDORID, VIA_DEVICEID_RHINE,
		"VIA VT3043 Rhine I 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II,
		"VIA VT86C100A Rhine II 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II_2,
		"VIA VT6102 Rhine II 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_III,
		"VIA VT6105 Rhine III 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_III_M,
		"VIA VT6105M Rhine III 10/100BaseTX" },
	{ DELTA_VENDORID, DELTA_DEVICEID_RHINE_II,
		"Delta Electronics Rhine II 10/100BaseTX" },
	{ ADDTRON_VENDORID, ADDTRON_DEVICEID_RHINE_II,
		"Addtron Technology Rhine II 10/100BaseTX" },
	{ 0, 0, NULL }
};

static int vr_probe(device_t);
static int vr_attach(device_t);
static int vr_detach(device_t);

static int vr_newbuf(struct vr_softc *, struct vr_chain_onefrag *,
		struct mbuf *);
static int vr_encap(struct vr_softc *, struct vr_chain *, struct mbuf *);

static void vr_rxeof(struct vr_softc *);
static void vr_rxeoc(struct vr_softc *);
static void vr_txeof(struct vr_softc *);
static void vr_txeoc(struct vr_softc *);
static void vr_tick(void *);
static void vr_intr(void *);
static void vr_start(struct ifnet *);
static int vr_ioctl(struct ifnet *, u_long, caddr_t);
static void vr_init(void *);
static void vr_stop(struct vr_softc *);
static void vr_watchdog(struct ifnet *);
static void vr_shutdown(device_t);
static int vr_ifmedia_upd(struct ifnet *);
static void vr_ifmedia_sts(struct ifnet *, struct ifmediareq *);

#ifdef VR_USESWSHIFT
static void vr_mii_sync(struct vr_softc *);
static void vr_mii_send(struct vr_softc *, u_int32_t, int);
#endif
static int vr_mii_readreg(struct vr_softc *, struct vr_mii_frame *);
static int vr_mii_writereg(struct vr_softc *, struct vr_mii_frame *);
static int vr_miibus_readreg(device_t, int, int);
static int vr_miibus_writereg(device_t, int, int, int);
static void vr_miibus_statchg(device_t);

static void vr_setcfg(struct vr_softc *, int);
static u_int8_t vr_calchash(u_int8_t *);
static void vr_setmulti(struct vr_softc *);
static void vr_reset(struct vr_softc *);
static int vr_list_rx_init(struct vr_softc *);
static int vr_list_tx_init(struct vr_softc *);

#ifdef VR_USEIOSPACE
#define VR_RES		SYS_RES_IOPORT
#define VR_RID		VR_PCI_LOIO
#else
#define VR_RES		SYS_RES_MEMORY
#define VR_RID		VR_PCI_LOMEM
#endif

static device_method_t vr_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		vr_probe),
	DEVMETHOD(device_attach,	vr_attach),
	DEVMETHOD(device_detach,	vr_detach),
	DEVMETHOD(device_shutdown,	vr_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	vr_miibus_readreg),
	DEVMETHOD(miibus_writereg,	vr_miibus_writereg),
	DEVMETHOD(miibus_statchg,	vr_miibus_statchg),

	{ 0, 0 }
};

static driver_t vr_driver = {
	"vr",
	vr_methods,
	sizeof(struct vr_softc)
};

static devclass_t vr_devclass;

DRIVER_MODULE(vr, pci, vr_driver, vr_devclass, 0, 0);
DRIVER_MODULE(miibus, vr, miibus_driver, miibus_devclass, 0, 0);

#define VR_SETBIT(sc, reg, x)	\
	CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) | (x))

#define VR_CLRBIT(sc, reg, x)	\
	CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) & ~(x))

#define VR_SETBIT16(sc, reg, x)	\
	CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) | (x))

#define VR_CLRBIT16(sc, reg, x)	\
	CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) & ~(x))

#define VR_SETBIT32(sc, reg, x)	\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))

#define VR_CLRBIT32(sc, reg, x)	\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))

#define SIO_SET(x)	\
	CSR_WRITE_1(sc, VR_MIICMD, CSR_READ_1(sc, VR_MIICMD) | (x))

#define SIO_CLR(x)	\
	CSR_WRITE_1(sc, VR_MIICMD, CSR_READ_1(sc, VR_MIICMD) & ~(x))

#ifdef VR_USESWSHIFT
/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
static void
vr_mii_sync(sc)
	struct vr_softc *sc;
{
	register int i;

	SIO_SET(VR_MIICMD_DIR|VR_MIICMD_DATAIN);

	for (i = 0; i < 32; i++) {
		SIO_SET(VR_MIICMD_CLK);
		DELAY(1);
		SIO_CLR(VR_MIICMD_CLK);
		DELAY(1);
	}

	return;
}

/*
 * Clock a series of bits through the MII.
 */
static void
vr_mii_send(sc, bits, cnt)
	struct vr_softc *sc;
	u_int32_t bits;
	int cnt;
{
	int i;

	SIO_CLR(VR_MIICMD_CLK);

	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			SIO_SET(VR_MIICMD_DATAIN);
		} else {
			SIO_CLR(VR_MIICMD_DATAIN);
		}
		DELAY(1);
		SIO_CLR(VR_MIICMD_CLK);
		DELAY(1);
		SIO_SET(VR_MIICMD_CLK);
	}
}
#endif

/*
 * Read a PHY register through the MII.
 */
static int
vr_mii_readreg(sc, frame)
	struct vr_softc *sc;
	struct vr_mii_frame *frame;

#ifdef VR_USESWSHIFT
{
	int i, ack;

	VR_LOCK(sc);

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = VR_MII_STARTDELIM;
	frame->mii_opcode = VR_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	CSR_WRITE_1(sc, VR_MIICMD, 0);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);

	/*
	 * Turn on data xmit.
	 */
	SIO_SET(VR_MIICMD_DIR);

	vr_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	vr_mii_send(sc, frame->mii_stdelim, 2);
	vr_mii_send(sc, frame->mii_opcode, 2);
	vr_mii_send(sc, frame->mii_phyaddr, 5);
	vr_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	SIO_CLR((VR_MIICMD_CLK|VR_MIICMD_DATAIN));
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);

	/* Turn off xmit. */
	SIO_CLR(VR_MIICMD_DIR);

	/* Check for ack */
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);
	ack = CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT;
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for (i = 0; i < 16; i++) {
			SIO_CLR(VR_MIICMD_CLK);
			DELAY(1);
			SIO_SET(VR_MIICMD_CLK);
			DELAY(1);
		}
		goto fail;
	}

	for (i = 0x8000; i; i >>= 1) {
		SIO_CLR(VR_MIICMD_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT)
				frame->mii_data |= i;
			DELAY(1);
		}
		SIO_SET(VR_MIICMD_CLK);
		DELAY(1);
	}

fail:

	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);

	VR_UNLOCK(sc);

	if (ack)
		return(1);
	return(0);
}
#else
{
	int s, i;

	s = splimp();

	/* Set the PHY address. */
	CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR) & 0xe0) |
	    frame->mii_phyaddr);

	/* Set the register address. */
	CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB);

	for (i = 0; i < 10000; i++) {
		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0)
			break;
		DELAY(1);
	}

	frame->mii_data = CSR_READ_2(sc, VR_MIIDATA);

	(void)splx(s);

	return(0);
}
#endif

/*
 * Write to a PHY register through the MII.
 */
static int
vr_mii_writereg(sc, frame)
	struct vr_softc *sc;
	struct vr_mii_frame *frame;

#ifdef VR_USESWSHIFT
{
	VR_LOCK(sc);

	CSR_WRITE_1(sc, VR_MIICMD, 0);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);

	/*
	 * Set up frame for TX.
	 */
	frame->mii_stdelim = VR_MII_STARTDELIM;
	frame->mii_opcode = VR_MII_WRITEOP;
	frame->mii_turnaround = VR_MII_TURNAROUND;

	/*
	 * Turn on data output.
	 */
	SIO_SET(VR_MIICMD_DIR);

	vr_mii_sync(sc);

	vr_mii_send(sc, frame->mii_stdelim, 2);
	vr_mii_send(sc, frame->mii_opcode, 2);
	vr_mii_send(sc, frame->mii_phyaddr, 5);
	vr_mii_send(sc, frame->mii_regaddr, 5);
	vr_mii_send(sc, frame->mii_turnaround, 2);
	vr_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	SIO_CLR(VR_MIICMD_DIR);

	VR_UNLOCK(sc);

	return(0);
}
#else
{
	int s, i;

	s = splimp();

	/* Set the PHY address. */
	CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR) & 0xe0) |
	    frame->mii_phyaddr);

	/* Set the register address and the data to write. */
	CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr);
	CSR_WRITE_2(sc, VR_MIIDATA, frame->mii_data);

	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB);

	for (i = 0; i < 10000; i++) {
		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0)
			break;
		DELAY(1);
	}

	(void)splx(s);

	return(0);
}
#endif

static int
vr_miibus_readreg(dev, phy, reg)
	device_t dev;
	int phy, reg;
{
	struct vr_softc *sc;
	struct vr_mii_frame frame;

	sc = device_get_softc(dev);

	switch (sc->vr_revid) {
	case REV_ID_VT6102_APOLLO:
		if (phy != 1)
			return 0;
	default:
		break;
	}

	bzero((char *)&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	vr_mii_readreg(sc, &frame);

	return(frame.mii_data);
}

static int
vr_miibus_writereg(dev, phy, reg, data)
	device_t dev;
	int phy, reg, data;
{
	struct vr_softc *sc;
	struct vr_mii_frame frame;

	sc = device_get_softc(dev);

	switch (sc->vr_revid) {
	case REV_ID_VT6102_APOLLO:
		if (phy != 1)
			return 0;
	default:
		break;
	}

	bzero((char *)&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;

	vr_mii_writereg(sc, &frame);

	return(0);
}

static void
vr_miibus_statchg(dev)
	device_t dev;
{
	struct vr_softc *sc;
	struct mii_data *mii;

	sc = device_get_softc(dev);
	VR_LOCK(sc);
	mii = device_get_softc(sc->vr_miibus);
	vr_setcfg(sc, mii->mii_media_active);
	VR_UNLOCK(sc);

	return;
}

/*
 * Calculate CRC of a multicast group address, return the lower 6 bits.
 */
static u_int8_t
vr_calchash(addr)
	u_int8_t *addr;
{
	u_int32_t crc, carry;
	int i, j;
	u_int8_t c;

	/* Compute CRC for the address value. */
	crc = 0xFFFFFFFF;	/* initial value */

	for (i = 0; i < 6; i++) {
		c = *(addr + i);
		for (j = 0; j < 8; j++) {
			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
			crc <<= 1;
			c >>= 1;
			if (carry)
				crc = (crc ^ 0x04c11db6) | carry;
		}
	}

	/* Return the filter bit position. */
	return((crc >> 26) & 0x0000003F);
}

/*
 * Program the 64-bit multicast hash filter.
 */
static void
vr_setmulti(sc)
	struct vr_softc *sc;
{
	struct ifnet *ifp;
	int h = 0;
	u_int32_t hashes[2] = { 0, 0 };
	struct ifmultiaddr *ifma;
	u_int8_t rxfilt;
	int mcnt = 0;

	ifp = &sc->arpcom.ac_if;

	rxfilt = CSR_READ_1(sc, VR_RXCFG);

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		rxfilt |= VR_RXCFG_RX_MULTI;
		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, VR_MAR0, 0);
	CSR_WRITE_4(sc, VR_MAR1, 0);

	/* now program new ones */
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = vr_calchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		mcnt++;
	}

	if (mcnt)
		rxfilt |= VR_RXCFG_RX_MULTI;
	else
		rxfilt &= ~VR_RXCFG_RX_MULTI;

	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);

	return;
}

/*
 * In order to fiddle with the 'full-duplex' and '100Mbps' bits in
 * the netconfig register, we first have to put the transmit and/or
 * receive logic in the idle state.
 */
static void
vr_setcfg(sc, media)
	struct vr_softc *sc;
	int media;
{
	int restart = 0;

	if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) {
		restart = 1;
		VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON));
	}

	if ((media & IFM_GMASK) == IFM_FDX)
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);
	else
		VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX);

	if (restart)
		VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON);

	return;
}

static void
vr_reset(sc)
	struct vr_softc *sc;
{
	register int i;

	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET);

	for (i = 0; i < VR_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET))
			break;
	}
	if (i == VR_TIMEOUT) {
		if (sc->vr_revid < REV_ID_VT3065_A)
			printf("vr%d: reset never completed!\n", sc->vr_unit);
		else {
			/* Use newer force reset command. */
			printf("vr%d: Using force reset command.\n",
			    sc->vr_unit);
			VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST);
		}
	}

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	return;
}

/*
 * Probe for a VIA Rhine chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
vr_probe(dev)
	device_t dev;
{
	struct vr_type *t;

	t = vr_devs;

	while (t->vr_name != NULL) {
		if ((pci_get_vendor(dev) == t->vr_vid) &&
		    (pci_get_device(dev) == t->vr_did)) {
			device_set_desc(dev, t->vr_name);
			return(0);
		}
		t++;
	}

	return(ENXIO);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
vr_attach(dev)
	device_t dev;
{
	int i;
	u_char eaddr[ETHER_ADDR_LEN];
	struct vr_softc *sc;
	struct ifnet *ifp;
	int unit, error = 0, rid;

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);

	mtx_init(&sc->vr_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF | MTX_RECURSE);
#ifndef BURN_BRIDGES
	/*
	 * Handle power management nonsense.
	 */
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		u_int32_t iobase, membase, irq;

		/* Save important PCI config data. */
		iobase = pci_read_config(dev, VR_PCI_LOIO, 4);
		membase = pci_read_config(dev, VR_PCI_LOMEM, 4);
		irq = pci_read_config(dev, VR_PCI_INTLINE, 4);

		/* Reset the power state. */
		printf("vr%d: chip is in D%d power mode "
		    "-- setting to D0\n", unit,
		    pci_get_powerstate(dev));
		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		/* Restore PCI config data. */
		pci_write_config(dev, VR_PCI_LOIO, iobase, 4);
		pci_write_config(dev, VR_PCI_LOMEM, membase, 4);
		pci_write_config(dev, VR_PCI_INTLINE, irq, 4);
	}
#endif
	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);
	sc->vr_revid = pci_read_config(dev, VR_PCI_REVID, 4) & 0x000000FF;

	rid = VR_RID;
	sc->vr_res = bus_alloc_resource(dev, VR_RES, &rid,
	    0, ~0, 1, RF_ACTIVE);

	if (sc->vr_res == NULL) {
		printf("vr%d: couldn't map ports/memory\n", unit);
		error = ENXIO;
		goto fail;
	}

	sc->vr_btag = rman_get_bustag(sc->vr_res);
	sc->vr_bhandle = rman_get_bushandle(sc->vr_res);

	/* Allocate interrupt. */
	rid = 0;
	sc->vr_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->vr_irq == NULL) {
		printf("vr%d: couldn't map interrupt\n", unit);
		error = ENXIO;
		goto fail;
	}

	/*
	 * Windows may put the chip in suspend mode when it
	 * shuts down. Be sure to kick it in the head to wake it
	 * up again.
	 */
	VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));

	/* Reset the adapter. */
	vr_reset(sc);

	/*
	 * Turn on bit2 (MIION) in PCI configuration register 0x53 during
	 * initialization and disable AUTOPOLL.
	 */
	pci_write_config(dev, VR_PCI_MODE,
	    pci_read_config(dev, VR_PCI_MODE, 4) | (VR_MODE3_MIION << 24), 4);
	VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL);

	/*
	 * Get station address. The way the Rhine chips work,
	 * you're not allowed to directly access the EEPROM once
	 * they've been programmed a special way. Consequently,
	 * we need to read the node address from the PAR0 and PAR1
	 * registers.
	 */
	VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
	DELAY(200);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);

	/*
	 * A Rhine chip was detected. Inform the world.
	 */
	printf("vr%d: Ethernet address: %6D\n", unit, eaddr, ":");

	sc->vr_unit = unit;
	bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);

	sc->vr_ldata = contigmalloc(sizeof(struct vr_list_data), M_DEVBUF,
	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->vr_ldata == NULL) {
		printf("vr%d: no memory for list buffers!\n", unit);
		error = ENXIO;
		goto fail;
	}

	bzero(sc->vr_ldata, sizeof(struct vr_list_data));

	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vr_ioctl;
	ifp->if_output = ether_output;
	ifp->if_start = vr_start;
	ifp->if_watchdog = vr_watchdog;
	ifp->if_init = vr_init;
	ifp->if_baudrate = 10000000;
	ifp->if_snd.ifq_maxlen = VR_TX_LIST_CNT - 1;

	/*
	 * Do MII setup.
	 */
	if (mii_phy_probe(dev, &sc->vr_miibus,
	    vr_ifmedia_upd, vr_ifmedia_sts)) {
		printf("vr%d: MII without any phy!\n", sc->vr_unit);
		error = ENXIO;
		goto fail;
	}

	callout_handle_init(&sc->vr_stat_ch);

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* Hook interrupt last to avoid having to lock softc. */
	error = bus_setup_intr(dev, sc->vr_irq, INTR_TYPE_NET,
	    vr_intr, sc, &sc->vr_intrhand);

	if (error) {
		printf("vr%d: couldn't set up irq\n", unit);
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		vr_detach(dev);

	return(error);
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
vr_detach(dev)
	device_t dev;
{
	struct vr_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->vr_mtx), ("vr mutex not initialized"));
	VR_LOCK(sc);
	ifp = &sc->arpcom.ac_if;

	/* These should only be active if attach succeeded. */
	if (device_is_attached(dev)) {
		vr_stop(sc);
		ether_ifdetach(ifp);
	}
	if (sc->vr_miibus)
		device_delete_child(dev, sc->vr_miibus);
	bus_generic_detach(dev);

	if (sc->vr_intrhand)
		bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
	if (sc->vr_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
	if (sc->vr_res)
		bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);

	if (sc->vr_ldata)
		contigfree(sc->vr_ldata, sizeof(struct vr_list_data), M_DEVBUF);

	VR_UNLOCK(sc);
	mtx_destroy(&sc->vr_mtx);

	return(0);
}

/*
 * Initialize the transmit descriptors.
 */
static int
vr_list_tx_init(sc)
	struct vr_softc *sc;
{
	struct vr_chain_data *cd;
	struct vr_list_data *ld;
	int i;

	cd = &sc->vr_cdata;
	ld = sc->vr_ldata;
	for (i = 0; i < VR_TX_LIST_CNT; i++) {
		cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i];
		if (i == (VR_TX_LIST_CNT - 1))
			cd->vr_tx_chain[i].vr_nextdesc =
			    &cd->vr_tx_chain[0];
		else
			cd->vr_tx_chain[i].vr_nextdesc =
			    &cd->vr_tx_chain[i + 1];
	}

	cd->vr_tx_free = &cd->vr_tx_chain[0];
	cd->vr_tx_tail = cd->vr_tx_head = NULL;

	return(0);
}

/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int
vr_list_rx_init(sc)
	struct vr_softc *sc;
{
	struct vr_chain_data *cd;
	struct vr_list_data *ld;
	int i;

	cd = &sc->vr_cdata;
	ld = sc->vr_ldata;

	for (i = 0; i < VR_RX_LIST_CNT; i++) {
		cd->vr_rx_chain[i].vr_ptr =
		    (struct vr_desc *)&ld->vr_rx_list[i];
		if (vr_newbuf(sc, &cd->vr_rx_chain[i], NULL) == ENOBUFS)
			return(ENOBUFS);
		if (i == (VR_RX_LIST_CNT - 1)) {
			cd->vr_rx_chain[i].vr_nextdesc =
			    &cd->vr_rx_chain[0];
			ld->vr_rx_list[i].vr_next =
			    vtophys(&ld->vr_rx_list[0]);
		} else {
			cd->vr_rx_chain[i].vr_nextdesc =
			    &cd->vr_rx_chain[i + 1];
			ld->vr_rx_list[i].vr_next =
			    vtophys(&ld->vr_rx_list[i + 1]);
		}
	}

	cd->vr_rx_head = &cd->vr_rx_chain[0];

	return(0);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 * Note: the length fields are only 11 bits wide, which means the
 * largest size we can specify is 2047. This is important because
 * MCLBYTES is 2048, so we have to subtract one otherwise we'll
 * overflow the field and make a mess.
 */
static int
vr_newbuf(sc, c, m)
	struct vr_softc *sc;
	struct vr_chain_onefrag *c;
	struct mbuf *m;
{
	struct mbuf *m_new = NULL;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return(ENOBUFS);

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return(ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	m_adj(m_new, sizeof(u_int64_t));

	c->vr_mbuf = m_new;
	c->vr_ptr->vr_status = VR_RXSTAT;
	c->vr_ptr->vr_data = vtophys(mtod(m_new, caddr_t));
	c->vr_ptr->vr_ctl = VR_RXCTL | VR_RXLEN;

	return(0);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static void
vr_rxeof(sc)
	struct vr_softc *sc;
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct vr_chain_onefrag *cur_rx;
	int total_len = 0;
	u_int32_t rxstat;

	ifp = &sc->arpcom.ac_if;

	while (!((rxstat = sc->vr_cdata.vr_rx_head->vr_ptr->vr_status) &
	    VR_RXSTAT_OWN)) {
		struct mbuf *m0 = NULL;

		cur_rx = sc->vr_cdata.vr_rx_head;
		sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc;
		m = cur_rx->vr_mbuf;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & VR_RXSTAT_RXERR) {
			ifp->if_ierrors++;
			printf("vr%d: rx error (%02x):",
			    sc->vr_unit, rxstat & 0x000000ff);
			if (rxstat & VR_RXSTAT_CRCERR)
				printf(" crc error");
			if (rxstat & VR_RXSTAT_FRAMEALIGNERR)
				printf(" frame alignment error");
			if (rxstat & VR_RXSTAT_FIFOOFLOW)
				printf(" FIFO overflow");
			if (rxstat & VR_RXSTAT_GIANT)
				printf(" received giant packet");
			if (rxstat & VR_RXSTAT_RUNT)
				printf(" received runt packet");
			if (rxstat & VR_RXSTAT_BUSERR)
				printf(" system bus error");
			if (rxstat & VR_RXSTAT_BUFFERR)
				printf(" rx buffer error");
			printf("\n");
			vr_newbuf(sc, cur_rx, m);
			continue;
		}

		/* No errors; receive the packet. */
		total_len = VR_RXBYTES(cur_rx->vr_ptr->vr_status);

		/*
		 * XXX The VIA Rhine chip includes the CRC with every
		 * received frame, and there's no way to turn this
		 * behavior off (at least, I can't find anything in
		 * the manual that explains how to do it) so we have
		 * to trim off the CRC manually.
		 */
		total_len -= ETHER_CRC_LEN;

		m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN, ifp,
		    NULL);
		vr_newbuf(sc, cur_rx, m);
		if (m0 == NULL) {
			ifp->if_ierrors++;
			continue;
		}
		m = m0;

		ifp->if_ipackets++;
		(*ifp->if_input)(ifp, m);
	}

	return;
}

static void
vr_rxeoc(sc)
	struct vr_softc *sc;
{
	struct ifnet *ifp;
	int i;

	ifp = &sc->arpcom.ac_if;

	ifp->if_ierrors++;

	VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	DELAY(10000);

	for (i = 0x400;
	    i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RX_ON);
	    i--)
		;	/* Wait for receiver to stop */

	if (!i) {
		printf("vr%d: rx shutdown error!\n", sc->vr_unit);
		sc->vr_flags |= VR_F_RESTART;
		return;
	}

	vr_rxeof(sc);

	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON);
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO);

	return;
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
static void
vr_txeof(sc)
	struct vr_softc *sc;
{
	struct vr_chain *cur_tx;
	struct ifnet *ifp;

	ifp = &sc->arpcom.ac_if;

	/* Reset the timeout timer; if_txeoc will clear it. */
	ifp->if_timer = 5;

	/* Sanity check. */
	if (sc->vr_cdata.vr_tx_head == NULL)
		return;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	while (sc->vr_cdata.vr_tx_head->vr_mbuf != NULL) {
		u_int32_t txstat;
		int i;

		cur_tx = sc->vr_cdata.vr_tx_head;
		txstat = cur_tx->vr_ptr->vr_status;

		if ((txstat & VR_TXSTAT_ABRT) ||
		    (txstat & VR_TXSTAT_UDF)) {
			for (i = 0x400;
			    i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_TX_ON);
			    i--)
				;	/* Wait for chip to shutdown */
			if (!i) {
				printf("vr%d: tx shutdown timeout\n",
				    sc->vr_unit);
				sc->vr_flags |= VR_F_RESTART;
				break;
			}
			VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
			CSR_WRITE_4(sc, VR_TXADDR, vtophys(cur_tx->vr_ptr));
			break;
		}

		if (txstat & VR_TXSTAT_OWN)
			break;

		if (txstat & VR_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & VR_TXSTAT_DEFER)
				ifp->if_collisions++;
			if (txstat & VR_TXSTAT_LATECOLL)
				ifp->if_collisions++;
		}

		ifp->if_collisions += (txstat & VR_TXSTAT_COLLCNT) >> 3;

		ifp->if_opackets++;
		if (cur_tx->vr_mbuf != NULL) {
			m_freem(cur_tx->vr_mbuf);
			cur_tx->vr_mbuf = NULL;
		}

		if (sc->vr_cdata.vr_tx_head == sc->vr_cdata.vr_tx_tail) {
			sc->vr_cdata.vr_tx_head = NULL;
			sc->vr_cdata.vr_tx_tail = NULL;
			break;
		}

		sc->vr_cdata.vr_tx_head = cur_tx->vr_nextdesc;
	}

	return;
}

/*
 * TX 'end of channel' interrupt handler.
 */
static void
vr_txeoc(sc)
	struct vr_softc *sc;
{
	struct ifnet *ifp;

	ifp = &sc->arpcom.ac_if;

	if (sc->vr_cdata.vr_tx_head == NULL) {
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->vr_cdata.vr_tx_tail = NULL;
		ifp->if_timer = 0;
	}

	return;
}

static void
vr_tick(xsc)
	void *xsc;
{
	struct vr_softc *sc;
	struct mii_data *mii;

	sc = xsc;
	VR_LOCK(sc);
	if (sc->vr_flags & VR_F_RESTART) {
		printf("vr%d: restarting\n", sc->vr_unit);
		vr_stop(sc);
		vr_reset(sc);
		vr_init(sc);
		sc->vr_flags &= ~VR_F_RESTART;
	}

	mii = device_get_softc(sc->vr_miibus);
	mii_tick(mii);

	sc->vr_stat_ch = timeout(vr_tick, sc, hz);

	VR_UNLOCK(sc);

	return;
}

static void
vr_intr(arg)
	void *arg;
{
	struct vr_softc *sc;
	struct ifnet *ifp;
	u_int16_t status;

	sc = arg;
	VR_LOCK(sc);
	ifp = &sc->arpcom.ac_if;

	/* Suppress unwanted interrupts. */
	if (!(ifp->if_flags & IFF_UP)) {
		vr_stop(sc);
		VR_UNLOCK(sc);
		return;
	}

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, 0x0000);

	for (;;) {

		status = CSR_READ_2(sc, VR_ISR);
		if (status)
			CSR_WRITE_2(sc, VR_ISR, status);

		if ((status & VR_INTRS) == 0)
			break;

		if (status & VR_ISR_RX_OK)
			vr_rxeof(sc);

		if (status & VR_ISR_RX_DROPPED) {
			printf("vr%d: rx packet lost\n", sc->vr_unit);
			ifp->if_ierrors++;
		}

		if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) ||
		    (status & VR_ISR_RX_OFLOW)) {
			printf("vr%d: receive error (%04x)",
			    sc->vr_unit, status);
			if (status & VR_ISR_RX_NOBUF)
				printf(" no buffers");
			if (status & VR_ISR_RX_OFLOW)
				printf(" overflow");
			if (status & VR_ISR_RX_DROPPED)
				printf(" packet lost");
			printf("\n");
			vr_rxeoc(sc);
		}

		if ((status & VR_ISR_BUSERR) || (status & VR_ISR_TX_UNDERRUN)) {
			vr_reset(sc);
			vr_init(sc);
			break;
		}

		if ((status & VR_ISR_TX_OK) || (status & VR_ISR_TX_ABRT) ||
		    (status & VR_ISR_TX_ABRT2) || (status & VR_ISR_UDFI)) {
			vr_txeof(sc);
			if ((status & VR_ISR_UDFI) ||
			    (status & VR_ISR_TX_ABRT2) ||
			    (status & VR_ISR_TX_ABRT)) {
				ifp->if_oerrors++;
				if (sc->vr_cdata.vr_tx_head != NULL) {
					VR_SETBIT16(sc, VR_COMMAND,
					    VR_CMD_TX_ON);
					VR_SETBIT16(sc, VR_COMMAND,
					    VR_CMD_TX_GO);
				}
			} else
				vr_txeoc(sc);
		}

	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	if (ifp->if_snd.ifq_head != NULL) {
		vr_start(ifp);
	}

	VR_UNLOCK(sc);

	return;
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
vr_encap(sc, c, m_head)
	struct vr_softc *sc;
	struct vr_chain *c;
	struct mbuf *m_head;
{
	int frag = 0;
	struct vr_desc *f = NULL;
	int total_len;
	struct mbuf *m;

	m = m_head;
	total_len = 0;

	/*
	 * The VIA Rhine wants packet buffers to be longword
	 * aligned, but very often our mbufs aren't. Rather than
	 * waste time trying to decide when to copy and when not
	 * to copy, just do it all the time.
	 */
	if (m != NULL) {
		struct mbuf *m_new = NULL;

		m_new = m_defrag(m_head, M_DONTWAIT);
		if (m_new == NULL) {
			return(1);
		}

		m_head = m_new;
		/*
		 * The Rhine chip doesn't auto-pad, so we have to make
		 * sure to pad short frames out to the minimum frame length
		 * ourselves.
		 */
		if (m_head->m_len < VR_MIN_FRAMELEN) {
			m_new->m_pkthdr.len += VR_MIN_FRAMELEN - m_new->m_len;
			m_new->m_len = m_new->m_pkthdr.len;
		}
		f = c->vr_ptr;
		f->vr_data = vtophys(mtod(m_new, caddr_t));
		f->vr_ctl = total_len = m_new->m_len;
		f->vr_ctl |= VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG;
		f->vr_status = 0;
		frag = 1;
	}

	c->vr_mbuf = m_head;
	c->vr_ptr->vr_ctl |= VR_TXCTL_LASTFRAG|VR_TXCTL_FINT;
	c->vr_ptr->vr_next = vtophys(c->vr_nextdesc->vr_ptr);

	return(0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */

static void
vr_start(ifp)
	struct ifnet *ifp;
{
	struct vr_softc *sc;
	struct mbuf *m_head = NULL;
	struct vr_chain *cur_tx = NULL, *start_tx, *prev_tx;

	sc = ifp->if_softc;

	VR_LOCK(sc);

	/*
	 * Check for an available queue slot. If there are none,
	 * punt.
	 */
	if (sc->vr_cdata.vr_tx_free->vr_mbuf != NULL) {
		VR_UNLOCK(sc);
		return;
	}

	start_tx = sc->vr_cdata.vr_tx_free;

	while (sc->vr_cdata.vr_tx_free->vr_mbuf == NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Pick a descriptor off the free list. */
		prev_tx = cur_tx;
		cur_tx = sc->vr_cdata.vr_tx_free;
		sc->vr_cdata.vr_tx_free = cur_tx->vr_nextdesc;

		/* Pack the data into the descriptor. */
		if (vr_encap(sc, cur_tx, m_head)) {
			/* Rollback, send what we were able to encap. */
			IF_PREPEND(&ifp->if_snd, m_head);
			sc->vr_cdata.vr_tx_free = cur_tx;
			cur_tx = prev_tx;
			break;
		}

		if (cur_tx != start_tx)
			VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, cur_tx->vr_mbuf);

		VR_TXOWN(cur_tx) = VR_TXSTAT_OWN;
	}

	/*
	 * If there are no frames queued, bail.
	 */
	if (cur_tx == NULL) {
		VR_UNLOCK(sc);
		return;
	}

	sc->vr_cdata.vr_tx_tail = cur_tx;

	if (sc->vr_cdata.vr_tx_head == NULL)
		sc->vr_cdata.vr_tx_head = start_tx;

	/* Tell the chip to start transmitting. */
	VR_SETBIT16(sc, VR_COMMAND, /*VR_CMD_TX_ON|*/VR_CMD_TX_GO);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
	VR_UNLOCK(sc);

	return;
}

static void
vr_init(xsc)
	void *xsc;
{
	struct vr_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;
	int i;

	VR_LOCK(sc);

	mii = device_get_softc(sc->vr_miibus);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	vr_stop(sc);
	vr_reset(sc);

	/*
	 * Set our station address.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VR_PAR0 + i, sc->arpcom.ac_enaddr[i]);

	/* Set DMA size. */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD);

	/*
	 * BCR0 and BCR1 can override the RXCFG and TXCFG registers,
	 * so we must set both.
	 */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES);

	VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH);
	VR_SETBIT(sc, VR_BCR1, VR_BCR1_TXTHRESHSTORENFWD);

	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES);

	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
	VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD);

	/* Init circular RX list. */
	if (vr_list_rx_init(sc) == ENOBUFS) {
		printf("vr%d: initialization failed: no "
		    "memory for rx buffers\n", sc->vr_unit);
		vr_stop(sc);
		VR_UNLOCK(sc);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	vr_list_tx_init(sc);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC);

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST)
		VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);
	else
		VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD);

	/*
	 * Program the multicast filter, if necessary.
	 */
	vr_setmulti(sc);

	/*
	 * Load the address of the RX list.
	 */
	CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr));

	/* Enable receiver and transmitter. */
	CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START|
	    VR_CMD_TX_ON|VR_CMD_RX_ON|
	    VR_CMD_RX_GO);

	CSR_WRITE_4(sc, VR_TXADDR, vtophys(&sc->vr_ldata->vr_tx_list[0]));

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	mii_mediachg(mii);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	sc->vr_stat_ch = timeout(vr_tick, sc, hz);

	VR_UNLOCK(sc);

	return;
}

/*
 * Set media options.
 */
static int
vr_ifmedia_upd(ifp)
	struct ifnet *ifp;
{
	struct vr_softc *sc;

	sc = ifp->if_softc;

	if (ifp->if_flags & IFF_UP)
		vr_init(sc);

	return(0);
}

/*
 * Report current media status.
 */
static void
vr_ifmedia_sts(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct vr_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->vr_miibus);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	return;
}

static int
vr_ioctl(ifp, command, data)
	struct ifnet *ifp;
	u_long command;
	caddr_t data;
{
	struct vr_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0;

	VR_LOCK(sc);

	switch (command) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			vr_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				vr_stop(sc);
		}
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		vr_setmulti(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->vr_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	VR_UNLOCK(sc);

	return(error);
}

static void
vr_watchdog(ifp)
	struct ifnet *ifp;
{
	struct vr_softc *sc;

	sc = ifp->if_softc;

	VR_LOCK(sc);
	ifp->if_oerrors++;
	printf("vr%d: watchdog timeout\n", sc->vr_unit);

	vr_stop(sc);
	vr_reset(sc);
	vr_init(sc);

	if (ifp->if_snd.ifq_head != NULL)
		vr_start(ifp);

	VR_UNLOCK(sc);

	return;
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
vr_stop(sc)
	struct vr_softc *sc;
{
	register int i;
	struct ifnet *ifp;

	VR_LOCK(sc);

	ifp = &sc->arpcom.ac_if;
	ifp->if_timer = 0;

	untimeout(vr_tick, sc, sc->vr_stat_ch);

	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
	CSR_WRITE_2(sc, VR_IMR, 0x0000);
	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < VR_RX_LIST_CNT; i++) {
		if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) {
			m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf);
			sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL;
		}
	}
	bzero((char *)&sc->vr_ldata->vr_rx_list,
	    sizeof(sc->vr_ldata->vr_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < VR_TX_LIST_CNT; i++) {
		if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) {
			m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf);
			sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL;
		}
	}

	bzero((char *)&sc->vr_ldata->vr_tx_list,
	    sizeof(sc->vr_ldata->vr_tx_list));

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	VR_UNLOCK(sc);

	return;
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
vr_shutdown(dev)
	device_t dev;
{
	struct vr_softc *sc;

	sc = device_get_softc(dev);

	vr_stop(sc);

	return;
}