/*
 * Copyright (c) 1997, 1998
 *	Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * VIA Rhine fast ethernet PCI NIC driver
 *
 * Supports various network adapters based on the VIA Rhine
 * and Rhine II PCI controllers, including the D-Link DFE530TX.
 * Datasheets are available at http://www.via.com.tw.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The VIA Rhine controllers are similar in some respects to the
 * DEC tulip chips, except less complicated. The controller
 * uses an MII bus and an external physical layer interface. The
 * receiver has a one entry perfect filter and a 64-bit hash table
 * multicast filter. Transmit and receive descriptors are similar
 * to the tulip.
 *
 * The Rhine has a serious flaw in its transmit DMA mechanism:
 * transmit buffers must be longword aligned. Unfortunately,
 * FreeBSD doesn't guarantee that mbufs will be filled in starting
 * at longword boundaries, so we have to do a buffer copy before
 * transmission.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <vm/vm.h>		/* for vtophys */
#include <vm/pmap.h>		/* for vtophys */
#include <machine/bus_pio.h>
#include <machine/bus_memio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#define VR_USEIOSPACE

#include <pci/if_vrreg.h>

MODULE_DEPEND(vr, pci, 1, 1, 1);
MODULE_DEPEND(vr, ether, 1, 1, 1);
MODULE_DEPEND(vr, miibus, 1, 1, 1);

/* "controller miibus0" required. See GENERIC if you get errors here. */
#include "miibus_if.h"

#undef VR_USESWSHIFT

/*
 * Various supported device vendors/types and their names.
 */
static struct vr_type vr_devs[] = {
    { VIA_VENDORID, VIA_DEVICEID_RHINE,
        "VIA VT3043 Rhine I 10/100BaseTX" },
    { VIA_VENDORID, VIA_DEVICEID_RHINE_II,
        "VIA VT86C100A Rhine II 10/100BaseTX" },
    { VIA_VENDORID, VIA_DEVICEID_RHINE_II_2,
        "VIA VT6102 Rhine II 10/100BaseTX" },
    { VIA_VENDORID, VIA_DEVICEID_RHINE_III,
        "VIA VT6105 Rhine III 10/100BaseTX" },
    { VIA_VENDORID, VIA_DEVICEID_RHINE_III_M,
        "VIA VT6105M Rhine III 10/100BaseTX" },
    { DELTA_VENDORID, DELTA_DEVICEID_RHINE_II,
        "Delta Electronics Rhine II 10/100BaseTX" },
    { ADDTRON_VENDORID, ADDTRON_DEVICEID_RHINE_II,
        "Addtron Technology Rhine II 10/100BaseTX" },
    { 0, 0, NULL }
};

static int vr_probe(device_t);
static int vr_attach(device_t);
static int vr_detach(device_t);

static int vr_newbuf(struct vr_softc *, struct vr_chain_onefrag *,
        struct mbuf *);
static int vr_encap(struct vr_softc *, struct vr_chain *, struct mbuf *);

static void vr_rxeof(struct vr_softc *);
static void vr_rxeoc(struct vr_softc *);
static void vr_txeof(struct vr_softc *);
static void vr_tick(void *);
static void vr_intr(void *);
static void vr_start(struct ifnet *);
static void vr_start_locked(struct ifnet *);
static int vr_ioctl(struct ifnet *, u_long, caddr_t);
static void vr_init(void *);
static void vr_init_locked(struct vr_softc *);
static void vr_stop(struct vr_softc *);
static void vr_watchdog(struct ifnet *);
static void vr_shutdown(device_t);
static int vr_ifmedia_upd(struct ifnet *);
static void vr_ifmedia_sts(struct ifnet *, struct ifmediareq *);

#ifdef VR_USESWSHIFT
static void vr_mii_sync(struct vr_softc *);
static void vr_mii_send(struct vr_softc *, uint32_t, int);
#endif
static int vr_mii_readreg(struct vr_softc *, struct vr_mii_frame *);
static int vr_mii_writereg(struct vr_softc *, struct vr_mii_frame *);
static int vr_miibus_readreg(device_t, uint16_t, uint16_t);
static int vr_miibus_writereg(device_t, uint16_t, uint16_t, uint16_t);
static void vr_miibus_statchg(device_t);

static void vr_setcfg(struct vr_softc *, int);
static void vr_setmulti(struct vr_softc *);
static void vr_reset(struct vr_softc *);
static int vr_list_rx_init(struct vr_softc *);
static int vr_list_tx_init(struct vr_softc *);

#ifdef VR_USEIOSPACE
#define VR_RES		SYS_RES_IOPORT
#define VR_RID		VR_PCI_LOIO
#else
#define VR_RES		SYS_RES_MEMORY
#define VR_RID		VR_PCI_LOMEM
#endif

static device_method_t vr_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,	vr_probe),
    DEVMETHOD(device_attach,	vr_attach),
    DEVMETHOD(device_detach,	vr_detach),
    DEVMETHOD(device_shutdown,	vr_shutdown),

    /* bus interface */
    DEVMETHOD(bus_print_child,	bus_generic_print_child),
    DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

    /* MII interface */
    DEVMETHOD(miibus_readreg,	vr_miibus_readreg),
    DEVMETHOD(miibus_writereg,	vr_miibus_writereg),
    DEVMETHOD(miibus_statchg,	vr_miibus_statchg),

    { 0, 0 }
};

static driver_t vr_driver = {
    "vr",
    vr_methods,
    sizeof(struct vr_softc)
};

static devclass_t vr_devclass;

DRIVER_MODULE(vr, pci, vr_driver, vr_devclass, 0, 0);
DRIVER_MODULE(miibus, vr, miibus_driver, miibus_devclass, 0, 0);

#define VR_SETBIT(sc, reg, x)			\
    CSR_WRITE_1(sc, reg,			\
        CSR_READ_1(sc, reg) | (x))

#define VR_CLRBIT(sc, reg, x)			\
    CSR_WRITE_1(sc, reg,			\
        CSR_READ_1(sc, reg) & ~(x))

#define VR_SETBIT16(sc, reg, x)			\
    CSR_WRITE_2(sc, reg,			\
        CSR_READ_2(sc, reg) | (x))

#define VR_CLRBIT16(sc, reg, x)			\
    CSR_WRITE_2(sc, reg,			\
        CSR_READ_2(sc, reg) & ~(x))

#define VR_SETBIT32(sc, reg, x)			\
    CSR_WRITE_4(sc, reg,			\
        CSR_READ_4(sc, reg) | (x))

#define VR_CLRBIT32(sc, reg, x)			\
    CSR_WRITE_4(sc, reg,			\
        CSR_READ_4(sc, reg) & ~(x))

#define SIO_SET(x)				\
    CSR_WRITE_1(sc, VR_MIICMD,			\
        CSR_READ_1(sc, VR_MIICMD) | (x))

#define SIO_CLR(x)				\
    CSR_WRITE_1(sc, VR_MIICMD,			\
        CSR_READ_1(sc, VR_MIICMD) & ~(x))

#ifdef VR_USESWSHIFT
/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
static void
vr_mii_sync(struct vr_softc *sc)
{
    register int i;

    SIO_SET(VR_MIICMD_DIR|VR_MIICMD_DATAIN);

    for (i = 0; i < 32; i++) {
        SIO_SET(VR_MIICMD_CLK);
        DELAY(1);
        SIO_CLR(VR_MIICMD_CLK);
        DELAY(1);
    }
}

/*
 * Clock a series of bits through the MII.
 */
static void
vr_mii_send(struct vr_softc *sc, uint32_t bits, int cnt)
{
    int i;

    SIO_CLR(VR_MIICMD_CLK);

    for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
        if (bits & i) {
            SIO_SET(VR_MIICMD_DATAIN);
        } else {
            SIO_CLR(VR_MIICMD_DATAIN);
        }
        DELAY(1);
        SIO_CLR(VR_MIICMD_CLK);
        DELAY(1);
        SIO_SET(VR_MIICMD_CLK);
    }
}
#endif

/*
 * Read a PHY register through the MII.
 */
static int
vr_mii_readreg(struct vr_softc *sc, struct vr_mii_frame *frame)
#ifdef VR_USESWSHIFT
{
    int i, ack;

    /* Set up frame for RX. */
    frame->mii_stdelim = VR_MII_STARTDELIM;
    frame->mii_opcode = VR_MII_READOP;
    frame->mii_turnaround = 0;
    frame->mii_data = 0;

    CSR_WRITE_1(sc, VR_MIICMD, 0);
    VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);

    /* Turn on data xmit. */
    SIO_SET(VR_MIICMD_DIR);

    vr_mii_sync(sc);

    /* Send command/address info. */
    vr_mii_send(sc, frame->mii_stdelim, 2);
    vr_mii_send(sc, frame->mii_opcode, 2);
    vr_mii_send(sc, frame->mii_phyaddr, 5);
    vr_mii_send(sc, frame->mii_regaddr, 5);

    /* Idle bit. */
    SIO_CLR((VR_MIICMD_CLK|VR_MIICMD_DATAIN));
    DELAY(1);
    SIO_SET(VR_MIICMD_CLK);
    DELAY(1);

    /* Turn off xmit. */
    SIO_CLR(VR_MIICMD_DIR);

    /* Check for ack. */
    SIO_CLR(VR_MIICMD_CLK);
    DELAY(1);
    ack = CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT;
    SIO_SET(VR_MIICMD_CLK);
    DELAY(1);

    /*
     * Now try reading data bits. If the ack failed, we still
     * need to clock through 16 cycles to keep the PHY(s) in sync.
     */
    if (ack) {
        for (i = 0; i < 16; i++) {
            SIO_CLR(VR_MIICMD_CLK);
            DELAY(1);
            SIO_SET(VR_MIICMD_CLK);
            DELAY(1);
        }
        goto fail;
    }

    for (i = 0x8000; i; i >>= 1) {
        SIO_CLR(VR_MIICMD_CLK);
        DELAY(1);
        if (!ack) {
            if (CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT)
                frame->mii_data |= i;
            DELAY(1);
        }
        SIO_SET(VR_MIICMD_CLK);
        DELAY(1);
    }

fail:
    SIO_CLR(VR_MIICMD_CLK);
    DELAY(1);
    SIO_SET(VR_MIICMD_CLK);
    DELAY(1);

    if (ack)
        return (1);
    return (0);
}
#else
{
    int i;

    /* Set the PHY address. */
    CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR) & 0xe0) |
        frame->mii_phyaddr);

    /* Set the register address. */
    CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr);
    VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB);

    for (i = 0; i < 10000; i++) {
        if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0)
            break;
        DELAY(1);
    }
    frame->mii_data = CSR_READ_2(sc, VR_MIIDATA);

    return (0);
}
#endif

/*
 * Write to a PHY register through the MII.
 */
static int
vr_mii_writereg(struct vr_softc *sc, struct vr_mii_frame *frame)
#ifdef VR_USESWSHIFT
{
    CSR_WRITE_1(sc, VR_MIICMD, 0);
    VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);

    /* Set up frame for TX. */
    frame->mii_stdelim = VR_MII_STARTDELIM;
    frame->mii_opcode = VR_MII_WRITEOP;
    frame->mii_turnaround = VR_MII_TURNAROUND;

    /* Turn on data output. */
    SIO_SET(VR_MIICMD_DIR);

    vr_mii_sync(sc);

    vr_mii_send(sc, frame->mii_stdelim, 2);
    vr_mii_send(sc, frame->mii_opcode, 2);
    vr_mii_send(sc, frame->mii_phyaddr, 5);
    vr_mii_send(sc, frame->mii_regaddr, 5);
    vr_mii_send(sc, frame->mii_turnaround, 2);
    vr_mii_send(sc, frame->mii_data, 16);

    /* Idle bit. */
    SIO_SET(VR_MIICMD_CLK);
    DELAY(1);
    SIO_CLR(VR_MIICMD_CLK);
    DELAY(1);

    /* Turn off xmit. */
    SIO_CLR(VR_MIICMD_DIR);

    return (0);
}
#else
{
    int i;

    /* Set the PHY address. */
    CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR) & 0xe0) |
        frame->mii_phyaddr);

    /* Set the register address and data to write. */
    CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr);
    CSR_WRITE_2(sc, VR_MIIDATA, frame->mii_data);

    VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB);

    for (i = 0; i < 10000; i++) {
        if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0)
            break;
        DELAY(1);
    }

    return (0);
}
#endif

static int
vr_miibus_readreg(device_t dev, uint16_t phy, uint16_t reg)
{
    struct vr_mii_frame frame;
    struct vr_softc *sc = device_get_softc(dev);

    switch (sc->vr_revid) {
    case REV_ID_VT6102_APOLLO:
        if (phy != 1) {
            frame.mii_data = 0;
            goto out;
        }
    default:
        break;
    }

    bzero((char *)&frame, sizeof(frame));
    frame.mii_phyaddr = phy;
    frame.mii_regaddr = reg;
    vr_mii_readreg(sc, &frame);

out:
    return (frame.mii_data);
}

static int
vr_miibus_writereg(device_t dev, uint16_t phy, uint16_t reg, uint16_t data)
{
    struct vr_mii_frame frame;
    struct vr_softc *sc = device_get_softc(dev);

    switch (sc->vr_revid) {
    case REV_ID_VT6102_APOLLO:
        if (phy != 1)
            return (0);
    default:
        break;
    }

    bzero((char *)&frame, sizeof(frame));
    frame.mii_phyaddr = phy;
    frame.mii_regaddr = reg;
    frame.mii_data = data;
    vr_mii_writereg(sc, &frame);

    return (0);
}

static void
vr_miibus_statchg(device_t dev)
{
    struct mii_data *mii;
    struct vr_softc *sc = device_get_softc(dev);

    mii = device_get_softc(sc->vr_miibus);
    vr_setcfg(sc, mii->mii_media_active);
}

/*
 * Program the 64-bit multicast hash filter.
 */
static void
vr_setmulti(struct vr_softc *sc)
{
    struct ifnet *ifp = &sc->arpcom.ac_if;
    int h = 0;
    uint32_t hashes[2] = { 0, 0 };
    struct ifmultiaddr *ifma;
    uint8_t rxfilt;
    int mcnt = 0;

    VR_LOCK_ASSERT(sc);

    rxfilt = CSR_READ_1(sc, VR_RXCFG);

    if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
        rxfilt |= VR_RXCFG_RX_MULTI;
        CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
        CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
        CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
        return;
    }

    /* First, zero out all the existing hash bits. */
    CSR_WRITE_4(sc, VR_MAR0, 0);
    CSR_WRITE_4(sc, VR_MAR1, 0);

    /* Now program new ones. */
    TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
        if (ifma->ifma_addr->sa_family != AF_LINK)
            continue;
        h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
            ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
        if (h < 32)
            hashes[0] |= (1 << h);
        else
            hashes[1] |= (1 << (h - 32));
        mcnt++;
    }

    if (mcnt)
        rxfilt |= VR_RXCFG_RX_MULTI;
    else
        rxfilt &= ~VR_RXCFG_RX_MULTI;

    CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
    CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
    CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
}

/*
 * In order to fiddle with the 'full-duplex' and '100Mbps' bits in the
 * netconfig register, we first have to put the transmit and/or receive
 * logic in the idle state.
 */
565 */ 566 static void 567 vr_setcfg(struct vr_softc *sc, int media) 568 { 569 int restart = 0; 570 571 VR_LOCK_ASSERT(sc); 572 573 if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) { 574 restart = 1; 575 VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON)); 576 } 577 578 if ((media & IFM_GMASK) == IFM_FDX) 579 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX); 580 else 581 VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX); 582 583 if (restart) 584 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON); 585 } 586 587 static void 588 vr_reset(struct vr_softc *sc) 589 { 590 register int i; 591 592 /*VR_LOCK_ASSERT(sc);*/ /* XXX: Called during detach w/o lock. */ 593 594 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET); 595 596 for (i = 0; i < VR_TIMEOUT; i++) { 597 DELAY(10); 598 if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET)) 599 break; 600 } 601 if (i == VR_TIMEOUT) { 602 if (sc->vr_revid < REV_ID_VT3065_A) 603 printf("vr%d: reset never completed!\n", sc->vr_unit); 604 else { 605 /* Use newer force reset command */ 606 printf("vr%d: Using force reset command.\n", 607 sc->vr_unit); 608 VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST); 609 } 610 } 611 612 /* Wait a little while for the chip to get its brains in order. */ 613 DELAY(1000); 614 } 615 616 /* 617 * Probe for a VIA Rhine chip. Check the PCI vendor and device 618 * IDs against our list and return a device name if we find a match. 619 */ 620 static int 621 vr_probe(device_t dev) 622 { 623 struct vr_type *t = vr_devs; 624 625 while (t->vr_name != NULL) { 626 if ((pci_get_vendor(dev) == t->vr_vid) && 627 (pci_get_device(dev) == t->vr_did)) { 628 device_set_desc(dev, t->vr_name); 629 return (0); 630 } 631 t++; 632 } 633 634 return (ENXIO); 635 } 636 637 /* 638 * Attach the interface. Allocate softc structures, do ifmedia 639 * setup and ethernet/BPF attach. 640 */ 641 static int 642 vr_attach(dev) 643 device_t dev; 644 { 645 int i; 646 u_char eaddr[ETHER_ADDR_LEN]; 647 struct vr_softc *sc; 648 struct ifnet *ifp; 649 int unit, error = 0, rid; 650 651 sc = device_get_softc(dev); 652 unit = device_get_unit(dev); 653 654 mtx_init(&sc->vr_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 655 MTX_DEF); 656 /* 657 * Map control/status registers. 658 */ 659 pci_enable_busmaster(dev); 660 sc->vr_revid = pci_read_config(dev, VR_PCI_REVID, 4) & 0x000000FF; 661 662 rid = VR_RID; 663 sc->vr_res = bus_alloc_resource_any(dev, VR_RES, &rid, RF_ACTIVE); 664 665 if (sc->vr_res == NULL) { 666 printf("vr%d: couldn't map ports/memory\n", unit); 667 error = ENXIO; 668 goto fail; 669 } 670 671 sc->vr_btag = rman_get_bustag(sc->vr_res); 672 sc->vr_bhandle = rman_get_bushandle(sc->vr_res); 673 674 /* Allocate interrupt */ 675 rid = 0; 676 sc->vr_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 677 RF_SHAREABLE | RF_ACTIVE); 678 679 if (sc->vr_irq == NULL) { 680 printf("vr%d: couldn't map interrupt\n", unit); 681 error = ENXIO; 682 goto fail; 683 } 684 685 /* 686 * Windows may put the chip in suspend mode when it 687 * shuts down. Be sure to kick it in the head to wake it 688 * up again. 689 */ 690 VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1)); 691 692 /* Reset the adapter. */ 693 vr_reset(sc); 694 695 /* 696 * Turn on bit2 (MIION) in PCI configuration register 0x53 during 697 * initialization and disable AUTOPOLL. 698 */ 699 pci_write_config(dev, VR_PCI_MODE, 700 pci_read_config(dev, VR_PCI_MODE, 4) | (VR_MODE3_MIION << 24), 4); 701 VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL); 702 703 /* 704 * Get station address. 
     * The way the Rhine chips work, you're not allowed to directly
     * access the EEPROM once they've been programmed a special way.
     * Consequently, we need to read the node address from the PAR0
     * and PAR1 registers.
     */
    VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
    DELAY(200);
    for (i = 0; i < ETHER_ADDR_LEN; i++)
        eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);

    sc->vr_unit = unit;
    bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);

    sc->vr_ldata = contigmalloc(sizeof(struct vr_list_data), M_DEVBUF,
        M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);

    if (sc->vr_ldata == NULL) {
        printf("vr%d: no memory for list buffers!\n", unit);
        error = ENXIO;
        goto fail;
    }

    bzero(sc->vr_ldata, sizeof(struct vr_list_data));

    ifp = &sc->arpcom.ac_if;
    ifp->if_softc = sc;
    if_initname(ifp, device_get_name(dev), device_get_unit(dev));
    ifp->if_mtu = ETHERMTU;
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_ioctl = vr_ioctl;
    ifp->if_start = vr_start;
    ifp->if_watchdog = vr_watchdog;
    ifp->if_init = vr_init;
    ifp->if_baudrate = 10000000;
    IFQ_SET_MAXLEN(&ifp->if_snd, VR_TX_LIST_CNT - 1);
    ifp->if_snd.ifq_maxlen = VR_TX_LIST_CNT - 1;
    IFQ_SET_READY(&ifp->if_snd);
#ifdef DEVICE_POLLING
    ifp->if_capabilities |= IFCAP_POLLING;
#endif
    ifp->if_capenable = ifp->if_capabilities;

    /* Do MII setup. */
    if (mii_phy_probe(dev, &sc->vr_miibus,
        vr_ifmedia_upd, vr_ifmedia_sts)) {
        printf("vr%d: MII without any phy!\n", sc->vr_unit);
        error = ENXIO;
        goto fail;
    }

    callout_handle_init(&sc->vr_stat_ch);

    /* Call MI attach routine. */
    ether_ifattach(ifp, eaddr);

    sc->suspended = 0;

    /* Hook interrupt last to avoid having to lock softc. */
    error = bus_setup_intr(dev, sc->vr_irq, INTR_TYPE_NET | INTR_MPSAFE,
        vr_intr, sc, &sc->vr_intrhand);

    if (error) {
        printf("vr%d: couldn't set up irq\n", unit);
        ether_ifdetach(ifp);
        goto fail;
    }

fail:
    if (error)
        vr_detach(dev);

    return (error);
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
vr_detach(device_t dev)
{
    struct vr_softc *sc = device_get_softc(dev);
    struct ifnet *ifp = &sc->arpcom.ac_if;

    KASSERT(mtx_initialized(&sc->vr_mtx), ("vr mutex not initialized"));

    VR_LOCK(sc);

    sc->suspended = 1;

    /* These should only be active if attach succeeded. */
    if (device_is_attached(dev)) {
        vr_stop(sc);
        VR_UNLOCK(sc);		/* XXX: Avoid recursive acquire. */
        ether_ifdetach(ifp);
        VR_LOCK(sc);
    }
    if (sc->vr_miibus)
        device_delete_child(dev, sc->vr_miibus);
    bus_generic_detach(dev);

    if (sc->vr_intrhand)
        bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
    if (sc->vr_irq)
        bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
    if (sc->vr_res)
        bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);

    if (sc->vr_ldata)
        contigfree(sc->vr_ldata, sizeof(struct vr_list_data), M_DEVBUF);

    VR_UNLOCK(sc);
    mtx_destroy(&sc->vr_mtx);

    return (0);
}

/*
 * Initialize the transmit descriptors.
 */
827 */ 828 static int 829 vr_list_tx_init(struct vr_softc *sc) 830 { 831 struct vr_chain_data *cd; 832 struct vr_list_data *ld; 833 int i; 834 835 cd = &sc->vr_cdata; 836 ld = sc->vr_ldata; 837 for (i = 0; i < VR_TX_LIST_CNT; i++) { 838 cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i]; 839 if (i == (VR_TX_LIST_CNT - 1)) 840 cd->vr_tx_chain[i].vr_nextdesc = 841 &cd->vr_tx_chain[0]; 842 else 843 cd->vr_tx_chain[i].vr_nextdesc = 844 &cd->vr_tx_chain[i + 1]; 845 } 846 cd->vr_tx_cons = cd->vr_tx_prod = &cd->vr_tx_chain[0]; 847 848 return (0); 849 } 850 851 852 /* 853 * Initialize the RX descriptors and allocate mbufs for them. Note that 854 * we arrange the descriptors in a closed ring, so that the last descriptor 855 * points back to the first. 856 */ 857 static int 858 vr_list_rx_init(struct vr_softc *sc) 859 { 860 struct vr_chain_data *cd; 861 struct vr_list_data *ld; 862 int i; 863 864 VR_LOCK_ASSERT(sc); 865 866 cd = &sc->vr_cdata; 867 ld = sc->vr_ldata; 868 869 for (i = 0; i < VR_RX_LIST_CNT; i++) { 870 cd->vr_rx_chain[i].vr_ptr = 871 (struct vr_desc *)&ld->vr_rx_list[i]; 872 if (vr_newbuf(sc, &cd->vr_rx_chain[i], NULL) == ENOBUFS) 873 return (ENOBUFS); 874 if (i == (VR_RX_LIST_CNT - 1)) { 875 cd->vr_rx_chain[i].vr_nextdesc = 876 &cd->vr_rx_chain[0]; 877 ld->vr_rx_list[i].vr_next = 878 vtophys(&ld->vr_rx_list[0]); 879 } else { 880 cd->vr_rx_chain[i].vr_nextdesc = 881 &cd->vr_rx_chain[i + 1]; 882 ld->vr_rx_list[i].vr_next = 883 vtophys(&ld->vr_rx_list[i + 1]); 884 } 885 } 886 887 cd->vr_rx_head = &cd->vr_rx_chain[0]; 888 889 return (0); 890 } 891 892 /* 893 * Initialize an RX descriptor and attach an MBUF cluster. 894 * Note: the length fields are only 11 bits wide, which means the 895 * largest size we can specify is 2047. This is important because 896 * MCLBYTES is 2048, so we have to subtract one otherwise we'll 897 * overflow the field and make a mess. 898 */ 899 static int 900 vr_newbuf(struct vr_softc *sc, struct vr_chain_onefrag *c, struct mbuf *m) 901 { 902 struct mbuf *m_new = NULL; 903 904 if (m == NULL) { 905 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 906 if (m_new == NULL) 907 return (ENOBUFS); 908 909 MCLGET(m_new, M_DONTWAIT); 910 if (!(m_new->m_flags & M_EXT)) { 911 m_freem(m_new); 912 return (ENOBUFS); 913 } 914 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 915 } else { 916 m_new = m; 917 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 918 m_new->m_data = m_new->m_ext.ext_buf; 919 } 920 921 m_adj(m_new, sizeof(uint64_t)); 922 923 c->vr_mbuf = m_new; 924 c->vr_ptr->vr_status = VR_RXSTAT; 925 c->vr_ptr->vr_data = vtophys(mtod(m_new, caddr_t)); 926 c->vr_ptr->vr_ctl = VR_RXCTL | VR_RXLEN; 927 928 return (0); 929 } 930 931 /* 932 * A frame has been uploaded: pass the resulting mbuf chain up to 933 * the higher level protocols. 
934 */ 935 static void 936 vr_rxeof(struct vr_softc *sc) 937 { 938 struct mbuf *m, *m0; 939 struct ifnet *ifp; 940 struct vr_chain_onefrag *cur_rx; 941 int total_len = 0; 942 uint32_t rxstat; 943 944 VR_LOCK_ASSERT(sc); 945 ifp = &sc->arpcom.ac_if; 946 947 while (!((rxstat = sc->vr_cdata.vr_rx_head->vr_ptr->vr_status) & 948 VR_RXSTAT_OWN)) { 949 #ifdef DEVICE_POLLING 950 if (ifp->if_flags & IFF_POLLING) { 951 if (sc->rxcycles <= 0) 952 break; 953 sc->rxcycles--; 954 } 955 #endif /* DEVICE_POLLING */ 956 m0 = NULL; 957 cur_rx = sc->vr_cdata.vr_rx_head; 958 sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc; 959 m = cur_rx->vr_mbuf; 960 961 /* 962 * If an error occurs, update stats, clear the 963 * status word and leave the mbuf cluster in place: 964 * it should simply get re-used next time this descriptor 965 * comes up in the ring. 966 */ 967 if (rxstat & VR_RXSTAT_RXERR) { 968 ifp->if_ierrors++; 969 printf("vr%d: rx error (%02x):", sc->vr_unit, 970 rxstat & 0x000000ff); 971 if (rxstat & VR_RXSTAT_CRCERR) 972 printf(" crc error"); 973 if (rxstat & VR_RXSTAT_FRAMEALIGNERR) 974 printf(" frame alignment error\n"); 975 if (rxstat & VR_RXSTAT_FIFOOFLOW) 976 printf(" FIFO overflow"); 977 if (rxstat & VR_RXSTAT_GIANT) 978 printf(" received giant packet"); 979 if (rxstat & VR_RXSTAT_RUNT) 980 printf(" received runt packet"); 981 if (rxstat & VR_RXSTAT_BUSERR) 982 printf(" system bus error"); 983 if (rxstat & VR_RXSTAT_BUFFERR) 984 printf("rx buffer error"); 985 printf("\n"); 986 vr_newbuf(sc, cur_rx, m); 987 continue; 988 } 989 990 /* No errors; receive the packet. */ 991 total_len = VR_RXBYTES(cur_rx->vr_ptr->vr_status); 992 993 /* 994 * XXX The VIA Rhine chip includes the CRC with every 995 * received frame, and there's no way to turn this 996 * behavior off (at least, I can't find anything in 997 * the manual that explains how to do it) so we have 998 * to trim off the CRC manually. 999 */ 1000 total_len -= ETHER_CRC_LEN; 1001 1002 m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN, ifp, 1003 NULL); 1004 vr_newbuf(sc, cur_rx, m); 1005 if (m0 == NULL) { 1006 ifp->if_ierrors++; 1007 continue; 1008 } 1009 m = m0; 1010 1011 ifp->if_ipackets++; 1012 VR_UNLOCK(sc); 1013 (*ifp->if_input)(ifp, m); 1014 VR_LOCK(sc); 1015 } 1016 } 1017 1018 static void 1019 vr_rxeoc(struct vr_softc *sc) 1020 { 1021 struct ifnet *ifp = &sc->arpcom.ac_if; 1022 int i; 1023 1024 VR_LOCK_ASSERT(sc); 1025 1026 ifp->if_ierrors++; 1027 1028 VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON); 1029 DELAY(10000); 1030 1031 /* Wait for receiver to stop */ 1032 for (i = 0x400; 1033 i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RX_ON); 1034 i--) { 1035 ; 1036 } 1037 1038 if (!i) { 1039 printf("vr%d: rx shutdown error!\n", sc->vr_unit); 1040 sc->vr_flags |= VR_F_RESTART; 1041 return; 1042 } 1043 1044 vr_rxeof(sc); 1045 1046 CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr)); 1047 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON); 1048 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO); 1049 } 1050 1051 /* 1052 * A frame was downloaded to the chip. It's safe for us to clean up 1053 * the list buffers. 1054 */ 1055 static void 1056 vr_txeof(struct vr_softc *sc) 1057 { 1058 struct vr_chain *cur_tx; 1059 struct ifnet *ifp = &sc->arpcom.ac_if; 1060 1061 VR_LOCK_ASSERT(sc); 1062 1063 /* 1064 * Go through our tx list and free mbufs for those 1065 * frames that have been transmitted. 
1066 */ 1067 cur_tx = sc->vr_cdata.vr_tx_cons; 1068 while (cur_tx->vr_mbuf != NULL) { 1069 uint32_t txstat; 1070 int i; 1071 1072 txstat = cur_tx->vr_ptr->vr_status; 1073 1074 if ((txstat & VR_TXSTAT_ABRT) || 1075 (txstat & VR_TXSTAT_UDF)) { 1076 for (i = 0x400; 1077 i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_TX_ON); 1078 i--) 1079 ; /* Wait for chip to shutdown */ 1080 if (!i) { 1081 printf("vr%d: tx shutdown timeout\n", 1082 sc->vr_unit); 1083 sc->vr_flags |= VR_F_RESTART; 1084 break; 1085 } 1086 VR_TXOWN(cur_tx) = VR_TXSTAT_OWN; 1087 CSR_WRITE_4(sc, VR_TXADDR, vtophys(cur_tx->vr_ptr)); 1088 break; 1089 } 1090 1091 if (txstat & VR_TXSTAT_OWN) 1092 break; 1093 1094 if (txstat & VR_TXSTAT_ERRSUM) { 1095 ifp->if_oerrors++; 1096 if (txstat & VR_TXSTAT_DEFER) 1097 ifp->if_collisions++; 1098 if (txstat & VR_TXSTAT_LATECOLL) 1099 ifp->if_collisions++; 1100 } 1101 1102 ifp->if_collisions +=(txstat & VR_TXSTAT_COLLCNT) >> 3; 1103 1104 ifp->if_opackets++; 1105 m_freem(cur_tx->vr_mbuf); 1106 cur_tx->vr_mbuf = NULL; 1107 ifp->if_flags &= ~IFF_OACTIVE; 1108 1109 cur_tx = cur_tx->vr_nextdesc; 1110 } 1111 sc->vr_cdata.vr_tx_cons = cur_tx; 1112 if (cur_tx->vr_mbuf == NULL) 1113 ifp->if_timer = 0; 1114 } 1115 1116 static void 1117 vr_tick(void *xsc) 1118 { 1119 struct vr_softc *sc = xsc; 1120 struct mii_data *mii; 1121 1122 VR_LOCK(sc); 1123 1124 if (sc->vr_flags & VR_F_RESTART) { 1125 printf("vr%d: restarting\n", sc->vr_unit); 1126 vr_stop(sc); 1127 vr_reset(sc); 1128 vr_init_locked(sc); 1129 sc->vr_flags &= ~VR_F_RESTART; 1130 } 1131 1132 mii = device_get_softc(sc->vr_miibus); 1133 mii_tick(mii); 1134 sc->vr_stat_ch = timeout(vr_tick, sc, hz); 1135 1136 VR_UNLOCK(sc); 1137 } 1138 1139 #ifdef DEVICE_POLLING 1140 static poll_handler_t vr_poll; 1141 static poll_handler_t vr_poll_locked; 1142 1143 static void 1144 vr_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 1145 { 1146 struct vr_softc *sc = ifp->if_softc; 1147 1148 VR_LOCK(sc); 1149 vr_poll_locked(ifp, cmd, count); 1150 VR_UNLOCK(sc); 1151 } 1152 1153 static void 1154 vr_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count) 1155 { 1156 struct vr_softc *sc = ifp->if_softc; 1157 1158 VR_LOCK_ASSERT(sc); 1159 1160 if (!(ifp->if_capenable & IFCAP_POLLING)) { 1161 ether_poll_deregister(ifp); 1162 cmd = POLL_DEREGISTER; 1163 } 1164 1165 if (cmd == POLL_DEREGISTER) { 1166 /* Final call, enable interrupts. */ 1167 CSR_WRITE_2(sc, VR_IMR, VR_INTRS); 1168 return; 1169 } 1170 1171 sc->rxcycles = count; 1172 vr_rxeof(sc); 1173 vr_txeof(sc); 1174 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1175 vr_start_locked(ifp); 1176 1177 if (cmd == POLL_AND_CHECK_STATUS) { 1178 uint16_t status; 1179 1180 /* Also check status register. 
        status = CSR_READ_2(sc, VR_ISR);
        if (status)
            CSR_WRITE_2(sc, VR_ISR, status);

        if ((status & VR_INTRS) == 0)
            return;

        if (status & VR_ISR_RX_DROPPED) {
            printf("vr%d: rx packet lost\n", sc->vr_unit);
            ifp->if_ierrors++;
        }

        if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) ||
            (status & VR_ISR_RX_OFLOW)) {
            printf("vr%d: receive error (%04x)",
                sc->vr_unit, status);
            if (status & VR_ISR_RX_NOBUF)
                printf(" no buffers");
            if (status & VR_ISR_RX_OFLOW)
                printf(" overflow");
            if (status & VR_ISR_RX_DROPPED)
                printf(" packet lost");
            printf("\n");
            vr_rxeoc(sc);
        }

        if ((status & VR_ISR_BUSERR) ||
            (status & VR_ISR_TX_UNDERRUN)) {
            vr_reset(sc);
            vr_init_locked(sc);
            return;
        }

        if ((status & VR_ISR_UDFI) ||
            (status & VR_ISR_TX_ABRT2) ||
            (status & VR_ISR_TX_ABRT)) {
            ifp->if_oerrors++;
            if (sc->vr_cdata.vr_tx_cons->vr_mbuf != NULL) {
                VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON);
                VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO);
            }
        }
    }
}
#endif /* DEVICE_POLLING */

static void
vr_intr(void *arg)
{
    struct vr_softc *sc = arg;
    struct ifnet *ifp = &sc->arpcom.ac_if;
    uint16_t status;

    VR_LOCK(sc);

    if (sc->suspended) {
        /*
         * Forcibly disable interrupts.
         * XXX: Mobile VIA based platforms may need
         * interrupt re-enable on resume.
         */
        CSR_WRITE_2(sc, VR_IMR, 0x0000);
        goto done_locked;
    }

#ifdef DEVICE_POLLING
    if (ifp->if_flags & IFF_POLLING)
        goto done_locked;

    if ((ifp->if_capenable & IFCAP_POLLING) &&
        ether_poll_register(vr_poll, ifp)) {
        /* OK, disable interrupts. */
        CSR_WRITE_2(sc, VR_IMR, 0x0000);
        vr_poll_locked(ifp, 0, 1);
        goto done_locked;
    }
#endif /* DEVICE_POLLING */

    /* Suppress unwanted interrupts. */
    if (!(ifp->if_flags & IFF_UP)) {
        vr_stop(sc);
        goto done_locked;
    }

    /* Disable interrupts. */
    CSR_WRITE_2(sc, VR_IMR, 0x0000);

    for (;;) {
        status = CSR_READ_2(sc, VR_ISR);
        if (status)
            CSR_WRITE_2(sc, VR_ISR, status);

        if ((status & VR_INTRS) == 0)
            break;

        if (status & VR_ISR_RX_OK)
            vr_rxeof(sc);

        if (status & VR_ISR_RX_DROPPED) {
            printf("vr%d: rx packet lost\n", sc->vr_unit);
            ifp->if_ierrors++;
        }

        if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) ||
            (status & VR_ISR_RX_OFLOW)) {
            printf("vr%d: receive error (%04x)",
                sc->vr_unit, status);
            if (status & VR_ISR_RX_NOBUF)
                printf(" no buffers");
            if (status & VR_ISR_RX_OFLOW)
                printf(" overflow");
            if (status & VR_ISR_RX_DROPPED)
                printf(" packet lost");
            printf("\n");
            vr_rxeoc(sc);
        }

        if ((status & VR_ISR_BUSERR) || (status & VR_ISR_TX_UNDERRUN)) {
            vr_reset(sc);
            vr_init_locked(sc);
            break;
        }

        if ((status & VR_ISR_TX_OK) || (status & VR_ISR_TX_ABRT) ||
            (status & VR_ISR_TX_ABRT2) || (status & VR_ISR_UDFI)) {
            vr_txeof(sc);
            if ((status & VR_ISR_UDFI) ||
                (status & VR_ISR_TX_ABRT2) ||
                (status & VR_ISR_TX_ABRT)) {
                ifp->if_oerrors++;
                if (sc->vr_cdata.vr_tx_cons->vr_mbuf != NULL) {
                    VR_SETBIT16(sc, VR_COMMAND,
                        VR_CMD_TX_ON);
                    VR_SETBIT16(sc, VR_COMMAND,
                        VR_CMD_TX_GO);
                }
            }
        }
    }

    /* Re-enable interrupts. */
    CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

    if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
        vr_start_locked(ifp);

done_locked:
    VR_UNLOCK(sc);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
vr_encap(struct vr_softc *sc, struct vr_chain *c, struct mbuf *m_head)
{
    struct vr_desc *f = NULL;
    struct mbuf *m;

    VR_LOCK_ASSERT(sc);
    /*
     * The VIA Rhine wants packet buffers to be longword
     * aligned, but very often our mbufs aren't. Rather than
     * waste time trying to decide when to copy and when not
     * to copy, just do it all the time.
     */
    m = m_defrag(m_head, M_DONTWAIT);
    if (m == NULL)
        return (1);

    /*
     * The Rhine chip doesn't auto-pad, so we have to make
     * sure to pad short frames out to the minimum frame length
     * ourselves.
     */
    if (m->m_len < VR_MIN_FRAMELEN) {
        m->m_pkthdr.len += VR_MIN_FRAMELEN - m->m_len;
        m->m_len = m->m_pkthdr.len;
    }

    c->vr_mbuf = m;
    f = c->vr_ptr;
    f->vr_data = vtophys(mtod(m, caddr_t));
    f->vr_ctl = m->m_len;
    f->vr_ctl |= VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG;
    f->vr_status = 0;
    f->vr_ctl |= VR_TXCTL_LASTFRAG|VR_TXCTL_FINT;
    f->vr_next = vtophys(c->vr_nextdesc->vr_ptr);

    return (0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */
1379 */ 1380 1381 static void 1382 vr_start(struct ifnet *ifp) 1383 { 1384 struct vr_softc *sc = ifp->if_softc; 1385 1386 VR_LOCK(sc); 1387 vr_start_locked(ifp); 1388 VR_UNLOCK(sc); 1389 } 1390 1391 static void 1392 vr_start_locked(struct ifnet *ifp) 1393 { 1394 struct vr_softc *sc = ifp->if_softc; 1395 struct mbuf *m_head; 1396 struct vr_chain *cur_tx; 1397 1398 if (ifp->if_flags & IFF_OACTIVE) 1399 return; 1400 1401 cur_tx = sc->vr_cdata.vr_tx_prod; 1402 while (cur_tx->vr_mbuf == NULL) { 1403 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 1404 if (m_head == NULL) 1405 break; 1406 1407 /* Pack the data into the descriptor. */ 1408 if (vr_encap(sc, cur_tx, m_head)) { 1409 /* Rollback, send what we were able to encap. */ 1410 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 1411 break; 1412 } 1413 1414 VR_TXOWN(cur_tx) = VR_TXSTAT_OWN; 1415 1416 /* 1417 * If there's a BPF listener, bounce a copy of this frame 1418 * to him. 1419 */ 1420 BPF_MTAP(ifp, cur_tx->vr_mbuf); 1421 1422 cur_tx = cur_tx->vr_nextdesc; 1423 } 1424 if (cur_tx != sc->vr_cdata.vr_tx_prod || cur_tx->vr_mbuf != NULL) { 1425 sc->vr_cdata.vr_tx_prod = cur_tx; 1426 1427 /* Tell the chip to start transmitting. */ 1428 VR_SETBIT16(sc, VR_COMMAND, /*VR_CMD_TX_ON|*/ VR_CMD_TX_GO); 1429 1430 /* Set a timeout in case the chip goes out to lunch. */ 1431 ifp->if_timer = 5; 1432 1433 if (cur_tx->vr_mbuf != NULL) 1434 ifp->if_flags |= IFF_OACTIVE; 1435 } 1436 } 1437 1438 static void 1439 vr_init(void *xsc) 1440 { 1441 struct vr_softc *sc = xsc; 1442 1443 VR_LOCK(sc); 1444 vr_init_locked(sc); 1445 VR_UNLOCK(sc); 1446 } 1447 1448 static void 1449 vr_init_locked(struct vr_softc *sc) 1450 { 1451 struct ifnet *ifp = &sc->arpcom.ac_if; 1452 struct mii_data *mii; 1453 int i; 1454 1455 VR_LOCK_ASSERT(sc); 1456 1457 mii = device_get_softc(sc->vr_miibus); 1458 1459 /* Cancel pending I/O and free all RX/TX buffers. */ 1460 vr_stop(sc); 1461 vr_reset(sc); 1462 1463 /* Set our station address. */ 1464 for (i = 0; i < ETHER_ADDR_LEN; i++) 1465 CSR_WRITE_1(sc, VR_PAR0 + i, sc->arpcom.ac_enaddr[i]); 1466 1467 /* Set DMA size. */ 1468 VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH); 1469 VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD); 1470 1471 /* 1472 * BCR0 and BCR1 can override the RXCFG and TXCFG registers, 1473 * so we must set both. 1474 */ 1475 VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH); 1476 VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES); 1477 1478 VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH); 1479 VR_SETBIT(sc, VR_BCR1, VR_BCR1_TXTHRESHSTORENFWD); 1480 1481 VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH); 1482 VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES); 1483 1484 VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH); 1485 VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD); 1486 1487 /* Init circular RX list. */ 1488 if (vr_list_rx_init(sc) == ENOBUFS) { 1489 printf( 1490 "vr%d: initialization failed: no memory for rx buffers\n", sc->vr_unit); 1491 vr_stop(sc); 1492 return; 1493 } 1494 1495 /* Init tx descriptors. */ 1496 vr_list_tx_init(sc); 1497 1498 /* If we want promiscuous mode, set the allframes bit. */ 1499 if (ifp->if_flags & IFF_PROMISC) 1500 VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC); 1501 else 1502 VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC); 1503 1504 /* Set capture broadcast bit to capture broadcast frames. */ 1505 if (ifp->if_flags & IFF_BROADCAST) 1506 VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD); 1507 else 1508 VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD); 1509 1510 /* 1511 * Program the multicast filter, if necessary. 
1512 */ 1513 vr_setmulti(sc); 1514 1515 /* 1516 * Load the address of the RX list. 1517 */ 1518 CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr)); 1519 1520 /* Enable receiver and transmitter. */ 1521 CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START| 1522 VR_CMD_TX_ON|VR_CMD_RX_ON| 1523 VR_CMD_RX_GO); 1524 1525 CSR_WRITE_4(sc, VR_TXADDR, vtophys(&sc->vr_ldata->vr_tx_list[0])); 1526 1527 CSR_WRITE_2(sc, VR_ISR, 0xFFFF); 1528 #ifdef DEVICE_POLLING 1529 /* 1530 * Disable interrupts if we are polling. 1531 */ 1532 if (ifp->if_flags & IFF_POLLING) 1533 CSR_WRITE_2(sc, VR_IMR, 0); 1534 else 1535 #endif /* DEVICE_POLLING */ 1536 /* 1537 * Enable interrupts. 1538 */ 1539 CSR_WRITE_2(sc, VR_IMR, VR_INTRS); 1540 1541 mii_mediachg(mii); 1542 1543 ifp->if_flags |= IFF_RUNNING; 1544 ifp->if_flags &= ~IFF_OACTIVE; 1545 1546 sc->vr_stat_ch = timeout(vr_tick, sc, hz); 1547 } 1548 1549 /* 1550 * Set media options. 1551 */ 1552 static int 1553 vr_ifmedia_upd(struct ifnet *ifp) 1554 { 1555 struct vr_softc *sc = ifp->if_softc; 1556 1557 if (ifp->if_flags & IFF_UP) 1558 vr_init(sc); 1559 1560 return (0); 1561 } 1562 1563 /* 1564 * Report current media status. 1565 */ 1566 static void 1567 vr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 1568 { 1569 struct vr_softc *sc = ifp->if_softc; 1570 struct mii_data *mii; 1571 1572 mii = device_get_softc(sc->vr_miibus); 1573 VR_LOCK(sc); 1574 mii_pollstat(mii); 1575 VR_UNLOCK(sc); 1576 ifmr->ifm_active = mii->mii_media_active; 1577 ifmr->ifm_status = mii->mii_media_status; 1578 } 1579 1580 static int 1581 vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 1582 { 1583 struct vr_softc *sc = ifp->if_softc; 1584 struct ifreq *ifr = (struct ifreq *) data; 1585 struct mii_data *mii; 1586 int error = 0; 1587 1588 switch (command) { 1589 case SIOCSIFFLAGS: 1590 VR_LOCK(sc); 1591 if (ifp->if_flags & IFF_UP) { 1592 vr_init_locked(sc); 1593 } else { 1594 if (ifp->if_flags & IFF_RUNNING) 1595 vr_stop(sc); 1596 } 1597 VR_UNLOCK(sc); 1598 error = 0; 1599 break; 1600 case SIOCADDMULTI: 1601 case SIOCDELMULTI: 1602 VR_LOCK(sc); 1603 vr_setmulti(sc); 1604 VR_UNLOCK(sc); 1605 error = 0; 1606 break; 1607 case SIOCGIFMEDIA: 1608 case SIOCSIFMEDIA: 1609 mii = device_get_softc(sc->vr_miibus); 1610 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 1611 break; 1612 case SIOCSIFCAP: 1613 ifp->if_capenable = ifr->ifr_reqcap; 1614 break; 1615 default: 1616 error = ether_ioctl(ifp, command, data); 1617 break; 1618 } 1619 1620 return (error); 1621 } 1622 1623 static void 1624 vr_watchdog(struct ifnet *ifp) 1625 { 1626 struct vr_softc *sc = ifp->if_softc; 1627 1628 VR_LOCK(sc); 1629 1630 ifp->if_oerrors++; 1631 printf("vr%d: watchdog timeout\n", sc->vr_unit); 1632 1633 vr_stop(sc); 1634 vr_reset(sc); 1635 vr_init_locked(sc); 1636 1637 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1638 vr_start_locked(ifp); 1639 1640 VR_UNLOCK(sc); 1641 } 1642 1643 /* 1644 * Stop the adapter and free any mbufs allocated to the 1645 * RX and TX lists. 
1646 */ 1647 static void 1648 vr_stop(struct vr_softc *sc) 1649 { 1650 register int i; 1651 struct ifnet *ifp; 1652 1653 VR_LOCK_ASSERT(sc); 1654 1655 ifp = &sc->arpcom.ac_if; 1656 ifp->if_timer = 0; 1657 1658 untimeout(vr_tick, sc, sc->vr_stat_ch); 1659 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1660 #ifdef DEVICE_POLLING 1661 ether_poll_deregister(ifp); 1662 #endif /* DEVICE_POLLING */ 1663 1664 VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP); 1665 VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON)); 1666 CSR_WRITE_2(sc, VR_IMR, 0x0000); 1667 CSR_WRITE_4(sc, VR_TXADDR, 0x00000000); 1668 CSR_WRITE_4(sc, VR_RXADDR, 0x00000000); 1669 1670 /* 1671 * Free data in the RX lists. 1672 */ 1673 for (i = 0; i < VR_RX_LIST_CNT; i++) { 1674 if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) { 1675 m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf); 1676 sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL; 1677 } 1678 } 1679 bzero((char *)&sc->vr_ldata->vr_rx_list, 1680 sizeof(sc->vr_ldata->vr_rx_list)); 1681 1682 /* 1683 * Free the TX list buffers. 1684 */ 1685 for (i = 0; i < VR_TX_LIST_CNT; i++) { 1686 if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) { 1687 m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf); 1688 sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL; 1689 } 1690 } 1691 bzero((char *)&sc->vr_ldata->vr_tx_list, 1692 sizeof(sc->vr_ldata->vr_tx_list)); 1693 } 1694 1695 /* 1696 * Stop all chip I/O so that the kernel's probe routines don't 1697 * get confused by errant DMAs when rebooting. 1698 */ 1699 static void 1700 vr_shutdown(device_t dev) 1701 { 1702 1703 vr_detach(dev); 1704 } 1705