1 /*- 2 * Copyright (c) 1997, 1998 3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Bill Paul. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 /* 37 * VIA Rhine fast ethernet PCI NIC driver 38 * 39 * Supports various network adapters based on the VIA Rhine 40 * and Rhine II PCI controllers, including the D-Link DFE530TX. 41 * Datasheets are available at http://www.via.com.tw. 42 * 43 * Written by Bill Paul <wpaul@ctr.columbia.edu> 44 * Electrical Engineering Department 45 * Columbia University, New York City 46 */ 47 48 /* 49 * The VIA Rhine controllers are similar in some respects to the 50 * the DEC tulip chips, except less complicated. The controller 51 * uses an MII bus and an external physical layer interface. The 52 * receiver has a one entry perfect filter and a 64-bit hash table 53 * multicast filter. Transmit and receive descriptors are similar 54 * to the tulip. 55 * 56 * Some Rhine chips has a serious flaw in its transmit DMA mechanism: 57 * transmit buffers must be longword aligned. Unfortunately, 58 * FreeBSD doesn't guarantee that mbufs will be filled in starting 59 * at longword boundaries, so we have to do a buffer copy before 60 * transmission. 
61 */ 62 63 #ifdef HAVE_KERNEL_OPTION_HEADERS 64 #include "opt_device_polling.h" 65 #endif 66 67 #include <sys/param.h> 68 #include <sys/systm.h> 69 #include <sys/bus.h> 70 #include <sys/endian.h> 71 #include <sys/kernel.h> 72 #include <sys/malloc.h> 73 #include <sys/mbuf.h> 74 #include <sys/module.h> 75 #include <sys/rman.h> 76 #include <sys/socket.h> 77 #include <sys/sockio.h> 78 #include <sys/sysctl.h> 79 #include <sys/taskqueue.h> 80 81 #include <net/bpf.h> 82 #include <net/if.h> 83 #include <net/ethernet.h> 84 #include <net/if_dl.h> 85 #include <net/if_media.h> 86 #include <net/if_types.h> 87 #include <net/if_vlan_var.h> 88 89 #include <dev/mii/mii.h> 90 #include <dev/mii/miivar.h> 91 92 #include <dev/pci/pcireg.h> 93 #include <dev/pci/pcivar.h> 94 95 #include <machine/bus.h> 96 97 #include <dev/vr/if_vrreg.h> 98 99 /* "device miibus" required. See GENERIC if you get errors here. */ 100 #include "miibus_if.h" 101 102 MODULE_DEPEND(vr, pci, 1, 1, 1); 103 MODULE_DEPEND(vr, ether, 1, 1, 1); 104 MODULE_DEPEND(vr, miibus, 1, 1, 1); 105 106 /* Define to show Rx/Tx error status. */ 107 #undef VR_SHOW_ERRORS 108 #define VR_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 109 110 /* 111 * Various supported device vendors/types, their names & quirks. 112 */ 113 #define VR_Q_NEEDALIGN (1<<0) 114 #define VR_Q_CSUM (1<<1) 115 #define VR_Q_CAM (1<<2) 116 117 static struct vr_type { 118 u_int16_t vr_vid; 119 u_int16_t vr_did; 120 int vr_quirks; 121 char *vr_name; 122 } vr_devs[] = { 123 { VIA_VENDORID, VIA_DEVICEID_RHINE, 124 VR_Q_NEEDALIGN, 125 "VIA VT3043 Rhine I 10/100BaseTX" }, 126 { VIA_VENDORID, VIA_DEVICEID_RHINE_II, 127 VR_Q_NEEDALIGN, 128 "VIA VT86C100A Rhine II 10/100BaseTX" }, 129 { VIA_VENDORID, VIA_DEVICEID_RHINE_II_2, 130 0, 131 "VIA VT6102 Rhine II 10/100BaseTX" }, 132 { VIA_VENDORID, VIA_DEVICEID_RHINE_III, 133 0, 134 "VIA VT6105 Rhine III 10/100BaseTX" }, 135 { VIA_VENDORID, VIA_DEVICEID_RHINE_III_M, 136 VR_Q_CSUM, 137 "VIA VT6105M Rhine III 10/100BaseTX" }, 138 { DELTA_VENDORID, DELTA_DEVICEID_RHINE_II, 139 VR_Q_NEEDALIGN, 140 "Delta Electronics Rhine II 10/100BaseTX" }, 141 { ADDTRON_VENDORID, ADDTRON_DEVICEID_RHINE_II, 142 VR_Q_NEEDALIGN, 143 "Addtron Technology Rhine II 10/100BaseTX" }, 144 { 0, 0, 0, NULL } 145 }; 146 147 static int vr_probe(device_t); 148 static int vr_attach(device_t); 149 static int vr_detach(device_t); 150 static int vr_shutdown(device_t); 151 static int vr_suspend(device_t); 152 static int vr_resume(device_t); 153 154 static void vr_dmamap_cb(void *, bus_dma_segment_t *, int, int); 155 static int vr_dma_alloc(struct vr_softc *); 156 static void vr_dma_free(struct vr_softc *); 157 static __inline void vr_discard_rxbuf(struct vr_rxdesc *); 158 static int vr_newbuf(struct vr_softc *, int); 159 160 #ifndef __NO_STRICT_ALIGNMENT 161 static __inline void vr_fixup_rx(struct mbuf *); 162 #endif 163 static int vr_rxeof(struct vr_softc *); 164 static void vr_txeof(struct vr_softc *); 165 static void vr_tick(void *); 166 static int vr_error(struct vr_softc *, uint16_t); 167 static void vr_tx_underrun(struct vr_softc *); 168 static void vr_intr(void *); 169 static void vr_start(struct ifnet *); 170 static void vr_start_locked(struct ifnet *); 171 static int vr_encap(struct vr_softc *, struct mbuf **); 172 static int vr_ioctl(struct ifnet *, u_long, caddr_t); 173 static void vr_init(void *); 174 static void vr_init_locked(struct vr_softc *); 175 static void vr_tx_start(struct vr_softc *); 176 static void vr_rx_start(struct vr_softc *); 177 static int vr_tx_stop(struct 
vr_softc *); 178 static int vr_rx_stop(struct vr_softc *); 179 static void vr_stop(struct vr_softc *); 180 static void vr_watchdog(struct vr_softc *); 181 static int vr_ifmedia_upd(struct ifnet *); 182 static void vr_ifmedia_sts(struct ifnet *, struct ifmediareq *); 183 184 static int vr_miibus_readreg(device_t, int, int); 185 static int vr_miibus_writereg(device_t, int, int, int); 186 static void vr_miibus_statchg(device_t); 187 188 static void vr_link_task(void *, int); 189 static void vr_cam_mask(struct vr_softc *, uint32_t, int); 190 static int vr_cam_data(struct vr_softc *, int, int, uint8_t *); 191 static void vr_set_filter(struct vr_softc *); 192 static void vr_reset(const struct vr_softc *); 193 static int vr_tx_ring_init(struct vr_softc *); 194 static int vr_rx_ring_init(struct vr_softc *); 195 static void vr_setwol(struct vr_softc *); 196 static void vr_clrwol(struct vr_softc *); 197 static int vr_sysctl_stats(SYSCTL_HANDLER_ARGS); 198 199 static struct vr_tx_threshold_table { 200 int tx_cfg; 201 int bcr_cfg; 202 int value; 203 } vr_tx_threshold_tables[] = { 204 { VR_TXTHRESH_64BYTES, VR_BCR1_TXTHRESH64BYTES, 64 }, 205 { VR_TXTHRESH_128BYTES, VR_BCR1_TXTHRESH128BYTES, 128 }, 206 { VR_TXTHRESH_256BYTES, VR_BCR1_TXTHRESH256BYTES, 256 }, 207 { VR_TXTHRESH_512BYTES, VR_BCR1_TXTHRESH512BYTES, 512 }, 208 { VR_TXTHRESH_1024BYTES, VR_BCR1_TXTHRESH1024BYTES, 1024 }, 209 { VR_TXTHRESH_STORENFWD, VR_BCR1_TXTHRESHSTORENFWD, 2048 } 210 }; 211 212 static device_method_t vr_methods[] = { 213 /* Device interface */ 214 DEVMETHOD(device_probe, vr_probe), 215 DEVMETHOD(device_attach, vr_attach), 216 DEVMETHOD(device_detach, vr_detach), 217 DEVMETHOD(device_shutdown, vr_shutdown), 218 DEVMETHOD(device_suspend, vr_suspend), 219 DEVMETHOD(device_resume, vr_resume), 220 221 /* bus interface */ 222 DEVMETHOD(bus_print_child, bus_generic_print_child), 223 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 224 225 /* MII interface */ 226 DEVMETHOD(miibus_readreg, vr_miibus_readreg), 227 DEVMETHOD(miibus_writereg, vr_miibus_writereg), 228 DEVMETHOD(miibus_statchg, vr_miibus_statchg), 229 DEVMETHOD(miibus_linkchg, vr_miibus_statchg), 230 231 { NULL, NULL } 232 }; 233 234 static driver_t vr_driver = { 235 "vr", 236 vr_methods, 237 sizeof(struct vr_softc) 238 }; 239 240 static devclass_t vr_devclass; 241 242 DRIVER_MODULE(vr, pci, vr_driver, vr_devclass, 0, 0); 243 DRIVER_MODULE(miibus, vr, miibus_driver, miibus_devclass, 0, 0); 244 245 static int 246 vr_miibus_readreg(device_t dev, int phy, int reg) 247 { 248 struct vr_softc *sc; 249 int i; 250 251 sc = device_get_softc(dev); 252 253 /* Set the register address. */ 254 CSR_WRITE_1(sc, VR_MIIADDR, reg); 255 VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB); 256 257 for (i = 0; i < VR_MII_TIMEOUT; i++) { 258 DELAY(1); 259 if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0) 260 break; 261 } 262 if (i == VR_MII_TIMEOUT) 263 device_printf(sc->vr_dev, "phy read timeout %d:%d\n", phy, reg); 264 265 return (CSR_READ_2(sc, VR_MIIDATA)); 266 } 267 268 static int 269 vr_miibus_writereg(device_t dev, int phy, int reg, int data) 270 { 271 struct vr_softc *sc; 272 int i; 273 274 sc = device_get_softc(dev); 275 276 /* Set the register address and data to write. 
*/ 277 CSR_WRITE_1(sc, VR_MIIADDR, reg); 278 CSR_WRITE_2(sc, VR_MIIDATA, data); 279 VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB); 280 281 for (i = 0; i < VR_MII_TIMEOUT; i++) { 282 DELAY(1); 283 if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0) 284 break; 285 } 286 if (i == VR_MII_TIMEOUT) 287 device_printf(sc->vr_dev, "phy write timeout %d:%d\n", phy, 288 reg); 289 290 return (0); 291 } 292 293 static void 294 vr_miibus_statchg(device_t dev) 295 { 296 struct vr_softc *sc; 297 298 sc = device_get_softc(dev); 299 taskqueue_enqueue(taskqueue_swi, &sc->vr_link_task); 300 } 301 302 /* 303 * In order to fiddle with the 304 * 'full-duplex' and '100Mbps' bits in the netconfig register, we 305 * first have to put the transmit and/or receive logic in the idle state. 306 */ 307 static void 308 vr_link_task(void *arg, int pending) 309 { 310 struct vr_softc *sc; 311 struct mii_data *mii; 312 struct ifnet *ifp; 313 int lfdx, mfdx; 314 uint8_t cr0, cr1, fc; 315 316 sc = (struct vr_softc *)arg; 317 318 VR_LOCK(sc); 319 mii = device_get_softc(sc->vr_miibus); 320 ifp = sc->vr_ifp; 321 if (mii == NULL || ifp == NULL || 322 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 323 VR_UNLOCK(sc); 324 return; 325 } 326 327 if (mii->mii_media_status & IFM_ACTIVE) { 328 if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) 329 sc->vr_link = 1; 330 } else 331 sc->vr_link = 0; 332 333 if (sc->vr_link != 0) { 334 cr0 = CSR_READ_1(sc, VR_CR0); 335 cr1 = CSR_READ_1(sc, VR_CR1); 336 mfdx = (cr1 & VR_CR1_FULLDUPLEX) != 0; 337 lfdx = (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0; 338 if (mfdx != lfdx) { 339 if ((cr0 & (VR_CR0_TX_ON | VR_CR0_RX_ON)) != 0) { 340 if (vr_tx_stop(sc) != 0 || 341 vr_rx_stop(sc) != 0) { 342 device_printf(sc->vr_dev, 343 "%s: Tx/Rx shutdown error -- " 344 "resetting\n", __func__); 345 sc->vr_flags |= VR_F_RESTART; 346 VR_UNLOCK(sc); 347 return; 348 } 349 } 350 if (lfdx) 351 cr1 |= VR_CR1_FULLDUPLEX; 352 else 353 cr1 &= ~VR_CR1_FULLDUPLEX; 354 CSR_WRITE_1(sc, VR_CR1, cr1); 355 } 356 fc = 0; 357 #ifdef notyet 358 /* Configure flow-control. */ 359 if (sc->vr_revid >= REV_ID_VT6105_A0) { 360 fc = CSR_READ_1(sc, VR_FLOWCR1); 361 fc &= ~(VR_FLOWCR1_TXPAUSE | VR_FLOWCR1_RXPAUSE); 362 if ((IFM_OPTIONS(mii->mii_media_active) & 363 IFM_ETH_RXPAUSE) != 0) 364 fc |= VR_FLOWCR1_RXPAUSE; 365 if ((IFM_OPTIONS(mii->mii_media_active) & 366 IFM_ETH_TXPAUSE) != 0) 367 fc |= VR_FLOWCR1_TXPAUSE; 368 CSR_WRITE_1(sc, VR_FLOWCR1, fc); 369 } else if (sc->vr_revid >= REV_ID_VT6102_A) { 370 /* No Tx puase capability available for Rhine II. 
*/ 371 fc = CSR_READ_1(sc, VR_MISC_CR0); 372 fc &= ~VR_MISCCR0_RXPAUSE; 373 if ((IFM_OPTIONS(mii->mii_media_active) & 374 IFM_ETH_RXPAUSE) != 0) 375 fc |= VR_MISCCR0_RXPAUSE; 376 CSR_WRITE_1(sc, VR_MISC_CR0, fc); 377 } 378 #endif 379 vr_rx_start(sc); 380 vr_tx_start(sc); 381 } else { 382 if (vr_tx_stop(sc) != 0 || vr_rx_stop(sc) != 0) { 383 device_printf(sc->vr_dev, 384 "%s: Tx/Rx shutdown error -- resetting\n", 385 __func__); 386 sc->vr_flags |= VR_F_RESTART; 387 VR_UNLOCK(sc); 388 return; 389 } 390 } 391 VR_UNLOCK(sc); 392 } 393 394 395 static void 396 vr_cam_mask(struct vr_softc *sc, uint32_t mask, int type) 397 { 398 399 if (type == VR_MCAST_CAM) 400 CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_MCAST); 401 else 402 CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_VLAN); 403 CSR_WRITE_4(sc, VR_CAMMASK, mask); 404 CSR_WRITE_1(sc, VR_CAMCTL, 0); 405 } 406 407 static int 408 vr_cam_data(struct vr_softc *sc, int type, int idx, uint8_t *mac) 409 { 410 int i; 411 412 if (type == VR_MCAST_CAM) { 413 if (idx < 0 || idx >= VR_CAM_MCAST_CNT || mac == NULL) 414 return (EINVAL); 415 CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_MCAST); 416 } else 417 CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_VLAN); 418 419 /* Set CAM entry address. */ 420 CSR_WRITE_1(sc, VR_CAMADDR, idx); 421 /* Set CAM entry data. */ 422 if (type == VR_MCAST_CAM) { 423 for (i = 0; i < ETHER_ADDR_LEN; i++) 424 CSR_WRITE_1(sc, VR_MCAM0 + i, mac[i]); 425 } else { 426 CSR_WRITE_1(sc, VR_VCAM0, mac[0]); 427 CSR_WRITE_1(sc, VR_VCAM1, mac[1]); 428 } 429 DELAY(10); 430 /* Write CAM and wait for self-clear of VR_CAMCTL_WRITE bit. */ 431 CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_WRITE); 432 for (i = 0; i < VR_TIMEOUT; i++) { 433 DELAY(1); 434 if ((CSR_READ_1(sc, VR_CAMCTL) & VR_CAMCTL_WRITE) == 0) 435 break; 436 } 437 438 if (i == VR_TIMEOUT) 439 device_printf(sc->vr_dev, "%s: setting CAM filter timeout!\n", 440 __func__); 441 CSR_WRITE_1(sc, VR_CAMCTL, 0); 442 443 return (i == VR_TIMEOUT ? ETIMEDOUT : 0); 444 } 445 446 /* 447 * Program the 64-bit multicast hash filter. 448 */ 449 static void 450 vr_set_filter(struct vr_softc *sc) 451 { 452 struct ifnet *ifp; 453 int h; 454 uint32_t hashes[2] = { 0, 0 }; 455 struct ifmultiaddr *ifma; 456 uint8_t rxfilt; 457 int error, mcnt; 458 uint32_t cam_mask; 459 460 VR_LOCK_ASSERT(sc); 461 462 ifp = sc->vr_ifp; 463 rxfilt = CSR_READ_1(sc, VR_RXCFG); 464 rxfilt &= ~(VR_RXCFG_RX_PROMISC | VR_RXCFG_RX_BROAD | 465 VR_RXCFG_RX_MULTI); 466 if (ifp->if_flags & IFF_BROADCAST) 467 rxfilt |= VR_RXCFG_RX_BROAD; 468 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 469 rxfilt |= VR_RXCFG_RX_MULTI; 470 if (ifp->if_flags & IFF_PROMISC) 471 rxfilt |= VR_RXCFG_RX_PROMISC; 472 CSR_WRITE_1(sc, VR_RXCFG, rxfilt); 473 CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF); 474 CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF); 475 return; 476 } 477 478 /* Now program new ones. */ 479 error = 0; 480 mcnt = 0; 481 if_maddr_rlock(ifp); 482 if ((sc->vr_quirks & VR_Q_CAM) != 0) { 483 /* 484 * For hardwares that have CAM capability, use 485 * 32 entries multicast perfect filter. 
486 */ 487 cam_mask = 0; 488 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 489 if (ifma->ifma_addr->sa_family != AF_LINK) 490 continue; 491 error = vr_cam_data(sc, VR_MCAST_CAM, mcnt, 492 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 493 if (error != 0) { 494 cam_mask = 0; 495 break; 496 } 497 cam_mask |= 1 << mcnt; 498 mcnt++; 499 } 500 vr_cam_mask(sc, VR_MCAST_CAM, cam_mask); 501 } 502 503 if ((sc->vr_quirks & VR_Q_CAM) == 0 || error != 0) { 504 /* 505 * If there are too many multicast addresses or 506 * setting multicast CAM filter failed, use hash 507 * table based filtering. 508 */ 509 mcnt = 0; 510 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 511 if (ifma->ifma_addr->sa_family != AF_LINK) 512 continue; 513 h = ether_crc32_be(LLADDR((struct sockaddr_dl *) 514 ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; 515 if (h < 32) 516 hashes[0] |= (1 << h); 517 else 518 hashes[1] |= (1 << (h - 32)); 519 mcnt++; 520 } 521 } 522 if_maddr_runlock(ifp); 523 524 if (mcnt > 0) 525 rxfilt |= VR_RXCFG_RX_MULTI; 526 527 CSR_WRITE_4(sc, VR_MAR0, hashes[0]); 528 CSR_WRITE_4(sc, VR_MAR1, hashes[1]); 529 CSR_WRITE_1(sc, VR_RXCFG, rxfilt); 530 } 531 532 static void 533 vr_reset(const struct vr_softc *sc) 534 { 535 int i; 536 537 /*VR_LOCK_ASSERT(sc);*/ /* XXX: Called during attach w/o lock. */ 538 539 CSR_WRITE_1(sc, VR_CR1, VR_CR1_RESET); 540 if (sc->vr_revid < REV_ID_VT6102_A) { 541 /* VT86C100A needs more delay after reset. */ 542 DELAY(100); 543 } 544 for (i = 0; i < VR_TIMEOUT; i++) { 545 DELAY(10); 546 if (!(CSR_READ_1(sc, VR_CR1) & VR_CR1_RESET)) 547 break; 548 } 549 if (i == VR_TIMEOUT) { 550 if (sc->vr_revid < REV_ID_VT6102_A) 551 device_printf(sc->vr_dev, "reset never completed!\n"); 552 else { 553 /* Use newer force reset command. */ 554 device_printf(sc->vr_dev, 555 "Using force reset command.\n"); 556 VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST); 557 /* 558 * Wait a little while for the chip to get its brains 559 * in order. 560 */ 561 DELAY(2000); 562 } 563 } 564 565 } 566 567 /* 568 * Probe for a VIA Rhine chip. Check the PCI vendor and device 569 * IDs against our list and return a match or NULL 570 */ 571 static struct vr_type * 572 vr_match(device_t dev) 573 { 574 struct vr_type *t = vr_devs; 575 576 for (t = vr_devs; t->vr_name != NULL; t++) 577 if ((pci_get_vendor(dev) == t->vr_vid) && 578 (pci_get_device(dev) == t->vr_did)) 579 return (t); 580 return (NULL); 581 } 582 583 /* 584 * Probe for a VIA Rhine chip. Check the PCI vendor and device 585 * IDs against our list and return a device name if we find a match. 586 */ 587 static int 588 vr_probe(device_t dev) 589 { 590 struct vr_type *t; 591 592 t = vr_match(dev); 593 if (t != NULL) { 594 device_set_desc(dev, t->vr_name); 595 return (BUS_PROBE_DEFAULT); 596 } 597 return (ENXIO); 598 } 599 600 /* 601 * Attach the interface. Allocate softc structures, do ifmedia 602 * setup and ethernet/BPF attach. 
603 */ 604 static int 605 vr_attach(device_t dev) 606 { 607 struct vr_softc *sc; 608 struct ifnet *ifp; 609 struct vr_type *t; 610 uint8_t eaddr[ETHER_ADDR_LEN]; 611 int error, rid; 612 int i, phy, pmc; 613 614 sc = device_get_softc(dev); 615 sc->vr_dev = dev; 616 t = vr_match(dev); 617 KASSERT(t != NULL, ("Lost if_vr device match")); 618 sc->vr_quirks = t->vr_quirks; 619 device_printf(dev, "Quirks: 0x%x\n", sc->vr_quirks); 620 621 mtx_init(&sc->vr_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 622 MTX_DEF); 623 callout_init_mtx(&sc->vr_stat_callout, &sc->vr_mtx, 0); 624 TASK_INIT(&sc->vr_link_task, 0, vr_link_task, sc); 625 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 626 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 627 OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 628 vr_sysctl_stats, "I", "Statistics"); 629 630 error = 0; 631 632 /* 633 * Map control/status registers. 634 */ 635 pci_enable_busmaster(dev); 636 sc->vr_revid = pci_get_revid(dev); 637 device_printf(dev, "Revision: 0x%x\n", sc->vr_revid); 638 639 sc->vr_res_id = PCIR_BAR(0); 640 sc->vr_res_type = SYS_RES_IOPORT; 641 sc->vr_res = bus_alloc_resource_any(dev, sc->vr_res_type, 642 &sc->vr_res_id, RF_ACTIVE); 643 if (sc->vr_res == NULL) { 644 device_printf(dev, "couldn't map ports\n"); 645 error = ENXIO; 646 goto fail; 647 } 648 649 /* Allocate interrupt. */ 650 rid = 0; 651 sc->vr_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 652 RF_SHAREABLE | RF_ACTIVE); 653 654 if (sc->vr_irq == NULL) { 655 device_printf(dev, "couldn't map interrupt\n"); 656 error = ENXIO; 657 goto fail; 658 } 659 660 /* Allocate ifnet structure. */ 661 ifp = sc->vr_ifp = if_alloc(IFT_ETHER); 662 if (ifp == NULL) { 663 device_printf(dev, "couldn't allocate ifnet structure\n"); 664 error = ENOSPC; 665 goto fail; 666 } 667 ifp->if_softc = sc; 668 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 669 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 670 ifp->if_ioctl = vr_ioctl; 671 ifp->if_start = vr_start; 672 ifp->if_init = vr_init; 673 IFQ_SET_MAXLEN(&ifp->if_snd, VR_TX_RING_CNT - 1); 674 ifp->if_snd.ifq_maxlen = VR_TX_RING_CNT - 1; 675 IFQ_SET_READY(&ifp->if_snd); 676 677 /* Configure Tx FIFO threshold. */ 678 sc->vr_txthresh = VR_TXTHRESH_MIN; 679 if (sc->vr_revid < REV_ID_VT6105_A0) { 680 /* 681 * Use store and forward mode for Rhine I/II. 682 * Otherwise they produce a lot of Tx underruns and 683 * it would take a while to get working FIFO threshold 684 * value. 685 */ 686 sc->vr_txthresh = VR_TXTHRESH_MAX; 687 } 688 if ((sc->vr_quirks & VR_Q_CSUM) != 0) { 689 ifp->if_hwassist = VR_CSUM_FEATURES; 690 ifp->if_capabilities |= IFCAP_HWCSUM; 691 /* 692 * To update checksum field the hardware may need to 693 * store entire frames into FIFO before transmitting. 694 */ 695 sc->vr_txthresh = VR_TXTHRESH_MAX; 696 } 697 698 if (sc->vr_revid >= REV_ID_VT6102_A && 699 pci_find_extcap(dev, PCIY_PMG, &pmc) == 0) 700 ifp->if_capabilities |= IFCAP_WOL_UCAST | IFCAP_WOL_MAGIC; 701 702 /* Rhine supports oversized VLAN frame. */ 703 ifp->if_capabilities |= IFCAP_VLAN_MTU; 704 ifp->if_capenable = ifp->if_capabilities; 705 #ifdef DEVICE_POLLING 706 ifp->if_capabilities |= IFCAP_POLLING; 707 #endif 708 709 /* 710 * Windows may put the chip in suspend mode when it 711 * shuts down. Be sure to kick it in the head to wake it 712 * up again. 713 */ 714 if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0) 715 VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1)); 716 717 /* 718 * Get station address. 
The way the Rhine chips work, 719 * you're not allowed to directly access the EEPROM once 720 * they've been programmed a special way. Consequently, 721 * we need to read the node address from the PAR0 and PAR1 722 * registers. 723 * Reloading EEPROM also overwrites VR_CFGA, VR_CFGB, 724 * VR_CFGC and VR_CFGD such that memory mapped IO configured 725 * by driver is reset to default state. 726 */ 727 VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD); 728 for (i = VR_TIMEOUT; i > 0; i--) { 729 DELAY(1); 730 if ((CSR_READ_1(sc, VR_EECSR) & VR_EECSR_LOAD) == 0) 731 break; 732 } 733 if (i == 0) 734 device_printf(dev, "Reloading EEPROM timeout!\n"); 735 for (i = 0; i < ETHER_ADDR_LEN; i++) 736 eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i); 737 738 /* Reset the adapter. */ 739 vr_reset(sc); 740 /* Ack intr & disable further interrupts. */ 741 CSR_WRITE_2(sc, VR_ISR, 0xFFFF); 742 CSR_WRITE_2(sc, VR_IMR, 0); 743 if (sc->vr_revid >= REV_ID_VT6102_A) 744 CSR_WRITE_2(sc, VR_MII_IMR, 0); 745 746 if (sc->vr_revid < REV_ID_VT6102_A) { 747 pci_write_config(dev, VR_PCI_MODE2, 748 pci_read_config(dev, VR_PCI_MODE2, 1) | 749 VR_MODE2_MODE10T, 1); 750 } else { 751 /* Report error instead of retrying forever. */ 752 pci_write_config(dev, VR_PCI_MODE2, 753 pci_read_config(dev, VR_PCI_MODE2, 1) | 754 VR_MODE2_PCEROPT, 1); 755 /* Detect MII coding error. */ 756 pci_write_config(dev, VR_PCI_MODE3, 757 pci_read_config(dev, VR_PCI_MODE3, 1) | 758 VR_MODE3_MIION, 1); 759 if (sc->vr_revid >= REV_ID_VT6105_LOM && 760 sc->vr_revid < REV_ID_VT6105M_A0) 761 pci_write_config(dev, VR_PCI_MODE2, 762 pci_read_config(dev, VR_PCI_MODE2, 1) | 763 VR_MODE2_MODE10T, 1); 764 /* Enable Memory-Read-Multiple. */ 765 if (sc->vr_revid >= REV_ID_VT6107_A1 && 766 sc->vr_revid < REV_ID_VT6105M_A0) 767 pci_write_config(dev, VR_PCI_MODE2, 768 pci_read_config(dev, VR_PCI_MODE2, 1) | 769 VR_MODE2_MRDPL, 1); 770 } 771 /* Disable MII AUTOPOLL. */ 772 VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL); 773 774 if (vr_dma_alloc(sc) != 0) { 775 error = ENXIO; 776 goto fail; 777 } 778 779 /* Do MII setup. */ 780 if (sc->vr_revid >= REV_ID_VT6105_A0) 781 phy = 1; 782 else 783 phy = CSR_READ_1(sc, VR_PHYADDR) & VR_PHYADDR_MASK; 784 error = mii_attach(dev, &sc->vr_miibus, ifp, vr_ifmedia_upd, 785 vr_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0); 786 if (error != 0) { 787 device_printf(dev, "attaching PHYs failed\n"); 788 goto fail; 789 } 790 791 /* Call MI attach routine. */ 792 ether_ifattach(ifp, eaddr); 793 /* 794 * Tell the upper layer(s) we support long frames. 795 * Must appear after the call to ether_ifattach() because 796 * ether_ifattach() sets ifi_hdrlen to the default value. 797 */ 798 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 799 800 /* Hook interrupt last to avoid having to lock softc. */ 801 error = bus_setup_intr(dev, sc->vr_irq, INTR_TYPE_NET | INTR_MPSAFE, 802 NULL, vr_intr, sc, &sc->vr_intrhand); 803 804 if (error) { 805 device_printf(dev, "couldn't set up irq\n"); 806 ether_ifdetach(ifp); 807 goto fail; 808 } 809 810 fail: 811 if (error) 812 vr_detach(dev); 813 814 return (error); 815 } 816 817 /* 818 * Shutdown hardware and free up resources. This can be called any 819 * time after the mutex has been initialized. It is called in both 820 * the error case in attach and the normal detach case so it needs 821 * to be careful about only freeing resources that have actually been 822 * allocated. 
823 */ 824 static int 825 vr_detach(device_t dev) 826 { 827 struct vr_softc *sc = device_get_softc(dev); 828 struct ifnet *ifp = sc->vr_ifp; 829 830 KASSERT(mtx_initialized(&sc->vr_mtx), ("vr mutex not initialized")); 831 832 #ifdef DEVICE_POLLING 833 if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING) 834 ether_poll_deregister(ifp); 835 #endif 836 837 /* These should only be active if attach succeeded. */ 838 if (device_is_attached(dev)) { 839 VR_LOCK(sc); 840 sc->vr_detach = 1; 841 vr_stop(sc); 842 VR_UNLOCK(sc); 843 callout_drain(&sc->vr_stat_callout); 844 taskqueue_drain(taskqueue_swi, &sc->vr_link_task); 845 ether_ifdetach(ifp); 846 } 847 if (sc->vr_miibus) 848 device_delete_child(dev, sc->vr_miibus); 849 bus_generic_detach(dev); 850 851 if (sc->vr_intrhand) 852 bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand); 853 if (sc->vr_irq) 854 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq); 855 if (sc->vr_res) 856 bus_release_resource(dev, sc->vr_res_type, sc->vr_res_id, 857 sc->vr_res); 858 859 if (ifp) 860 if_free(ifp); 861 862 vr_dma_free(sc); 863 864 mtx_destroy(&sc->vr_mtx); 865 866 return (0); 867 } 868 869 struct vr_dmamap_arg { 870 bus_addr_t vr_busaddr; 871 }; 872 873 static void 874 vr_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 875 { 876 struct vr_dmamap_arg *ctx; 877 878 if (error != 0) 879 return; 880 ctx = arg; 881 ctx->vr_busaddr = segs[0].ds_addr; 882 } 883 884 static int 885 vr_dma_alloc(struct vr_softc *sc) 886 { 887 struct vr_dmamap_arg ctx; 888 struct vr_txdesc *txd; 889 struct vr_rxdesc *rxd; 890 bus_size_t tx_alignment; 891 int error, i; 892 893 /* Create parent DMA tag. */ 894 error = bus_dma_tag_create( 895 bus_get_dma_tag(sc->vr_dev), /* parent */ 896 1, 0, /* alignment, boundary */ 897 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 898 BUS_SPACE_MAXADDR, /* highaddr */ 899 NULL, NULL, /* filter, filterarg */ 900 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 901 0, /* nsegments */ 902 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 903 0, /* flags */ 904 NULL, NULL, /* lockfunc, lockarg */ 905 &sc->vr_cdata.vr_parent_tag); 906 if (error != 0) { 907 device_printf(sc->vr_dev, "failed to create parent DMA tag\n"); 908 goto fail; 909 } 910 /* Create tag for Tx ring. */ 911 error = bus_dma_tag_create( 912 sc->vr_cdata.vr_parent_tag, /* parent */ 913 VR_RING_ALIGN, 0, /* alignment, boundary */ 914 BUS_SPACE_MAXADDR, /* lowaddr */ 915 BUS_SPACE_MAXADDR, /* highaddr */ 916 NULL, NULL, /* filter, filterarg */ 917 VR_TX_RING_SIZE, /* maxsize */ 918 1, /* nsegments */ 919 VR_TX_RING_SIZE, /* maxsegsize */ 920 0, /* flags */ 921 NULL, NULL, /* lockfunc, lockarg */ 922 &sc->vr_cdata.vr_tx_ring_tag); 923 if (error != 0) { 924 device_printf(sc->vr_dev, "failed to create Tx ring DMA tag\n"); 925 goto fail; 926 } 927 928 /* Create tag for Rx ring. */ 929 error = bus_dma_tag_create( 930 sc->vr_cdata.vr_parent_tag, /* parent */ 931 VR_RING_ALIGN, 0, /* alignment, boundary */ 932 BUS_SPACE_MAXADDR, /* lowaddr */ 933 BUS_SPACE_MAXADDR, /* highaddr */ 934 NULL, NULL, /* filter, filterarg */ 935 VR_RX_RING_SIZE, /* maxsize */ 936 1, /* nsegments */ 937 VR_RX_RING_SIZE, /* maxsegsize */ 938 0, /* flags */ 939 NULL, NULL, /* lockfunc, lockarg */ 940 &sc->vr_cdata.vr_rx_ring_tag); 941 if (error != 0) { 942 device_printf(sc->vr_dev, "failed to create Rx ring DMA tag\n"); 943 goto fail; 944 } 945 946 if ((sc->vr_quirks & VR_Q_NEEDALIGN) != 0) 947 tx_alignment = sizeof(uint32_t); 948 else 949 tx_alignment = 1; 950 /* Create tag for Tx buffers. 
*/ 951 error = bus_dma_tag_create( 952 sc->vr_cdata.vr_parent_tag, /* parent */ 953 tx_alignment, 0, /* alignment, boundary */ 954 BUS_SPACE_MAXADDR, /* lowaddr */ 955 BUS_SPACE_MAXADDR, /* highaddr */ 956 NULL, NULL, /* filter, filterarg */ 957 MCLBYTES * VR_MAXFRAGS, /* maxsize */ 958 VR_MAXFRAGS, /* nsegments */ 959 MCLBYTES, /* maxsegsize */ 960 0, /* flags */ 961 NULL, NULL, /* lockfunc, lockarg */ 962 &sc->vr_cdata.vr_tx_tag); 963 if (error != 0) { 964 device_printf(sc->vr_dev, "failed to create Tx DMA tag\n"); 965 goto fail; 966 } 967 968 /* Create tag for Rx buffers. */ 969 error = bus_dma_tag_create( 970 sc->vr_cdata.vr_parent_tag, /* parent */ 971 VR_RX_ALIGN, 0, /* alignment, boundary */ 972 BUS_SPACE_MAXADDR, /* lowaddr */ 973 BUS_SPACE_MAXADDR, /* highaddr */ 974 NULL, NULL, /* filter, filterarg */ 975 MCLBYTES, /* maxsize */ 976 1, /* nsegments */ 977 MCLBYTES, /* maxsegsize */ 978 0, /* flags */ 979 NULL, NULL, /* lockfunc, lockarg */ 980 &sc->vr_cdata.vr_rx_tag); 981 if (error != 0) { 982 device_printf(sc->vr_dev, "failed to create Rx DMA tag\n"); 983 goto fail; 984 } 985 986 /* Allocate DMA'able memory and load the DMA map for Tx ring. */ 987 error = bus_dmamem_alloc(sc->vr_cdata.vr_tx_ring_tag, 988 (void **)&sc->vr_rdata.vr_tx_ring, BUS_DMA_WAITOK | 989 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->vr_cdata.vr_tx_ring_map); 990 if (error != 0) { 991 device_printf(sc->vr_dev, 992 "failed to allocate DMA'able memory for Tx ring\n"); 993 goto fail; 994 } 995 996 ctx.vr_busaddr = 0; 997 error = bus_dmamap_load(sc->vr_cdata.vr_tx_ring_tag, 998 sc->vr_cdata.vr_tx_ring_map, sc->vr_rdata.vr_tx_ring, 999 VR_TX_RING_SIZE, vr_dmamap_cb, &ctx, 0); 1000 if (error != 0 || ctx.vr_busaddr == 0) { 1001 device_printf(sc->vr_dev, 1002 "failed to load DMA'able memory for Tx ring\n"); 1003 goto fail; 1004 } 1005 sc->vr_rdata.vr_tx_ring_paddr = ctx.vr_busaddr; 1006 1007 /* Allocate DMA'able memory and load the DMA map for Rx ring. */ 1008 error = bus_dmamem_alloc(sc->vr_cdata.vr_rx_ring_tag, 1009 (void **)&sc->vr_rdata.vr_rx_ring, BUS_DMA_WAITOK | 1010 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->vr_cdata.vr_rx_ring_map); 1011 if (error != 0) { 1012 device_printf(sc->vr_dev, 1013 "failed to allocate DMA'able memory for Rx ring\n"); 1014 goto fail; 1015 } 1016 1017 ctx.vr_busaddr = 0; 1018 error = bus_dmamap_load(sc->vr_cdata.vr_rx_ring_tag, 1019 sc->vr_cdata.vr_rx_ring_map, sc->vr_rdata.vr_rx_ring, 1020 VR_RX_RING_SIZE, vr_dmamap_cb, &ctx, 0); 1021 if (error != 0 || ctx.vr_busaddr == 0) { 1022 device_printf(sc->vr_dev, 1023 "failed to load DMA'able memory for Rx ring\n"); 1024 goto fail; 1025 } 1026 sc->vr_rdata.vr_rx_ring_paddr = ctx.vr_busaddr; 1027 1028 /* Create DMA maps for Tx buffers. */ 1029 for (i = 0; i < VR_TX_RING_CNT; i++) { 1030 txd = &sc->vr_cdata.vr_txdesc[i]; 1031 txd->tx_m = NULL; 1032 txd->tx_dmamap = NULL; 1033 error = bus_dmamap_create(sc->vr_cdata.vr_tx_tag, 0, 1034 &txd->tx_dmamap); 1035 if (error != 0) { 1036 device_printf(sc->vr_dev, 1037 "failed to create Tx dmamap\n"); 1038 goto fail; 1039 } 1040 } 1041 /* Create DMA maps for Rx buffers. 
*/ 1042 if ((error = bus_dmamap_create(sc->vr_cdata.vr_rx_tag, 0, 1043 &sc->vr_cdata.vr_rx_sparemap)) != 0) { 1044 device_printf(sc->vr_dev, 1045 "failed to create spare Rx dmamap\n"); 1046 goto fail; 1047 } 1048 for (i = 0; i < VR_RX_RING_CNT; i++) { 1049 rxd = &sc->vr_cdata.vr_rxdesc[i]; 1050 rxd->rx_m = NULL; 1051 rxd->rx_dmamap = NULL; 1052 error = bus_dmamap_create(sc->vr_cdata.vr_rx_tag, 0, 1053 &rxd->rx_dmamap); 1054 if (error != 0) { 1055 device_printf(sc->vr_dev, 1056 "failed to create Rx dmamap\n"); 1057 goto fail; 1058 } 1059 } 1060 1061 fail: 1062 return (error); 1063 } 1064 1065 static void 1066 vr_dma_free(struct vr_softc *sc) 1067 { 1068 struct vr_txdesc *txd; 1069 struct vr_rxdesc *rxd; 1070 int i; 1071 1072 /* Tx ring. */ 1073 if (sc->vr_cdata.vr_tx_ring_tag) { 1074 if (sc->vr_cdata.vr_tx_ring_map) 1075 bus_dmamap_unload(sc->vr_cdata.vr_tx_ring_tag, 1076 sc->vr_cdata.vr_tx_ring_map); 1077 if (sc->vr_cdata.vr_tx_ring_map && 1078 sc->vr_rdata.vr_tx_ring) 1079 bus_dmamem_free(sc->vr_cdata.vr_tx_ring_tag, 1080 sc->vr_rdata.vr_tx_ring, 1081 sc->vr_cdata.vr_tx_ring_map); 1082 sc->vr_rdata.vr_tx_ring = NULL; 1083 sc->vr_cdata.vr_tx_ring_map = NULL; 1084 bus_dma_tag_destroy(sc->vr_cdata.vr_tx_ring_tag); 1085 sc->vr_cdata.vr_tx_ring_tag = NULL; 1086 } 1087 /* Rx ring. */ 1088 if (sc->vr_cdata.vr_rx_ring_tag) { 1089 if (sc->vr_cdata.vr_rx_ring_map) 1090 bus_dmamap_unload(sc->vr_cdata.vr_rx_ring_tag, 1091 sc->vr_cdata.vr_rx_ring_map); 1092 if (sc->vr_cdata.vr_rx_ring_map && 1093 sc->vr_rdata.vr_rx_ring) 1094 bus_dmamem_free(sc->vr_cdata.vr_rx_ring_tag, 1095 sc->vr_rdata.vr_rx_ring, 1096 sc->vr_cdata.vr_rx_ring_map); 1097 sc->vr_rdata.vr_rx_ring = NULL; 1098 sc->vr_cdata.vr_rx_ring_map = NULL; 1099 bus_dma_tag_destroy(sc->vr_cdata.vr_rx_ring_tag); 1100 sc->vr_cdata.vr_rx_ring_tag = NULL; 1101 } 1102 /* Tx buffers. */ 1103 if (sc->vr_cdata.vr_tx_tag) { 1104 for (i = 0; i < VR_TX_RING_CNT; i++) { 1105 txd = &sc->vr_cdata.vr_txdesc[i]; 1106 if (txd->tx_dmamap) { 1107 bus_dmamap_destroy(sc->vr_cdata.vr_tx_tag, 1108 txd->tx_dmamap); 1109 txd->tx_dmamap = NULL; 1110 } 1111 } 1112 bus_dma_tag_destroy(sc->vr_cdata.vr_tx_tag); 1113 sc->vr_cdata.vr_tx_tag = NULL; 1114 } 1115 /* Rx buffers. */ 1116 if (sc->vr_cdata.vr_rx_tag) { 1117 for (i = 0; i < VR_RX_RING_CNT; i++) { 1118 rxd = &sc->vr_cdata.vr_rxdesc[i]; 1119 if (rxd->rx_dmamap) { 1120 bus_dmamap_destroy(sc->vr_cdata.vr_rx_tag, 1121 rxd->rx_dmamap); 1122 rxd->rx_dmamap = NULL; 1123 } 1124 } 1125 if (sc->vr_cdata.vr_rx_sparemap) { 1126 bus_dmamap_destroy(sc->vr_cdata.vr_rx_tag, 1127 sc->vr_cdata.vr_rx_sparemap); 1128 sc->vr_cdata.vr_rx_sparemap = 0; 1129 } 1130 bus_dma_tag_destroy(sc->vr_cdata.vr_rx_tag); 1131 sc->vr_cdata.vr_rx_tag = NULL; 1132 } 1133 1134 if (sc->vr_cdata.vr_parent_tag) { 1135 bus_dma_tag_destroy(sc->vr_cdata.vr_parent_tag); 1136 sc->vr_cdata.vr_parent_tag = NULL; 1137 } 1138 } 1139 1140 /* 1141 * Initialize the transmit descriptors. 
1142 */ 1143 static int 1144 vr_tx_ring_init(struct vr_softc *sc) 1145 { 1146 struct vr_ring_data *rd; 1147 struct vr_txdesc *txd; 1148 bus_addr_t addr; 1149 int i; 1150 1151 sc->vr_cdata.vr_tx_prod = 0; 1152 sc->vr_cdata.vr_tx_cons = 0; 1153 sc->vr_cdata.vr_tx_cnt = 0; 1154 sc->vr_cdata.vr_tx_pkts = 0; 1155 1156 rd = &sc->vr_rdata; 1157 bzero(rd->vr_tx_ring, VR_TX_RING_SIZE); 1158 for (i = 0; i < VR_TX_RING_CNT; i++) { 1159 if (i == VR_TX_RING_CNT - 1) 1160 addr = VR_TX_RING_ADDR(sc, 0); 1161 else 1162 addr = VR_TX_RING_ADDR(sc, i + 1); 1163 rd->vr_tx_ring[i].vr_nextphys = htole32(VR_ADDR_LO(addr)); 1164 txd = &sc->vr_cdata.vr_txdesc[i]; 1165 txd->tx_m = NULL; 1166 } 1167 1168 bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag, 1169 sc->vr_cdata.vr_tx_ring_map, 1170 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1171 1172 return (0); 1173 } 1174 1175 /* 1176 * Initialize the RX descriptors and allocate mbufs for them. Note that 1177 * we arrange the descriptors in a closed ring, so that the last descriptor 1178 * points back to the first. 1179 */ 1180 static int 1181 vr_rx_ring_init(struct vr_softc *sc) 1182 { 1183 struct vr_ring_data *rd; 1184 struct vr_rxdesc *rxd; 1185 bus_addr_t addr; 1186 int i; 1187 1188 sc->vr_cdata.vr_rx_cons = 0; 1189 1190 rd = &sc->vr_rdata; 1191 bzero(rd->vr_rx_ring, VR_RX_RING_SIZE); 1192 for (i = 0; i < VR_RX_RING_CNT; i++) { 1193 rxd = &sc->vr_cdata.vr_rxdesc[i]; 1194 rxd->rx_m = NULL; 1195 rxd->desc = &rd->vr_rx_ring[i]; 1196 if (i == VR_RX_RING_CNT - 1) 1197 addr = VR_RX_RING_ADDR(sc, 0); 1198 else 1199 addr = VR_RX_RING_ADDR(sc, i + 1); 1200 rd->vr_rx_ring[i].vr_nextphys = htole32(VR_ADDR_LO(addr)); 1201 if (vr_newbuf(sc, i) != 0) 1202 return (ENOBUFS); 1203 } 1204 1205 bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag, 1206 sc->vr_cdata.vr_rx_ring_map, 1207 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1208 1209 return (0); 1210 } 1211 1212 static __inline void 1213 vr_discard_rxbuf(struct vr_rxdesc *rxd) 1214 { 1215 struct vr_desc *desc; 1216 1217 desc = rxd->desc; 1218 desc->vr_ctl = htole32(VR_RXCTL | (MCLBYTES - sizeof(uint64_t))); 1219 desc->vr_status = htole32(VR_RXSTAT_OWN); 1220 } 1221 1222 /* 1223 * Initialize an RX descriptor and attach an MBUF cluster. 1224 * Note: the length fields are only 11 bits wide, which means the 1225 * largest size we can specify is 2047. This is important because 1226 * MCLBYTES is 2048, so we have to subtract one otherwise we'll 1227 * overflow the field and make a mess. 
1228 */ 1229 static int 1230 vr_newbuf(struct vr_softc *sc, int idx) 1231 { 1232 struct vr_desc *desc; 1233 struct vr_rxdesc *rxd; 1234 struct mbuf *m; 1235 bus_dma_segment_t segs[1]; 1236 bus_dmamap_t map; 1237 int nsegs; 1238 1239 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1240 if (m == NULL) 1241 return (ENOBUFS); 1242 m->m_len = m->m_pkthdr.len = MCLBYTES; 1243 m_adj(m, sizeof(uint64_t)); 1244 1245 if (bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_rx_tag, 1246 sc->vr_cdata.vr_rx_sparemap, m, segs, &nsegs, 0) != 0) { 1247 m_freem(m); 1248 return (ENOBUFS); 1249 } 1250 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1251 1252 rxd = &sc->vr_cdata.vr_rxdesc[idx]; 1253 if (rxd->rx_m != NULL) { 1254 bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap, 1255 BUS_DMASYNC_POSTREAD); 1256 bus_dmamap_unload(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap); 1257 } 1258 map = rxd->rx_dmamap; 1259 rxd->rx_dmamap = sc->vr_cdata.vr_rx_sparemap; 1260 sc->vr_cdata.vr_rx_sparemap = map; 1261 bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap, 1262 BUS_DMASYNC_PREREAD); 1263 rxd->rx_m = m; 1264 desc = rxd->desc; 1265 desc->vr_data = htole32(VR_ADDR_LO(segs[0].ds_addr)); 1266 desc->vr_ctl = htole32(VR_RXCTL | segs[0].ds_len); 1267 desc->vr_status = htole32(VR_RXSTAT_OWN); 1268 1269 return (0); 1270 } 1271 1272 #ifndef __NO_STRICT_ALIGNMENT 1273 static __inline void 1274 vr_fixup_rx(struct mbuf *m) 1275 { 1276 uint16_t *src, *dst; 1277 int i; 1278 1279 src = mtod(m, uint16_t *); 1280 dst = src - 1; 1281 1282 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) 1283 *dst++ = *src++; 1284 1285 m->m_data -= ETHER_ALIGN; 1286 } 1287 #endif 1288 1289 /* 1290 * A frame has been uploaded: pass the resulting mbuf chain up to 1291 * the higher level protocols. 1292 */ 1293 static int 1294 vr_rxeof(struct vr_softc *sc) 1295 { 1296 struct vr_rxdesc *rxd; 1297 struct mbuf *m; 1298 struct ifnet *ifp; 1299 struct vr_desc *cur_rx; 1300 int cons, prog, total_len, rx_npkts; 1301 uint32_t rxstat, rxctl; 1302 1303 VR_LOCK_ASSERT(sc); 1304 ifp = sc->vr_ifp; 1305 cons = sc->vr_cdata.vr_rx_cons; 1306 rx_npkts = 0; 1307 1308 bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag, 1309 sc->vr_cdata.vr_rx_ring_map, 1310 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1311 1312 for (prog = 0; prog < VR_RX_RING_CNT; VR_INC(cons, VR_RX_RING_CNT)) { 1313 #ifdef DEVICE_POLLING 1314 if (ifp->if_capenable & IFCAP_POLLING) { 1315 if (sc->rxcycles <= 0) 1316 break; 1317 sc->rxcycles--; 1318 } 1319 #endif 1320 cur_rx = &sc->vr_rdata.vr_rx_ring[cons]; 1321 rxstat = le32toh(cur_rx->vr_status); 1322 rxctl = le32toh(cur_rx->vr_ctl); 1323 if ((rxstat & VR_RXSTAT_OWN) == VR_RXSTAT_OWN) 1324 break; 1325 1326 prog++; 1327 rxd = &sc->vr_cdata.vr_rxdesc[cons]; 1328 m = rxd->rx_m; 1329 1330 /* 1331 * If an error occurs, update stats, clear the 1332 * status word and leave the mbuf cluster in place: 1333 * it should simply get re-used next time this descriptor 1334 * comes up in the ring. 1335 * We don't support SG in Rx path yet, so discard 1336 * partial frame. 
1337 */ 1338 if ((rxstat & VR_RXSTAT_RX_OK) == 0 || 1339 (rxstat & (VR_RXSTAT_FIRSTFRAG | VR_RXSTAT_LASTFRAG)) != 1340 (VR_RXSTAT_FIRSTFRAG | VR_RXSTAT_LASTFRAG)) { 1341 ifp->if_ierrors++; 1342 sc->vr_stat.rx_errors++; 1343 if (rxstat & VR_RXSTAT_CRCERR) 1344 sc->vr_stat.rx_crc_errors++; 1345 if (rxstat & VR_RXSTAT_FRAMEALIGNERR) 1346 sc->vr_stat.rx_alignment++; 1347 if (rxstat & VR_RXSTAT_FIFOOFLOW) 1348 sc->vr_stat.rx_fifo_overflows++; 1349 if (rxstat & VR_RXSTAT_GIANT) 1350 sc->vr_stat.rx_giants++; 1351 if (rxstat & VR_RXSTAT_RUNT) 1352 sc->vr_stat.rx_runts++; 1353 if (rxstat & VR_RXSTAT_BUFFERR) 1354 sc->vr_stat.rx_no_buffers++; 1355 #ifdef VR_SHOW_ERRORS 1356 device_printf(sc->vr_dev, "%s: receive error = 0x%b\n", 1357 __func__, rxstat & 0xff, VR_RXSTAT_ERR_BITS); 1358 #endif 1359 vr_discard_rxbuf(rxd); 1360 continue; 1361 } 1362 1363 if (vr_newbuf(sc, cons) != 0) { 1364 ifp->if_iqdrops++; 1365 sc->vr_stat.rx_errors++; 1366 sc->vr_stat.rx_no_mbufs++; 1367 vr_discard_rxbuf(rxd); 1368 continue; 1369 } 1370 1371 /* 1372 * XXX The VIA Rhine chip includes the CRC with every 1373 * received frame, and there's no way to turn this 1374 * behavior off (at least, I can't find anything in 1375 * the manual that explains how to do it) so we have 1376 * to trim off the CRC manually. 1377 */ 1378 total_len = VR_RXBYTES(rxstat); 1379 total_len -= ETHER_CRC_LEN; 1380 m->m_pkthdr.len = m->m_len = total_len; 1381 #ifndef __NO_STRICT_ALIGNMENT 1382 /* 1383 * RX buffers must be 32-bit aligned. 1384 * Ignore the alignment problems on the non-strict alignment 1385 * platform. The performance hit incurred due to unaligned 1386 * accesses is much smaller than the hit produced by forcing 1387 * buffer copies all the time. 1388 */ 1389 vr_fixup_rx(m); 1390 #endif 1391 m->m_pkthdr.rcvif = ifp; 1392 ifp->if_ipackets++; 1393 sc->vr_stat.rx_ok++; 1394 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 && 1395 (rxstat & VR_RXSTAT_FRAG) == 0 && 1396 (rxctl & VR_RXCTL_IP) != 0) { 1397 /* Checksum is valid for non-fragmented IP packets. */ 1398 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 1399 if ((rxctl & VR_RXCTL_IPOK) == VR_RXCTL_IPOK) { 1400 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 1401 if (rxctl & (VR_RXCTL_TCP | VR_RXCTL_UDP)) { 1402 m->m_pkthdr.csum_flags |= 1403 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1404 if ((rxctl & VR_RXCTL_TCPUDPOK) != 0) 1405 m->m_pkthdr.csum_data = 0xffff; 1406 } 1407 } 1408 } 1409 VR_UNLOCK(sc); 1410 (*ifp->if_input)(ifp, m); 1411 VR_LOCK(sc); 1412 rx_npkts++; 1413 } 1414 1415 if (prog > 0) { 1416 sc->vr_cdata.vr_rx_cons = cons; 1417 bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag, 1418 sc->vr_cdata.vr_rx_ring_map, 1419 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1420 } 1421 return (rx_npkts); 1422 } 1423 1424 /* 1425 * A frame was downloaded to the chip. It's safe for us to clean up 1426 * the list buffers. 1427 */ 1428 static void 1429 vr_txeof(struct vr_softc *sc) 1430 { 1431 struct vr_txdesc *txd; 1432 struct vr_desc *cur_tx; 1433 struct ifnet *ifp; 1434 uint32_t txctl, txstat; 1435 int cons, prod; 1436 1437 VR_LOCK_ASSERT(sc); 1438 1439 cons = sc->vr_cdata.vr_tx_cons; 1440 prod = sc->vr_cdata.vr_tx_prod; 1441 if (cons == prod) 1442 return; 1443 1444 bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag, 1445 sc->vr_cdata.vr_tx_ring_map, 1446 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1447 1448 ifp = sc->vr_ifp; 1449 /* 1450 * Go through our tx list and free mbufs for those 1451 * frames that have been transmitted. 
1452 */ 1453 for (; cons != prod; VR_INC(cons, VR_TX_RING_CNT)) { 1454 cur_tx = &sc->vr_rdata.vr_tx_ring[cons]; 1455 txctl = le32toh(cur_tx->vr_ctl); 1456 txstat = le32toh(cur_tx->vr_status); 1457 if ((txstat & VR_TXSTAT_OWN) == VR_TXSTAT_OWN) 1458 break; 1459 1460 sc->vr_cdata.vr_tx_cnt--; 1461 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1462 /* Only the first descriptor in the chain is valid. */ 1463 if ((txctl & VR_TXCTL_FIRSTFRAG) == 0) 1464 continue; 1465 1466 txd = &sc->vr_cdata.vr_txdesc[cons]; 1467 KASSERT(txd->tx_m != NULL, ("%s: accessing NULL mbuf!\n", 1468 __func__)); 1469 1470 if ((txstat & VR_TXSTAT_ERRSUM) != 0) { 1471 ifp->if_oerrors++; 1472 sc->vr_stat.tx_errors++; 1473 if ((txstat & VR_TXSTAT_ABRT) != 0) { 1474 /* Give up and restart Tx. */ 1475 sc->vr_stat.tx_abort++; 1476 bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, 1477 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 1478 bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, 1479 txd->tx_dmamap); 1480 m_freem(txd->tx_m); 1481 txd->tx_m = NULL; 1482 VR_INC(cons, VR_TX_RING_CNT); 1483 sc->vr_cdata.vr_tx_cons = cons; 1484 if (vr_tx_stop(sc) != 0) { 1485 device_printf(sc->vr_dev, 1486 "%s: Tx shutdown error -- " 1487 "resetting\n", __func__); 1488 sc->vr_flags |= VR_F_RESTART; 1489 return; 1490 } 1491 vr_tx_start(sc); 1492 break; 1493 } 1494 if ((sc->vr_revid < REV_ID_VT3071_A && 1495 (txstat & VR_TXSTAT_UNDERRUN)) || 1496 (txstat & (VR_TXSTAT_UDF | VR_TXSTAT_TBUFF))) { 1497 sc->vr_stat.tx_underrun++; 1498 /* Retry and restart Tx. */ 1499 sc->vr_cdata.vr_tx_cnt++; 1500 sc->vr_cdata.vr_tx_cons = cons; 1501 cur_tx->vr_status = htole32(VR_TXSTAT_OWN); 1502 bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag, 1503 sc->vr_cdata.vr_tx_ring_map, 1504 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1505 vr_tx_underrun(sc); 1506 return; 1507 } 1508 if ((txstat & VR_TXSTAT_DEFER) != 0) { 1509 ifp->if_collisions++; 1510 sc->vr_stat.tx_collisions++; 1511 } 1512 if ((txstat & VR_TXSTAT_LATECOLL) != 0) { 1513 ifp->if_collisions++; 1514 sc->vr_stat.tx_late_collisions++; 1515 } 1516 } else { 1517 sc->vr_stat.tx_ok++; 1518 ifp->if_opackets++; 1519 } 1520 1521 bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap, 1522 BUS_DMASYNC_POSTWRITE); 1523 bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap); 1524 if (sc->vr_revid < REV_ID_VT3071_A) { 1525 ifp->if_collisions += 1526 (txstat & VR_TXSTAT_COLLCNT) >> 3; 1527 sc->vr_stat.tx_collisions += 1528 (txstat & VR_TXSTAT_COLLCNT) >> 3; 1529 } else { 1530 ifp->if_collisions += (txstat & 0x0f); 1531 sc->vr_stat.tx_collisions += (txstat & 0x0f); 1532 } 1533 m_freem(txd->tx_m); 1534 txd->tx_m = NULL; 1535 } 1536 1537 sc->vr_cdata.vr_tx_cons = cons; 1538 if (sc->vr_cdata.vr_tx_cnt == 0) 1539 sc->vr_watchdog_timer = 0; 1540 } 1541 1542 static void 1543 vr_tick(void *xsc) 1544 { 1545 struct vr_softc *sc; 1546 struct mii_data *mii; 1547 1548 sc = (struct vr_softc *)xsc; 1549 1550 VR_LOCK_ASSERT(sc); 1551 1552 if ((sc->vr_flags & VR_F_RESTART) != 0) { 1553 device_printf(sc->vr_dev, "restarting\n"); 1554 sc->vr_stat.num_restart++; 1555 sc->vr_ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1556 vr_init_locked(sc); 1557 sc->vr_flags &= ~VR_F_RESTART; 1558 } 1559 1560 mii = device_get_softc(sc->vr_miibus); 1561 mii_tick(mii); 1562 vr_watchdog(sc); 1563 callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc); 1564 } 1565 1566 #ifdef DEVICE_POLLING 1567 static poll_handler_t vr_poll; 1568 static poll_handler_t vr_poll_locked; 1569 1570 static int 1571 vr_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 1572 { 1573 struct vr_softc *sc; 1574 int 
rx_npkts; 1575 1576 sc = ifp->if_softc; 1577 rx_npkts = 0; 1578 1579 VR_LOCK(sc); 1580 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1581 rx_npkts = vr_poll_locked(ifp, cmd, count); 1582 VR_UNLOCK(sc); 1583 return (rx_npkts); 1584 } 1585 1586 static int 1587 vr_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count) 1588 { 1589 struct vr_softc *sc; 1590 int rx_npkts; 1591 1592 sc = ifp->if_softc; 1593 1594 VR_LOCK_ASSERT(sc); 1595 1596 sc->rxcycles = count; 1597 rx_npkts = vr_rxeof(sc); 1598 vr_txeof(sc); 1599 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1600 vr_start_locked(ifp); 1601 1602 if (cmd == POLL_AND_CHECK_STATUS) { 1603 uint16_t status; 1604 1605 /* Also check status register. */ 1606 status = CSR_READ_2(sc, VR_ISR); 1607 if (status) 1608 CSR_WRITE_2(sc, VR_ISR, status); 1609 1610 if ((status & VR_INTRS) == 0) 1611 return (rx_npkts); 1612 1613 if ((status & (VR_ISR_BUSERR | VR_ISR_LINKSTAT2 | 1614 VR_ISR_STATSOFLOW)) != 0) { 1615 if (vr_error(sc, status) != 0) 1616 return (rx_npkts); 1617 } 1618 if ((status & (VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW)) != 0) { 1619 #ifdef VR_SHOW_ERRORS 1620 device_printf(sc->vr_dev, "%s: receive error : 0x%b\n", 1621 __func__, status, VR_ISR_ERR_BITS); 1622 #endif 1623 vr_rx_start(sc); 1624 } 1625 } 1626 return (rx_npkts); 1627 } 1628 #endif /* DEVICE_POLLING */ 1629 1630 /* Back off the transmit threshold. */ 1631 static void 1632 vr_tx_underrun(struct vr_softc *sc) 1633 { 1634 int thresh; 1635 1636 device_printf(sc->vr_dev, "Tx underrun -- "); 1637 if (sc->vr_txthresh < VR_TXTHRESH_MAX) { 1638 thresh = sc->vr_txthresh; 1639 sc->vr_txthresh++; 1640 if (sc->vr_txthresh >= VR_TXTHRESH_MAX) { 1641 sc->vr_txthresh = VR_TXTHRESH_MAX; 1642 printf("using store and forward mode\n"); 1643 } else 1644 printf("increasing Tx threshold(%d -> %d)\n", 1645 vr_tx_threshold_tables[thresh].value, 1646 vr_tx_threshold_tables[thresh + 1].value); 1647 } else 1648 printf("\n"); 1649 sc->vr_stat.tx_underrun++; 1650 if (vr_tx_stop(sc) != 0) { 1651 device_printf(sc->vr_dev, "%s: Tx shutdown error -- " 1652 "resetting\n", __func__); 1653 sc->vr_flags |= VR_F_RESTART; 1654 return; 1655 } 1656 vr_tx_start(sc); 1657 } 1658 1659 static void 1660 vr_intr(void *arg) 1661 { 1662 struct vr_softc *sc; 1663 struct ifnet *ifp; 1664 uint16_t status; 1665 1666 sc = (struct vr_softc *)arg; 1667 1668 VR_LOCK(sc); 1669 1670 if (sc->vr_suspended != 0) 1671 goto done_locked; 1672 1673 status = CSR_READ_2(sc, VR_ISR); 1674 if (status == 0 || status == 0xffff || (status & VR_INTRS) == 0) 1675 goto done_locked; 1676 1677 ifp = sc->vr_ifp; 1678 #ifdef DEVICE_POLLING 1679 if ((ifp->if_capenable & IFCAP_POLLING) != 0) 1680 goto done_locked; 1681 #endif 1682 1683 /* Suppress unwanted interrupts. */ 1684 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || 1685 (sc->vr_flags & VR_F_RESTART) != 0) { 1686 CSR_WRITE_2(sc, VR_IMR, 0); 1687 CSR_WRITE_2(sc, VR_ISR, status); 1688 goto done_locked; 1689 } 1690 1691 /* Disable interrupts. */ 1692 CSR_WRITE_2(sc, VR_IMR, 0x0000); 1693 1694 for (; (status & VR_INTRS) != 0;) { 1695 CSR_WRITE_2(sc, VR_ISR, status); 1696 if ((status & (VR_ISR_BUSERR | VR_ISR_LINKSTAT2 | 1697 VR_ISR_STATSOFLOW)) != 0) { 1698 if (vr_error(sc, status) != 0) { 1699 VR_UNLOCK(sc); 1700 return; 1701 } 1702 } 1703 vr_rxeof(sc); 1704 if ((status & (VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW)) != 0) { 1705 #ifdef VR_SHOW_ERRORS 1706 device_printf(sc->vr_dev, "%s: receive error = 0x%b\n", 1707 __func__, status, VR_ISR_ERR_BITS); 1708 #endif 1709 /* Restart Rx if RxDMA SM was stopped. 
*/ 1710 vr_rx_start(sc); 1711 } 1712 vr_txeof(sc); 1713 status = CSR_READ_2(sc, VR_ISR); 1714 } 1715 1716 /* Re-enable interrupts. */ 1717 CSR_WRITE_2(sc, VR_IMR, VR_INTRS); 1718 1719 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1720 vr_start_locked(ifp); 1721 1722 done_locked: 1723 VR_UNLOCK(sc); 1724 } 1725 1726 static int 1727 vr_error(struct vr_softc *sc, uint16_t status) 1728 { 1729 uint16_t pcis; 1730 1731 status &= VR_ISR_BUSERR | VR_ISR_LINKSTAT2 | VR_ISR_STATSOFLOW; 1732 if ((status & VR_ISR_BUSERR) != 0) { 1733 status &= ~VR_ISR_BUSERR; 1734 sc->vr_stat.bus_errors++; 1735 /* Disable further interrupts. */ 1736 CSR_WRITE_2(sc, VR_IMR, 0); 1737 pcis = pci_read_config(sc->vr_dev, PCIR_STATUS, 2); 1738 device_printf(sc->vr_dev, "PCI bus error(0x%04x) -- " 1739 "resetting\n", pcis); 1740 pci_write_config(sc->vr_dev, PCIR_STATUS, pcis, 2); 1741 sc->vr_flags |= VR_F_RESTART; 1742 return (EAGAIN); 1743 } 1744 if ((status & VR_ISR_LINKSTAT2) != 0) { 1745 /* Link state change, duplex changes etc. */ 1746 status &= ~VR_ISR_LINKSTAT2; 1747 } 1748 if ((status & VR_ISR_STATSOFLOW) != 0) { 1749 status &= ~VR_ISR_STATSOFLOW; 1750 if (sc->vr_revid >= REV_ID_VT6105M_A0) { 1751 /* Update MIB counters. */ 1752 } 1753 } 1754 1755 if (status != 0) 1756 device_printf(sc->vr_dev, 1757 "unhandled interrupt, status = 0x%04x\n", status); 1758 return (0); 1759 } 1760 1761 /* 1762 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 1763 * pointers to the fragment pointers. 1764 */ 1765 static int 1766 vr_encap(struct vr_softc *sc, struct mbuf **m_head) 1767 { 1768 struct vr_txdesc *txd; 1769 struct vr_desc *desc; 1770 struct mbuf *m; 1771 bus_dma_segment_t txsegs[VR_MAXFRAGS]; 1772 uint32_t csum_flags, txctl; 1773 int error, i, nsegs, prod, si; 1774 int padlen; 1775 1776 VR_LOCK_ASSERT(sc); 1777 1778 M_ASSERTPKTHDR((*m_head)); 1779 1780 /* 1781 * Some VIA Rhine wants packet buffers to be longword 1782 * aligned, but very often our mbufs aren't. Rather than 1783 * waste time trying to decide when to copy and when not 1784 * to copy, just do it all the time. 1785 */ 1786 if ((sc->vr_quirks & VR_Q_NEEDALIGN) != 0) { 1787 m = m_defrag(*m_head, M_DONTWAIT); 1788 if (m == NULL) { 1789 m_freem(*m_head); 1790 *m_head = NULL; 1791 return (ENOBUFS); 1792 } 1793 *m_head = m; 1794 } 1795 1796 /* 1797 * The Rhine chip doesn't auto-pad, so we have to make 1798 * sure to pad short frames out to the minimum frame length 1799 * ourselves. 1800 */ 1801 if ((*m_head)->m_pkthdr.len < VR_MIN_FRAMELEN) { 1802 m = *m_head; 1803 padlen = VR_MIN_FRAMELEN - m->m_pkthdr.len; 1804 if (M_WRITABLE(m) == 0) { 1805 /* Get a writable copy. */ 1806 m = m_dup(*m_head, M_DONTWAIT); 1807 m_freem(*m_head); 1808 if (m == NULL) { 1809 *m_head = NULL; 1810 return (ENOBUFS); 1811 } 1812 *m_head = m; 1813 } 1814 if (m->m_next != NULL || M_TRAILINGSPACE(m) < padlen) { 1815 m = m_defrag(m, M_DONTWAIT); 1816 if (m == NULL) { 1817 m_freem(*m_head); 1818 *m_head = NULL; 1819 return (ENOBUFS); 1820 } 1821 } 1822 /* 1823 * Manually pad short frames, and zero the pad space 1824 * to avoid leaking data. 
1825 */ 1826 bzero(mtod(m, char *) + m->m_pkthdr.len, padlen); 1827 m->m_pkthdr.len += padlen; 1828 m->m_len = m->m_pkthdr.len; 1829 *m_head = m; 1830 } 1831 1832 prod = sc->vr_cdata.vr_tx_prod; 1833 txd = &sc->vr_cdata.vr_txdesc[prod]; 1834 error = bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap, 1835 *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); 1836 if (error == EFBIG) { 1837 m = m_collapse(*m_head, M_DONTWAIT, VR_MAXFRAGS); 1838 if (m == NULL) { 1839 m_freem(*m_head); 1840 *m_head = NULL; 1841 return (ENOBUFS); 1842 } 1843 *m_head = m; 1844 error = bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_tx_tag, 1845 txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); 1846 if (error != 0) { 1847 m_freem(*m_head); 1848 *m_head = NULL; 1849 return (error); 1850 } 1851 } else if (error != 0) 1852 return (error); 1853 if (nsegs == 0) { 1854 m_freem(*m_head); 1855 *m_head = NULL; 1856 return (EIO); 1857 } 1858 1859 /* Check number of available descriptors. */ 1860 if (sc->vr_cdata.vr_tx_cnt + nsegs >= (VR_TX_RING_CNT - 1)) { 1861 bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap); 1862 return (ENOBUFS); 1863 } 1864 1865 txd->tx_m = *m_head; 1866 bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap, 1867 BUS_DMASYNC_PREWRITE); 1868 1869 /* Set checksum offload. */ 1870 csum_flags = 0; 1871 if (((*m_head)->m_pkthdr.csum_flags & VR_CSUM_FEATURES) != 0) { 1872 if ((*m_head)->m_pkthdr.csum_flags & CSUM_IP) 1873 csum_flags |= VR_TXCTL_IPCSUM; 1874 if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP) 1875 csum_flags |= VR_TXCTL_TCPCSUM; 1876 if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP) 1877 csum_flags |= VR_TXCTL_UDPCSUM; 1878 } 1879 1880 /* 1881 * Quite contrary to datasheet for VIA Rhine, VR_TXCTL_TLINK bit 1882 * is required for all descriptors regardless of single or 1883 * multiple buffers. Also VR_TXSTAT_OWN bit is valid only for 1884 * the first descriptor for a multi-fragmented frames. Without 1885 * that VIA Rhine chip generates Tx underrun interrupts and can't 1886 * send any frames. 1887 */ 1888 si = prod; 1889 for (i = 0; i < nsegs; i++) { 1890 desc = &sc->vr_rdata.vr_tx_ring[prod]; 1891 desc->vr_status = 0; 1892 txctl = txsegs[i].ds_len | VR_TXCTL_TLINK | csum_flags; 1893 if (i == 0) 1894 txctl |= VR_TXCTL_FIRSTFRAG; 1895 desc->vr_ctl = htole32(txctl); 1896 desc->vr_data = htole32(VR_ADDR_LO(txsegs[i].ds_addr)); 1897 sc->vr_cdata.vr_tx_cnt++; 1898 VR_INC(prod, VR_TX_RING_CNT); 1899 } 1900 /* Update producer index. */ 1901 sc->vr_cdata.vr_tx_prod = prod; 1902 1903 prod = (prod + VR_TX_RING_CNT - 1) % VR_TX_RING_CNT; 1904 desc = &sc->vr_rdata.vr_tx_ring[prod]; 1905 1906 /* 1907 * Set EOP on the last desciptor and reuqest Tx completion 1908 * interrupt for every VR_TX_INTR_THRESH-th frames. 1909 */ 1910 VR_INC(sc->vr_cdata.vr_tx_pkts, VR_TX_INTR_THRESH); 1911 if (sc->vr_cdata.vr_tx_pkts == 0) 1912 desc->vr_ctl |= htole32(VR_TXCTL_LASTFRAG | VR_TXCTL_FINT); 1913 else 1914 desc->vr_ctl |= htole32(VR_TXCTL_LASTFRAG); 1915 1916 /* Lastly turn the first descriptor ownership to hardware. */ 1917 desc = &sc->vr_rdata.vr_tx_ring[si]; 1918 desc->vr_status |= htole32(VR_TXSTAT_OWN); 1919 1920 /* Sync descriptors. 
         */
        bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
            sc->vr_cdata.vr_tx_ring_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        return (0);
}

static void
vr_start(struct ifnet *ifp)
{
        struct vr_softc *sc;

        sc = ifp->if_softc;
        VR_LOCK(sc);
        vr_start_locked(ifp);
        VR_UNLOCK(sc);
}

static void
vr_start_locked(struct ifnet *ifp)
{
        struct vr_softc *sc;
        struct mbuf *m_head;
        int enq;

        sc = ifp->if_softc;

        VR_LOCK_ASSERT(sc);

        if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
            IFF_DRV_RUNNING || sc->vr_link == 0)
                return;

        for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
            sc->vr_cdata.vr_tx_cnt < VR_TX_RING_CNT - 2; ) {
                IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
                if (m_head == NULL)
                        break;
                /*
                 * Pack the data into the transmit ring.  If we
                 * don't have room, set the OACTIVE flag and wait
                 * for the NIC to drain the ring.
                 */
                if (vr_encap(sc, &m_head)) {
                        if (m_head == NULL)
                                break;
                        IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
                        ifp->if_drv_flags |= IFF_DRV_OACTIVE;
                        break;
                }

                enq++;
                /*
                 * If there's a BPF listener, bounce a copy of this frame
                 * to him.
                 */
                ETHER_BPF_MTAP(ifp, m_head);
        }

        if (enq > 0) {
                /* Tell the chip to start transmitting. */
                VR_SETBIT(sc, VR_CR0, VR_CR0_TX_GO);
                /* Set a timeout in case the chip goes out to lunch. */
                sc->vr_watchdog_timer = 5;
        }
}

static void
vr_init(void *xsc)
{
        struct vr_softc *sc;

        sc = (struct vr_softc *)xsc;
        VR_LOCK(sc);
        vr_init_locked(sc);
        VR_UNLOCK(sc);
}

static void
vr_init_locked(struct vr_softc *sc)
{
        struct ifnet *ifp;
        struct mii_data *mii;
        bus_addr_t addr;
        int i;

        VR_LOCK_ASSERT(sc);

        ifp = sc->vr_ifp;
        mii = device_get_softc(sc->vr_miibus);

        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
                return;

        /* Cancel pending I/O and free all RX/TX buffers. */
        vr_stop(sc);
        vr_reset(sc);

        /* Set our station address. */
        for (i = 0; i < ETHER_ADDR_LEN; i++)
                CSR_WRITE_1(sc, VR_PAR0 + i, IF_LLADDR(sc->vr_ifp)[i]);

        /* Set DMA size. */
        VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH);
        VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD);

        /*
         * BCR0 and BCR1 can override the RXCFG and TXCFG registers,
         * so we must set both.
         */
        VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH);
        VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES);

        VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH);
        VR_SETBIT(sc, VR_BCR1, vr_tx_threshold_tables[sc->vr_txthresh].bcr_cfg);

        VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
        VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES);

        VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
        VR_SETBIT(sc, VR_TXCFG, vr_tx_threshold_tables[sc->vr_txthresh].tx_cfg);

        /* Init circular RX list. */
        if (vr_rx_ring_init(sc) != 0) {
                device_printf(sc->vr_dev,
                    "initialization failed: no memory for rx buffers\n");
                vr_stop(sc);
                return;
        }

        /* Init tx descriptors. */
        vr_tx_ring_init(sc);

        if ((sc->vr_quirks & VR_Q_CAM) != 0) {
                uint8_t vcam[2] = { 0, 0 };

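                /*
                 * The VT6105M provides CAM-based multicast and VLAN filters.
                 * Bring them to a known state here: hardware tag insertion/
                 * stripping and VLAN filtering are switched off, all CAM
                 * entries are masked, and a single null VLAN CAM entry is
                 * re-enabled.
                 */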
                /* Disable VLAN hardware tag insertion/stripping. */
                VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TXTAGEN | VR_TXCFG_RXTAGCTL);
                /* Disable VLAN hardware filtering. */
                VR_CLRBIT(sc, VR_BCR1, VR_BCR1_VLANFILT_ENB);
                /* Disable all CAM entries. */
                vr_cam_mask(sc, VR_MCAST_CAM, 0);
                vr_cam_mask(sc, VR_VLAN_CAM, 0);
                /* Enable the first VLAN CAM. */
                vr_cam_data(sc, VR_VLAN_CAM, 0, vcam);
                vr_cam_mask(sc, VR_VLAN_CAM, 1);
        }

        /*
         * Set up receive filter.
         */
        vr_set_filter(sc);

        /*
         * Load the address of the RX ring.
         */
        addr = VR_RX_RING_ADDR(sc, 0);
        CSR_WRITE_4(sc, VR_RXADDR, VR_ADDR_LO(addr));
        /*
         * Load the address of the TX ring.
         */
        addr = VR_TX_RING_ADDR(sc, 0);
        CSR_WRITE_4(sc, VR_TXADDR, VR_ADDR_LO(addr));
        /* Default : full-duplex, no Tx poll. */
        CSR_WRITE_1(sc, VR_CR1, VR_CR1_FULLDUPLEX | VR_CR1_TX_NOPOLL);

        /* Set flow-control parameters for Rhine III. */
        if (sc->vr_revid >= REV_ID_VT6105_A0) {
                /* Rx buffer count available for incoming packet. */
                CSR_WRITE_1(sc, VR_FLOWCR0, VR_RX_RING_CNT);
                /*
                 * Tx pause low threshold : 16 free receive buffers
                 * Tx pause XON high threshold : 48 free receive buffers
                 */
                CSR_WRITE_1(sc, VR_FLOWCR1,
                    VR_FLOWCR1_TXLO16 | VR_FLOWCR1_TXHI48 | VR_FLOWCR1_XONXOFF);
                /* Set Tx pause timer. */
                CSR_WRITE_2(sc, VR_PAUSETIMER, 0xffff);
        }

        /* Enable receiver and transmitter. */
        CSR_WRITE_1(sc, VR_CR0,
            VR_CR0_START | VR_CR0_TX_ON | VR_CR0_RX_ON | VR_CR0_RX_GO);

        CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
#ifdef DEVICE_POLLING
        /*
         * Disable interrupts if we are polling.
         */
        if (ifp->if_capenable & IFCAP_POLLING)
                CSR_WRITE_2(sc, VR_IMR, 0);
        else
#endif
        /*
         * Enable interrupts and disable MII intrs.
         */
        CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
        if (sc->vr_revid > REV_ID_VT6102_A)
                CSR_WRITE_2(sc, VR_MII_IMR, 0);

        sc->vr_link = 0;
        mii_mediachg(mii);

        ifp->if_drv_flags |= IFF_DRV_RUNNING;
        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

        callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc);
}

/*
 * Set media options.
 */
static int
vr_ifmedia_upd(struct ifnet *ifp)
{
        struct vr_softc *sc;
        struct mii_data *mii;
        struct mii_softc *miisc;
        int error;

        sc = ifp->if_softc;
        VR_LOCK(sc);
        mii = device_get_softc(sc->vr_miibus);
        if (mii->mii_instance) {
                LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
                        mii_phy_reset(miisc);
        }
        error = mii_mediachg(mii);
        VR_UNLOCK(sc);

        return (error);
}

/*
 * Report current media status.
2156 */ 2157 static void 2158 vr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2159 { 2160 struct vr_softc *sc; 2161 struct mii_data *mii; 2162 2163 sc = ifp->if_softc; 2164 mii = device_get_softc(sc->vr_miibus); 2165 VR_LOCK(sc); 2166 mii_pollstat(mii); 2167 VR_UNLOCK(sc); 2168 ifmr->ifm_active = mii->mii_media_active; 2169 ifmr->ifm_status = mii->mii_media_status; 2170 } 2171 2172 static int 2173 vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 2174 { 2175 struct vr_softc *sc; 2176 struct ifreq *ifr; 2177 struct mii_data *mii; 2178 int error, mask; 2179 2180 sc = ifp->if_softc; 2181 ifr = (struct ifreq *)data; 2182 error = 0; 2183 2184 switch (command) { 2185 case SIOCSIFFLAGS: 2186 VR_LOCK(sc); 2187 if (ifp->if_flags & IFF_UP) { 2188 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2189 if ((ifp->if_flags ^ sc->vr_if_flags) & 2190 (IFF_PROMISC | IFF_ALLMULTI)) 2191 vr_set_filter(sc); 2192 } else { 2193 if (sc->vr_detach == 0) 2194 vr_init_locked(sc); 2195 } 2196 } else { 2197 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2198 vr_stop(sc); 2199 } 2200 sc->vr_if_flags = ifp->if_flags; 2201 VR_UNLOCK(sc); 2202 break; 2203 case SIOCADDMULTI: 2204 case SIOCDELMULTI: 2205 VR_LOCK(sc); 2206 vr_set_filter(sc); 2207 VR_UNLOCK(sc); 2208 break; 2209 case SIOCGIFMEDIA: 2210 case SIOCSIFMEDIA: 2211 mii = device_get_softc(sc->vr_miibus); 2212 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 2213 break; 2214 case SIOCSIFCAP: 2215 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 2216 #ifdef DEVICE_POLLING 2217 if (mask & IFCAP_POLLING) { 2218 if (ifr->ifr_reqcap & IFCAP_POLLING) { 2219 error = ether_poll_register(vr_poll, ifp); 2220 if (error != 0) 2221 break; 2222 VR_LOCK(sc); 2223 /* Disable interrupts. */ 2224 CSR_WRITE_2(sc, VR_IMR, 0x0000); 2225 ifp->if_capenable |= IFCAP_POLLING; 2226 VR_UNLOCK(sc); 2227 } else { 2228 error = ether_poll_deregister(ifp); 2229 /* Enable interrupts. */ 2230 VR_LOCK(sc); 2231 CSR_WRITE_2(sc, VR_IMR, VR_INTRS); 2232 ifp->if_capenable &= ~IFCAP_POLLING; 2233 VR_UNLOCK(sc); 2234 } 2235 } 2236 #endif /* DEVICE_POLLING */ 2237 if ((mask & IFCAP_TXCSUM) != 0 && 2238 (IFCAP_TXCSUM & ifp->if_capabilities) != 0) { 2239 ifp->if_capenable ^= IFCAP_TXCSUM; 2240 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0) 2241 ifp->if_hwassist |= VR_CSUM_FEATURES; 2242 else 2243 ifp->if_hwassist &= ~VR_CSUM_FEATURES; 2244 } 2245 if ((mask & IFCAP_RXCSUM) != 0 && 2246 (IFCAP_RXCSUM & ifp->if_capabilities) != 0) 2247 ifp->if_capenable ^= IFCAP_RXCSUM; 2248 if ((mask & IFCAP_WOL_UCAST) != 0 && 2249 (ifp->if_capabilities & IFCAP_WOL_UCAST) != 0) 2250 ifp->if_capenable ^= IFCAP_WOL_UCAST; 2251 if ((mask & IFCAP_WOL_MAGIC) != 0 && 2252 (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0) 2253 ifp->if_capenable ^= IFCAP_WOL_MAGIC; 2254 break; 2255 default: 2256 error = ether_ioctl(ifp, command, data); 2257 break; 2258 } 2259 2260 return (error); 2261 } 2262 2263 static void 2264 vr_watchdog(struct vr_softc *sc) 2265 { 2266 struct ifnet *ifp; 2267 2268 VR_LOCK_ASSERT(sc); 2269 2270 if (sc->vr_watchdog_timer == 0 || --sc->vr_watchdog_timer) 2271 return; 2272 2273 ifp = sc->vr_ifp; 2274 /* 2275 * Reclaim first as we don't request interrupt for every packets. 
2276 */ 2277 vr_txeof(sc); 2278 if (sc->vr_cdata.vr_tx_cnt == 0) 2279 return; 2280 2281 if (sc->vr_link == 0) { 2282 if (bootverbose) 2283 if_printf(sc->vr_ifp, "watchdog timeout " 2284 "(missed link)\n"); 2285 ifp->if_oerrors++; 2286 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2287 vr_init_locked(sc); 2288 return; 2289 } 2290 2291 ifp->if_oerrors++; 2292 if_printf(ifp, "watchdog timeout\n"); 2293 2294 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2295 vr_init_locked(sc); 2296 2297 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2298 vr_start_locked(ifp); 2299 } 2300 2301 static void 2302 vr_tx_start(struct vr_softc *sc) 2303 { 2304 bus_addr_t addr; 2305 uint8_t cmd; 2306 2307 cmd = CSR_READ_1(sc, VR_CR0); 2308 if ((cmd & VR_CR0_TX_ON) == 0) { 2309 addr = VR_TX_RING_ADDR(sc, sc->vr_cdata.vr_tx_cons); 2310 CSR_WRITE_4(sc, VR_TXADDR, VR_ADDR_LO(addr)); 2311 cmd |= VR_CR0_TX_ON; 2312 CSR_WRITE_1(sc, VR_CR0, cmd); 2313 } 2314 if (sc->vr_cdata.vr_tx_cnt != 0) { 2315 sc->vr_watchdog_timer = 5; 2316 VR_SETBIT(sc, VR_CR0, VR_CR0_TX_GO); 2317 } 2318 } 2319 2320 static void 2321 vr_rx_start(struct vr_softc *sc) 2322 { 2323 bus_addr_t addr; 2324 uint8_t cmd; 2325 2326 cmd = CSR_READ_1(sc, VR_CR0); 2327 if ((cmd & VR_CR0_RX_ON) == 0) { 2328 addr = VR_RX_RING_ADDR(sc, sc->vr_cdata.vr_rx_cons); 2329 CSR_WRITE_4(sc, VR_RXADDR, VR_ADDR_LO(addr)); 2330 cmd |= VR_CR0_RX_ON; 2331 CSR_WRITE_1(sc, VR_CR0, cmd); 2332 } 2333 CSR_WRITE_1(sc, VR_CR0, cmd | VR_CR0_RX_GO); 2334 } 2335 2336 static int 2337 vr_tx_stop(struct vr_softc *sc) 2338 { 2339 int i; 2340 uint8_t cmd; 2341 2342 cmd = CSR_READ_1(sc, VR_CR0); 2343 if ((cmd & VR_CR0_TX_ON) != 0) { 2344 cmd &= ~VR_CR0_TX_ON; 2345 CSR_WRITE_1(sc, VR_CR0, cmd); 2346 for (i = VR_TIMEOUT; i > 0; i--) { 2347 DELAY(5); 2348 cmd = CSR_READ_1(sc, VR_CR0); 2349 if ((cmd & VR_CR0_TX_ON) == 0) 2350 break; 2351 } 2352 if (i == 0) 2353 return (ETIMEDOUT); 2354 } 2355 return (0); 2356 } 2357 2358 static int 2359 vr_rx_stop(struct vr_softc *sc) 2360 { 2361 int i; 2362 uint8_t cmd; 2363 2364 cmd = CSR_READ_1(sc, VR_CR0); 2365 if ((cmd & VR_CR0_RX_ON) != 0) { 2366 cmd &= ~VR_CR0_RX_ON; 2367 CSR_WRITE_1(sc, VR_CR0, cmd); 2368 for (i = VR_TIMEOUT; i > 0; i--) { 2369 DELAY(5); 2370 cmd = CSR_READ_1(sc, VR_CR0); 2371 if ((cmd & VR_CR0_RX_ON) == 0) 2372 break; 2373 } 2374 if (i == 0) 2375 return (ETIMEDOUT); 2376 } 2377 return (0); 2378 } 2379 2380 /* 2381 * Stop the adapter and free any mbufs allocated to the 2382 * RX and TX lists. 2383 */ 2384 static void 2385 vr_stop(struct vr_softc *sc) 2386 { 2387 struct vr_txdesc *txd; 2388 struct vr_rxdesc *rxd; 2389 struct ifnet *ifp; 2390 int i; 2391 2392 VR_LOCK_ASSERT(sc); 2393 2394 ifp = sc->vr_ifp; 2395 sc->vr_watchdog_timer = 0; 2396 2397 callout_stop(&sc->vr_stat_callout); 2398 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 2399 2400 CSR_WRITE_1(sc, VR_CR0, VR_CR0_STOP); 2401 if (vr_rx_stop(sc) != 0) 2402 device_printf(sc->vr_dev, "%s: Rx shutdown error\n", __func__); 2403 if (vr_tx_stop(sc) != 0) 2404 device_printf(sc->vr_dev, "%s: Tx shutdown error\n", __func__); 2405 /* Clear pending interrupts. */ 2406 CSR_WRITE_2(sc, VR_ISR, 0xFFFF); 2407 CSR_WRITE_2(sc, VR_IMR, 0x0000); 2408 CSR_WRITE_4(sc, VR_TXADDR, 0x00000000); 2409 CSR_WRITE_4(sc, VR_RXADDR, 0x00000000); 2410 2411 /* 2412 * Free RX and TX mbufs still in the queues. 
2413 */ 2414 for (i = 0; i < VR_RX_RING_CNT; i++) { 2415 rxd = &sc->vr_cdata.vr_rxdesc[i]; 2416 if (rxd->rx_m != NULL) { 2417 bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, 2418 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 2419 bus_dmamap_unload(sc->vr_cdata.vr_rx_tag, 2420 rxd->rx_dmamap); 2421 m_freem(rxd->rx_m); 2422 rxd->rx_m = NULL; 2423 } 2424 } 2425 for (i = 0; i < VR_TX_RING_CNT; i++) { 2426 txd = &sc->vr_cdata.vr_txdesc[i]; 2427 if (txd->tx_m != NULL) { 2428 bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, 2429 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 2430 bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, 2431 txd->tx_dmamap); 2432 m_freem(txd->tx_m); 2433 txd->tx_m = NULL; 2434 } 2435 } 2436 } 2437 2438 /* 2439 * Stop all chip I/O so that the kernel's probe routines don't 2440 * get confused by errant DMAs when rebooting. 2441 */ 2442 static int 2443 vr_shutdown(device_t dev) 2444 { 2445 2446 return (vr_suspend(dev)); 2447 } 2448 2449 static int 2450 vr_suspend(device_t dev) 2451 { 2452 struct vr_softc *sc; 2453 2454 sc = device_get_softc(dev); 2455 2456 VR_LOCK(sc); 2457 vr_stop(sc); 2458 vr_setwol(sc); 2459 sc->vr_suspended = 1; 2460 VR_UNLOCK(sc); 2461 2462 return (0); 2463 } 2464 2465 static int 2466 vr_resume(device_t dev) 2467 { 2468 struct vr_softc *sc; 2469 struct ifnet *ifp; 2470 2471 sc = device_get_softc(dev); 2472 2473 VR_LOCK(sc); 2474 ifp = sc->vr_ifp; 2475 vr_clrwol(sc); 2476 vr_reset(sc); 2477 if (ifp->if_flags & IFF_UP) 2478 vr_init_locked(sc); 2479 2480 sc->vr_suspended = 0; 2481 VR_UNLOCK(sc); 2482 2483 return (0); 2484 } 2485 2486 static void 2487 vr_setwol(struct vr_softc *sc) 2488 { 2489 struct ifnet *ifp; 2490 int pmc; 2491 uint16_t pmstat; 2492 uint8_t v; 2493 2494 VR_LOCK_ASSERT(sc); 2495 2496 if (sc->vr_revid < REV_ID_VT6102_A || 2497 pci_find_extcap(sc->vr_dev, PCIY_PMG, &pmc) != 0) 2498 return; 2499 2500 ifp = sc->vr_ifp; 2501 2502 /* Clear WOL configuration. */ 2503 CSR_WRITE_1(sc, VR_WOLCR_CLR, 0xFF); 2504 CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_SAB | VR_WOLCFG_SAM); 2505 CSR_WRITE_1(sc, VR_PWRCSR_CLR, 0xFF); 2506 CSR_WRITE_1(sc, VR_PWRCFG_CLR, VR_PWRCFG_WOLEN); 2507 if (sc->vr_revid > REV_ID_VT6105_B0) { 2508 /* Newer Rhine III supports two additional patterns. */ 2509 CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_PATTERN_PAGE); 2510 CSR_WRITE_1(sc, VR_TESTREG_CLR, 3); 2511 CSR_WRITE_1(sc, VR_PWRCSR1_CLR, 3); 2512 } 2513 if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0) 2514 CSR_WRITE_1(sc, VR_WOLCR_SET, VR_WOLCR_UCAST); 2515 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) 2516 CSR_WRITE_1(sc, VR_WOLCR_SET, VR_WOLCR_MAGIC); 2517 /* 2518 * It seems that multicast wakeup frames require programming pattern 2519 * registers and valid CRC as well as pattern mask for each pattern. 2520 * While it's possible to setup such a pattern it would complicate 2521 * WOL configuration so ignore multicast wakeup frames. 2522 */ 2523 if ((ifp->if_capenable & IFCAP_WOL) != 0) { 2524 CSR_WRITE_1(sc, VR_WOLCFG_SET, VR_WOLCFG_SAB | VR_WOLCFG_SAM); 2525 v = CSR_READ_1(sc, VR_STICKHW); 2526 CSR_WRITE_1(sc, VR_STICKHW, v | VR_STICKHW_WOL_ENB); 2527 CSR_WRITE_1(sc, VR_PWRCFG_SET, VR_PWRCFG_WOLEN); 2528 } 2529 2530 /* Put hardware into sleep. */ 2531 v = CSR_READ_1(sc, VR_STICKHW); 2532 v |= VR_STICKHW_DS0 | VR_STICKHW_DS1; 2533 CSR_WRITE_1(sc, VR_STICKHW, v); 2534 2535 /* Request PME if WOL is requested. 
         */
        pmstat = pci_read_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, 2);
        pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
        if ((ifp->if_capenable & IFCAP_WOL) != 0)
                pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
        pci_write_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
}

static void
vr_clrwol(struct vr_softc *sc)
{
        uint8_t v;

        VR_LOCK_ASSERT(sc);

        if (sc->vr_revid < REV_ID_VT6102_A)
                return;

        /* Take hardware out of sleep. */
        v = CSR_READ_1(sc, VR_STICKHW);
        v &= ~(VR_STICKHW_DS0 | VR_STICKHW_DS1 | VR_STICKHW_WOL_ENB);
        CSR_WRITE_1(sc, VR_STICKHW, v);

        /* Clear WOL configuration as WOL may interfere with normal operation. */
        CSR_WRITE_1(sc, VR_WOLCR_CLR, 0xFF);
        CSR_WRITE_1(sc, VR_WOLCFG_CLR,
            VR_WOLCFG_SAB | VR_WOLCFG_SAM | VR_WOLCFG_PMEOVR);
        CSR_WRITE_1(sc, VR_PWRCSR_CLR, 0xFF);
        CSR_WRITE_1(sc, VR_PWRCFG_CLR, VR_PWRCFG_WOLEN);
        if (sc->vr_revid > REV_ID_VT6105_B0) {
                /* Newer Rhine III supports two additional patterns. */
                CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_PATTERN_PAGE);
                CSR_WRITE_1(sc, VR_TESTREG_CLR, 3);
                CSR_WRITE_1(sc, VR_PWRCSR1_CLR, 3);
        }
}

static int
vr_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
        struct vr_softc *sc;
        struct vr_statistics *stat;
        int error;
        int result;

        result = -1;
        error = sysctl_handle_int(oidp, &result, 0, req);

        if (error != 0 || req->newptr == NULL)
                return (error);

        if (result == 1) {
                sc = (struct vr_softc *)arg1;
                stat = &sc->vr_stat;

                printf("%s statistics:\n", device_get_nameunit(sc->vr_dev));
                printf("Outbound good frames : %ju\n",
                    (uintmax_t)stat->tx_ok);
                printf("Inbound good frames : %ju\n",
                    (uintmax_t)stat->rx_ok);
                printf("Outbound errors : %u\n", stat->tx_errors);
                printf("Inbound errors : %u\n", stat->rx_errors);
                printf("Inbound no buffers : %u\n", stat->rx_no_buffers);
                printf("Inbound no mbuf clusters: %d\n", stat->rx_no_mbufs);
                printf("Inbound FIFO overflows : %d\n",
                    stat->rx_fifo_overflows);
                printf("Inbound CRC errors : %u\n", stat->rx_crc_errors);
                printf("Inbound frame alignment errors : %u\n",
                    stat->rx_alignment);
                printf("Inbound giant frames : %u\n", stat->rx_giants);
                printf("Inbound runt frames : %u\n", stat->rx_runts);
                printf("Outbound aborted with excessive collisions : %u\n",
                    stat->tx_abort);
                printf("Outbound collisions : %u\n", stat->tx_collisions);
                printf("Outbound late collisions : %u\n",
                    stat->tx_late_collisions);
                printf("Outbound underrun : %u\n", stat->tx_underrun);
                printf("PCI bus errors : %u\n", stat->bus_errors);
                printf("driver restarted due to Rx/Tx shutdown failure : %u\n",
                    stat->num_restart);
        }

        return (error);
}
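
/*
 * Note: vr_sysctl_stats() is wired up as a sysctl handler elsewhere in the
 * driver (typically at attach time); writing 1 to that sysctl node dumps the
 * accumulated counters above to the console, and any other value is ignored.
 */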