/*-
 * Copyright (c) 1997, 1998
 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * VIA Rhine fast ethernet PCI NIC driver
 *
 * Supports various network adapters based on the VIA Rhine
 * and Rhine II PCI controllers, including the D-Link DFE530TX.
 * Datasheets are available at http://www.via.com.tw.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The VIA Rhine controllers are similar in some respects to the
 * DEC tulip chips, except less complicated. The controller
 * uses an MII bus and an external physical layer interface. The
 * receiver has a one-entry perfect filter and a 64-bit hash table
 * multicast filter. Transmit and receive descriptors are similar
 * to the tulip.
 *
 * Some Rhine chips have a serious flaw in their transmit DMA mechanism:
 * transmit buffers must be longword aligned. Unfortunately,
 * FreeBSD doesn't guarantee that mbufs will be filled in starting
 * at longword boundaries, so we have to do a buffer copy before
 * transmission.
61 */ 62 63 #ifdef HAVE_KERNEL_OPTION_HEADERS 64 #include "opt_device_polling.h" 65 #endif 66 67 #include <sys/param.h> 68 #include <sys/systm.h> 69 #include <sys/bus.h> 70 #include <sys/endian.h> 71 #include <sys/kernel.h> 72 #include <sys/malloc.h> 73 #include <sys/mbuf.h> 74 #include <sys/module.h> 75 #include <sys/rman.h> 76 #include <sys/socket.h> 77 #include <sys/sockio.h> 78 #include <sys/sysctl.h> 79 #include <sys/taskqueue.h> 80 81 #include <net/bpf.h> 82 #include <net/if.h> 83 #include <net/ethernet.h> 84 #include <net/if_dl.h> 85 #include <net/if_media.h> 86 #include <net/if_types.h> 87 #include <net/if_vlan_var.h> 88 89 #include <dev/mii/mii.h> 90 #include <dev/mii/miivar.h> 91 92 #include <dev/pci/pcireg.h> 93 #include <dev/pci/pcivar.h> 94 95 #include <machine/bus.h> 96 97 #include <dev/vr/if_vrreg.h> 98 99 /* "device miibus" required. See GENERIC if you get errors here. */ 100 #include "miibus_if.h" 101 102 MODULE_DEPEND(vr, pci, 1, 1, 1); 103 MODULE_DEPEND(vr, ether, 1, 1, 1); 104 MODULE_DEPEND(vr, miibus, 1, 1, 1); 105 106 /* Define to show Rx/Tx error status. */ 107 #undef VR_SHOW_ERRORS 108 #define VR_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 109 110 /* 111 * Various supported device vendors/types, their names & quirks. 112 */ 113 #define VR_Q_NEEDALIGN (1<<0) 114 #define VR_Q_CSUM (1<<1) 115 #define VR_Q_CAM (1<<2) 116 117 static struct vr_type { 118 u_int16_t vr_vid; 119 u_int16_t vr_did; 120 int vr_quirks; 121 char *vr_name; 122 } vr_devs[] = { 123 { VIA_VENDORID, VIA_DEVICEID_RHINE, 124 VR_Q_NEEDALIGN, 125 "VIA VT3043 Rhine I 10/100BaseTX" }, 126 { VIA_VENDORID, VIA_DEVICEID_RHINE_II, 127 VR_Q_NEEDALIGN, 128 "VIA VT86C100A Rhine II 10/100BaseTX" }, 129 { VIA_VENDORID, VIA_DEVICEID_RHINE_II_2, 130 0, 131 "VIA VT6102 Rhine II 10/100BaseTX" }, 132 { VIA_VENDORID, VIA_DEVICEID_RHINE_III, 133 0, 134 "VIA VT6105 Rhine III 10/100BaseTX" }, 135 { VIA_VENDORID, VIA_DEVICEID_RHINE_III_M, 136 VR_Q_CSUM | VR_Q_CAM, 137 "VIA VT6105M Rhine III 10/100BaseTX" }, 138 { DELTA_VENDORID, DELTA_DEVICEID_RHINE_II, 139 VR_Q_NEEDALIGN, 140 "Delta Electronics Rhine II 10/100BaseTX" }, 141 { ADDTRON_VENDORID, ADDTRON_DEVICEID_RHINE_II, 142 VR_Q_NEEDALIGN, 143 "Addtron Technology Rhine II 10/100BaseTX" }, 144 { 0, 0, 0, NULL } 145 }; 146 147 static int vr_probe(device_t); 148 static int vr_attach(device_t); 149 static int vr_detach(device_t); 150 static int vr_shutdown(device_t); 151 static int vr_suspend(device_t); 152 static int vr_resume(device_t); 153 154 static void vr_dmamap_cb(void *, bus_dma_segment_t *, int, int); 155 static int vr_dma_alloc(struct vr_softc *); 156 static void vr_dma_free(struct vr_softc *); 157 static __inline void vr_discard_rxbuf(struct vr_rxdesc *); 158 static int vr_newbuf(struct vr_softc *, int); 159 160 #ifndef __NO_STRICT_ALIGNMENT 161 static __inline void vr_fixup_rx(struct mbuf *); 162 #endif 163 static void vr_rxeof(struct vr_softc *); 164 static void vr_txeof(struct vr_softc *); 165 static void vr_tick(void *); 166 static int vr_error(struct vr_softc *, uint16_t); 167 static void vr_tx_underrun(struct vr_softc *); 168 static void vr_intr(void *); 169 static void vr_start(struct ifnet *); 170 static void vr_start_locked(struct ifnet *); 171 static int vr_encap(struct vr_softc *, struct mbuf **); 172 static int vr_ioctl(struct ifnet *, u_long, caddr_t); 173 static void vr_init(void *); 174 static void vr_init_locked(struct vr_softc *); 175 static void vr_tx_start(struct vr_softc *); 176 static void vr_rx_start(struct vr_softc *); 177 static int 
vr_tx_stop(struct vr_softc *); 178 static int vr_rx_stop(struct vr_softc *); 179 static void vr_stop(struct vr_softc *); 180 static void vr_watchdog(struct vr_softc *); 181 static int vr_ifmedia_upd(struct ifnet *); 182 static void vr_ifmedia_sts(struct ifnet *, struct ifmediareq *); 183 184 static int vr_miibus_readreg(device_t, int, int); 185 static int vr_miibus_writereg(device_t, int, int, int); 186 static void vr_miibus_statchg(device_t); 187 188 static void vr_link_task(void *, int); 189 static void vr_cam_mask(struct vr_softc *, uint32_t, int); 190 static int vr_cam_data(struct vr_softc *, int, int, uint8_t *); 191 static void vr_set_filter(struct vr_softc *); 192 static void vr_reset(const struct vr_softc *); 193 static int vr_tx_ring_init(struct vr_softc *); 194 static int vr_rx_ring_init(struct vr_softc *); 195 static void vr_setwol(struct vr_softc *); 196 static void vr_clrwol(struct vr_softc *); 197 static int vr_sysctl_stats(SYSCTL_HANDLER_ARGS); 198 199 static struct vr_tx_threshold_table { 200 int tx_cfg; 201 int bcr_cfg; 202 int value; 203 } vr_tx_threshold_tables[] = { 204 { VR_TXTHRESH_64BYTES, VR_BCR1_TXTHRESH64BYTES, 64 }, 205 { VR_TXTHRESH_128BYTES, VR_BCR1_TXTHRESH128BYTES, 128 }, 206 { VR_TXTHRESH_256BYTES, VR_BCR1_TXTHRESH256BYTES, 256 }, 207 { VR_TXTHRESH_512BYTES, VR_BCR1_TXTHRESH512BYTES, 512 }, 208 { VR_TXTHRESH_1024BYTES, VR_BCR1_TXTHRESH1024BYTES, 1024 }, 209 { VR_TXTHRESH_STORENFWD, VR_BCR1_TXTHRESHSTORENFWD, 2048 } 210 }; 211 212 static device_method_t vr_methods[] = { 213 /* Device interface */ 214 DEVMETHOD(device_probe, vr_probe), 215 DEVMETHOD(device_attach, vr_attach), 216 DEVMETHOD(device_detach, vr_detach), 217 DEVMETHOD(device_shutdown, vr_shutdown), 218 DEVMETHOD(device_suspend, vr_suspend), 219 DEVMETHOD(device_resume, vr_resume), 220 221 /* bus interface */ 222 DEVMETHOD(bus_print_child, bus_generic_print_child), 223 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 224 225 /* MII interface */ 226 DEVMETHOD(miibus_readreg, vr_miibus_readreg), 227 DEVMETHOD(miibus_writereg, vr_miibus_writereg), 228 DEVMETHOD(miibus_statchg, vr_miibus_statchg), 229 DEVMETHOD(miibus_linkchg, vr_miibus_statchg), 230 231 { NULL, NULL } 232 }; 233 234 static driver_t vr_driver = { 235 "vr", 236 vr_methods, 237 sizeof(struct vr_softc) 238 }; 239 240 static devclass_t vr_devclass; 241 242 DRIVER_MODULE(vr, pci, vr_driver, vr_devclass, 0, 0); 243 DRIVER_MODULE(miibus, vr, miibus_driver, miibus_devclass, 0, 0); 244 245 static int 246 vr_miibus_readreg(device_t dev, int phy, int reg) 247 { 248 struct vr_softc *sc; 249 int i; 250 251 sc = device_get_softc(dev); 252 if (sc->vr_phyaddr != phy) 253 return (0); 254 255 /* Set the register address. */ 256 CSR_WRITE_1(sc, VR_MIIADDR, reg); 257 VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB); 258 259 for (i = 0; i < VR_MII_TIMEOUT; i++) { 260 DELAY(1); 261 if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0) 262 break; 263 } 264 if (i == VR_MII_TIMEOUT) 265 device_printf(sc->vr_dev, "phy read timeout %d:%d\n", phy, reg); 266 267 return (CSR_READ_2(sc, VR_MIIDATA)); 268 } 269 270 static int 271 vr_miibus_writereg(device_t dev, int phy, int reg, int data) 272 { 273 struct vr_softc *sc; 274 int i; 275 276 sc = device_get_softc(dev); 277 if (sc->vr_phyaddr != phy) 278 return (0); 279 280 /* Set the register address and data to write. 
 */
    CSR_WRITE_1(sc, VR_MIIADDR, reg);
    CSR_WRITE_2(sc, VR_MIIDATA, data);
    VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB);

    for (i = 0; i < VR_MII_TIMEOUT; i++) {
        DELAY(1);
        if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0)
            break;
    }
    if (i == VR_MII_TIMEOUT)
        device_printf(sc->vr_dev, "phy write timeout %d:%d\n", phy,
            reg);

    return (0);
}

static void
vr_miibus_statchg(device_t dev)
{
    struct vr_softc *sc;

    sc = device_get_softc(dev);
    taskqueue_enqueue(taskqueue_swi, &sc->vr_link_task);
}

/*
 * In order to fiddle with the 'full-duplex' and '100Mbps' bits in
 * the netconfig register, we first have to put the transmit and/or
 * receive logic in the idle state.
 */
static void
vr_link_task(void *arg, int pending)
{
    struct vr_softc *sc;
    struct mii_data *mii;
    struct ifnet *ifp;
    int lfdx, mfdx;
    uint8_t cr0, cr1, fc;

    sc = (struct vr_softc *)arg;

    VR_LOCK(sc);
    mii = device_get_softc(sc->vr_miibus);
    ifp = sc->vr_ifp;
    if (mii == NULL || ifp == NULL ||
        (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
        VR_UNLOCK(sc);
        return;
    }

    if (mii->mii_media_status & IFM_ACTIVE) {
        if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
            sc->vr_link = 1;
    } else
        sc->vr_link = 0;

    if (sc->vr_link != 0) {
        cr0 = CSR_READ_1(sc, VR_CR0);
        cr1 = CSR_READ_1(sc, VR_CR1);
        mfdx = (cr1 & VR_CR1_FULLDUPLEX) != 0;
        lfdx = (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0;
        if (mfdx != lfdx) {
            if ((cr0 & (VR_CR0_TX_ON | VR_CR0_RX_ON)) != 0) {
                if (vr_tx_stop(sc) != 0 ||
                    vr_rx_stop(sc) != 0) {
                    device_printf(sc->vr_dev,
                        "%s: Tx/Rx shutdown error -- "
                        "resetting\n", __func__);
                    sc->vr_flags |= VR_F_RESTART;
                    VR_UNLOCK(sc);
                    return;
                }
            }
            if (lfdx)
                cr1 |= VR_CR1_FULLDUPLEX;
            else
                cr1 &= ~VR_CR1_FULLDUPLEX;
            CSR_WRITE_1(sc, VR_CR1, cr1);
        }
        fc = 0;
#ifdef notyet
        /* Configure flow-control. */
        if (sc->vr_revid >= REV_ID_VT6105_A0) {
            fc = CSR_READ_1(sc, VR_FLOWCR1);
            fc &= ~(VR_FLOWCR1_TXPAUSE | VR_FLOWCR1_RXPAUSE);
            if ((IFM_OPTIONS(mii->mii_media_active) &
                IFM_ETH_RXPAUSE) != 0)
                fc |= VR_FLOWCR1_RXPAUSE;
            if ((IFM_OPTIONS(mii->mii_media_active) &
                IFM_ETH_TXPAUSE) != 0)
                fc |= VR_FLOWCR1_TXPAUSE;
            CSR_WRITE_1(sc, VR_FLOWCR1, fc);
        } else if (sc->vr_revid >= REV_ID_VT6102_A) {
            /* No Tx pause capability available for Rhine II. */
            fc = CSR_READ_1(sc, VR_MISC_CR0);
            fc &= ~VR_MISCCR0_RXPAUSE;
            if ((IFM_OPTIONS(mii->mii_media_active) &
                IFM_ETH_RXPAUSE) != 0)
                fc |= VR_MISCCR0_RXPAUSE;
            CSR_WRITE_1(sc, VR_MISC_CR0, fc);
        }
#endif
        vr_rx_start(sc);
        vr_tx_start(sc);
    } else {
        if (vr_tx_stop(sc) != 0 || vr_rx_stop(sc) != 0) {
            device_printf(sc->vr_dev,
                "%s: Tx/Rx shutdown error -- resetting\n",
                __func__);
            sc->vr_flags |= VR_F_RESTART;
            VR_UNLOCK(sc);
            return;
        }
    }
    VR_UNLOCK(sc);
}

static void
vr_cam_mask(struct vr_softc *sc, uint32_t mask, int type)
{

    if (type == VR_MCAST_CAM)
        CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_MCAST);
    else
        CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_VLAN);
    CSR_WRITE_4(sc, VR_CAMMASK, mask);
    CSR_WRITE_1(sc, VR_CAMCTL, 0);
}

static int
vr_cam_data(struct vr_softc *sc, int type, int idx, uint8_t *mac)
{
    int i;

    if (type == VR_MCAST_CAM) {
        if (idx < 0 || idx >= VR_CAM_MCAST_CNT || mac == NULL)
            return (EINVAL);
        CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_MCAST);
    } else
        CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_VLAN);

    /* Set CAM entry address. */
    CSR_WRITE_1(sc, VR_CAMADDR, idx);
    /* Set CAM entry data. */
    if (type == VR_MCAST_CAM) {
        for (i = 0; i < ETHER_ADDR_LEN; i++)
            CSR_WRITE_1(sc, VR_MCAM0 + i, mac[i]);
    } else {
        CSR_WRITE_1(sc, VR_VCAM0, mac[0]);
        CSR_WRITE_1(sc, VR_VCAM1, mac[1]);
    }
    DELAY(10);
    /* Write CAM and wait for self-clear of VR_CAMCTL_WRITE bit. */
    CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_WRITE);
    for (i = 0; i < VR_TIMEOUT; i++) {
        DELAY(1);
        if ((CSR_READ_1(sc, VR_CAMCTL) & VR_CAMCTL_WRITE) == 0)
            break;
    }

    if (i == VR_TIMEOUT)
        device_printf(sc->vr_dev, "%s: setting CAM filter timeout!\n",
            __func__);
    CSR_WRITE_1(sc, VR_CAMCTL, 0);

    return (i == VR_TIMEOUT ? ETIMEDOUT : 0);
}

/*
 * Program the 64-bit multicast hash filter.
 */
static void
vr_set_filter(struct vr_softc *sc)
{
    struct ifnet *ifp;
    int h;
    uint32_t hashes[2] = { 0, 0 };
    struct ifmultiaddr *ifma;
    uint8_t rxfilt;
    int error, mcnt;
    uint32_t cam_mask;

    VR_LOCK_ASSERT(sc);

    ifp = sc->vr_ifp;
    rxfilt = CSR_READ_1(sc, VR_RXCFG);
    rxfilt &= ~(VR_RXCFG_RX_PROMISC | VR_RXCFG_RX_BROAD | VR_RXCFG_RX_MULTI);
    if (ifp->if_flags & IFF_BROADCAST)
        rxfilt |= VR_RXCFG_RX_BROAD;
    if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
        rxfilt |= VR_RXCFG_RX_MULTI;
        if (ifp->if_flags & IFF_PROMISC)
            rxfilt |= VR_RXCFG_RX_PROMISC;
        CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
        CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
        CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
        return;
    }

    /* Now program new ones. */
    error = 0;
    mcnt = 0;
    IF_ADDR_LOCK(ifp);
    if ((sc->vr_quirks & VR_Q_CAM) != 0) {
        /*
         * For hardware that has CAM capability, use the
         * 32-entry multicast perfect filter.
489 */ 490 cam_mask = 0; 491 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 492 if (ifma->ifma_addr->sa_family != AF_LINK) 493 continue; 494 error = vr_cam_data(sc, VR_MCAST_CAM, mcnt, 495 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 496 if (error != 0) { 497 cam_mask = 0; 498 break; 499 } 500 cam_mask |= 1 << mcnt; 501 mcnt++; 502 } 503 vr_cam_mask(sc, VR_MCAST_CAM, cam_mask); 504 } 505 506 if ((sc->vr_quirks & VR_Q_CAM) == 0 || error != 0) { 507 /* 508 * If there are too many multicast addresses or 509 * setting multicast CAM filter failed, use hash 510 * table based filtering. 511 */ 512 mcnt = 0; 513 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 514 if (ifma->ifma_addr->sa_family != AF_LINK) 515 continue; 516 h = ether_crc32_be(LLADDR((struct sockaddr_dl *) 517 ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; 518 if (h < 32) 519 hashes[0] |= (1 << h); 520 else 521 hashes[1] |= (1 << (h - 32)); 522 mcnt++; 523 } 524 } 525 IF_ADDR_UNLOCK(ifp); 526 527 if (mcnt > 0) 528 rxfilt |= VR_RXCFG_RX_MULTI; 529 530 CSR_WRITE_4(sc, VR_MAR0, hashes[0]); 531 CSR_WRITE_4(sc, VR_MAR1, hashes[1]); 532 CSR_WRITE_1(sc, VR_RXCFG, rxfilt); 533 } 534 535 static void 536 vr_reset(const struct vr_softc *sc) 537 { 538 int i; 539 540 /*VR_LOCK_ASSERT(sc);*/ /* XXX: Called during attach w/o lock. */ 541 542 CSR_WRITE_1(sc, VR_CR1, VR_CR1_RESET); 543 if (sc->vr_revid < REV_ID_VT6102_A) { 544 /* VT86C100A needs more delay after reset. */ 545 DELAY(100); 546 } 547 for (i = 0; i < VR_TIMEOUT; i++) { 548 DELAY(10); 549 if (!(CSR_READ_1(sc, VR_CR1) & VR_CR1_RESET)) 550 break; 551 } 552 if (i == VR_TIMEOUT) { 553 if (sc->vr_revid < REV_ID_VT6102_A) 554 device_printf(sc->vr_dev, "reset never completed!\n"); 555 else { 556 /* Use newer force reset command. */ 557 device_printf(sc->vr_dev, 558 "Using force reset command.\n"); 559 VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST); 560 /* 561 * Wait a little while for the chip to get its brains 562 * in order. 563 */ 564 DELAY(2000); 565 } 566 } 567 568 } 569 570 /* 571 * Probe for a VIA Rhine chip. Check the PCI vendor and device 572 * IDs against our list and return a match or NULL 573 */ 574 static struct vr_type * 575 vr_match(device_t dev) 576 { 577 struct vr_type *t = vr_devs; 578 579 for (t = vr_devs; t->vr_name != NULL; t++) 580 if ((pci_get_vendor(dev) == t->vr_vid) && 581 (pci_get_device(dev) == t->vr_did)) 582 return (t); 583 return (NULL); 584 } 585 586 /* 587 * Probe for a VIA Rhine chip. Check the PCI vendor and device 588 * IDs against our list and return a device name if we find a match. 589 */ 590 static int 591 vr_probe(device_t dev) 592 { 593 struct vr_type *t; 594 595 t = vr_match(dev); 596 if (t != NULL) { 597 device_set_desc(dev, t->vr_name); 598 return (BUS_PROBE_DEFAULT); 599 } 600 return (ENXIO); 601 } 602 603 /* 604 * Attach the interface. Allocate softc structures, do ifmedia 605 * setup and ethernet/BPF attach. 
606 */ 607 static int 608 vr_attach(device_t dev) 609 { 610 struct vr_softc *sc; 611 struct ifnet *ifp; 612 struct vr_type *t; 613 uint8_t eaddr[ETHER_ADDR_LEN]; 614 int error, rid; 615 int i, pmc; 616 617 sc = device_get_softc(dev); 618 sc->vr_dev = dev; 619 t = vr_match(dev); 620 KASSERT(t != NULL, ("Lost if_vr device match")); 621 sc->vr_quirks = t->vr_quirks; 622 device_printf(dev, "Quirks: 0x%x\n", sc->vr_quirks); 623 624 mtx_init(&sc->vr_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 625 MTX_DEF); 626 callout_init_mtx(&sc->vr_stat_callout, &sc->vr_mtx, 0); 627 TASK_INIT(&sc->vr_link_task, 0, vr_link_task, sc); 628 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 629 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 630 OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 631 vr_sysctl_stats, "I", "Statistics"); 632 633 error = 0; 634 635 /* 636 * Map control/status registers. 637 */ 638 pci_enable_busmaster(dev); 639 sc->vr_revid = pci_get_revid(dev); 640 device_printf(dev, "Revision: 0x%x\n", sc->vr_revid); 641 642 sc->vr_res_id = PCIR_BAR(0); 643 sc->vr_res_type = SYS_RES_IOPORT; 644 sc->vr_res = bus_alloc_resource_any(dev, sc->vr_res_type, 645 &sc->vr_res_id, RF_ACTIVE); 646 if (sc->vr_res == NULL) { 647 device_printf(dev, "couldn't map ports\n"); 648 error = ENXIO; 649 goto fail; 650 } 651 652 /* Allocate interrupt. */ 653 rid = 0; 654 sc->vr_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 655 RF_SHAREABLE | RF_ACTIVE); 656 657 if (sc->vr_irq == NULL) { 658 device_printf(dev, "couldn't map interrupt\n"); 659 error = ENXIO; 660 goto fail; 661 } 662 663 /* Allocate ifnet structure. */ 664 ifp = sc->vr_ifp = if_alloc(IFT_ETHER); 665 if (ifp == NULL) { 666 device_printf(dev, "couldn't allocate ifnet structure\n"); 667 error = ENOSPC; 668 goto fail; 669 } 670 ifp->if_softc = sc; 671 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 672 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 673 ifp->if_ioctl = vr_ioctl; 674 ifp->if_start = vr_start; 675 ifp->if_init = vr_init; 676 IFQ_SET_MAXLEN(&ifp->if_snd, VR_TX_RING_CNT - 1); 677 ifp->if_snd.ifq_maxlen = VR_TX_RING_CNT - 1; 678 IFQ_SET_READY(&ifp->if_snd); 679 680 /* Configure Tx FIFO threshold. */ 681 sc->vr_txthresh = VR_TXTHRESH_MIN; 682 if (sc->vr_revid < REV_ID_VT6105_A0) { 683 /* 684 * Use store and forward mode for Rhine I/II. 685 * Otherwise they produce a lot of Tx underruns and 686 * it would take a while to get working FIFO threshold 687 * value. 688 */ 689 sc->vr_txthresh = VR_TXTHRESH_MAX; 690 } 691 if ((sc->vr_quirks & VR_Q_CSUM) != 0) { 692 ifp->if_hwassist = VR_CSUM_FEATURES; 693 ifp->if_capabilities |= IFCAP_HWCSUM; 694 /* 695 * To update checksum field the hardware may need to 696 * store entire frames into FIFO before transmitting. 697 */ 698 sc->vr_txthresh = VR_TXTHRESH_MAX; 699 } 700 701 if (sc->vr_revid >= REV_ID_VT6102_A && 702 pci_find_extcap(dev, PCIY_PMG, &pmc) == 0) 703 ifp->if_capabilities |= IFCAP_WOL_UCAST | IFCAP_WOL_MAGIC; 704 705 /* Rhine supports oversized VLAN frame. */ 706 ifp->if_capabilities |= IFCAP_VLAN_MTU; 707 ifp->if_capenable = ifp->if_capabilities; 708 #ifdef DEVICE_POLLING 709 ifp->if_capabilities |= IFCAP_POLLING; 710 #endif 711 712 /* 713 * Windows may put the chip in suspend mode when it 714 * shuts down. Be sure to kick it in the head to wake it 715 * up again. 716 */ 717 if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0) 718 VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1)); 719 720 /* 721 * Get station address. 
The way the Rhine chips work, 722 * you're not allowed to directly access the EEPROM once 723 * they've been programmed a special way. Consequently, 724 * we need to read the node address from the PAR0 and PAR1 725 * registers. 726 * Reloading EEPROM also overwrites VR_CFGA, VR_CFGB, 727 * VR_CFGC and VR_CFGD such that memory mapped IO configured 728 * by driver is reset to default state. 729 */ 730 VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD); 731 for (i = VR_TIMEOUT; i > 0; i--) { 732 DELAY(1); 733 if ((CSR_READ_1(sc, VR_EECSR) & VR_EECSR_LOAD) == 0) 734 break; 735 } 736 if (i == 0) 737 device_printf(dev, "Reloading EEPROM timeout!\n"); 738 for (i = 0; i < ETHER_ADDR_LEN; i++) 739 eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i); 740 741 /* Reset the adapter. */ 742 vr_reset(sc); 743 /* Ack intr & disable further interrupts. */ 744 CSR_WRITE_2(sc, VR_ISR, 0xFFFF); 745 CSR_WRITE_2(sc, VR_IMR, 0); 746 if (sc->vr_revid >= REV_ID_VT6102_A) 747 CSR_WRITE_2(sc, VR_MII_IMR, 0); 748 749 if (sc->vr_revid < REV_ID_VT6102_A) { 750 pci_write_config(dev, VR_PCI_MODE2, 751 pci_read_config(dev, VR_PCI_MODE2, 1) | 752 VR_MODE2_MODE10T, 1); 753 } else { 754 /* Report error instead of retrying forever. */ 755 pci_write_config(dev, VR_PCI_MODE2, 756 pci_read_config(dev, VR_PCI_MODE2, 1) | 757 VR_MODE2_PCEROPT, 1); 758 /* Detect MII coding error. */ 759 pci_write_config(dev, VR_PCI_MODE3, 760 pci_read_config(dev, VR_PCI_MODE3, 1) | 761 VR_MODE3_MIION, 1); 762 if (sc->vr_revid >= REV_ID_VT6105_LOM && 763 sc->vr_revid < REV_ID_VT6105M_A0) 764 pci_write_config(dev, VR_PCI_MODE2, 765 pci_read_config(dev, VR_PCI_MODE2, 1) | 766 VR_MODE2_MODE10T, 1); 767 /* Enable Memory-Read-Multiple. */ 768 if (sc->vr_revid >= REV_ID_VT6107_A1 && 769 sc->vr_revid < REV_ID_VT6105M_A0) 770 pci_write_config(dev, VR_PCI_MODE2, 771 pci_read_config(dev, VR_PCI_MODE2, 1) | 772 VR_MODE2_MRDPL, 1); 773 } 774 /* Disable MII AUTOPOLL. */ 775 VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL); 776 777 if (vr_dma_alloc(sc) != 0) { 778 error = ENXIO; 779 goto fail; 780 } 781 782 /* Save PHY address. */ 783 if (sc->vr_revid >= REV_ID_VT6105_A0) 784 sc->vr_phyaddr = 1; 785 else 786 sc->vr_phyaddr = CSR_READ_1(sc, VR_PHYADDR) & VR_PHYADDR_MASK; 787 788 /* Do MII setup. */ 789 if (mii_phy_probe(dev, &sc->vr_miibus, 790 vr_ifmedia_upd, vr_ifmedia_sts)) { 791 device_printf(dev, "MII without any phy!\n"); 792 error = ENXIO; 793 goto fail; 794 } 795 796 /* Call MI attach routine. */ 797 ether_ifattach(ifp, eaddr); 798 /* 799 * Tell the upper layer(s) we support long frames. 800 * Must appear after the call to ether_ifattach() because 801 * ether_ifattach() sets ifi_hdrlen to the default value. 802 */ 803 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 804 805 /* Hook interrupt last to avoid having to lock softc. */ 806 error = bus_setup_intr(dev, sc->vr_irq, INTR_TYPE_NET | INTR_MPSAFE, 807 NULL, vr_intr, sc, &sc->vr_intrhand); 808 809 if (error) { 810 device_printf(dev, "couldn't set up irq\n"); 811 ether_ifdetach(ifp); 812 goto fail; 813 } 814 815 fail: 816 if (error) 817 vr_detach(dev); 818 819 return (error); 820 } 821 822 /* 823 * Shutdown hardware and free up resources. This can be called any 824 * time after the mutex has been initialized. It is called in both 825 * the error case in attach and the normal detach case so it needs 826 * to be careful about only freeing resources that have actually been 827 * allocated. 
828 */ 829 static int 830 vr_detach(device_t dev) 831 { 832 struct vr_softc *sc = device_get_softc(dev); 833 struct ifnet *ifp = sc->vr_ifp; 834 835 KASSERT(mtx_initialized(&sc->vr_mtx), ("vr mutex not initialized")); 836 837 #ifdef DEVICE_POLLING 838 if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING) 839 ether_poll_deregister(ifp); 840 #endif 841 842 /* These should only be active if attach succeeded. */ 843 if (device_is_attached(dev)) { 844 VR_LOCK(sc); 845 sc->vr_detach = 1; 846 vr_stop(sc); 847 VR_UNLOCK(sc); 848 callout_drain(&sc->vr_stat_callout); 849 taskqueue_drain(taskqueue_swi, &sc->vr_link_task); 850 ether_ifdetach(ifp); 851 } 852 if (sc->vr_miibus) 853 device_delete_child(dev, sc->vr_miibus); 854 bus_generic_detach(dev); 855 856 if (sc->vr_intrhand) 857 bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand); 858 if (sc->vr_irq) 859 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq); 860 if (sc->vr_res) 861 bus_release_resource(dev, sc->vr_res_type, sc->vr_res_id, 862 sc->vr_res); 863 864 if (ifp) 865 if_free(ifp); 866 867 vr_dma_free(sc); 868 869 mtx_destroy(&sc->vr_mtx); 870 871 return (0); 872 } 873 874 struct vr_dmamap_arg { 875 bus_addr_t vr_busaddr; 876 }; 877 878 static void 879 vr_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 880 { 881 struct vr_dmamap_arg *ctx; 882 883 if (error != 0) 884 return; 885 ctx = arg; 886 ctx->vr_busaddr = segs[0].ds_addr; 887 } 888 889 static int 890 vr_dma_alloc(struct vr_softc *sc) 891 { 892 struct vr_dmamap_arg ctx; 893 struct vr_txdesc *txd; 894 struct vr_rxdesc *rxd; 895 bus_size_t tx_alignment; 896 int error, i; 897 898 /* Create parent DMA tag. */ 899 error = bus_dma_tag_create( 900 bus_get_dma_tag(sc->vr_dev), /* parent */ 901 1, 0, /* alignment, boundary */ 902 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 903 BUS_SPACE_MAXADDR, /* highaddr */ 904 NULL, NULL, /* filter, filterarg */ 905 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 906 0, /* nsegments */ 907 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 908 0, /* flags */ 909 NULL, NULL, /* lockfunc, lockarg */ 910 &sc->vr_cdata.vr_parent_tag); 911 if (error != 0) { 912 device_printf(sc->vr_dev, "failed to create parent DMA tag\n"); 913 goto fail; 914 } 915 /* Create tag for Tx ring. */ 916 error = bus_dma_tag_create( 917 sc->vr_cdata.vr_parent_tag, /* parent */ 918 VR_RING_ALIGN, 0, /* alignment, boundary */ 919 BUS_SPACE_MAXADDR, /* lowaddr */ 920 BUS_SPACE_MAXADDR, /* highaddr */ 921 NULL, NULL, /* filter, filterarg */ 922 VR_TX_RING_SIZE, /* maxsize */ 923 1, /* nsegments */ 924 VR_TX_RING_SIZE, /* maxsegsize */ 925 0, /* flags */ 926 NULL, NULL, /* lockfunc, lockarg */ 927 &sc->vr_cdata.vr_tx_ring_tag); 928 if (error != 0) { 929 device_printf(sc->vr_dev, "failed to create Tx ring DMA tag\n"); 930 goto fail; 931 } 932 933 /* Create tag for Rx ring. */ 934 error = bus_dma_tag_create( 935 sc->vr_cdata.vr_parent_tag, /* parent */ 936 VR_RING_ALIGN, 0, /* alignment, boundary */ 937 BUS_SPACE_MAXADDR, /* lowaddr */ 938 BUS_SPACE_MAXADDR, /* highaddr */ 939 NULL, NULL, /* filter, filterarg */ 940 VR_RX_RING_SIZE, /* maxsize */ 941 1, /* nsegments */ 942 VR_RX_RING_SIZE, /* maxsegsize */ 943 0, /* flags */ 944 NULL, NULL, /* lockfunc, lockarg */ 945 &sc->vr_cdata.vr_rx_ring_tag); 946 if (error != 0) { 947 device_printf(sc->vr_dev, "failed to create Rx ring DMA tag\n"); 948 goto fail; 949 } 950 951 if ((sc->vr_quirks & VR_Q_NEEDALIGN) != 0) 952 tx_alignment = sizeof(uint32_t); 953 else 954 tx_alignment = 1; 955 /* Create tag for Tx buffers. 
*/ 956 error = bus_dma_tag_create( 957 sc->vr_cdata.vr_parent_tag, /* parent */ 958 tx_alignment, 0, /* alignment, boundary */ 959 BUS_SPACE_MAXADDR, /* lowaddr */ 960 BUS_SPACE_MAXADDR, /* highaddr */ 961 NULL, NULL, /* filter, filterarg */ 962 MCLBYTES * VR_MAXFRAGS, /* maxsize */ 963 VR_MAXFRAGS, /* nsegments */ 964 MCLBYTES, /* maxsegsize */ 965 0, /* flags */ 966 NULL, NULL, /* lockfunc, lockarg */ 967 &sc->vr_cdata.vr_tx_tag); 968 if (error != 0) { 969 device_printf(sc->vr_dev, "failed to create Tx DMA tag\n"); 970 goto fail; 971 } 972 973 /* Create tag for Rx buffers. */ 974 error = bus_dma_tag_create( 975 sc->vr_cdata.vr_parent_tag, /* parent */ 976 VR_RX_ALIGN, 0, /* alignment, boundary */ 977 BUS_SPACE_MAXADDR, /* lowaddr */ 978 BUS_SPACE_MAXADDR, /* highaddr */ 979 NULL, NULL, /* filter, filterarg */ 980 MCLBYTES, /* maxsize */ 981 1, /* nsegments */ 982 MCLBYTES, /* maxsegsize */ 983 0, /* flags */ 984 NULL, NULL, /* lockfunc, lockarg */ 985 &sc->vr_cdata.vr_rx_tag); 986 if (error != 0) { 987 device_printf(sc->vr_dev, "failed to create Rx DMA tag\n"); 988 goto fail; 989 } 990 991 /* Allocate DMA'able memory and load the DMA map for Tx ring. */ 992 error = bus_dmamem_alloc(sc->vr_cdata.vr_tx_ring_tag, 993 (void **)&sc->vr_rdata.vr_tx_ring, BUS_DMA_WAITOK | 994 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->vr_cdata.vr_tx_ring_map); 995 if (error != 0) { 996 device_printf(sc->vr_dev, 997 "failed to allocate DMA'able memory for Tx ring\n"); 998 goto fail; 999 } 1000 1001 ctx.vr_busaddr = 0; 1002 error = bus_dmamap_load(sc->vr_cdata.vr_tx_ring_tag, 1003 sc->vr_cdata.vr_tx_ring_map, sc->vr_rdata.vr_tx_ring, 1004 VR_TX_RING_SIZE, vr_dmamap_cb, &ctx, 0); 1005 if (error != 0 || ctx.vr_busaddr == 0) { 1006 device_printf(sc->vr_dev, 1007 "failed to load DMA'able memory for Tx ring\n"); 1008 goto fail; 1009 } 1010 sc->vr_rdata.vr_tx_ring_paddr = ctx.vr_busaddr; 1011 1012 /* Allocate DMA'able memory and load the DMA map for Rx ring. */ 1013 error = bus_dmamem_alloc(sc->vr_cdata.vr_rx_ring_tag, 1014 (void **)&sc->vr_rdata.vr_rx_ring, BUS_DMA_WAITOK | 1015 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->vr_cdata.vr_rx_ring_map); 1016 if (error != 0) { 1017 device_printf(sc->vr_dev, 1018 "failed to allocate DMA'able memory for Rx ring\n"); 1019 goto fail; 1020 } 1021 1022 ctx.vr_busaddr = 0; 1023 error = bus_dmamap_load(sc->vr_cdata.vr_rx_ring_tag, 1024 sc->vr_cdata.vr_rx_ring_map, sc->vr_rdata.vr_rx_ring, 1025 VR_RX_RING_SIZE, vr_dmamap_cb, &ctx, 0); 1026 if (error != 0 || ctx.vr_busaddr == 0) { 1027 device_printf(sc->vr_dev, 1028 "failed to load DMA'able memory for Rx ring\n"); 1029 goto fail; 1030 } 1031 sc->vr_rdata.vr_rx_ring_paddr = ctx.vr_busaddr; 1032 1033 /* Create DMA maps for Tx buffers. */ 1034 for (i = 0; i < VR_TX_RING_CNT; i++) { 1035 txd = &sc->vr_cdata.vr_txdesc[i]; 1036 txd->tx_m = NULL; 1037 txd->tx_dmamap = NULL; 1038 error = bus_dmamap_create(sc->vr_cdata.vr_tx_tag, 0, 1039 &txd->tx_dmamap); 1040 if (error != 0) { 1041 device_printf(sc->vr_dev, 1042 "failed to create Tx dmamap\n"); 1043 goto fail; 1044 } 1045 } 1046 /* Create DMA maps for Rx buffers. 
*/ 1047 if ((error = bus_dmamap_create(sc->vr_cdata.vr_rx_tag, 0, 1048 &sc->vr_cdata.vr_rx_sparemap)) != 0) { 1049 device_printf(sc->vr_dev, 1050 "failed to create spare Rx dmamap\n"); 1051 goto fail; 1052 } 1053 for (i = 0; i < VR_RX_RING_CNT; i++) { 1054 rxd = &sc->vr_cdata.vr_rxdesc[i]; 1055 rxd->rx_m = NULL; 1056 rxd->rx_dmamap = NULL; 1057 error = bus_dmamap_create(sc->vr_cdata.vr_rx_tag, 0, 1058 &rxd->rx_dmamap); 1059 if (error != 0) { 1060 device_printf(sc->vr_dev, 1061 "failed to create Rx dmamap\n"); 1062 goto fail; 1063 } 1064 } 1065 1066 fail: 1067 return (error); 1068 } 1069 1070 static void 1071 vr_dma_free(struct vr_softc *sc) 1072 { 1073 struct vr_txdesc *txd; 1074 struct vr_rxdesc *rxd; 1075 int i; 1076 1077 /* Tx ring. */ 1078 if (sc->vr_cdata.vr_tx_ring_tag) { 1079 if (sc->vr_cdata.vr_tx_ring_map) 1080 bus_dmamap_unload(sc->vr_cdata.vr_tx_ring_tag, 1081 sc->vr_cdata.vr_tx_ring_map); 1082 if (sc->vr_cdata.vr_tx_ring_map && 1083 sc->vr_rdata.vr_tx_ring) 1084 bus_dmamem_free(sc->vr_cdata.vr_tx_ring_tag, 1085 sc->vr_rdata.vr_tx_ring, 1086 sc->vr_cdata.vr_tx_ring_map); 1087 sc->vr_rdata.vr_tx_ring = NULL; 1088 sc->vr_cdata.vr_tx_ring_map = NULL; 1089 bus_dma_tag_destroy(sc->vr_cdata.vr_tx_ring_tag); 1090 sc->vr_cdata.vr_tx_ring_tag = NULL; 1091 } 1092 /* Rx ring. */ 1093 if (sc->vr_cdata.vr_rx_ring_tag) { 1094 if (sc->vr_cdata.vr_rx_ring_map) 1095 bus_dmamap_unload(sc->vr_cdata.vr_rx_ring_tag, 1096 sc->vr_cdata.vr_rx_ring_map); 1097 if (sc->vr_cdata.vr_rx_ring_map && 1098 sc->vr_rdata.vr_rx_ring) 1099 bus_dmamem_free(sc->vr_cdata.vr_rx_ring_tag, 1100 sc->vr_rdata.vr_rx_ring, 1101 sc->vr_cdata.vr_rx_ring_map); 1102 sc->vr_rdata.vr_rx_ring = NULL; 1103 sc->vr_cdata.vr_rx_ring_map = NULL; 1104 bus_dma_tag_destroy(sc->vr_cdata.vr_rx_ring_tag); 1105 sc->vr_cdata.vr_rx_ring_tag = NULL; 1106 } 1107 /* Tx buffers. */ 1108 if (sc->vr_cdata.vr_tx_tag) { 1109 for (i = 0; i < VR_TX_RING_CNT; i++) { 1110 txd = &sc->vr_cdata.vr_txdesc[i]; 1111 if (txd->tx_dmamap) { 1112 bus_dmamap_destroy(sc->vr_cdata.vr_tx_tag, 1113 txd->tx_dmamap); 1114 txd->tx_dmamap = NULL; 1115 } 1116 } 1117 bus_dma_tag_destroy(sc->vr_cdata.vr_tx_tag); 1118 sc->vr_cdata.vr_tx_tag = NULL; 1119 } 1120 /* Rx buffers. */ 1121 if (sc->vr_cdata.vr_rx_tag) { 1122 for (i = 0; i < VR_RX_RING_CNT; i++) { 1123 rxd = &sc->vr_cdata.vr_rxdesc[i]; 1124 if (rxd->rx_dmamap) { 1125 bus_dmamap_destroy(sc->vr_cdata.vr_rx_tag, 1126 rxd->rx_dmamap); 1127 rxd->rx_dmamap = NULL; 1128 } 1129 } 1130 if (sc->vr_cdata.vr_rx_sparemap) { 1131 bus_dmamap_destroy(sc->vr_cdata.vr_rx_tag, 1132 sc->vr_cdata.vr_rx_sparemap); 1133 sc->vr_cdata.vr_rx_sparemap = 0; 1134 } 1135 bus_dma_tag_destroy(sc->vr_cdata.vr_rx_tag); 1136 sc->vr_cdata.vr_rx_tag = NULL; 1137 } 1138 1139 if (sc->vr_cdata.vr_parent_tag) { 1140 bus_dma_tag_destroy(sc->vr_cdata.vr_parent_tag); 1141 sc->vr_cdata.vr_parent_tag = NULL; 1142 } 1143 } 1144 1145 /* 1146 * Initialize the transmit descriptors. 
1147 */ 1148 static int 1149 vr_tx_ring_init(struct vr_softc *sc) 1150 { 1151 struct vr_ring_data *rd; 1152 struct vr_txdesc *txd; 1153 bus_addr_t addr; 1154 int i; 1155 1156 sc->vr_cdata.vr_tx_prod = 0; 1157 sc->vr_cdata.vr_tx_cons = 0; 1158 sc->vr_cdata.vr_tx_cnt = 0; 1159 sc->vr_cdata.vr_tx_pkts = 0; 1160 1161 rd = &sc->vr_rdata; 1162 bzero(rd->vr_tx_ring, VR_TX_RING_SIZE); 1163 for (i = 0; i < VR_TX_RING_CNT; i++) { 1164 if (i == VR_TX_RING_CNT - 1) 1165 addr = VR_TX_RING_ADDR(sc, 0); 1166 else 1167 addr = VR_TX_RING_ADDR(sc, i + 1); 1168 rd->vr_tx_ring[i].vr_nextphys = htole32(VR_ADDR_LO(addr)); 1169 txd = &sc->vr_cdata.vr_txdesc[i]; 1170 txd->tx_m = NULL; 1171 } 1172 1173 bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag, 1174 sc->vr_cdata.vr_tx_ring_map, 1175 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1176 1177 return (0); 1178 } 1179 1180 /* 1181 * Initialize the RX descriptors and allocate mbufs for them. Note that 1182 * we arrange the descriptors in a closed ring, so that the last descriptor 1183 * points back to the first. 1184 */ 1185 static int 1186 vr_rx_ring_init(struct vr_softc *sc) 1187 { 1188 struct vr_ring_data *rd; 1189 struct vr_rxdesc *rxd; 1190 bus_addr_t addr; 1191 int i; 1192 1193 sc->vr_cdata.vr_rx_cons = 0; 1194 1195 rd = &sc->vr_rdata; 1196 bzero(rd->vr_rx_ring, VR_RX_RING_SIZE); 1197 for (i = 0; i < VR_RX_RING_CNT; i++) { 1198 rxd = &sc->vr_cdata.vr_rxdesc[i]; 1199 rxd->rx_m = NULL; 1200 rxd->desc = &rd->vr_rx_ring[i]; 1201 if (i == VR_RX_RING_CNT - 1) 1202 addr = VR_RX_RING_ADDR(sc, 0); 1203 else 1204 addr = VR_RX_RING_ADDR(sc, i + 1); 1205 rd->vr_rx_ring[i].vr_nextphys = htole32(VR_ADDR_LO(addr)); 1206 if (vr_newbuf(sc, i) != 0) 1207 return (ENOBUFS); 1208 } 1209 1210 bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag, 1211 sc->vr_cdata.vr_rx_ring_map, 1212 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1213 1214 return (0); 1215 } 1216 1217 static __inline void 1218 vr_discard_rxbuf(struct vr_rxdesc *rxd) 1219 { 1220 struct vr_desc *desc; 1221 1222 desc = rxd->desc; 1223 desc->vr_ctl = htole32(VR_RXCTL | (MCLBYTES - sizeof(uint64_t))); 1224 desc->vr_status = htole32(VR_RXSTAT_OWN); 1225 } 1226 1227 /* 1228 * Initialize an RX descriptor and attach an MBUF cluster. 1229 * Note: the length fields are only 11 bits wide, which means the 1230 * largest size we can specify is 2047. This is important because 1231 * MCLBYTES is 2048, so we have to subtract one otherwise we'll 1232 * overflow the field and make a mess. 
1233 */ 1234 static int 1235 vr_newbuf(struct vr_softc *sc, int idx) 1236 { 1237 struct vr_desc *desc; 1238 struct vr_rxdesc *rxd; 1239 struct mbuf *m; 1240 bus_dma_segment_t segs[1]; 1241 bus_dmamap_t map; 1242 int nsegs; 1243 1244 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1245 if (m == NULL) 1246 return (ENOBUFS); 1247 m->m_len = m->m_pkthdr.len = MCLBYTES; 1248 m_adj(m, sizeof(uint64_t)); 1249 1250 if (bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_rx_tag, 1251 sc->vr_cdata.vr_rx_sparemap, m, segs, &nsegs, 0) != 0) { 1252 m_freem(m); 1253 return (ENOBUFS); 1254 } 1255 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1256 1257 rxd = &sc->vr_cdata.vr_rxdesc[idx]; 1258 if (rxd->rx_m != NULL) { 1259 bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap, 1260 BUS_DMASYNC_POSTREAD); 1261 bus_dmamap_unload(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap); 1262 } 1263 map = rxd->rx_dmamap; 1264 rxd->rx_dmamap = sc->vr_cdata.vr_rx_sparemap; 1265 sc->vr_cdata.vr_rx_sparemap = map; 1266 bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap, 1267 BUS_DMASYNC_PREREAD); 1268 rxd->rx_m = m; 1269 desc = rxd->desc; 1270 desc->vr_data = htole32(VR_ADDR_LO(segs[0].ds_addr)); 1271 desc->vr_ctl = htole32(VR_RXCTL | segs[0].ds_len); 1272 desc->vr_status = htole32(VR_RXSTAT_OWN); 1273 1274 return (0); 1275 } 1276 1277 #ifndef __NO_STRICT_ALIGNMENT 1278 static __inline void 1279 vr_fixup_rx(struct mbuf *m) 1280 { 1281 uint16_t *src, *dst; 1282 int i; 1283 1284 src = mtod(m, uint16_t *); 1285 dst = src - 1; 1286 1287 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) 1288 *dst++ = *src++; 1289 1290 m->m_data -= ETHER_ALIGN; 1291 } 1292 #endif 1293 1294 /* 1295 * A frame has been uploaded: pass the resulting mbuf chain up to 1296 * the higher level protocols. 1297 */ 1298 static void 1299 vr_rxeof(struct vr_softc *sc) 1300 { 1301 struct vr_rxdesc *rxd; 1302 struct mbuf *m; 1303 struct ifnet *ifp; 1304 struct vr_desc *cur_rx; 1305 int cons, prog, total_len; 1306 uint32_t rxstat, rxctl; 1307 1308 VR_LOCK_ASSERT(sc); 1309 ifp = sc->vr_ifp; 1310 cons = sc->vr_cdata.vr_rx_cons; 1311 1312 bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag, 1313 sc->vr_cdata.vr_rx_ring_map, 1314 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1315 1316 for (prog = 0; prog < VR_RX_RING_CNT; VR_INC(cons, VR_RX_RING_CNT)) { 1317 #ifdef DEVICE_POLLING 1318 if (ifp->if_capenable & IFCAP_POLLING) { 1319 if (sc->rxcycles <= 0) 1320 break; 1321 sc->rxcycles--; 1322 } 1323 #endif 1324 cur_rx = &sc->vr_rdata.vr_rx_ring[cons]; 1325 rxstat = le32toh(cur_rx->vr_status); 1326 rxctl = le32toh(cur_rx->vr_ctl); 1327 if ((rxstat & VR_RXSTAT_OWN) == VR_RXSTAT_OWN) 1328 break; 1329 1330 prog++; 1331 rxd = &sc->vr_cdata.vr_rxdesc[cons]; 1332 m = rxd->rx_m; 1333 1334 /* 1335 * If an error occurs, update stats, clear the 1336 * status word and leave the mbuf cluster in place: 1337 * it should simply get re-used next time this descriptor 1338 * comes up in the ring. 1339 * We don't support SG in Rx path yet, so discard 1340 * partial frame. 
1341 */ 1342 if ((rxstat & VR_RXSTAT_RX_OK) == 0 || 1343 (rxstat & (VR_RXSTAT_FIRSTFRAG | VR_RXSTAT_LASTFRAG)) != 1344 (VR_RXSTAT_FIRSTFRAG | VR_RXSTAT_LASTFRAG)) { 1345 ifp->if_ierrors++; 1346 sc->vr_stat.rx_errors++; 1347 if (rxstat & VR_RXSTAT_CRCERR) 1348 sc->vr_stat.rx_crc_errors++; 1349 if (rxstat & VR_RXSTAT_FRAMEALIGNERR) 1350 sc->vr_stat.rx_alignment++; 1351 if (rxstat & VR_RXSTAT_FIFOOFLOW) 1352 sc->vr_stat.rx_fifo_overflows++; 1353 if (rxstat & VR_RXSTAT_GIANT) 1354 sc->vr_stat.rx_giants++; 1355 if (rxstat & VR_RXSTAT_RUNT) 1356 sc->vr_stat.rx_runts++; 1357 if (rxstat & VR_RXSTAT_BUFFERR) 1358 sc->vr_stat.rx_no_buffers++; 1359 #ifdef VR_SHOW_ERRORS 1360 device_printf(sc->vr_dev, "%s: receive error = 0x%b\n", 1361 __func__, rxstat & 0xff, VR_RXSTAT_ERR_BITS); 1362 #endif 1363 vr_discard_rxbuf(rxd); 1364 continue; 1365 } 1366 1367 if (vr_newbuf(sc, cons) != 0) { 1368 ifp->if_iqdrops++; 1369 sc->vr_stat.rx_errors++; 1370 sc->vr_stat.rx_no_mbufs++; 1371 vr_discard_rxbuf(rxd); 1372 continue; 1373 } 1374 1375 /* 1376 * XXX The VIA Rhine chip includes the CRC with every 1377 * received frame, and there's no way to turn this 1378 * behavior off (at least, I can't find anything in 1379 * the manual that explains how to do it) so we have 1380 * to trim off the CRC manually. 1381 */ 1382 total_len = VR_RXBYTES(rxstat); 1383 total_len -= ETHER_CRC_LEN; 1384 m->m_pkthdr.len = m->m_len = total_len; 1385 #ifndef __NO_STRICT_ALIGNMENT 1386 /* 1387 * RX buffers must be 32-bit aligned. 1388 * Ignore the alignment problems on the non-strict alignment 1389 * platform. The performance hit incurred due to unaligned 1390 * accesses is much smaller than the hit produced by forcing 1391 * buffer copies all the time. 1392 */ 1393 vr_fixup_rx(m); 1394 #endif 1395 m->m_pkthdr.rcvif = ifp; 1396 ifp->if_ipackets++; 1397 sc->vr_stat.rx_ok++; 1398 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 && 1399 (rxstat & VR_RXSTAT_FRAG) == 0 && 1400 (rxctl & VR_RXCTL_IP) != 0) { 1401 /* Checksum is valid for non-fragmented IP packets. */ 1402 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 1403 if ((rxctl & VR_RXCTL_IPOK) == VR_RXCTL_IPOK) { 1404 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 1405 if (rxctl & (VR_RXCTL_TCP | VR_RXCTL_UDP)) { 1406 m->m_pkthdr.csum_flags |= 1407 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1408 if ((rxctl & VR_RXCTL_TCPUDPOK) != 0) 1409 m->m_pkthdr.csum_data = 0xffff; 1410 } 1411 } 1412 } 1413 VR_UNLOCK(sc); 1414 (*ifp->if_input)(ifp, m); 1415 VR_LOCK(sc); 1416 } 1417 1418 if (prog > 0) { 1419 sc->vr_cdata.vr_rx_cons = cons; 1420 bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag, 1421 sc->vr_cdata.vr_rx_ring_map, 1422 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1423 } 1424 } 1425 1426 /* 1427 * A frame was downloaded to the chip. It's safe for us to clean up 1428 * the list buffers. 1429 */ 1430 static void 1431 vr_txeof(struct vr_softc *sc) 1432 { 1433 struct vr_txdesc *txd; 1434 struct vr_desc *cur_tx; 1435 struct ifnet *ifp; 1436 uint32_t txctl, txstat; 1437 int cons, prod; 1438 1439 VR_LOCK_ASSERT(sc); 1440 1441 cons = sc->vr_cdata.vr_tx_cons; 1442 prod = sc->vr_cdata.vr_tx_prod; 1443 if (cons == prod) 1444 return; 1445 1446 bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag, 1447 sc->vr_cdata.vr_tx_ring_map, 1448 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1449 1450 ifp = sc->vr_ifp; 1451 /* 1452 * Go through our tx list and free mbufs for those 1453 * frames that have been transmitted. 
1454 */ 1455 for (; cons != prod; VR_INC(cons, VR_TX_RING_CNT)) { 1456 cur_tx = &sc->vr_rdata.vr_tx_ring[cons]; 1457 txctl = le32toh(cur_tx->vr_ctl); 1458 txstat = le32toh(cur_tx->vr_status); 1459 if ((txstat & VR_TXSTAT_OWN) == VR_TXSTAT_OWN) 1460 break; 1461 1462 sc->vr_cdata.vr_tx_cnt--; 1463 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1464 /* Only the first descriptor in the chain is valid. */ 1465 if ((txctl & VR_TXCTL_FIRSTFRAG) == 0) 1466 continue; 1467 1468 txd = &sc->vr_cdata.vr_txdesc[cons]; 1469 KASSERT(txd->tx_m != NULL, ("%s: accessing NULL mbuf!\n", 1470 __func__)); 1471 1472 if ((txstat & VR_TXSTAT_ERRSUM) != 0) { 1473 ifp->if_oerrors++; 1474 sc->vr_stat.tx_errors++; 1475 if ((txstat & VR_TXSTAT_ABRT) != 0) { 1476 /* Give up and restart Tx. */ 1477 sc->vr_stat.tx_abort++; 1478 bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, 1479 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 1480 bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, 1481 txd->tx_dmamap); 1482 m_freem(txd->tx_m); 1483 txd->tx_m = NULL; 1484 VR_INC(cons, VR_TX_RING_CNT); 1485 sc->vr_cdata.vr_tx_cons = cons; 1486 if (vr_tx_stop(sc) != 0) { 1487 device_printf(sc->vr_dev, 1488 "%s: Tx shutdown error -- " 1489 "resetting\n", __func__); 1490 sc->vr_flags |= VR_F_RESTART; 1491 return; 1492 } 1493 vr_tx_start(sc); 1494 break; 1495 } 1496 if ((sc->vr_revid < REV_ID_VT3071_A && 1497 (txstat & VR_TXSTAT_UNDERRUN)) || 1498 (txstat & (VR_TXSTAT_UDF | VR_TXSTAT_TBUFF))) { 1499 sc->vr_stat.tx_underrun++; 1500 /* Retry and restart Tx. */ 1501 sc->vr_cdata.vr_tx_cnt++; 1502 sc->vr_cdata.vr_tx_cons = cons; 1503 cur_tx->vr_status = htole32(VR_TXSTAT_OWN); 1504 bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag, 1505 sc->vr_cdata.vr_tx_ring_map, 1506 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1507 vr_tx_underrun(sc); 1508 return; 1509 } 1510 if ((txstat & VR_TXSTAT_DEFER) != 0) { 1511 ifp->if_collisions++; 1512 sc->vr_stat.tx_collisions++; 1513 } 1514 if ((txstat & VR_TXSTAT_LATECOLL) != 0) { 1515 ifp->if_collisions++; 1516 sc->vr_stat.tx_late_collisions++; 1517 } 1518 } else { 1519 sc->vr_stat.tx_ok++; 1520 ifp->if_opackets++; 1521 } 1522 1523 bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap, 1524 BUS_DMASYNC_POSTWRITE); 1525 bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap); 1526 if (sc->vr_revid < REV_ID_VT3071_A) { 1527 ifp->if_collisions += 1528 (txstat & VR_TXSTAT_COLLCNT) >> 3; 1529 sc->vr_stat.tx_collisions += 1530 (txstat & VR_TXSTAT_COLLCNT) >> 3; 1531 } else { 1532 ifp->if_collisions += (txstat & 0x0f); 1533 sc->vr_stat.tx_collisions += (txstat & 0x0f); 1534 } 1535 m_freem(txd->tx_m); 1536 txd->tx_m = NULL; 1537 } 1538 1539 sc->vr_cdata.vr_tx_cons = cons; 1540 if (sc->vr_cdata.vr_tx_cnt == 0) 1541 sc->vr_watchdog_timer = 0; 1542 } 1543 1544 static void 1545 vr_tick(void *xsc) 1546 { 1547 struct vr_softc *sc; 1548 struct mii_data *mii; 1549 1550 sc = (struct vr_softc *)xsc; 1551 1552 VR_LOCK_ASSERT(sc); 1553 1554 if ((sc->vr_flags & VR_F_RESTART) != 0) { 1555 device_printf(sc->vr_dev, "restarting\n"); 1556 sc->vr_stat.num_restart++; 1557 vr_stop(sc); 1558 vr_reset(sc); 1559 vr_init_locked(sc); 1560 sc->vr_flags &= ~VR_F_RESTART; 1561 } 1562 1563 mii = device_get_softc(sc->vr_miibus); 1564 mii_tick(mii); 1565 vr_watchdog(sc); 1566 callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc); 1567 } 1568 1569 #ifdef DEVICE_POLLING 1570 static poll_handler_t vr_poll; 1571 static poll_handler_t vr_poll_locked; 1572 1573 static void 1574 vr_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 1575 { 1576 struct vr_softc *sc; 1577 1578 sc = 
ifp->if_softc; 1579 1580 VR_LOCK(sc); 1581 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1582 vr_poll_locked(ifp, cmd, count); 1583 VR_UNLOCK(sc); 1584 } 1585 1586 static void 1587 vr_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count) 1588 { 1589 struct vr_softc *sc; 1590 1591 sc = ifp->if_softc; 1592 1593 VR_LOCK_ASSERT(sc); 1594 1595 sc->rxcycles = count; 1596 vr_rxeof(sc); 1597 vr_txeof(sc); 1598 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1599 vr_start_locked(ifp); 1600 1601 if (cmd == POLL_AND_CHECK_STATUS) { 1602 uint16_t status; 1603 1604 /* Also check status register. */ 1605 status = CSR_READ_2(sc, VR_ISR); 1606 if (status) 1607 CSR_WRITE_2(sc, VR_ISR, status); 1608 1609 if ((status & VR_INTRS) == 0) 1610 return; 1611 1612 if ((status & (VR_ISR_BUSERR | VR_ISR_LINKSTAT2 | 1613 VR_ISR_STATSOFLOW)) != 0) { 1614 if (vr_error(sc, status) != 0) 1615 return; 1616 } 1617 if ((status & (VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW)) != 0) { 1618 #ifdef VR_SHOW_ERRORS 1619 device_printf(sc->vr_dev, "%s: receive error : 0x%b\n", 1620 __func__, status, VR_ISR_ERR_BITS); 1621 #endif 1622 vr_rx_start(sc); 1623 } 1624 } 1625 } 1626 #endif /* DEVICE_POLLING */ 1627 1628 /* Back off the transmit threshold. */ 1629 static void 1630 vr_tx_underrun(struct vr_softc *sc) 1631 { 1632 int thresh; 1633 1634 device_printf(sc->vr_dev, "Tx underrun -- "); 1635 if (sc->vr_txthresh < VR_TXTHRESH_MAX) { 1636 thresh = sc->vr_txthresh; 1637 sc->vr_txthresh++; 1638 if (sc->vr_txthresh >= VR_TXTHRESH_MAX) { 1639 sc->vr_txthresh = VR_TXTHRESH_MAX; 1640 printf("using store and forward mode\n"); 1641 } else 1642 printf("increasing Tx threshold(%d -> %d)\n", 1643 vr_tx_threshold_tables[thresh].value, 1644 vr_tx_threshold_tables[thresh + 1].value); 1645 } else 1646 printf("\n"); 1647 sc->vr_stat.tx_underrun++; 1648 if (vr_tx_stop(sc) != 0) { 1649 device_printf(sc->vr_dev, "%s: Tx shutdown error -- " 1650 "resetting\n", __func__); 1651 sc->vr_flags |= VR_F_RESTART; 1652 return; 1653 } 1654 vr_tx_start(sc); 1655 } 1656 1657 static void 1658 vr_intr(void *arg) 1659 { 1660 struct vr_softc *sc; 1661 struct ifnet *ifp; 1662 uint16_t status; 1663 1664 sc = (struct vr_softc *)arg; 1665 1666 VR_LOCK(sc); 1667 1668 if (sc->vr_suspended != 0) 1669 goto done_locked; 1670 1671 status = CSR_READ_2(sc, VR_ISR); 1672 if (status == 0 || status == 0xffff || (status & VR_INTRS) == 0) 1673 goto done_locked; 1674 1675 ifp = sc->vr_ifp; 1676 #ifdef DEVICE_POLLING 1677 if ((ifp->if_capenable & IFCAP_POLLING) != 0) 1678 goto done_locked; 1679 #endif 1680 1681 /* Suppress unwanted interrupts. */ 1682 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || 1683 (sc->vr_flags & VR_F_RESTART) != 0) { 1684 CSR_WRITE_2(sc, VR_IMR, 0); 1685 CSR_WRITE_2(sc, VR_ISR, status); 1686 goto done_locked; 1687 } 1688 1689 /* Disable interrupts. */ 1690 CSR_WRITE_2(sc, VR_IMR, 0x0000); 1691 1692 for (; (status & VR_INTRS) != 0;) { 1693 CSR_WRITE_2(sc, VR_ISR, status); 1694 if ((status & (VR_ISR_BUSERR | VR_ISR_LINKSTAT2 | 1695 VR_ISR_STATSOFLOW)) != 0) { 1696 if (vr_error(sc, status) != 0) { 1697 VR_UNLOCK(sc); 1698 return; 1699 } 1700 } 1701 vr_rxeof(sc); 1702 if ((status & (VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW)) != 0) { 1703 #ifdef VR_SHOW_ERRORS 1704 device_printf(sc->vr_dev, "%s: receive error = 0x%b\n", 1705 __func__, status, VR_ISR_ERR_BITS); 1706 #endif 1707 /* Restart Rx if RxDMA SM was stopped. */ 1708 vr_rx_start(sc); 1709 } 1710 vr_txeof(sc); 1711 status = CSR_READ_2(sc, VR_ISR); 1712 } 1713 1714 /* Re-enable interrupts. 
*/ 1715 CSR_WRITE_2(sc, VR_IMR, VR_INTRS); 1716 1717 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1718 vr_start_locked(ifp); 1719 1720 done_locked: 1721 VR_UNLOCK(sc); 1722 } 1723 1724 static int 1725 vr_error(struct vr_softc *sc, uint16_t status) 1726 { 1727 uint16_t pcis; 1728 1729 status &= VR_ISR_BUSERR | VR_ISR_LINKSTAT2 | VR_ISR_STATSOFLOW; 1730 if ((status & VR_ISR_BUSERR) != 0) { 1731 status &= ~VR_ISR_BUSERR; 1732 sc->vr_stat.bus_errors++; 1733 /* Disable further interrupts. */ 1734 CSR_WRITE_2(sc, VR_IMR, 0); 1735 pcis = pci_read_config(sc->vr_dev, PCIR_STATUS, 2); 1736 device_printf(sc->vr_dev, "PCI bus error(0x%04x) -- " 1737 "resetting\n", pcis); 1738 pci_write_config(sc->vr_dev, PCIR_STATUS, pcis, 2); 1739 sc->vr_flags |= VR_F_RESTART; 1740 return (EAGAIN); 1741 } 1742 if ((status & VR_ISR_LINKSTAT2) != 0) { 1743 /* Link state change, duplex changes etc. */ 1744 status &= ~VR_ISR_LINKSTAT2; 1745 } 1746 if ((status & VR_ISR_STATSOFLOW) != 0) { 1747 status &= ~VR_ISR_STATSOFLOW; 1748 if (sc->vr_revid >= REV_ID_VT6105M_A0) { 1749 /* Update MIB counters. */ 1750 } 1751 } 1752 1753 if (status != 0) 1754 device_printf(sc->vr_dev, 1755 "unhandled interrupt, status = 0x%04x\n", status); 1756 return (0); 1757 } 1758 1759 /* 1760 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 1761 * pointers to the fragment pointers. 1762 */ 1763 static int 1764 vr_encap(struct vr_softc *sc, struct mbuf **m_head) 1765 { 1766 struct vr_txdesc *txd; 1767 struct vr_desc *desc; 1768 struct mbuf *m; 1769 bus_dma_segment_t txsegs[VR_MAXFRAGS]; 1770 uint32_t csum_flags, txctl; 1771 int error, i, nsegs, prod, si; 1772 int padlen; 1773 1774 VR_LOCK_ASSERT(sc); 1775 1776 M_ASSERTPKTHDR((*m_head)); 1777 1778 /* 1779 * Some VIA Rhine wants packet buffers to be longword 1780 * aligned, but very often our mbufs aren't. Rather than 1781 * waste time trying to decide when to copy and when not 1782 * to copy, just do it all the time. 1783 */ 1784 if ((sc->vr_quirks & VR_Q_NEEDALIGN) != 0) { 1785 m = m_defrag(*m_head, M_DONTWAIT); 1786 if (m == NULL) { 1787 m_freem(*m_head); 1788 *m_head = NULL; 1789 return (ENOBUFS); 1790 } 1791 *m_head = m; 1792 } 1793 1794 /* 1795 * The Rhine chip doesn't auto-pad, so we have to make 1796 * sure to pad short frames out to the minimum frame length 1797 * ourselves. 1798 */ 1799 if ((*m_head)->m_pkthdr.len < VR_MIN_FRAMELEN) { 1800 m = *m_head; 1801 padlen = VR_MIN_FRAMELEN - m->m_pkthdr.len; 1802 if (M_WRITABLE(m) == 0) { 1803 /* Get a writable copy. */ 1804 m = m_dup(*m_head, M_DONTWAIT); 1805 m_freem(*m_head); 1806 if (m == NULL) { 1807 *m_head = NULL; 1808 return (ENOBUFS); 1809 } 1810 *m_head = m; 1811 } 1812 if (m->m_next != NULL || M_TRAILINGSPACE(m) < padlen) { 1813 m = m_defrag(m, M_DONTWAIT); 1814 if (m == NULL) { 1815 m_freem(*m_head); 1816 *m_head = NULL; 1817 return (ENOBUFS); 1818 } 1819 } 1820 /* 1821 * Manually pad short frames, and zero the pad space 1822 * to avoid leaking data. 
1823 */ 1824 bzero(mtod(m, char *) + m->m_pkthdr.len, padlen); 1825 m->m_pkthdr.len += padlen; 1826 m->m_len = m->m_pkthdr.len; 1827 *m_head = m; 1828 } 1829 1830 prod = sc->vr_cdata.vr_tx_prod; 1831 txd = &sc->vr_cdata.vr_txdesc[prod]; 1832 error = bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap, 1833 *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); 1834 if (error == EFBIG) { 1835 m = m_collapse(*m_head, M_DONTWAIT, VR_MAXFRAGS); 1836 if (m == NULL) { 1837 m_freem(*m_head); 1838 *m_head = NULL; 1839 return (ENOBUFS); 1840 } 1841 *m_head = m; 1842 error = bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_tx_tag, 1843 txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); 1844 if (error != 0) { 1845 m_freem(*m_head); 1846 *m_head = NULL; 1847 return (error); 1848 } 1849 } else if (error != 0) 1850 return (error); 1851 if (nsegs == 0) { 1852 m_freem(*m_head); 1853 *m_head = NULL; 1854 return (EIO); 1855 } 1856 1857 /* Check number of available descriptors. */ 1858 if (sc->vr_cdata.vr_tx_cnt + nsegs >= (VR_TX_RING_CNT - 1)) { 1859 bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap); 1860 return (ENOBUFS); 1861 } 1862 1863 txd->tx_m = *m_head; 1864 bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap, 1865 BUS_DMASYNC_PREWRITE); 1866 1867 /* Set checksum offload. */ 1868 csum_flags = 0; 1869 if (((*m_head)->m_pkthdr.csum_flags & VR_CSUM_FEATURES) != 0) { 1870 if ((*m_head)->m_pkthdr.csum_flags & CSUM_IP) 1871 csum_flags |= VR_TXCTL_IPCSUM; 1872 if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP) 1873 csum_flags |= VR_TXCTL_TCPCSUM; 1874 if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP) 1875 csum_flags |= VR_TXCTL_UDPCSUM; 1876 } 1877 1878 /* 1879 * Quite contrary to datasheet for VIA Rhine, VR_TXCTL_TLINK bit 1880 * is required for all descriptors regardless of single or 1881 * multiple buffers. Also VR_TXSTAT_OWN bit is valid only for 1882 * the first descriptor for a multi-fragmented frames. Without 1883 * that VIA Rhine chip generates Tx underrun interrupts and can't 1884 * send any frames. 1885 */ 1886 si = prod; 1887 for (i = 0; i < nsegs; i++) { 1888 desc = &sc->vr_rdata.vr_tx_ring[prod]; 1889 desc->vr_status = 0; 1890 txctl = txsegs[i].ds_len | VR_TXCTL_TLINK | csum_flags; 1891 if (i == 0) 1892 txctl |= VR_TXCTL_FIRSTFRAG; 1893 desc->vr_ctl = htole32(txctl); 1894 desc->vr_data = htole32(VR_ADDR_LO(txsegs[i].ds_addr)); 1895 sc->vr_cdata.vr_tx_cnt++; 1896 VR_INC(prod, VR_TX_RING_CNT); 1897 } 1898 /* Update producer index. */ 1899 sc->vr_cdata.vr_tx_prod = prod; 1900 1901 prod = (prod + VR_TX_RING_CNT - 1) % VR_TX_RING_CNT; 1902 desc = &sc->vr_rdata.vr_tx_ring[prod]; 1903 1904 /* 1905 * Set EOP on the last desciptor and reuqest Tx completion 1906 * interrupt for every VR_TX_INTR_THRESH-th frames. 1907 */ 1908 VR_INC(sc->vr_cdata.vr_tx_pkts, VR_TX_INTR_THRESH); 1909 if (sc->vr_cdata.vr_tx_pkts == 0) 1910 desc->vr_ctl |= htole32(VR_TXCTL_LASTFRAG | VR_TXCTL_FINT); 1911 else 1912 desc->vr_ctl |= htole32(VR_TXCTL_LASTFRAG); 1913 1914 /* Lastly turn the first descriptor ownership to hardware. */ 1915 desc = &sc->vr_rdata.vr_tx_ring[si]; 1916 desc->vr_status |= htole32(VR_TXSTAT_OWN); 1917 1918 /* Sync descriptors. 
*/ 1919 bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag, 1920 sc->vr_cdata.vr_tx_ring_map, 1921 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1922 1923 return (0); 1924 } 1925 1926 static void 1927 vr_start(struct ifnet *ifp) 1928 { 1929 struct vr_softc *sc; 1930 1931 sc = ifp->if_softc; 1932 VR_LOCK(sc); 1933 vr_start_locked(ifp); 1934 VR_UNLOCK(sc); 1935 } 1936 1937 static void 1938 vr_start_locked(struct ifnet *ifp) 1939 { 1940 struct vr_softc *sc; 1941 struct mbuf *m_head; 1942 int enq; 1943 1944 sc = ifp->if_softc; 1945 1946 VR_LOCK_ASSERT(sc); 1947 1948 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1949 IFF_DRV_RUNNING || sc->vr_link == 0) 1950 return; 1951 1952 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && 1953 sc->vr_cdata.vr_tx_cnt < VR_TX_RING_CNT - 2; ) { 1954 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 1955 if (m_head == NULL) 1956 break; 1957 /* 1958 * Pack the data into the transmit ring. If we 1959 * don't have room, set the OACTIVE flag and wait 1960 * for the NIC to drain the ring. 1961 */ 1962 if (vr_encap(sc, &m_head)) { 1963 if (m_head == NULL) 1964 break; 1965 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 1966 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1967 break; 1968 } 1969 1970 enq++; 1971 /* 1972 * If there's a BPF listener, bounce a copy of this frame 1973 * to him. 1974 */ 1975 ETHER_BPF_MTAP(ifp, m_head); 1976 } 1977 1978 if (enq > 0) { 1979 /* Tell the chip to start transmitting. */ 1980 VR_SETBIT(sc, VR_CR0, VR_CR0_TX_GO); 1981 /* Set a timeout in case the chip goes out to lunch. */ 1982 sc->vr_watchdog_timer = 5; 1983 } 1984 } 1985 1986 static void 1987 vr_init(void *xsc) 1988 { 1989 struct vr_softc *sc; 1990 1991 sc = (struct vr_softc *)xsc; 1992 VR_LOCK(sc); 1993 vr_init_locked(sc); 1994 VR_UNLOCK(sc); 1995 } 1996 1997 static void 1998 vr_init_locked(struct vr_softc *sc) 1999 { 2000 struct ifnet *ifp; 2001 struct mii_data *mii; 2002 bus_addr_t addr; 2003 int i; 2004 2005 VR_LOCK_ASSERT(sc); 2006 2007 ifp = sc->vr_ifp; 2008 mii = device_get_softc(sc->vr_miibus); 2009 2010 /* Cancel pending I/O and free all RX/TX buffers. */ 2011 vr_stop(sc); 2012 vr_reset(sc); 2013 2014 /* Set our station address. */ 2015 for (i = 0; i < ETHER_ADDR_LEN; i++) 2016 CSR_WRITE_1(sc, VR_PAR0 + i, IF_LLADDR(sc->vr_ifp)[i]); 2017 2018 /* Set DMA size. */ 2019 VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH); 2020 VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD); 2021 2022 /* 2023 * BCR0 and BCR1 can override the RXCFG and TXCFG registers, 2024 * so we must set both. 2025 */ 2026 VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH); 2027 VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES); 2028 2029 VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH); 2030 VR_SETBIT(sc, VR_BCR1, vr_tx_threshold_tables[sc->vr_txthresh].bcr_cfg); 2031 2032 VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH); 2033 VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES); 2034 2035 VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH); 2036 VR_SETBIT(sc, VR_TXCFG, vr_tx_threshold_tables[sc->vr_txthresh].tx_cfg); 2037 2038 /* Init circular RX list. */ 2039 if (vr_rx_ring_init(sc) != 0) { 2040 device_printf(sc->vr_dev, 2041 "initialization failed: no memory for rx buffers\n"); 2042 vr_stop(sc); 2043 return; 2044 } 2045 2046 /* Init tx descriptors. */ 2047 vr_tx_ring_init(sc); 2048 2049 if ((sc->vr_quirks & VR_Q_CAM) != 0) { 2050 uint8_t vcam[2] = { 0, 0 }; 2051 2052 /* Disable VLAN hardware tag insertion/stripping. */ 2053 VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TXTAGEN | VR_TXCFG_RXTAGCTL); 2054 /* Disable VLAN hardware filtering. 
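 * The CAM helpers used below appear to take a bitmap of valid entries:
 * vr_cam_mask(sc, VR_VLAN_CAM, 0) invalidates every VLAN CAM entry,
 * vr_cam_data(sc, VR_VLAN_CAM, 0, vcam) loads the all-zero two-byte
 * vcam[] into entry 0, and vr_cam_mask(sc, VR_VLAN_CAM, 1) re-enables
 * just that entry.  Under that reading, enabling entries 0 and 3
 * instead would be vr_cam_mask(sc, VR_VLAN_CAM, (1 << 0) | (1 << 3));
 * the bitmap interpretation is an assumption based on how the mask is
 * used here, not something spelled out in this excerpt.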
*/ 2055 VR_CLRBIT(sc, VR_BCR1, VR_BCR1_VLANFILT_ENB); 2056 /* Disable all CAM entries. */ 2057 vr_cam_mask(sc, VR_MCAST_CAM, 0); 2058 vr_cam_mask(sc, VR_VLAN_CAM, 0); 2059 /* Enable the first VLAN CAM. */ 2060 vr_cam_data(sc, VR_VLAN_CAM, 0, vcam); 2061 vr_cam_mask(sc, VR_VLAN_CAM, 1); 2062 } 2063 2064 /* 2065 * Set up receive filter. 2066 */ 2067 vr_set_filter(sc); 2068 2069 /* 2070 * Load the address of the RX ring. 2071 */ 2072 addr = VR_RX_RING_ADDR(sc, 0); 2073 CSR_WRITE_4(sc, VR_RXADDR, VR_ADDR_LO(addr)); 2074 /* 2075 * Load the address of the TX ring. 2076 */ 2077 addr = VR_TX_RING_ADDR(sc, 0); 2078 CSR_WRITE_4(sc, VR_TXADDR, VR_ADDR_LO(addr)); 2079 /* Default : full-duplex, no Tx poll. */ 2080 CSR_WRITE_1(sc, VR_CR1, VR_CR1_FULLDUPLEX | VR_CR1_TX_NOPOLL); 2081 2082 /* Set flow-control parameters for Rhine III. */ 2083 if (sc->vr_revid >= REV_ID_VT6105_A0) { 2084 /* Rx buffer count available for incoming packet. */ 2085 CSR_WRITE_1(sc, VR_FLOWCR0, VR_RX_RING_CNT); 2086 /* 2087 * Tx pause low threshold : 16 free receive buffers 2088 * Tx pause XON high threshold : 48 free receive buffers 2089 */ 2090 CSR_WRITE_1(sc, VR_FLOWCR1, 2091 VR_FLOWCR1_TXLO16 | VR_FLOWCR1_TXHI48 | VR_FLOWCR1_XONXOFF); 2092 /* Set Tx pause timer. */ 2093 CSR_WRITE_2(sc, VR_PAUSETIMER, 0xffff); 2094 } 2095 2096 /* Enable receiver and transmitter. */ 2097 CSR_WRITE_1(sc, VR_CR0, 2098 VR_CR0_START | VR_CR0_TX_ON | VR_CR0_RX_ON | VR_CR0_RX_GO); 2099 2100 CSR_WRITE_2(sc, VR_ISR, 0xFFFF); 2101 #ifdef DEVICE_POLLING 2102 /* 2103 * Disable interrupts if we are polling. 2104 */ 2105 if (ifp->if_capenable & IFCAP_POLLING) 2106 CSR_WRITE_2(sc, VR_IMR, 0); 2107 else 2108 #endif 2109 /* 2110 * Enable interrupts and disable MII intrs. 2111 */ 2112 CSR_WRITE_2(sc, VR_IMR, VR_INTRS); 2113 if (sc->vr_revid > REV_ID_VT6102_A) 2114 CSR_WRITE_2(sc, VR_MII_IMR, 0); 2115 2116 sc->vr_link = 0; 2117 mii_mediachg(mii); 2118 2119 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2120 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2121 2122 callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc); 2123 } 2124 2125 /* 2126 * Set media options. 2127 */ 2128 static int 2129 vr_ifmedia_upd(struct ifnet *ifp) 2130 { 2131 struct vr_softc *sc; 2132 struct mii_data *mii; 2133 struct mii_softc *miisc; 2134 int error; 2135 2136 sc = ifp->if_softc; 2137 VR_LOCK(sc); 2138 mii = device_get_softc(sc->vr_miibus); 2139 if (mii->mii_instance) { 2140 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 2141 mii_phy_reset(miisc); 2142 } 2143 error = mii_mediachg(mii); 2144 VR_UNLOCK(sc); 2145 2146 return (error); 2147 } 2148 2149 /* 2150 * Report current media status. 
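 * vr_ifmedia_upd() and vr_ifmedia_sts() are the ifmedia change/status
 * hooks handed to the MII layer during attach (the registration is not
 * part of this excerpt); vr_ioctl() below routes SIOCSIFMEDIA and
 * SIOCGIFMEDIA to them through ifmedia_ioctl().  A typical way to
 * exercise the pair from userland:
 *
 *	ifconfig vr0 media 100baseTX mediaopt full-duplex
 *	ifconfig vr0	(the media line comes from the status hook)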
2151 */ 2152 static void 2153 vr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2154 { 2155 struct vr_softc *sc; 2156 struct mii_data *mii; 2157 2158 sc = ifp->if_softc; 2159 mii = device_get_softc(sc->vr_miibus); 2160 VR_LOCK(sc); 2161 mii_pollstat(mii); 2162 VR_UNLOCK(sc); 2163 ifmr->ifm_active = mii->mii_media_active; 2164 ifmr->ifm_status = mii->mii_media_status; 2165 } 2166 2167 static int 2168 vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 2169 { 2170 struct vr_softc *sc; 2171 struct ifreq *ifr; 2172 struct mii_data *mii; 2173 int error, mask; 2174 2175 sc = ifp->if_softc; 2176 ifr = (struct ifreq *)data; 2177 error = 0; 2178 2179 switch (command) { 2180 case SIOCSIFFLAGS: 2181 VR_LOCK(sc); 2182 if (ifp->if_flags & IFF_UP) { 2183 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2184 if ((ifp->if_flags ^ sc->vr_if_flags) & 2185 (IFF_PROMISC | IFF_ALLMULTI)) 2186 vr_set_filter(sc); 2187 } else { 2188 if (sc->vr_detach == 0) 2189 vr_init_locked(sc); 2190 } 2191 } else { 2192 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2193 vr_stop(sc); 2194 } 2195 sc->vr_if_flags = ifp->if_flags; 2196 VR_UNLOCK(sc); 2197 break; 2198 case SIOCADDMULTI: 2199 case SIOCDELMULTI: 2200 VR_LOCK(sc); 2201 vr_set_filter(sc); 2202 VR_UNLOCK(sc); 2203 break; 2204 case SIOCGIFMEDIA: 2205 case SIOCSIFMEDIA: 2206 mii = device_get_softc(sc->vr_miibus); 2207 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 2208 break; 2209 case SIOCSIFCAP: 2210 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 2211 #ifdef DEVICE_POLLING 2212 if (mask & IFCAP_POLLING) { 2213 if (ifr->ifr_reqcap & IFCAP_POLLING) { 2214 error = ether_poll_register(vr_poll, ifp); 2215 if (error != 0) 2216 break; 2217 VR_LOCK(sc); 2218 /* Disable interrupts. */ 2219 CSR_WRITE_2(sc, VR_IMR, 0x0000); 2220 ifp->if_capenable |= IFCAP_POLLING; 2221 VR_UNLOCK(sc); 2222 } else { 2223 error = ether_poll_deregister(ifp); 2224 /* Enable interrupts. */ 2225 VR_LOCK(sc); 2226 CSR_WRITE_2(sc, VR_IMR, VR_INTRS); 2227 ifp->if_capenable &= ~IFCAP_POLLING; 2228 VR_UNLOCK(sc); 2229 } 2230 } 2231 #endif /* DEVICE_POLLING */ 2232 if ((mask & IFCAP_TXCSUM) != 0 && 2233 (IFCAP_TXCSUM & ifp->if_capabilities) != 0) { 2234 ifp->if_capenable ^= IFCAP_TXCSUM; 2235 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0) 2236 ifp->if_hwassist |= VR_CSUM_FEATURES; 2237 else 2238 ifp->if_hwassist &= ~VR_CSUM_FEATURES; 2239 } 2240 if ((mask & IFCAP_RXCSUM) != 0 && 2241 (IFCAP_RXCSUM & ifp->if_capabilities) != 0) 2242 ifp->if_capenable ^= IFCAP_RXCSUM; 2243 if ((mask & IFCAP_WOL_UCAST) != 0 && 2244 (ifp->if_capabilities & IFCAP_WOL_UCAST) != 0) 2245 ifp->if_capenable ^= IFCAP_WOL_UCAST; 2246 if ((mask & IFCAP_WOL_MAGIC) != 0 && 2247 (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0) 2248 ifp->if_capenable ^= IFCAP_WOL_MAGIC; 2249 break; 2250 default: 2251 error = ether_ioctl(ifp, command, data); 2252 break; 2253 } 2254 2255 return (error); 2256 } 2257 2258 static void 2259 vr_watchdog(struct vr_softc *sc) 2260 { 2261 struct ifnet *ifp; 2262 2263 VR_LOCK_ASSERT(sc); 2264 2265 if (sc->vr_watchdog_timer == 0 || --sc->vr_watchdog_timer) 2266 return; 2267 2268 ifp = sc->vr_ifp; 2269 /* 2270 * Reclaim first, as we don't request an interrupt for every packet.
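 * vr_encap() only sets VR_TXCTL_FINT on every VR_TX_INTR_THRESH-th
 * frame, so finished descriptors can sit in the ring with no Tx
 * completion interrupt pending.  Running vr_txeof() here first means
 * a quiet but healthy queue is simply drained rather than being
 * treated as a timeout; only if descriptors remain after the reclaim
 * do we go on to reinitialize the chip.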
2271 */ 2272 vr_txeof(sc); 2273 if (sc->vr_cdata.vr_tx_cnt == 0) 2274 return; 2275 2276 if (sc->vr_link == 0) { 2277 if (bootverbose) 2278 if_printf(sc->vr_ifp, "watchdog timeout " 2279 "(missed link)\n"); 2280 ifp->if_oerrors++; 2281 vr_init_locked(sc); 2282 return; 2283 } 2284 2285 ifp->if_oerrors++; 2286 if_printf(ifp, "watchdog timeout\n"); 2287 2288 vr_stop(sc); 2289 vr_reset(sc); 2290 vr_init_locked(sc); 2291 2292 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2293 vr_start_locked(ifp); 2294 } 2295 2296 static void 2297 vr_tx_start(struct vr_softc *sc) 2298 { 2299 bus_addr_t addr; 2300 uint8_t cmd; 2301 2302 cmd = CSR_READ_1(sc, VR_CR0); 2303 if ((cmd & VR_CR0_TX_ON) == 0) { 2304 addr = VR_TX_RING_ADDR(sc, sc->vr_cdata.vr_tx_cons); 2305 CSR_WRITE_4(sc, VR_TXADDR, VR_ADDR_LO(addr)); 2306 cmd |= VR_CR0_TX_ON; 2307 CSR_WRITE_1(sc, VR_CR0, cmd); 2308 } 2309 if (sc->vr_cdata.vr_tx_cnt != 0) { 2310 sc->vr_watchdog_timer = 5; 2311 VR_SETBIT(sc, VR_CR0, VR_CR0_TX_GO); 2312 } 2313 } 2314 2315 static void 2316 vr_rx_start(struct vr_softc *sc) 2317 { 2318 bus_addr_t addr; 2319 uint8_t cmd; 2320 2321 cmd = CSR_READ_1(sc, VR_CR0); 2322 if ((cmd & VR_CR0_RX_ON) == 0) { 2323 addr = VR_RX_RING_ADDR(sc, sc->vr_cdata.vr_rx_cons); 2324 CSR_WRITE_4(sc, VR_RXADDR, VR_ADDR_LO(addr)); 2325 cmd |= VR_CR0_RX_ON; 2326 CSR_WRITE_1(sc, VR_CR0, cmd); 2327 } 2328 CSR_WRITE_1(sc, VR_CR0, cmd | VR_CR0_RX_GO); 2329 } 2330 2331 static int 2332 vr_tx_stop(struct vr_softc *sc) 2333 { 2334 int i; 2335 uint8_t cmd; 2336 2337 cmd = CSR_READ_1(sc, VR_CR0); 2338 if ((cmd & VR_CR0_TX_ON) != 0) { 2339 cmd &= ~VR_CR0_TX_ON; 2340 CSR_WRITE_1(sc, VR_CR0, cmd); 2341 for (i = VR_TIMEOUT; i > 0; i--) { 2342 DELAY(5); 2343 cmd = CSR_READ_1(sc, VR_CR0); 2344 if ((cmd & VR_CR0_TX_ON) == 0) 2345 break; 2346 } 2347 if (i == 0) 2348 return (ETIMEDOUT); 2349 } 2350 return (0); 2351 } 2352 2353 static int 2354 vr_rx_stop(struct vr_softc *sc) 2355 { 2356 int i; 2357 uint8_t cmd; 2358 2359 cmd = CSR_READ_1(sc, VR_CR0); 2360 if ((cmd & VR_CR0_RX_ON) != 0) { 2361 cmd &= ~VR_CR0_RX_ON; 2362 CSR_WRITE_1(sc, VR_CR0, cmd); 2363 for (i = VR_TIMEOUT; i > 0; i--) { 2364 DELAY(5); 2365 cmd = CSR_READ_1(sc, VR_CR0); 2366 if ((cmd & VR_CR0_RX_ON) == 0) 2367 break; 2368 } 2369 if (i == 0) 2370 return (ETIMEDOUT); 2371 } 2372 return (0); 2373 } 2374 2375 /* 2376 * Stop the adapter and free any mbufs allocated to the 2377 * RX and TX lists. 2378 */ 2379 static void 2380 vr_stop(struct vr_softc *sc) 2381 { 2382 struct vr_txdesc *txd; 2383 struct vr_rxdesc *rxd; 2384 struct ifnet *ifp; 2385 int i; 2386 2387 VR_LOCK_ASSERT(sc); 2388 2389 ifp = sc->vr_ifp; 2390 sc->vr_watchdog_timer = 0; 2391 2392 callout_stop(&sc->vr_stat_callout); 2393 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 2394 2395 CSR_WRITE_1(sc, VR_CR0, VR_CR0_STOP); 2396 if (vr_rx_stop(sc) != 0) 2397 device_printf(sc->vr_dev, "%s: Rx shutdown error\n", __func__); 2398 if (vr_tx_stop(sc) != 0) 2399 device_printf(sc->vr_dev, "%s: Tx shutdown error\n", __func__); 2400 /* Clear pending interrupts. */ 2401 CSR_WRITE_2(sc, VR_ISR, 0xFFFF); 2402 CSR_WRITE_2(sc, VR_IMR, 0x0000); 2403 CSR_WRITE_4(sc, VR_TXADDR, 0x00000000); 2404 CSR_WRITE_4(sc, VR_RXADDR, 0x00000000); 2405 2406 /* 2407 * Free RX and TX mbufs still in the queues. 
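 * This is safe to do here because the chip has already been quiesced
 * above: the stop bit was set, the Rx and Tx DMA engines were polled
 * until they halted, pending interrupts were cleared and masked, and
 * the ring base addresses were zeroed.  Each loop below syncs the map
 * (POSTREAD for Rx, POSTWRITE for Tx), unloads it and only then frees
 * the mbuf, leaving the pointers NULL so a later ring
 * re-initialization starts clean.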
2408 */ 2409 for (i = 0; i < VR_RX_RING_CNT; i++) { 2410 rxd = &sc->vr_cdata.vr_rxdesc[i]; 2411 if (rxd->rx_m != NULL) { 2412 bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, 2413 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 2414 bus_dmamap_unload(sc->vr_cdata.vr_rx_tag, 2415 rxd->rx_dmamap); 2416 m_freem(rxd->rx_m); 2417 rxd->rx_m = NULL; 2418 } 2419 } 2420 for (i = 0; i < VR_TX_RING_CNT; i++) { 2421 txd = &sc->vr_cdata.vr_txdesc[i]; 2422 if (txd->tx_m != NULL) { 2423 bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, 2424 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 2425 bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, 2426 txd->tx_dmamap); 2427 m_freem(txd->tx_m); 2428 txd->tx_m = NULL; 2429 } 2430 } 2431 } 2432 2433 /* 2434 * Stop all chip I/O so that the kernel's probe routines don't 2435 * get confused by errant DMAs when rebooting. 2436 */ 2437 static int 2438 vr_shutdown(device_t dev) 2439 { 2440 2441 return (vr_suspend(dev)); 2442 } 2443 2444 static int 2445 vr_suspend(device_t dev) 2446 { 2447 struct vr_softc *sc; 2448 2449 sc = device_get_softc(dev); 2450 2451 VR_LOCK(sc); 2452 vr_stop(sc); 2453 vr_setwol(sc); 2454 sc->vr_suspended = 1; 2455 VR_UNLOCK(sc); 2456 2457 return (0); 2458 } 2459 2460 static int 2461 vr_resume(device_t dev) 2462 { 2463 struct vr_softc *sc; 2464 struct ifnet *ifp; 2465 2466 sc = device_get_softc(dev); 2467 2468 VR_LOCK(sc); 2469 ifp = sc->vr_ifp; 2470 vr_clrwol(sc); 2471 vr_reset(sc); 2472 if (ifp->if_flags & IFF_UP) 2473 vr_init_locked(sc); 2474 2475 sc->vr_suspended = 0; 2476 VR_UNLOCK(sc); 2477 2478 return (0); 2479 } 2480 2481 static void 2482 vr_setwol(struct vr_softc *sc) 2483 { 2484 struct ifnet *ifp; 2485 int pmc; 2486 uint16_t pmstat; 2487 uint8_t v; 2488 2489 VR_LOCK_ASSERT(sc); 2490 2491 if (sc->vr_revid < REV_ID_VT6102_A || 2492 pci_find_extcap(sc->vr_dev, PCIY_PMG, &pmc) != 0) 2493 return; 2494 2495 ifp = sc->vr_ifp; 2496 2497 /* Clear WOL configuration. */ 2498 CSR_WRITE_1(sc, VR_WOLCR_CLR, 0xFF); 2499 CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_SAB | VR_WOLCFG_SAM); 2500 CSR_WRITE_1(sc, VR_PWRCSR_CLR, 0xFF); 2501 CSR_WRITE_1(sc, VR_PWRCFG_CLR, VR_PWRCFG_WOLEN); 2502 if (sc->vr_revid > REV_ID_VT6105_B0) { 2503 /* Newer Rhine III supports two additional patterns. */ 2504 CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_PATTERN_PAGE); 2505 CSR_WRITE_1(sc, VR_TESTREG_CLR, 3); 2506 CSR_WRITE_1(sc, VR_PWRCSR1_CLR, 3); 2507 } 2508 if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0) 2509 CSR_WRITE_1(sc, VR_WOLCR_SET, VR_WOLCR_UCAST); 2510 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) 2511 CSR_WRITE_1(sc, VR_WOLCR_SET, VR_WOLCR_MAGIC); 2512 /* 2513 * It seems that multicast wakeup frames require programming pattern 2514 * registers and valid CRC as well as pattern mask for each pattern. 2515 * While it's possible to setup such a pattern it would complicate 2516 * WOL configuration so ignore multicast wakeup frames. 2517 */ 2518 if ((ifp->if_capenable & IFCAP_WOL) != 0) { 2519 CSR_WRITE_1(sc, VR_WOLCFG_SET, VR_WOLCFG_SAB | VR_WOLCFG_SAM); 2520 v = CSR_READ_1(sc, VR_STICKHW); 2521 CSR_WRITE_1(sc, VR_STICKHW, v | VR_STICKHW_WOL_ENB); 2522 CSR_WRITE_1(sc, VR_PWRCFG_SET, VR_PWRCFG_WOLEN); 2523 } 2524 2525 /* Put hardware into sleep. */ 2526 v = CSR_READ_1(sc, VR_STICKHW); 2527 v |= VR_STICKHW_DS0 | VR_STICKHW_DS1; 2528 CSR_WRITE_1(sc, VR_STICKHW, v); 2529 2530 /* Request PME if WOL is requested. 
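 * The register touched below is the PMCSR of the PCI power management
 * capability whose offset (pmc) was located earlier in this function
 * with pci_find_extcap(sc->vr_dev, PCIY_PMG, &pmc).  Writing
 * PCIM_PSTAT_PME clears the sticky PME status bit (it is
 * write-1-to-clear) and PCIM_PSTAT_PMEENABLE arms PME# assertion, so
 * the pair is set only when some IFCAP_WOL capability is enabled and
 * cleared otherwise.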
*/ 2531 pmstat = pci_read_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, 2); 2532 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 2533 if ((ifp->if_capenable & IFCAP_WOL) != 0) 2534 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 2535 pci_write_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); 2536 } 2537 2538 static void 2539 vr_clrwol(struct vr_softc *sc) 2540 { 2541 uint8_t v; 2542 2543 VR_LOCK_ASSERT(sc); 2544 2545 if (sc->vr_revid < REV_ID_VT6102_A) 2546 return; 2547 2548 /* Take hardware out of sleep. */ 2549 v = CSR_READ_1(sc, VR_STICKHW); 2550 v &= ~(VR_STICKHW_DS0 | VR_STICKHW_DS1 | VR_STICKHW_WOL_ENB); 2551 CSR_WRITE_1(sc, VR_STICKHW, v); 2552 2553 /* Clear WOL configuration as WOL may interfere with normal operation. */ 2554 CSR_WRITE_1(sc, VR_WOLCR_CLR, 0xFF); 2555 CSR_WRITE_1(sc, VR_WOLCFG_CLR, 2556 VR_WOLCFG_SAB | VR_WOLCFG_SAM | VR_WOLCFG_PMEOVR); 2557 CSR_WRITE_1(sc, VR_PWRCSR_CLR, 0xFF); 2558 CSR_WRITE_1(sc, VR_PWRCFG_CLR, VR_PWRCFG_WOLEN); 2559 if (sc->vr_revid > REV_ID_VT6105_B0) { 2560 /* Newer Rhine III supports two additional patterns. */ 2561 CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_PATTERN_PAGE); 2562 CSR_WRITE_1(sc, VR_TESTREG_CLR, 3); 2563 CSR_WRITE_1(sc, VR_PWRCSR1_CLR, 3); 2564 } 2565 } 2566 2567 static int 2568 vr_sysctl_stats(SYSCTL_HANDLER_ARGS) 2569 { 2570 struct vr_softc *sc; 2571 struct vr_statistics *stat; 2572 int error; 2573 int result; 2574 2575 result = -1; 2576 error = sysctl_handle_int(oidp, &result, 0, req); 2577 2578 if (error != 0 || req->newptr == NULL) 2579 return (error); 2580 2581 if (result == 1) { 2582 sc = (struct vr_softc *)arg1; 2583 stat = &sc->vr_stat; 2584 2585 printf("%s statistics:\n", device_get_nameunit(sc->vr_dev)); 2586 printf("Outbound good frames : %ju\n", 2587 (uintmax_t)stat->tx_ok); 2588 printf("Inbound good frames : %ju\n", 2589 (uintmax_t)stat->rx_ok); 2590 printf("Outbound errors : %u\n", stat->tx_errors); 2591 printf("Inbound errors : %u\n", stat->rx_errors); 2592 printf("Inbound no buffers : %u\n", stat->rx_no_buffers); 2593 printf("Inbound no mbuf clusters: %u\n", stat->rx_no_mbufs); 2594 printf("Inbound FIFO overflows : %u\n", 2595 stat->rx_fifo_overflows); 2596 printf("Inbound CRC errors : %u\n", stat->rx_crc_errors); 2597 printf("Inbound frame alignment errors : %u\n", 2598 stat->rx_alignment); 2599 printf("Inbound giant frames : %u\n", stat->rx_giants); 2600 printf("Inbound runt frames : %u\n", stat->rx_runts); 2601 printf("Outbound aborted with excessive collisions : %u\n", 2602 stat->tx_abort); 2603 printf("Outbound collisions : %u\n", stat->tx_collisions); 2604 printf("Outbound late collisions : %u\n", 2605 stat->tx_late_collisions); 2606 printf("Outbound underrun : %u\n", stat->tx_underrun); 2607 printf("PCI bus errors : %u\n", stat->bus_errors); 2608 printf("driver restarted due to Rx/Tx shutdown failure : %u\n", 2609 stat->num_restart); 2610 } 2611 2612 return (error); 2613 } 2614
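/*
 * Usage sketch for vr_sysctl_stats() above: assuming vr_attach()
 * registers it as an integer node named "stats" under the device's
 * sysctl tree (that registration is not part of this excerpt),
 * writing 1 to the node dumps the counters to the console, e.g.:
 *
 *	sysctl dev.vr.0.stats=1
 *
 * Reads simply return -1; any write of a value other than 1 is
 * accepted but ignored.
 */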