1 /*- 2 * Copyright (c) 1997, 1998 3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Bill Paul. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 /* 37 * VIA Rhine fast ethernet PCI NIC driver 38 * 39 * Supports various network adapters based on the VIA Rhine 40 * and Rhine II PCI controllers, including the D-Link DFE530TX. 41 * Datasheets are available at http://www.via.com.tw. 42 * 43 * Written by Bill Paul <wpaul@ctr.columbia.edu> 44 * Electrical Engineering Department 45 * Columbia University, New York City 46 */ 47 48 /* 49 * The VIA Rhine controllers are similar in some respects to the 50 * the DEC tulip chips, except less complicated. The controller 51 * uses an MII bus and an external physical layer interface. The 52 * receiver has a one entry perfect filter and a 64-bit hash table 53 * multicast filter. Transmit and receive descriptors are similar 54 * to the tulip. 55 * 56 * Some Rhine chips has a serious flaw in its transmit DMA mechanism: 57 * transmit buffers must be longword aligned. Unfortunately, 58 * FreeBSD doesn't guarantee that mbufs will be filled in starting 59 * at longword boundaries, so we have to do a buffer copy before 60 * transmission. 61 */ 62 63 #ifdef HAVE_KERNEL_OPTION_HEADERS 64 #include "opt_device_polling.h" 65 #endif 66 67 #include <sys/param.h> 68 #include <sys/systm.h> 69 #include <sys/bus.h> 70 #include <sys/endian.h> 71 #include <sys/kernel.h> 72 #include <sys/malloc.h> 73 #include <sys/mbuf.h> 74 #include <sys/module.h> 75 #include <sys/rman.h> 76 #include <sys/socket.h> 77 #include <sys/sockio.h> 78 #include <sys/sysctl.h> 79 #include <sys/taskqueue.h> 80 81 #include <net/bpf.h> 82 #include <net/if.h> 83 #include <net/ethernet.h> 84 #include <net/if_dl.h> 85 #include <net/if_media.h> 86 #include <net/if_types.h> 87 #include <net/if_vlan_var.h> 88 89 #include <dev/mii/mii.h> 90 #include <dev/mii/miivar.h> 91 92 #include <dev/pci/pcireg.h> 93 #include <dev/pci/pcivar.h> 94 95 #include <machine/bus.h> 96 97 #include <dev/vr/if_vrreg.h> 98 99 /* "device miibus" required. See GENERIC if you get errors here. */ 100 #include "miibus_if.h" 101 102 MODULE_DEPEND(vr, pci, 1, 1, 1); 103 MODULE_DEPEND(vr, ether, 1, 1, 1); 104 MODULE_DEPEND(vr, miibus, 1, 1, 1); 105 106 /* Define to show Rx/Tx error status. */ 107 #undef VR_SHOW_ERRORS 108 #define VR_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 109 110 /* 111 * Various supported device vendors/types, their names & quirks. 112 */ 113 #define VR_Q_NEEDALIGN (1<<0) 114 #define VR_Q_CSUM (1<<1) 115 #define VR_Q_CAM (1<<2) 116 117 static const struct vr_type { 118 u_int16_t vr_vid; 119 u_int16_t vr_did; 120 int vr_quirks; 121 const char *vr_name; 122 } const vr_devs[] = { 123 { VIA_VENDORID, VIA_DEVICEID_RHINE, 124 VR_Q_NEEDALIGN, 125 "VIA VT3043 Rhine I 10/100BaseTX" }, 126 { VIA_VENDORID, VIA_DEVICEID_RHINE_II, 127 VR_Q_NEEDALIGN, 128 "VIA VT86C100A Rhine II 10/100BaseTX" }, 129 { VIA_VENDORID, VIA_DEVICEID_RHINE_II_2, 130 0, 131 "VIA VT6102 Rhine II 10/100BaseTX" }, 132 { VIA_VENDORID, VIA_DEVICEID_RHINE_III, 133 0, 134 "VIA VT6105 Rhine III 10/100BaseTX" }, 135 { VIA_VENDORID, VIA_DEVICEID_RHINE_III_M, 136 VR_Q_CSUM, 137 "VIA VT6105M Rhine III 10/100BaseTX" }, 138 { DELTA_VENDORID, DELTA_DEVICEID_RHINE_II, 139 VR_Q_NEEDALIGN, 140 "Delta Electronics Rhine II 10/100BaseTX" }, 141 { ADDTRON_VENDORID, ADDTRON_DEVICEID_RHINE_II, 142 VR_Q_NEEDALIGN, 143 "Addtron Technology Rhine II 10/100BaseTX" }, 144 { 0, 0, 0, NULL } 145 }; 146 147 static int vr_probe(device_t); 148 static int vr_attach(device_t); 149 static int vr_detach(device_t); 150 static int vr_shutdown(device_t); 151 static int vr_suspend(device_t); 152 static int vr_resume(device_t); 153 154 static void vr_dmamap_cb(void *, bus_dma_segment_t *, int, int); 155 static int vr_dma_alloc(struct vr_softc *); 156 static void vr_dma_free(struct vr_softc *); 157 static __inline void vr_discard_rxbuf(struct vr_rxdesc *); 158 static int vr_newbuf(struct vr_softc *, int); 159 160 #ifndef __NO_STRICT_ALIGNMENT 161 static __inline void vr_fixup_rx(struct mbuf *); 162 #endif 163 static int vr_rxeof(struct vr_softc *); 164 static void vr_txeof(struct vr_softc *); 165 static void vr_tick(void *); 166 static int vr_error(struct vr_softc *, uint16_t); 167 static void vr_tx_underrun(struct vr_softc *); 168 static void vr_intr(void *); 169 static void vr_start(struct ifnet *); 170 static void vr_start_locked(struct ifnet *); 171 static int vr_encap(struct vr_softc *, struct mbuf **); 172 static int vr_ioctl(struct ifnet *, u_long, caddr_t); 173 static void vr_init(void *); 174 static void vr_init_locked(struct vr_softc *); 175 static void vr_tx_start(struct vr_softc *); 176 static void vr_rx_start(struct vr_softc *); 177 static int vr_tx_stop(struct vr_softc *); 178 static int vr_rx_stop(struct vr_softc *); 179 static void vr_stop(struct vr_softc *); 180 static void vr_watchdog(struct vr_softc *); 181 static int vr_ifmedia_upd(struct ifnet *); 182 static void vr_ifmedia_sts(struct ifnet *, struct ifmediareq *); 183 184 static int vr_miibus_readreg(device_t, int, int); 185 static int vr_miibus_writereg(device_t, int, int, int); 186 static void vr_miibus_statchg(device_t); 187 188 static void vr_cam_mask(struct vr_softc *, uint32_t, int); 189 static int vr_cam_data(struct vr_softc *, int, int, uint8_t *); 190 static void vr_set_filter(struct vr_softc *); 191 static void vr_reset(const struct vr_softc *); 192 static int vr_tx_ring_init(struct vr_softc *); 193 static int vr_rx_ring_init(struct vr_softc *); 194 static void vr_setwol(struct vr_softc *); 195 static void vr_clrwol(struct vr_softc *); 196 static int vr_sysctl_stats(SYSCTL_HANDLER_ARGS); 197 198 static const struct vr_tx_threshold_table { 199 int tx_cfg; 200 int bcr_cfg; 201 int value; 202 } const vr_tx_threshold_tables[] = { 203 { VR_TXTHRESH_64BYTES, VR_BCR1_TXTHRESH64BYTES, 64 }, 204 { VR_TXTHRESH_128BYTES, VR_BCR1_TXTHRESH128BYTES, 128 }, 205 { VR_TXTHRESH_256BYTES, VR_BCR1_TXTHRESH256BYTES, 256 }, 206 { VR_TXTHRESH_512BYTES, VR_BCR1_TXTHRESH512BYTES, 512 }, 207 { VR_TXTHRESH_1024BYTES, VR_BCR1_TXTHRESH1024BYTES, 1024 }, 208 { VR_TXTHRESH_STORENFWD, VR_BCR1_TXTHRESHSTORENFWD, 2048 } 209 }; 210 211 static device_method_t vr_methods[] = { 212 /* Device interface */ 213 DEVMETHOD(device_probe, vr_probe), 214 DEVMETHOD(device_attach, vr_attach), 215 DEVMETHOD(device_detach, vr_detach), 216 DEVMETHOD(device_shutdown, vr_shutdown), 217 DEVMETHOD(device_suspend, vr_suspend), 218 DEVMETHOD(device_resume, vr_resume), 219 220 /* MII interface */ 221 DEVMETHOD(miibus_readreg, vr_miibus_readreg), 222 DEVMETHOD(miibus_writereg, vr_miibus_writereg), 223 DEVMETHOD(miibus_statchg, vr_miibus_statchg), 224 225 DEVMETHOD_END 226 }; 227 228 static driver_t vr_driver = { 229 "vr", 230 vr_methods, 231 sizeof(struct vr_softc) 232 }; 233 234 static devclass_t vr_devclass; 235 236 DRIVER_MODULE(vr, pci, vr_driver, vr_devclass, 0, 0); 237 DRIVER_MODULE(miibus, vr, miibus_driver, miibus_devclass, 0, 0); 238 239 static int 240 vr_miibus_readreg(device_t dev, int phy, int reg) 241 { 242 struct vr_softc *sc; 243 int i; 244 245 sc = device_get_softc(dev); 246 247 /* Set the register address. */ 248 CSR_WRITE_1(sc, VR_MIIADDR, reg); 249 VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB); 250 251 for (i = 0; i < VR_MII_TIMEOUT; i++) { 252 DELAY(1); 253 if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0) 254 break; 255 } 256 if (i == VR_MII_TIMEOUT) 257 device_printf(sc->vr_dev, "phy read timeout %d:%d\n", phy, reg); 258 259 return (CSR_READ_2(sc, VR_MIIDATA)); 260 } 261 262 static int 263 vr_miibus_writereg(device_t dev, int phy, int reg, int data) 264 { 265 struct vr_softc *sc; 266 int i; 267 268 sc = device_get_softc(dev); 269 270 /* Set the register address and data to write. */ 271 CSR_WRITE_1(sc, VR_MIIADDR, reg); 272 CSR_WRITE_2(sc, VR_MIIDATA, data); 273 VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB); 274 275 for (i = 0; i < VR_MII_TIMEOUT; i++) { 276 DELAY(1); 277 if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0) 278 break; 279 } 280 if (i == VR_MII_TIMEOUT) 281 device_printf(sc->vr_dev, "phy write timeout %d:%d\n", phy, 282 reg); 283 284 return (0); 285 } 286 287 /* 288 * In order to fiddle with the 289 * 'full-duplex' and '100Mbps' bits in the netconfig register, we 290 * first have to put the transmit and/or receive logic in the idle state. 291 */ 292 static void 293 vr_miibus_statchg(device_t dev) 294 { 295 struct vr_softc *sc; 296 struct mii_data *mii; 297 struct ifnet *ifp; 298 int lfdx, mfdx; 299 uint8_t cr0, cr1, fc; 300 301 sc = device_get_softc(dev); 302 mii = device_get_softc(sc->vr_miibus); 303 ifp = sc->vr_ifp; 304 if (mii == NULL || ifp == NULL || 305 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 306 return; 307 308 sc->vr_flags &= ~(VR_F_LINK | VR_F_TXPAUSE); 309 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 310 (IFM_ACTIVE | IFM_AVALID)) { 311 switch (IFM_SUBTYPE(mii->mii_media_active)) { 312 case IFM_10_T: 313 case IFM_100_TX: 314 sc->vr_flags |= VR_F_LINK; 315 break; 316 default: 317 break; 318 } 319 } 320 321 if ((sc->vr_flags & VR_F_LINK) != 0) { 322 cr0 = CSR_READ_1(sc, VR_CR0); 323 cr1 = CSR_READ_1(sc, VR_CR1); 324 mfdx = (cr1 & VR_CR1_FULLDUPLEX) != 0; 325 lfdx = (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0; 326 if (mfdx != lfdx) { 327 if ((cr0 & (VR_CR0_TX_ON | VR_CR0_RX_ON)) != 0) { 328 if (vr_tx_stop(sc) != 0 || 329 vr_rx_stop(sc) != 0) { 330 device_printf(sc->vr_dev, 331 "%s: Tx/Rx shutdown error -- " 332 "resetting\n", __func__); 333 sc->vr_flags |= VR_F_RESTART; 334 VR_UNLOCK(sc); 335 return; 336 } 337 } 338 if (lfdx) 339 cr1 |= VR_CR1_FULLDUPLEX; 340 else 341 cr1 &= ~VR_CR1_FULLDUPLEX; 342 CSR_WRITE_1(sc, VR_CR1, cr1); 343 } 344 fc = 0; 345 /* Configure flow-control. */ 346 if (sc->vr_revid >= REV_ID_VT6105_A0) { 347 fc = CSR_READ_1(sc, VR_FLOWCR1); 348 fc &= ~(VR_FLOWCR1_TXPAUSE | VR_FLOWCR1_RXPAUSE); 349 if ((IFM_OPTIONS(mii->mii_media_active) & 350 IFM_ETH_RXPAUSE) != 0) 351 fc |= VR_FLOWCR1_RXPAUSE; 352 if ((IFM_OPTIONS(mii->mii_media_active) & 353 IFM_ETH_TXPAUSE) != 0) { 354 fc |= VR_FLOWCR1_TXPAUSE; 355 sc->vr_flags |= VR_F_TXPAUSE; 356 } 357 CSR_WRITE_1(sc, VR_FLOWCR1, fc); 358 } else if (sc->vr_revid >= REV_ID_VT6102_A) { 359 /* No Tx puase capability available for Rhine II. */ 360 fc = CSR_READ_1(sc, VR_MISC_CR0); 361 fc &= ~VR_MISCCR0_RXPAUSE; 362 if ((IFM_OPTIONS(mii->mii_media_active) & 363 IFM_ETH_RXPAUSE) != 0) 364 fc |= VR_MISCCR0_RXPAUSE; 365 CSR_WRITE_1(sc, VR_MISC_CR0, fc); 366 } 367 vr_rx_start(sc); 368 vr_tx_start(sc); 369 } else { 370 if (vr_tx_stop(sc) != 0 || vr_rx_stop(sc) != 0) { 371 device_printf(sc->vr_dev, 372 "%s: Tx/Rx shutdown error -- resetting\n", 373 __func__); 374 sc->vr_flags |= VR_F_RESTART; 375 } 376 } 377 } 378 379 380 static void 381 vr_cam_mask(struct vr_softc *sc, uint32_t mask, int type) 382 { 383 384 if (type == VR_MCAST_CAM) 385 CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_MCAST); 386 else 387 CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_VLAN); 388 CSR_WRITE_4(sc, VR_CAMMASK, mask); 389 CSR_WRITE_1(sc, VR_CAMCTL, 0); 390 } 391 392 static int 393 vr_cam_data(struct vr_softc *sc, int type, int idx, uint8_t *mac) 394 { 395 int i; 396 397 if (type == VR_MCAST_CAM) { 398 if (idx < 0 || idx >= VR_CAM_MCAST_CNT || mac == NULL) 399 return (EINVAL); 400 CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_MCAST); 401 } else 402 CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_VLAN); 403 404 /* Set CAM entry address. */ 405 CSR_WRITE_1(sc, VR_CAMADDR, idx); 406 /* Set CAM entry data. */ 407 if (type == VR_MCAST_CAM) { 408 for (i = 0; i < ETHER_ADDR_LEN; i++) 409 CSR_WRITE_1(sc, VR_MCAM0 + i, mac[i]); 410 } else { 411 CSR_WRITE_1(sc, VR_VCAM0, mac[0]); 412 CSR_WRITE_1(sc, VR_VCAM1, mac[1]); 413 } 414 DELAY(10); 415 /* Write CAM and wait for self-clear of VR_CAMCTL_WRITE bit. */ 416 CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_WRITE); 417 for (i = 0; i < VR_TIMEOUT; i++) { 418 DELAY(1); 419 if ((CSR_READ_1(sc, VR_CAMCTL) & VR_CAMCTL_WRITE) == 0) 420 break; 421 } 422 423 if (i == VR_TIMEOUT) 424 device_printf(sc->vr_dev, "%s: setting CAM filter timeout!\n", 425 __func__); 426 CSR_WRITE_1(sc, VR_CAMCTL, 0); 427 428 return (i == VR_TIMEOUT ? ETIMEDOUT : 0); 429 } 430 431 /* 432 * Program the 64-bit multicast hash filter. 433 */ 434 static void 435 vr_set_filter(struct vr_softc *sc) 436 { 437 struct ifnet *ifp; 438 int h; 439 uint32_t hashes[2] = { 0, 0 }; 440 struct ifmultiaddr *ifma; 441 uint8_t rxfilt; 442 int error, mcnt; 443 uint32_t cam_mask; 444 445 VR_LOCK_ASSERT(sc); 446 447 ifp = sc->vr_ifp; 448 rxfilt = CSR_READ_1(sc, VR_RXCFG); 449 rxfilt &= ~(VR_RXCFG_RX_PROMISC | VR_RXCFG_RX_BROAD | 450 VR_RXCFG_RX_MULTI); 451 if (ifp->if_flags & IFF_BROADCAST) 452 rxfilt |= VR_RXCFG_RX_BROAD; 453 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 454 rxfilt |= VR_RXCFG_RX_MULTI; 455 if (ifp->if_flags & IFF_PROMISC) 456 rxfilt |= VR_RXCFG_RX_PROMISC; 457 CSR_WRITE_1(sc, VR_RXCFG, rxfilt); 458 CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF); 459 CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF); 460 return; 461 } 462 463 /* Now program new ones. */ 464 error = 0; 465 mcnt = 0; 466 if_maddr_rlock(ifp); 467 if ((sc->vr_quirks & VR_Q_CAM) != 0) { 468 /* 469 * For hardwares that have CAM capability, use 470 * 32 entries multicast perfect filter. 471 */ 472 cam_mask = 0; 473 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 474 if (ifma->ifma_addr->sa_family != AF_LINK) 475 continue; 476 error = vr_cam_data(sc, VR_MCAST_CAM, mcnt, 477 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 478 if (error != 0) { 479 cam_mask = 0; 480 break; 481 } 482 cam_mask |= 1 << mcnt; 483 mcnt++; 484 } 485 vr_cam_mask(sc, VR_MCAST_CAM, cam_mask); 486 } 487 488 if ((sc->vr_quirks & VR_Q_CAM) == 0 || error != 0) { 489 /* 490 * If there are too many multicast addresses or 491 * setting multicast CAM filter failed, use hash 492 * table based filtering. 493 */ 494 mcnt = 0; 495 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 496 if (ifma->ifma_addr->sa_family != AF_LINK) 497 continue; 498 h = ether_crc32_be(LLADDR((struct sockaddr_dl *) 499 ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; 500 if (h < 32) 501 hashes[0] |= (1 << h); 502 else 503 hashes[1] |= (1 << (h - 32)); 504 mcnt++; 505 } 506 } 507 if_maddr_runlock(ifp); 508 509 if (mcnt > 0) 510 rxfilt |= VR_RXCFG_RX_MULTI; 511 512 CSR_WRITE_4(sc, VR_MAR0, hashes[0]); 513 CSR_WRITE_4(sc, VR_MAR1, hashes[1]); 514 CSR_WRITE_1(sc, VR_RXCFG, rxfilt); 515 } 516 517 static void 518 vr_reset(const struct vr_softc *sc) 519 { 520 int i; 521 522 /*VR_LOCK_ASSERT(sc);*/ /* XXX: Called during attach w/o lock. */ 523 524 CSR_WRITE_1(sc, VR_CR1, VR_CR1_RESET); 525 if (sc->vr_revid < REV_ID_VT6102_A) { 526 /* VT86C100A needs more delay after reset. */ 527 DELAY(100); 528 } 529 for (i = 0; i < VR_TIMEOUT; i++) { 530 DELAY(10); 531 if (!(CSR_READ_1(sc, VR_CR1) & VR_CR1_RESET)) 532 break; 533 } 534 if (i == VR_TIMEOUT) { 535 if (sc->vr_revid < REV_ID_VT6102_A) 536 device_printf(sc->vr_dev, "reset never completed!\n"); 537 else { 538 /* Use newer force reset command. */ 539 device_printf(sc->vr_dev, 540 "Using force reset command.\n"); 541 VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST); 542 /* 543 * Wait a little while for the chip to get its brains 544 * in order. 545 */ 546 DELAY(2000); 547 } 548 } 549 550 } 551 552 /* 553 * Probe for a VIA Rhine chip. Check the PCI vendor and device 554 * IDs against our list and return a match or NULL 555 */ 556 static const struct vr_type * 557 vr_match(device_t dev) 558 { 559 const struct vr_type *t = vr_devs; 560 561 for (t = vr_devs; t->vr_name != NULL; t++) 562 if ((pci_get_vendor(dev) == t->vr_vid) && 563 (pci_get_device(dev) == t->vr_did)) 564 return (t); 565 return (NULL); 566 } 567 568 /* 569 * Probe for a VIA Rhine chip. Check the PCI vendor and device 570 * IDs against our list and return a device name if we find a match. 571 */ 572 static int 573 vr_probe(device_t dev) 574 { 575 const struct vr_type *t; 576 577 t = vr_match(dev); 578 if (t != NULL) { 579 device_set_desc(dev, t->vr_name); 580 return (BUS_PROBE_DEFAULT); 581 } 582 return (ENXIO); 583 } 584 585 /* 586 * Attach the interface. Allocate softc structures, do ifmedia 587 * setup and ethernet/BPF attach. 588 */ 589 static int 590 vr_attach(device_t dev) 591 { 592 struct vr_softc *sc; 593 struct ifnet *ifp; 594 const struct vr_type *t; 595 uint8_t eaddr[ETHER_ADDR_LEN]; 596 int error, rid; 597 int i, phy, pmc; 598 599 sc = device_get_softc(dev); 600 sc->vr_dev = dev; 601 t = vr_match(dev); 602 KASSERT(t != NULL, ("Lost if_vr device match")); 603 sc->vr_quirks = t->vr_quirks; 604 device_printf(dev, "Quirks: 0x%x\n", sc->vr_quirks); 605 606 mtx_init(&sc->vr_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 607 MTX_DEF); 608 callout_init_mtx(&sc->vr_stat_callout, &sc->vr_mtx, 0); 609 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 610 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 611 OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 612 vr_sysctl_stats, "I", "Statistics"); 613 614 error = 0; 615 616 /* 617 * Map control/status registers. 618 */ 619 pci_enable_busmaster(dev); 620 sc->vr_revid = pci_get_revid(dev); 621 device_printf(dev, "Revision: 0x%x\n", sc->vr_revid); 622 623 sc->vr_res_id = PCIR_BAR(0); 624 sc->vr_res_type = SYS_RES_IOPORT; 625 sc->vr_res = bus_alloc_resource_any(dev, sc->vr_res_type, 626 &sc->vr_res_id, RF_ACTIVE); 627 if (sc->vr_res == NULL) { 628 device_printf(dev, "couldn't map ports\n"); 629 error = ENXIO; 630 goto fail; 631 } 632 633 /* Allocate interrupt. */ 634 rid = 0; 635 sc->vr_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 636 RF_SHAREABLE | RF_ACTIVE); 637 638 if (sc->vr_irq == NULL) { 639 device_printf(dev, "couldn't map interrupt\n"); 640 error = ENXIO; 641 goto fail; 642 } 643 644 /* Allocate ifnet structure. */ 645 ifp = sc->vr_ifp = if_alloc(IFT_ETHER); 646 if (ifp == NULL) { 647 device_printf(dev, "couldn't allocate ifnet structure\n"); 648 error = ENOSPC; 649 goto fail; 650 } 651 ifp->if_softc = sc; 652 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 653 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 654 ifp->if_ioctl = vr_ioctl; 655 ifp->if_start = vr_start; 656 ifp->if_init = vr_init; 657 IFQ_SET_MAXLEN(&ifp->if_snd, VR_TX_RING_CNT - 1); 658 ifp->if_snd.ifq_maxlen = VR_TX_RING_CNT - 1; 659 IFQ_SET_READY(&ifp->if_snd); 660 661 /* Configure Tx FIFO threshold. */ 662 sc->vr_txthresh = VR_TXTHRESH_MIN; 663 if (sc->vr_revid < REV_ID_VT6105_A0) { 664 /* 665 * Use store and forward mode for Rhine I/II. 666 * Otherwise they produce a lot of Tx underruns and 667 * it would take a while to get working FIFO threshold 668 * value. 669 */ 670 sc->vr_txthresh = VR_TXTHRESH_MAX; 671 } 672 if ((sc->vr_quirks & VR_Q_CSUM) != 0) { 673 ifp->if_hwassist = VR_CSUM_FEATURES; 674 ifp->if_capabilities |= IFCAP_HWCSUM; 675 /* 676 * To update checksum field the hardware may need to 677 * store entire frames into FIFO before transmitting. 678 */ 679 sc->vr_txthresh = VR_TXTHRESH_MAX; 680 } 681 682 if (sc->vr_revid >= REV_ID_VT6102_A && 683 pci_find_cap(dev, PCIY_PMG, &pmc) == 0) 684 ifp->if_capabilities |= IFCAP_WOL_UCAST | IFCAP_WOL_MAGIC; 685 686 /* Rhine supports oversized VLAN frame. */ 687 ifp->if_capabilities |= IFCAP_VLAN_MTU; 688 ifp->if_capenable = ifp->if_capabilities; 689 #ifdef DEVICE_POLLING 690 ifp->if_capabilities |= IFCAP_POLLING; 691 #endif 692 693 /* 694 * Windows may put the chip in suspend mode when it 695 * shuts down. Be sure to kick it in the head to wake it 696 * up again. 697 */ 698 if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0) 699 VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1)); 700 701 /* 702 * Get station address. The way the Rhine chips work, 703 * you're not allowed to directly access the EEPROM once 704 * they've been programmed a special way. Consequently, 705 * we need to read the node address from the PAR0 and PAR1 706 * registers. 707 * Reloading EEPROM also overwrites VR_CFGA, VR_CFGB, 708 * VR_CFGC and VR_CFGD such that memory mapped IO configured 709 * by driver is reset to default state. 710 */ 711 VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD); 712 for (i = VR_TIMEOUT; i > 0; i--) { 713 DELAY(1); 714 if ((CSR_READ_1(sc, VR_EECSR) & VR_EECSR_LOAD) == 0) 715 break; 716 } 717 if (i == 0) 718 device_printf(dev, "Reloading EEPROM timeout!\n"); 719 for (i = 0; i < ETHER_ADDR_LEN; i++) 720 eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i); 721 722 /* Reset the adapter. */ 723 vr_reset(sc); 724 /* Ack intr & disable further interrupts. */ 725 CSR_WRITE_2(sc, VR_ISR, 0xFFFF); 726 CSR_WRITE_2(sc, VR_IMR, 0); 727 if (sc->vr_revid >= REV_ID_VT6102_A) 728 CSR_WRITE_2(sc, VR_MII_IMR, 0); 729 730 if (sc->vr_revid < REV_ID_VT6102_A) { 731 pci_write_config(dev, VR_PCI_MODE2, 732 pci_read_config(dev, VR_PCI_MODE2, 1) | 733 VR_MODE2_MODE10T, 1); 734 } else { 735 /* Report error instead of retrying forever. */ 736 pci_write_config(dev, VR_PCI_MODE2, 737 pci_read_config(dev, VR_PCI_MODE2, 1) | 738 VR_MODE2_PCEROPT, 1); 739 /* Detect MII coding error. */ 740 pci_write_config(dev, VR_PCI_MODE3, 741 pci_read_config(dev, VR_PCI_MODE3, 1) | 742 VR_MODE3_MIION, 1); 743 if (sc->vr_revid >= REV_ID_VT6105_LOM && 744 sc->vr_revid < REV_ID_VT6105M_A0) 745 pci_write_config(dev, VR_PCI_MODE2, 746 pci_read_config(dev, VR_PCI_MODE2, 1) | 747 VR_MODE2_MODE10T, 1); 748 /* Enable Memory-Read-Multiple. */ 749 if (sc->vr_revid >= REV_ID_VT6107_A1 && 750 sc->vr_revid < REV_ID_VT6105M_A0) 751 pci_write_config(dev, VR_PCI_MODE2, 752 pci_read_config(dev, VR_PCI_MODE2, 1) | 753 VR_MODE2_MRDPL, 1); 754 } 755 /* Disable MII AUTOPOLL. */ 756 VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL); 757 758 if (vr_dma_alloc(sc) != 0) { 759 error = ENXIO; 760 goto fail; 761 } 762 763 /* Do MII setup. */ 764 if (sc->vr_revid >= REV_ID_VT6105_A0) 765 phy = 1; 766 else 767 phy = CSR_READ_1(sc, VR_PHYADDR) & VR_PHYADDR_MASK; 768 error = mii_attach(dev, &sc->vr_miibus, ifp, vr_ifmedia_upd, 769 vr_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 770 sc->vr_revid >= REV_ID_VT6102_A ? MIIF_DOPAUSE : 0); 771 if (error != 0) { 772 device_printf(dev, "attaching PHYs failed\n"); 773 goto fail; 774 } 775 776 /* Call MI attach routine. */ 777 ether_ifattach(ifp, eaddr); 778 /* 779 * Tell the upper layer(s) we support long frames. 780 * Must appear after the call to ether_ifattach() because 781 * ether_ifattach() sets ifi_hdrlen to the default value. 782 */ 783 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 784 785 /* Hook interrupt last to avoid having to lock softc. */ 786 error = bus_setup_intr(dev, sc->vr_irq, INTR_TYPE_NET | INTR_MPSAFE, 787 NULL, vr_intr, sc, &sc->vr_intrhand); 788 789 if (error) { 790 device_printf(dev, "couldn't set up irq\n"); 791 ether_ifdetach(ifp); 792 goto fail; 793 } 794 795 fail: 796 if (error) 797 vr_detach(dev); 798 799 return (error); 800 } 801 802 /* 803 * Shutdown hardware and free up resources. This can be called any 804 * time after the mutex has been initialized. It is called in both 805 * the error case in attach and the normal detach case so it needs 806 * to be careful about only freeing resources that have actually been 807 * allocated. 808 */ 809 static int 810 vr_detach(device_t dev) 811 { 812 struct vr_softc *sc = device_get_softc(dev); 813 struct ifnet *ifp = sc->vr_ifp; 814 815 KASSERT(mtx_initialized(&sc->vr_mtx), ("vr mutex not initialized")); 816 817 #ifdef DEVICE_POLLING 818 if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING) 819 ether_poll_deregister(ifp); 820 #endif 821 822 /* These should only be active if attach succeeded. */ 823 if (device_is_attached(dev)) { 824 VR_LOCK(sc); 825 sc->vr_flags |= VR_F_DETACHED; 826 vr_stop(sc); 827 VR_UNLOCK(sc); 828 callout_drain(&sc->vr_stat_callout); 829 ether_ifdetach(ifp); 830 } 831 if (sc->vr_miibus) 832 device_delete_child(dev, sc->vr_miibus); 833 bus_generic_detach(dev); 834 835 if (sc->vr_intrhand) 836 bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand); 837 if (sc->vr_irq) 838 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq); 839 if (sc->vr_res) 840 bus_release_resource(dev, sc->vr_res_type, sc->vr_res_id, 841 sc->vr_res); 842 843 if (ifp) 844 if_free(ifp); 845 846 vr_dma_free(sc); 847 848 mtx_destroy(&sc->vr_mtx); 849 850 return (0); 851 } 852 853 struct vr_dmamap_arg { 854 bus_addr_t vr_busaddr; 855 }; 856 857 static void 858 vr_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 859 { 860 struct vr_dmamap_arg *ctx; 861 862 if (error != 0) 863 return; 864 ctx = arg; 865 ctx->vr_busaddr = segs[0].ds_addr; 866 } 867 868 static int 869 vr_dma_alloc(struct vr_softc *sc) 870 { 871 struct vr_dmamap_arg ctx; 872 struct vr_txdesc *txd; 873 struct vr_rxdesc *rxd; 874 bus_size_t tx_alignment; 875 int error, i; 876 877 /* Create parent DMA tag. */ 878 error = bus_dma_tag_create( 879 bus_get_dma_tag(sc->vr_dev), /* parent */ 880 1, 0, /* alignment, boundary */ 881 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 882 BUS_SPACE_MAXADDR, /* highaddr */ 883 NULL, NULL, /* filter, filterarg */ 884 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 885 0, /* nsegments */ 886 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 887 0, /* flags */ 888 NULL, NULL, /* lockfunc, lockarg */ 889 &sc->vr_cdata.vr_parent_tag); 890 if (error != 0) { 891 device_printf(sc->vr_dev, "failed to create parent DMA tag\n"); 892 goto fail; 893 } 894 /* Create tag for Tx ring. */ 895 error = bus_dma_tag_create( 896 sc->vr_cdata.vr_parent_tag, /* parent */ 897 VR_RING_ALIGN, 0, /* alignment, boundary */ 898 BUS_SPACE_MAXADDR, /* lowaddr */ 899 BUS_SPACE_MAXADDR, /* highaddr */ 900 NULL, NULL, /* filter, filterarg */ 901 VR_TX_RING_SIZE, /* maxsize */ 902 1, /* nsegments */ 903 VR_TX_RING_SIZE, /* maxsegsize */ 904 0, /* flags */ 905 NULL, NULL, /* lockfunc, lockarg */ 906 &sc->vr_cdata.vr_tx_ring_tag); 907 if (error != 0) { 908 device_printf(sc->vr_dev, "failed to create Tx ring DMA tag\n"); 909 goto fail; 910 } 911 912 /* Create tag for Rx ring. */ 913 error = bus_dma_tag_create( 914 sc->vr_cdata.vr_parent_tag, /* parent */ 915 VR_RING_ALIGN, 0, /* alignment, boundary */ 916 BUS_SPACE_MAXADDR, /* lowaddr */ 917 BUS_SPACE_MAXADDR, /* highaddr */ 918 NULL, NULL, /* filter, filterarg */ 919 VR_RX_RING_SIZE, /* maxsize */ 920 1, /* nsegments */ 921 VR_RX_RING_SIZE, /* maxsegsize */ 922 0, /* flags */ 923 NULL, NULL, /* lockfunc, lockarg */ 924 &sc->vr_cdata.vr_rx_ring_tag); 925 if (error != 0) { 926 device_printf(sc->vr_dev, "failed to create Rx ring DMA tag\n"); 927 goto fail; 928 } 929 930 if ((sc->vr_quirks & VR_Q_NEEDALIGN) != 0) 931 tx_alignment = sizeof(uint32_t); 932 else 933 tx_alignment = 1; 934 /* Create tag for Tx buffers. */ 935 error = bus_dma_tag_create( 936 sc->vr_cdata.vr_parent_tag, /* parent */ 937 tx_alignment, 0, /* alignment, boundary */ 938 BUS_SPACE_MAXADDR, /* lowaddr */ 939 BUS_SPACE_MAXADDR, /* highaddr */ 940 NULL, NULL, /* filter, filterarg */ 941 MCLBYTES * VR_MAXFRAGS, /* maxsize */ 942 VR_MAXFRAGS, /* nsegments */ 943 MCLBYTES, /* maxsegsize */ 944 0, /* flags */ 945 NULL, NULL, /* lockfunc, lockarg */ 946 &sc->vr_cdata.vr_tx_tag); 947 if (error != 0) { 948 device_printf(sc->vr_dev, "failed to create Tx DMA tag\n"); 949 goto fail; 950 } 951 952 /* Create tag for Rx buffers. */ 953 error = bus_dma_tag_create( 954 sc->vr_cdata.vr_parent_tag, /* parent */ 955 VR_RX_ALIGN, 0, /* alignment, boundary */ 956 BUS_SPACE_MAXADDR, /* lowaddr */ 957 BUS_SPACE_MAXADDR, /* highaddr */ 958 NULL, NULL, /* filter, filterarg */ 959 MCLBYTES, /* maxsize */ 960 1, /* nsegments */ 961 MCLBYTES, /* maxsegsize */ 962 0, /* flags */ 963 NULL, NULL, /* lockfunc, lockarg */ 964 &sc->vr_cdata.vr_rx_tag); 965 if (error != 0) { 966 device_printf(sc->vr_dev, "failed to create Rx DMA tag\n"); 967 goto fail; 968 } 969 970 /* Allocate DMA'able memory and load the DMA map for Tx ring. */ 971 error = bus_dmamem_alloc(sc->vr_cdata.vr_tx_ring_tag, 972 (void **)&sc->vr_rdata.vr_tx_ring, BUS_DMA_WAITOK | 973 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->vr_cdata.vr_tx_ring_map); 974 if (error != 0) { 975 device_printf(sc->vr_dev, 976 "failed to allocate DMA'able memory for Tx ring\n"); 977 goto fail; 978 } 979 980 ctx.vr_busaddr = 0; 981 error = bus_dmamap_load(sc->vr_cdata.vr_tx_ring_tag, 982 sc->vr_cdata.vr_tx_ring_map, sc->vr_rdata.vr_tx_ring, 983 VR_TX_RING_SIZE, vr_dmamap_cb, &ctx, 0); 984 if (error != 0 || ctx.vr_busaddr == 0) { 985 device_printf(sc->vr_dev, 986 "failed to load DMA'able memory for Tx ring\n"); 987 goto fail; 988 } 989 sc->vr_rdata.vr_tx_ring_paddr = ctx.vr_busaddr; 990 991 /* Allocate DMA'able memory and load the DMA map for Rx ring. */ 992 error = bus_dmamem_alloc(sc->vr_cdata.vr_rx_ring_tag, 993 (void **)&sc->vr_rdata.vr_rx_ring, BUS_DMA_WAITOK | 994 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->vr_cdata.vr_rx_ring_map); 995 if (error != 0) { 996 device_printf(sc->vr_dev, 997 "failed to allocate DMA'able memory for Rx ring\n"); 998 goto fail; 999 } 1000 1001 ctx.vr_busaddr = 0; 1002 error = bus_dmamap_load(sc->vr_cdata.vr_rx_ring_tag, 1003 sc->vr_cdata.vr_rx_ring_map, sc->vr_rdata.vr_rx_ring, 1004 VR_RX_RING_SIZE, vr_dmamap_cb, &ctx, 0); 1005 if (error != 0 || ctx.vr_busaddr == 0) { 1006 device_printf(sc->vr_dev, 1007 "failed to load DMA'able memory for Rx ring\n"); 1008 goto fail; 1009 } 1010 sc->vr_rdata.vr_rx_ring_paddr = ctx.vr_busaddr; 1011 1012 /* Create DMA maps for Tx buffers. */ 1013 for (i = 0; i < VR_TX_RING_CNT; i++) { 1014 txd = &sc->vr_cdata.vr_txdesc[i]; 1015 txd->tx_m = NULL; 1016 txd->tx_dmamap = NULL; 1017 error = bus_dmamap_create(sc->vr_cdata.vr_tx_tag, 0, 1018 &txd->tx_dmamap); 1019 if (error != 0) { 1020 device_printf(sc->vr_dev, 1021 "failed to create Tx dmamap\n"); 1022 goto fail; 1023 } 1024 } 1025 /* Create DMA maps for Rx buffers. */ 1026 if ((error = bus_dmamap_create(sc->vr_cdata.vr_rx_tag, 0, 1027 &sc->vr_cdata.vr_rx_sparemap)) != 0) { 1028 device_printf(sc->vr_dev, 1029 "failed to create spare Rx dmamap\n"); 1030 goto fail; 1031 } 1032 for (i = 0; i < VR_RX_RING_CNT; i++) { 1033 rxd = &sc->vr_cdata.vr_rxdesc[i]; 1034 rxd->rx_m = NULL; 1035 rxd->rx_dmamap = NULL; 1036 error = bus_dmamap_create(sc->vr_cdata.vr_rx_tag, 0, 1037 &rxd->rx_dmamap); 1038 if (error != 0) { 1039 device_printf(sc->vr_dev, 1040 "failed to create Rx dmamap\n"); 1041 goto fail; 1042 } 1043 } 1044 1045 fail: 1046 return (error); 1047 } 1048 1049 static void 1050 vr_dma_free(struct vr_softc *sc) 1051 { 1052 struct vr_txdesc *txd; 1053 struct vr_rxdesc *rxd; 1054 int i; 1055 1056 /* Tx ring. */ 1057 if (sc->vr_cdata.vr_tx_ring_tag) { 1058 if (sc->vr_cdata.vr_tx_ring_map) 1059 bus_dmamap_unload(sc->vr_cdata.vr_tx_ring_tag, 1060 sc->vr_cdata.vr_tx_ring_map); 1061 if (sc->vr_cdata.vr_tx_ring_map && 1062 sc->vr_rdata.vr_tx_ring) 1063 bus_dmamem_free(sc->vr_cdata.vr_tx_ring_tag, 1064 sc->vr_rdata.vr_tx_ring, 1065 sc->vr_cdata.vr_tx_ring_map); 1066 sc->vr_rdata.vr_tx_ring = NULL; 1067 sc->vr_cdata.vr_tx_ring_map = NULL; 1068 bus_dma_tag_destroy(sc->vr_cdata.vr_tx_ring_tag); 1069 sc->vr_cdata.vr_tx_ring_tag = NULL; 1070 } 1071 /* Rx ring. */ 1072 if (sc->vr_cdata.vr_rx_ring_tag) { 1073 if (sc->vr_cdata.vr_rx_ring_map) 1074 bus_dmamap_unload(sc->vr_cdata.vr_rx_ring_tag, 1075 sc->vr_cdata.vr_rx_ring_map); 1076 if (sc->vr_cdata.vr_rx_ring_map && 1077 sc->vr_rdata.vr_rx_ring) 1078 bus_dmamem_free(sc->vr_cdata.vr_rx_ring_tag, 1079 sc->vr_rdata.vr_rx_ring, 1080 sc->vr_cdata.vr_rx_ring_map); 1081 sc->vr_rdata.vr_rx_ring = NULL; 1082 sc->vr_cdata.vr_rx_ring_map = NULL; 1083 bus_dma_tag_destroy(sc->vr_cdata.vr_rx_ring_tag); 1084 sc->vr_cdata.vr_rx_ring_tag = NULL; 1085 } 1086 /* Tx buffers. */ 1087 if (sc->vr_cdata.vr_tx_tag) { 1088 for (i = 0; i < VR_TX_RING_CNT; i++) { 1089 txd = &sc->vr_cdata.vr_txdesc[i]; 1090 if (txd->tx_dmamap) { 1091 bus_dmamap_destroy(sc->vr_cdata.vr_tx_tag, 1092 txd->tx_dmamap); 1093 txd->tx_dmamap = NULL; 1094 } 1095 } 1096 bus_dma_tag_destroy(sc->vr_cdata.vr_tx_tag); 1097 sc->vr_cdata.vr_tx_tag = NULL; 1098 } 1099 /* Rx buffers. */ 1100 if (sc->vr_cdata.vr_rx_tag) { 1101 for (i = 0; i < VR_RX_RING_CNT; i++) { 1102 rxd = &sc->vr_cdata.vr_rxdesc[i]; 1103 if (rxd->rx_dmamap) { 1104 bus_dmamap_destroy(sc->vr_cdata.vr_rx_tag, 1105 rxd->rx_dmamap); 1106 rxd->rx_dmamap = NULL; 1107 } 1108 } 1109 if (sc->vr_cdata.vr_rx_sparemap) { 1110 bus_dmamap_destroy(sc->vr_cdata.vr_rx_tag, 1111 sc->vr_cdata.vr_rx_sparemap); 1112 sc->vr_cdata.vr_rx_sparemap = 0; 1113 } 1114 bus_dma_tag_destroy(sc->vr_cdata.vr_rx_tag); 1115 sc->vr_cdata.vr_rx_tag = NULL; 1116 } 1117 1118 if (sc->vr_cdata.vr_parent_tag) { 1119 bus_dma_tag_destroy(sc->vr_cdata.vr_parent_tag); 1120 sc->vr_cdata.vr_parent_tag = NULL; 1121 } 1122 } 1123 1124 /* 1125 * Initialize the transmit descriptors. 1126 */ 1127 static int 1128 vr_tx_ring_init(struct vr_softc *sc) 1129 { 1130 struct vr_ring_data *rd; 1131 struct vr_txdesc *txd; 1132 bus_addr_t addr; 1133 int i; 1134 1135 sc->vr_cdata.vr_tx_prod = 0; 1136 sc->vr_cdata.vr_tx_cons = 0; 1137 sc->vr_cdata.vr_tx_cnt = 0; 1138 sc->vr_cdata.vr_tx_pkts = 0; 1139 1140 rd = &sc->vr_rdata; 1141 bzero(rd->vr_tx_ring, VR_TX_RING_SIZE); 1142 for (i = 0; i < VR_TX_RING_CNT; i++) { 1143 if (i == VR_TX_RING_CNT - 1) 1144 addr = VR_TX_RING_ADDR(sc, 0); 1145 else 1146 addr = VR_TX_RING_ADDR(sc, i + 1); 1147 rd->vr_tx_ring[i].vr_nextphys = htole32(VR_ADDR_LO(addr)); 1148 txd = &sc->vr_cdata.vr_txdesc[i]; 1149 txd->tx_m = NULL; 1150 } 1151 1152 bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag, 1153 sc->vr_cdata.vr_tx_ring_map, 1154 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1155 1156 return (0); 1157 } 1158 1159 /* 1160 * Initialize the RX descriptors and allocate mbufs for them. Note that 1161 * we arrange the descriptors in a closed ring, so that the last descriptor 1162 * points back to the first. 1163 */ 1164 static int 1165 vr_rx_ring_init(struct vr_softc *sc) 1166 { 1167 struct vr_ring_data *rd; 1168 struct vr_rxdesc *rxd; 1169 bus_addr_t addr; 1170 int i; 1171 1172 sc->vr_cdata.vr_rx_cons = 0; 1173 1174 rd = &sc->vr_rdata; 1175 bzero(rd->vr_rx_ring, VR_RX_RING_SIZE); 1176 for (i = 0; i < VR_RX_RING_CNT; i++) { 1177 rxd = &sc->vr_cdata.vr_rxdesc[i]; 1178 rxd->rx_m = NULL; 1179 rxd->desc = &rd->vr_rx_ring[i]; 1180 if (i == VR_RX_RING_CNT - 1) 1181 addr = VR_RX_RING_ADDR(sc, 0); 1182 else 1183 addr = VR_RX_RING_ADDR(sc, i + 1); 1184 rd->vr_rx_ring[i].vr_nextphys = htole32(VR_ADDR_LO(addr)); 1185 if (vr_newbuf(sc, i) != 0) 1186 return (ENOBUFS); 1187 } 1188 1189 bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag, 1190 sc->vr_cdata.vr_rx_ring_map, 1191 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1192 1193 return (0); 1194 } 1195 1196 static __inline void 1197 vr_discard_rxbuf(struct vr_rxdesc *rxd) 1198 { 1199 struct vr_desc *desc; 1200 1201 desc = rxd->desc; 1202 desc->vr_ctl = htole32(VR_RXCTL | (MCLBYTES - sizeof(uint64_t))); 1203 desc->vr_status = htole32(VR_RXSTAT_OWN); 1204 } 1205 1206 /* 1207 * Initialize an RX descriptor and attach an MBUF cluster. 1208 * Note: the length fields are only 11 bits wide, which means the 1209 * largest size we can specify is 2047. This is important because 1210 * MCLBYTES is 2048, so we have to subtract one otherwise we'll 1211 * overflow the field and make a mess. 1212 */ 1213 static int 1214 vr_newbuf(struct vr_softc *sc, int idx) 1215 { 1216 struct vr_desc *desc; 1217 struct vr_rxdesc *rxd; 1218 struct mbuf *m; 1219 bus_dma_segment_t segs[1]; 1220 bus_dmamap_t map; 1221 int nsegs; 1222 1223 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1224 if (m == NULL) 1225 return (ENOBUFS); 1226 m->m_len = m->m_pkthdr.len = MCLBYTES; 1227 m_adj(m, sizeof(uint64_t)); 1228 1229 if (bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_rx_tag, 1230 sc->vr_cdata.vr_rx_sparemap, m, segs, &nsegs, 0) != 0) { 1231 m_freem(m); 1232 return (ENOBUFS); 1233 } 1234 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1235 1236 rxd = &sc->vr_cdata.vr_rxdesc[idx]; 1237 if (rxd->rx_m != NULL) { 1238 bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap, 1239 BUS_DMASYNC_POSTREAD); 1240 bus_dmamap_unload(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap); 1241 } 1242 map = rxd->rx_dmamap; 1243 rxd->rx_dmamap = sc->vr_cdata.vr_rx_sparemap; 1244 sc->vr_cdata.vr_rx_sparemap = map; 1245 bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap, 1246 BUS_DMASYNC_PREREAD); 1247 rxd->rx_m = m; 1248 desc = rxd->desc; 1249 desc->vr_data = htole32(VR_ADDR_LO(segs[0].ds_addr)); 1250 desc->vr_ctl = htole32(VR_RXCTL | segs[0].ds_len); 1251 desc->vr_status = htole32(VR_RXSTAT_OWN); 1252 1253 return (0); 1254 } 1255 1256 #ifndef __NO_STRICT_ALIGNMENT 1257 static __inline void 1258 vr_fixup_rx(struct mbuf *m) 1259 { 1260 uint16_t *src, *dst; 1261 int i; 1262 1263 src = mtod(m, uint16_t *); 1264 dst = src - 1; 1265 1266 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) 1267 *dst++ = *src++; 1268 1269 m->m_data -= ETHER_ALIGN; 1270 } 1271 #endif 1272 1273 /* 1274 * A frame has been uploaded: pass the resulting mbuf chain up to 1275 * the higher level protocols. 1276 */ 1277 static int 1278 vr_rxeof(struct vr_softc *sc) 1279 { 1280 struct vr_rxdesc *rxd; 1281 struct mbuf *m; 1282 struct ifnet *ifp; 1283 struct vr_desc *cur_rx; 1284 int cons, prog, total_len, rx_npkts; 1285 uint32_t rxstat, rxctl; 1286 1287 VR_LOCK_ASSERT(sc); 1288 ifp = sc->vr_ifp; 1289 cons = sc->vr_cdata.vr_rx_cons; 1290 rx_npkts = 0; 1291 1292 bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag, 1293 sc->vr_cdata.vr_rx_ring_map, 1294 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1295 1296 for (prog = 0; prog < VR_RX_RING_CNT; VR_INC(cons, VR_RX_RING_CNT)) { 1297 #ifdef DEVICE_POLLING 1298 if (ifp->if_capenable & IFCAP_POLLING) { 1299 if (sc->rxcycles <= 0) 1300 break; 1301 sc->rxcycles--; 1302 } 1303 #endif 1304 cur_rx = &sc->vr_rdata.vr_rx_ring[cons]; 1305 rxstat = le32toh(cur_rx->vr_status); 1306 rxctl = le32toh(cur_rx->vr_ctl); 1307 if ((rxstat & VR_RXSTAT_OWN) == VR_RXSTAT_OWN) 1308 break; 1309 1310 prog++; 1311 rxd = &sc->vr_cdata.vr_rxdesc[cons]; 1312 m = rxd->rx_m; 1313 1314 /* 1315 * If an error occurs, update stats, clear the 1316 * status word and leave the mbuf cluster in place: 1317 * it should simply get re-used next time this descriptor 1318 * comes up in the ring. 1319 * We don't support SG in Rx path yet, so discard 1320 * partial frame. 1321 */ 1322 if ((rxstat & VR_RXSTAT_RX_OK) == 0 || 1323 (rxstat & (VR_RXSTAT_FIRSTFRAG | VR_RXSTAT_LASTFRAG)) != 1324 (VR_RXSTAT_FIRSTFRAG | VR_RXSTAT_LASTFRAG)) { 1325 ifp->if_ierrors++; 1326 sc->vr_stat.rx_errors++; 1327 if (rxstat & VR_RXSTAT_CRCERR) 1328 sc->vr_stat.rx_crc_errors++; 1329 if (rxstat & VR_RXSTAT_FRAMEALIGNERR) 1330 sc->vr_stat.rx_alignment++; 1331 if (rxstat & VR_RXSTAT_FIFOOFLOW) 1332 sc->vr_stat.rx_fifo_overflows++; 1333 if (rxstat & VR_RXSTAT_GIANT) 1334 sc->vr_stat.rx_giants++; 1335 if (rxstat & VR_RXSTAT_RUNT) 1336 sc->vr_stat.rx_runts++; 1337 if (rxstat & VR_RXSTAT_BUFFERR) 1338 sc->vr_stat.rx_no_buffers++; 1339 #ifdef VR_SHOW_ERRORS 1340 device_printf(sc->vr_dev, "%s: receive error = 0x%b\n", 1341 __func__, rxstat & 0xff, VR_RXSTAT_ERR_BITS); 1342 #endif 1343 vr_discard_rxbuf(rxd); 1344 continue; 1345 } 1346 1347 if (vr_newbuf(sc, cons) != 0) { 1348 ifp->if_iqdrops++; 1349 sc->vr_stat.rx_errors++; 1350 sc->vr_stat.rx_no_mbufs++; 1351 vr_discard_rxbuf(rxd); 1352 continue; 1353 } 1354 1355 /* 1356 * XXX The VIA Rhine chip includes the CRC with every 1357 * received frame, and there's no way to turn this 1358 * behavior off (at least, I can't find anything in 1359 * the manual that explains how to do it) so we have 1360 * to trim off the CRC manually. 1361 */ 1362 total_len = VR_RXBYTES(rxstat); 1363 total_len -= ETHER_CRC_LEN; 1364 m->m_pkthdr.len = m->m_len = total_len; 1365 #ifndef __NO_STRICT_ALIGNMENT 1366 /* 1367 * RX buffers must be 32-bit aligned. 1368 * Ignore the alignment problems on the non-strict alignment 1369 * platform. The performance hit incurred due to unaligned 1370 * accesses is much smaller than the hit produced by forcing 1371 * buffer copies all the time. 1372 */ 1373 vr_fixup_rx(m); 1374 #endif 1375 m->m_pkthdr.rcvif = ifp; 1376 ifp->if_ipackets++; 1377 sc->vr_stat.rx_ok++; 1378 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 && 1379 (rxstat & VR_RXSTAT_FRAG) == 0 && 1380 (rxctl & VR_RXCTL_IP) != 0) { 1381 /* Checksum is valid for non-fragmented IP packets. */ 1382 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 1383 if ((rxctl & VR_RXCTL_IPOK) == VR_RXCTL_IPOK) { 1384 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 1385 if (rxctl & (VR_RXCTL_TCP | VR_RXCTL_UDP)) { 1386 m->m_pkthdr.csum_flags |= 1387 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1388 if ((rxctl & VR_RXCTL_TCPUDPOK) != 0) 1389 m->m_pkthdr.csum_data = 0xffff; 1390 } 1391 } 1392 } 1393 VR_UNLOCK(sc); 1394 (*ifp->if_input)(ifp, m); 1395 VR_LOCK(sc); 1396 rx_npkts++; 1397 } 1398 1399 if (prog > 0) { 1400 /* 1401 * Let controller know how many number of RX buffers 1402 * are posted but avoid expensive register access if 1403 * TX pause capability was not negotiated with link 1404 * partner. 1405 */ 1406 if ((sc->vr_flags & VR_F_TXPAUSE) != 0) { 1407 if (prog >= VR_RX_RING_CNT) 1408 prog = VR_RX_RING_CNT - 1; 1409 CSR_WRITE_1(sc, VR_FLOWCR0, prog); 1410 } 1411 sc->vr_cdata.vr_rx_cons = cons; 1412 bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag, 1413 sc->vr_cdata.vr_rx_ring_map, 1414 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1415 } 1416 return (rx_npkts); 1417 } 1418 1419 /* 1420 * A frame was downloaded to the chip. It's safe for us to clean up 1421 * the list buffers. 1422 */ 1423 static void 1424 vr_txeof(struct vr_softc *sc) 1425 { 1426 struct vr_txdesc *txd; 1427 struct vr_desc *cur_tx; 1428 struct ifnet *ifp; 1429 uint32_t txctl, txstat; 1430 int cons, prod; 1431 1432 VR_LOCK_ASSERT(sc); 1433 1434 cons = sc->vr_cdata.vr_tx_cons; 1435 prod = sc->vr_cdata.vr_tx_prod; 1436 if (cons == prod) 1437 return; 1438 1439 bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag, 1440 sc->vr_cdata.vr_tx_ring_map, 1441 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1442 1443 ifp = sc->vr_ifp; 1444 /* 1445 * Go through our tx list and free mbufs for those 1446 * frames that have been transmitted. 1447 */ 1448 for (; cons != prod; VR_INC(cons, VR_TX_RING_CNT)) { 1449 cur_tx = &sc->vr_rdata.vr_tx_ring[cons]; 1450 txctl = le32toh(cur_tx->vr_ctl); 1451 txstat = le32toh(cur_tx->vr_status); 1452 if ((txstat & VR_TXSTAT_OWN) == VR_TXSTAT_OWN) 1453 break; 1454 1455 sc->vr_cdata.vr_tx_cnt--; 1456 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1457 /* Only the first descriptor in the chain is valid. */ 1458 if ((txctl & VR_TXCTL_FIRSTFRAG) == 0) 1459 continue; 1460 1461 txd = &sc->vr_cdata.vr_txdesc[cons]; 1462 KASSERT(txd->tx_m != NULL, ("%s: accessing NULL mbuf!\n", 1463 __func__)); 1464 1465 if ((txstat & VR_TXSTAT_ERRSUM) != 0) { 1466 ifp->if_oerrors++; 1467 sc->vr_stat.tx_errors++; 1468 if ((txstat & VR_TXSTAT_ABRT) != 0) { 1469 /* Give up and restart Tx. */ 1470 sc->vr_stat.tx_abort++; 1471 bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, 1472 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 1473 bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, 1474 txd->tx_dmamap); 1475 m_freem(txd->tx_m); 1476 txd->tx_m = NULL; 1477 VR_INC(cons, VR_TX_RING_CNT); 1478 sc->vr_cdata.vr_tx_cons = cons; 1479 if (vr_tx_stop(sc) != 0) { 1480 device_printf(sc->vr_dev, 1481 "%s: Tx shutdown error -- " 1482 "resetting\n", __func__); 1483 sc->vr_flags |= VR_F_RESTART; 1484 return; 1485 } 1486 vr_tx_start(sc); 1487 break; 1488 } 1489 if ((sc->vr_revid < REV_ID_VT3071_A && 1490 (txstat & VR_TXSTAT_UNDERRUN)) || 1491 (txstat & (VR_TXSTAT_UDF | VR_TXSTAT_TBUFF))) { 1492 sc->vr_stat.tx_underrun++; 1493 /* Retry and restart Tx. */ 1494 sc->vr_cdata.vr_tx_cnt++; 1495 sc->vr_cdata.vr_tx_cons = cons; 1496 cur_tx->vr_status = htole32(VR_TXSTAT_OWN); 1497 bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag, 1498 sc->vr_cdata.vr_tx_ring_map, 1499 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1500 vr_tx_underrun(sc); 1501 return; 1502 } 1503 if ((txstat & VR_TXSTAT_DEFER) != 0) { 1504 ifp->if_collisions++; 1505 sc->vr_stat.tx_collisions++; 1506 } 1507 if ((txstat & VR_TXSTAT_LATECOLL) != 0) { 1508 ifp->if_collisions++; 1509 sc->vr_stat.tx_late_collisions++; 1510 } 1511 } else { 1512 sc->vr_stat.tx_ok++; 1513 ifp->if_opackets++; 1514 } 1515 1516 bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap, 1517 BUS_DMASYNC_POSTWRITE); 1518 bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap); 1519 if (sc->vr_revid < REV_ID_VT3071_A) { 1520 ifp->if_collisions += 1521 (txstat & VR_TXSTAT_COLLCNT) >> 3; 1522 sc->vr_stat.tx_collisions += 1523 (txstat & VR_TXSTAT_COLLCNT) >> 3; 1524 } else { 1525 ifp->if_collisions += (txstat & 0x0f); 1526 sc->vr_stat.tx_collisions += (txstat & 0x0f); 1527 } 1528 m_freem(txd->tx_m); 1529 txd->tx_m = NULL; 1530 } 1531 1532 sc->vr_cdata.vr_tx_cons = cons; 1533 if (sc->vr_cdata.vr_tx_cnt == 0) 1534 sc->vr_watchdog_timer = 0; 1535 } 1536 1537 static void 1538 vr_tick(void *xsc) 1539 { 1540 struct vr_softc *sc; 1541 struct mii_data *mii; 1542 1543 sc = (struct vr_softc *)xsc; 1544 1545 VR_LOCK_ASSERT(sc); 1546 1547 if ((sc->vr_flags & VR_F_RESTART) != 0) { 1548 device_printf(sc->vr_dev, "restarting\n"); 1549 sc->vr_stat.num_restart++; 1550 sc->vr_ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1551 vr_init_locked(sc); 1552 sc->vr_flags &= ~VR_F_RESTART; 1553 } 1554 1555 mii = device_get_softc(sc->vr_miibus); 1556 mii_tick(mii); 1557 if ((sc->vr_flags & VR_F_LINK) == 0) 1558 vr_miibus_statchg(sc->vr_dev); 1559 vr_watchdog(sc); 1560 callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc); 1561 } 1562 1563 #ifdef DEVICE_POLLING 1564 static poll_handler_t vr_poll; 1565 static poll_handler_t vr_poll_locked; 1566 1567 static int 1568 vr_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 1569 { 1570 struct vr_softc *sc; 1571 int rx_npkts; 1572 1573 sc = ifp->if_softc; 1574 rx_npkts = 0; 1575 1576 VR_LOCK(sc); 1577 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1578 rx_npkts = vr_poll_locked(ifp, cmd, count); 1579 VR_UNLOCK(sc); 1580 return (rx_npkts); 1581 } 1582 1583 static int 1584 vr_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count) 1585 { 1586 struct vr_softc *sc; 1587 int rx_npkts; 1588 1589 sc = ifp->if_softc; 1590 1591 VR_LOCK_ASSERT(sc); 1592 1593 sc->rxcycles = count; 1594 rx_npkts = vr_rxeof(sc); 1595 vr_txeof(sc); 1596 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1597 vr_start_locked(ifp); 1598 1599 if (cmd == POLL_AND_CHECK_STATUS) { 1600 uint16_t status; 1601 1602 /* Also check status register. */ 1603 status = CSR_READ_2(sc, VR_ISR); 1604 if (status) 1605 CSR_WRITE_2(sc, VR_ISR, status); 1606 1607 if ((status & VR_INTRS) == 0) 1608 return (rx_npkts); 1609 1610 if ((status & (VR_ISR_BUSERR | VR_ISR_LINKSTAT2 | 1611 VR_ISR_STATSOFLOW)) != 0) { 1612 if (vr_error(sc, status) != 0) 1613 return (rx_npkts); 1614 } 1615 if ((status & (VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW)) != 0) { 1616 #ifdef VR_SHOW_ERRORS 1617 device_printf(sc->vr_dev, "%s: receive error : 0x%b\n", 1618 __func__, status, VR_ISR_ERR_BITS); 1619 #endif 1620 vr_rx_start(sc); 1621 } 1622 } 1623 return (rx_npkts); 1624 } 1625 #endif /* DEVICE_POLLING */ 1626 1627 /* Back off the transmit threshold. */ 1628 static void 1629 vr_tx_underrun(struct vr_softc *sc) 1630 { 1631 int thresh; 1632 1633 device_printf(sc->vr_dev, "Tx underrun -- "); 1634 if (sc->vr_txthresh < VR_TXTHRESH_MAX) { 1635 thresh = sc->vr_txthresh; 1636 sc->vr_txthresh++; 1637 if (sc->vr_txthresh >= VR_TXTHRESH_MAX) { 1638 sc->vr_txthresh = VR_TXTHRESH_MAX; 1639 printf("using store and forward mode\n"); 1640 } else 1641 printf("increasing Tx threshold(%d -> %d)\n", 1642 vr_tx_threshold_tables[thresh].value, 1643 vr_tx_threshold_tables[thresh + 1].value); 1644 } else 1645 printf("\n"); 1646 sc->vr_stat.tx_underrun++; 1647 if (vr_tx_stop(sc) != 0) { 1648 device_printf(sc->vr_dev, "%s: Tx shutdown error -- " 1649 "resetting\n", __func__); 1650 sc->vr_flags |= VR_F_RESTART; 1651 return; 1652 } 1653 vr_tx_start(sc); 1654 } 1655 1656 static void 1657 vr_intr(void *arg) 1658 { 1659 struct vr_softc *sc; 1660 struct ifnet *ifp; 1661 uint16_t status; 1662 1663 sc = (struct vr_softc *)arg; 1664 1665 VR_LOCK(sc); 1666 1667 if ((sc->vr_flags & VR_F_SUSPENDED) != 0) 1668 goto done_locked; 1669 1670 status = CSR_READ_2(sc, VR_ISR); 1671 if (status == 0 || status == 0xffff || (status & VR_INTRS) == 0) 1672 goto done_locked; 1673 1674 ifp = sc->vr_ifp; 1675 #ifdef DEVICE_POLLING 1676 if ((ifp->if_capenable & IFCAP_POLLING) != 0) 1677 goto done_locked; 1678 #endif 1679 1680 /* Suppress unwanted interrupts. */ 1681 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || 1682 (sc->vr_flags & VR_F_RESTART) != 0) { 1683 CSR_WRITE_2(sc, VR_IMR, 0); 1684 CSR_WRITE_2(sc, VR_ISR, status); 1685 goto done_locked; 1686 } 1687 1688 /* Disable interrupts. */ 1689 CSR_WRITE_2(sc, VR_IMR, 0x0000); 1690 1691 for (; (status & VR_INTRS) != 0;) { 1692 CSR_WRITE_2(sc, VR_ISR, status); 1693 if ((status & (VR_ISR_BUSERR | VR_ISR_LINKSTAT2 | 1694 VR_ISR_STATSOFLOW)) != 0) { 1695 if (vr_error(sc, status) != 0) { 1696 VR_UNLOCK(sc); 1697 return; 1698 } 1699 } 1700 vr_rxeof(sc); 1701 if ((status & (VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW)) != 0) { 1702 #ifdef VR_SHOW_ERRORS 1703 device_printf(sc->vr_dev, "%s: receive error = 0x%b\n", 1704 __func__, status, VR_ISR_ERR_BITS); 1705 #endif 1706 /* Restart Rx if RxDMA SM was stopped. */ 1707 vr_rx_start(sc); 1708 } 1709 vr_txeof(sc); 1710 status = CSR_READ_2(sc, VR_ISR); 1711 } 1712 1713 /* Re-enable interrupts. */ 1714 CSR_WRITE_2(sc, VR_IMR, VR_INTRS); 1715 1716 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1717 vr_start_locked(ifp); 1718 1719 done_locked: 1720 VR_UNLOCK(sc); 1721 } 1722 1723 static int 1724 vr_error(struct vr_softc *sc, uint16_t status) 1725 { 1726 uint16_t pcis; 1727 1728 status &= VR_ISR_BUSERR | VR_ISR_LINKSTAT2 | VR_ISR_STATSOFLOW; 1729 if ((status & VR_ISR_BUSERR) != 0) { 1730 status &= ~VR_ISR_BUSERR; 1731 sc->vr_stat.bus_errors++; 1732 /* Disable further interrupts. */ 1733 CSR_WRITE_2(sc, VR_IMR, 0); 1734 pcis = pci_read_config(sc->vr_dev, PCIR_STATUS, 2); 1735 device_printf(sc->vr_dev, "PCI bus error(0x%04x) -- " 1736 "resetting\n", pcis); 1737 pci_write_config(sc->vr_dev, PCIR_STATUS, pcis, 2); 1738 sc->vr_flags |= VR_F_RESTART; 1739 return (EAGAIN); 1740 } 1741 if ((status & VR_ISR_LINKSTAT2) != 0) { 1742 /* Link state change, duplex changes etc. */ 1743 status &= ~VR_ISR_LINKSTAT2; 1744 } 1745 if ((status & VR_ISR_STATSOFLOW) != 0) { 1746 status &= ~VR_ISR_STATSOFLOW; 1747 if (sc->vr_revid >= REV_ID_VT6105M_A0) { 1748 /* Update MIB counters. */ 1749 } 1750 } 1751 1752 if (status != 0) 1753 device_printf(sc->vr_dev, 1754 "unhandled interrupt, status = 0x%04x\n", status); 1755 return (0); 1756 } 1757 1758 /* 1759 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 1760 * pointers to the fragment pointers. 1761 */ 1762 static int 1763 vr_encap(struct vr_softc *sc, struct mbuf **m_head) 1764 { 1765 struct vr_txdesc *txd; 1766 struct vr_desc *desc; 1767 struct mbuf *m; 1768 bus_dma_segment_t txsegs[VR_MAXFRAGS]; 1769 uint32_t csum_flags, txctl; 1770 int error, i, nsegs, prod, si; 1771 int padlen; 1772 1773 VR_LOCK_ASSERT(sc); 1774 1775 M_ASSERTPKTHDR((*m_head)); 1776 1777 /* 1778 * Some VIA Rhine wants packet buffers to be longword 1779 * aligned, but very often our mbufs aren't. Rather than 1780 * waste time trying to decide when to copy and when not 1781 * to copy, just do it all the time. 1782 */ 1783 if ((sc->vr_quirks & VR_Q_NEEDALIGN) != 0) { 1784 m = m_defrag(*m_head, M_DONTWAIT); 1785 if (m == NULL) { 1786 m_freem(*m_head); 1787 *m_head = NULL; 1788 return (ENOBUFS); 1789 } 1790 *m_head = m; 1791 } 1792 1793 /* 1794 * The Rhine chip doesn't auto-pad, so we have to make 1795 * sure to pad short frames out to the minimum frame length 1796 * ourselves. 1797 */ 1798 if ((*m_head)->m_pkthdr.len < VR_MIN_FRAMELEN) { 1799 m = *m_head; 1800 padlen = VR_MIN_FRAMELEN - m->m_pkthdr.len; 1801 if (M_WRITABLE(m) == 0) { 1802 /* Get a writable copy. */ 1803 m = m_dup(*m_head, M_DONTWAIT); 1804 m_freem(*m_head); 1805 if (m == NULL) { 1806 *m_head = NULL; 1807 return (ENOBUFS); 1808 } 1809 *m_head = m; 1810 } 1811 if (m->m_next != NULL || M_TRAILINGSPACE(m) < padlen) { 1812 m = m_defrag(m, M_DONTWAIT); 1813 if (m == NULL) { 1814 m_freem(*m_head); 1815 *m_head = NULL; 1816 return (ENOBUFS); 1817 } 1818 } 1819 /* 1820 * Manually pad short frames, and zero the pad space 1821 * to avoid leaking data. 1822 */ 1823 bzero(mtod(m, char *) + m->m_pkthdr.len, padlen); 1824 m->m_pkthdr.len += padlen; 1825 m->m_len = m->m_pkthdr.len; 1826 *m_head = m; 1827 } 1828 1829 prod = sc->vr_cdata.vr_tx_prod; 1830 txd = &sc->vr_cdata.vr_txdesc[prod]; 1831 error = bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap, 1832 *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); 1833 if (error == EFBIG) { 1834 m = m_collapse(*m_head, M_DONTWAIT, VR_MAXFRAGS); 1835 if (m == NULL) { 1836 m_freem(*m_head); 1837 *m_head = NULL; 1838 return (ENOBUFS); 1839 } 1840 *m_head = m; 1841 error = bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_tx_tag, 1842 txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); 1843 if (error != 0) { 1844 m_freem(*m_head); 1845 *m_head = NULL; 1846 return (error); 1847 } 1848 } else if (error != 0) 1849 return (error); 1850 if (nsegs == 0) { 1851 m_freem(*m_head); 1852 *m_head = NULL; 1853 return (EIO); 1854 } 1855 1856 /* Check number of available descriptors. */ 1857 if (sc->vr_cdata.vr_tx_cnt + nsegs >= (VR_TX_RING_CNT - 1)) { 1858 bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap); 1859 return (ENOBUFS); 1860 } 1861 1862 txd->tx_m = *m_head; 1863 bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap, 1864 BUS_DMASYNC_PREWRITE); 1865 1866 /* Set checksum offload. */ 1867 csum_flags = 0; 1868 if (((*m_head)->m_pkthdr.csum_flags & VR_CSUM_FEATURES) != 0) { 1869 if ((*m_head)->m_pkthdr.csum_flags & CSUM_IP) 1870 csum_flags |= VR_TXCTL_IPCSUM; 1871 if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP) 1872 csum_flags |= VR_TXCTL_TCPCSUM; 1873 if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP) 1874 csum_flags |= VR_TXCTL_UDPCSUM; 1875 } 1876 1877 /* 1878 * Quite contrary to datasheet for VIA Rhine, VR_TXCTL_TLINK bit 1879 * is required for all descriptors regardless of single or 1880 * multiple buffers. Also VR_TXSTAT_OWN bit is valid only for 1881 * the first descriptor for a multi-fragmented frames. Without 1882 * that VIA Rhine chip generates Tx underrun interrupts and can't 1883 * send any frames. 1884 */ 1885 si = prod; 1886 for (i = 0; i < nsegs; i++) { 1887 desc = &sc->vr_rdata.vr_tx_ring[prod]; 1888 desc->vr_status = 0; 1889 txctl = txsegs[i].ds_len | VR_TXCTL_TLINK | csum_flags; 1890 if (i == 0) 1891 txctl |= VR_TXCTL_FIRSTFRAG; 1892 desc->vr_ctl = htole32(txctl); 1893 desc->vr_data = htole32(VR_ADDR_LO(txsegs[i].ds_addr)); 1894 sc->vr_cdata.vr_tx_cnt++; 1895 VR_INC(prod, VR_TX_RING_CNT); 1896 } 1897 /* Update producer index. */ 1898 sc->vr_cdata.vr_tx_prod = prod; 1899 1900 prod = (prod + VR_TX_RING_CNT - 1) % VR_TX_RING_CNT; 1901 desc = &sc->vr_rdata.vr_tx_ring[prod]; 1902 1903 /* 1904 * Set EOP on the last desciptor and reuqest Tx completion 1905 * interrupt for every VR_TX_INTR_THRESH-th frames. 1906 */ 1907 VR_INC(sc->vr_cdata.vr_tx_pkts, VR_TX_INTR_THRESH); 1908 if (sc->vr_cdata.vr_tx_pkts == 0) 1909 desc->vr_ctl |= htole32(VR_TXCTL_LASTFRAG | VR_TXCTL_FINT); 1910 else 1911 desc->vr_ctl |= htole32(VR_TXCTL_LASTFRAG); 1912 1913 /* Lastly turn the first descriptor ownership to hardware. */ 1914 desc = &sc->vr_rdata.vr_tx_ring[si]; 1915 desc->vr_status |= htole32(VR_TXSTAT_OWN); 1916 1917 /* Sync descriptors. */ 1918 bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag, 1919 sc->vr_cdata.vr_tx_ring_map, 1920 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1921 1922 return (0); 1923 } 1924 1925 static void 1926 vr_start(struct ifnet *ifp) 1927 { 1928 struct vr_softc *sc; 1929 1930 sc = ifp->if_softc; 1931 VR_LOCK(sc); 1932 vr_start_locked(ifp); 1933 VR_UNLOCK(sc); 1934 } 1935 1936 static void 1937 vr_start_locked(struct ifnet *ifp) 1938 { 1939 struct vr_softc *sc; 1940 struct mbuf *m_head; 1941 int enq; 1942 1943 sc = ifp->if_softc; 1944 1945 VR_LOCK_ASSERT(sc); 1946 1947 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1948 IFF_DRV_RUNNING || (sc->vr_flags & VR_F_LINK) == 0) 1949 return; 1950 1951 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && 1952 sc->vr_cdata.vr_tx_cnt < VR_TX_RING_CNT - 2; ) { 1953 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 1954 if (m_head == NULL) 1955 break; 1956 /* 1957 * Pack the data into the transmit ring. If we 1958 * don't have room, set the OACTIVE flag and wait 1959 * for the NIC to drain the ring. 1960 */ 1961 if (vr_encap(sc, &m_head)) { 1962 if (m_head == NULL) 1963 break; 1964 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 1965 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1966 break; 1967 } 1968 1969 enq++; 1970 /* 1971 * If there's a BPF listener, bounce a copy of this frame 1972 * to him. 1973 */ 1974 ETHER_BPF_MTAP(ifp, m_head); 1975 } 1976 1977 if (enq > 0) { 1978 /* Tell the chip to start transmitting. */ 1979 VR_SETBIT(sc, VR_CR0, VR_CR0_TX_GO); 1980 /* Set a timeout in case the chip goes out to lunch. */ 1981 sc->vr_watchdog_timer = 5; 1982 } 1983 } 1984 1985 static void 1986 vr_init(void *xsc) 1987 { 1988 struct vr_softc *sc; 1989 1990 sc = (struct vr_softc *)xsc; 1991 VR_LOCK(sc); 1992 vr_init_locked(sc); 1993 VR_UNLOCK(sc); 1994 } 1995 1996 static void 1997 vr_init_locked(struct vr_softc *sc) 1998 { 1999 struct ifnet *ifp; 2000 struct mii_data *mii; 2001 bus_addr_t addr; 2002 int i; 2003 2004 VR_LOCK_ASSERT(sc); 2005 2006 ifp = sc->vr_ifp; 2007 mii = device_get_softc(sc->vr_miibus); 2008 2009 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 2010 return; 2011 2012 /* Cancel pending I/O and free all RX/TX buffers. */ 2013 vr_stop(sc); 2014 vr_reset(sc); 2015 2016 /* Set our station address. */ 2017 for (i = 0; i < ETHER_ADDR_LEN; i++) 2018 CSR_WRITE_1(sc, VR_PAR0 + i, IF_LLADDR(sc->vr_ifp)[i]); 2019 2020 /* Set DMA size. */ 2021 VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH); 2022 VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD); 2023 2024 /* 2025 * BCR0 and BCR1 can override the RXCFG and TXCFG registers, 2026 * so we must set both. 2027 */ 2028 VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH); 2029 VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES); 2030 2031 VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH); 2032 VR_SETBIT(sc, VR_BCR1, vr_tx_threshold_tables[sc->vr_txthresh].bcr_cfg); 2033 2034 VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH); 2035 VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES); 2036 2037 VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH); 2038 VR_SETBIT(sc, VR_TXCFG, vr_tx_threshold_tables[sc->vr_txthresh].tx_cfg); 2039 2040 /* Init circular RX list. */ 2041 if (vr_rx_ring_init(sc) != 0) { 2042 device_printf(sc->vr_dev, 2043 "initialization failed: no memory for rx buffers\n"); 2044 vr_stop(sc); 2045 return; 2046 } 2047 2048 /* Init tx descriptors. */ 2049 vr_tx_ring_init(sc); 2050 2051 if ((sc->vr_quirks & VR_Q_CAM) != 0) { 2052 uint8_t vcam[2] = { 0, 0 }; 2053 2054 /* Disable VLAN hardware tag insertion/stripping. */ 2055 VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TXTAGEN | VR_TXCFG_RXTAGCTL); 2056 /* Disable VLAN hardware filtering. */ 2057 VR_CLRBIT(sc, VR_BCR1, VR_BCR1_VLANFILT_ENB); 2058 /* Disable all CAM entries. */ 2059 vr_cam_mask(sc, VR_MCAST_CAM, 0); 2060 vr_cam_mask(sc, VR_VLAN_CAM, 0); 2061 /* Enable the first VLAN CAM. */ 2062 vr_cam_data(sc, VR_VLAN_CAM, 0, vcam); 2063 vr_cam_mask(sc, VR_VLAN_CAM, 1); 2064 } 2065 2066 /* 2067 * Set up receive filter. 2068 */ 2069 vr_set_filter(sc); 2070 2071 /* 2072 * Load the address of the RX ring. 2073 */ 2074 addr = VR_RX_RING_ADDR(sc, 0); 2075 CSR_WRITE_4(sc, VR_RXADDR, VR_ADDR_LO(addr)); 2076 /* 2077 * Load the address of the TX ring. 2078 */ 2079 addr = VR_TX_RING_ADDR(sc, 0); 2080 CSR_WRITE_4(sc, VR_TXADDR, VR_ADDR_LO(addr)); 2081 /* Default : full-duplex, no Tx poll. */ 2082 CSR_WRITE_1(sc, VR_CR1, VR_CR1_FULLDUPLEX | VR_CR1_TX_NOPOLL); 2083 2084 /* Set flow-control parameters for Rhine III. */ 2085 if (sc->vr_revid >= REV_ID_VT6105_A0) { 2086 /* 2087 * Configure Rx buffer count available for incoming 2088 * packet. 2089 * Even though data sheet says almost nothing about 2090 * this register, this register should be updated 2091 * whenever driver adds new RX buffers to controller. 2092 * Otherwise, XON frame is not sent to link partner 2093 * even if controller has enough RX buffers and you 2094 * would be isolated from network. 2095 * The controller is not smart enough to know number 2096 * of available RX buffers so driver have to let 2097 * controller know how many RX buffers are posted. 2098 * In other words, this register works like a residue 2099 * counter for RX buffers and should be initialized 2100 * to the number of total RX buffers - 1 before 2101 * enabling RX MAC. Note, this register is 8bits so 2102 * it effectively limits the maximum number of RX 2103 * buffer to be configured by controller is 255. 2104 */ 2105 CSR_WRITE_1(sc, VR_FLOWCR0, VR_RX_RING_CNT - 1); 2106 /* 2107 * Tx pause low threshold : 8 free receive buffers 2108 * Tx pause XON high threshold : 24 free receive buffers 2109 */ 2110 CSR_WRITE_1(sc, VR_FLOWCR1, 2111 VR_FLOWCR1_TXLO8 | VR_FLOWCR1_TXHI24 | VR_FLOWCR1_XONXOFF); 2112 /* Set Tx pause timer. */ 2113 CSR_WRITE_2(sc, VR_PAUSETIMER, 0xffff); 2114 } 2115 2116 /* Enable receiver and transmitter. */ 2117 CSR_WRITE_1(sc, VR_CR0, 2118 VR_CR0_START | VR_CR0_TX_ON | VR_CR0_RX_ON | VR_CR0_RX_GO); 2119 2120 CSR_WRITE_2(sc, VR_ISR, 0xFFFF); 2121 #ifdef DEVICE_POLLING 2122 /* 2123 * Disable interrupts if we are polling. 2124 */ 2125 if (ifp->if_capenable & IFCAP_POLLING) 2126 CSR_WRITE_2(sc, VR_IMR, 0); 2127 else 2128 #endif 2129 /* 2130 * Enable interrupts and disable MII intrs. 2131 */ 2132 CSR_WRITE_2(sc, VR_IMR, VR_INTRS); 2133 if (sc->vr_revid > REV_ID_VT6102_A) 2134 CSR_WRITE_2(sc, VR_MII_IMR, 0); 2135 2136 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2137 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2138 2139 sc->vr_flags &= ~(VR_F_LINK | VR_F_TXPAUSE); 2140 mii_mediachg(mii); 2141 2142 callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc); 2143 } 2144 2145 /* 2146 * Set media options. 2147 */ 2148 static int 2149 vr_ifmedia_upd(struct ifnet *ifp) 2150 { 2151 struct vr_softc *sc; 2152 struct mii_data *mii; 2153 struct mii_softc *miisc; 2154 int error; 2155 2156 sc = ifp->if_softc; 2157 VR_LOCK(sc); 2158 mii = device_get_softc(sc->vr_miibus); 2159 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 2160 PHY_RESET(miisc); 2161 sc->vr_flags &= ~(VR_F_LINK | VR_F_TXPAUSE); 2162 error = mii_mediachg(mii); 2163 VR_UNLOCK(sc); 2164 2165 return (error); 2166 } 2167 2168 /* 2169 * Report current media status. 2170 */ 2171 static void 2172 vr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2173 { 2174 struct vr_softc *sc; 2175 struct mii_data *mii; 2176 2177 sc = ifp->if_softc; 2178 mii = device_get_softc(sc->vr_miibus); 2179 VR_LOCK(sc); 2180 if ((ifp->if_flags & IFF_UP) == 0) { 2181 VR_UNLOCK(sc); 2182 return; 2183 } 2184 mii_pollstat(mii); 2185 ifmr->ifm_active = mii->mii_media_active; 2186 ifmr->ifm_status = mii->mii_media_status; 2187 VR_UNLOCK(sc); 2188 } 2189 2190 static int 2191 vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 2192 { 2193 struct vr_softc *sc; 2194 struct ifreq *ifr; 2195 struct mii_data *mii; 2196 int error, mask; 2197 2198 sc = ifp->if_softc; 2199 ifr = (struct ifreq *)data; 2200 error = 0; 2201 2202 switch (command) { 2203 case SIOCSIFFLAGS: 2204 VR_LOCK(sc); 2205 if (ifp->if_flags & IFF_UP) { 2206 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2207 if ((ifp->if_flags ^ sc->vr_if_flags) & 2208 (IFF_PROMISC | IFF_ALLMULTI)) 2209 vr_set_filter(sc); 2210 } else { 2211 if ((sc->vr_flags & VR_F_DETACHED) == 0) 2212 vr_init_locked(sc); 2213 } 2214 } else { 2215 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2216 vr_stop(sc); 2217 } 2218 sc->vr_if_flags = ifp->if_flags; 2219 VR_UNLOCK(sc); 2220 break; 2221 case SIOCADDMULTI: 2222 case SIOCDELMULTI: 2223 VR_LOCK(sc); 2224 vr_set_filter(sc); 2225 VR_UNLOCK(sc); 2226 break; 2227 case SIOCGIFMEDIA: 2228 case SIOCSIFMEDIA: 2229 mii = device_get_softc(sc->vr_miibus); 2230 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 2231 break; 2232 case SIOCSIFCAP: 2233 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 2234 #ifdef DEVICE_POLLING 2235 if (mask & IFCAP_POLLING) { 2236 if (ifr->ifr_reqcap & IFCAP_POLLING) { 2237 error = ether_poll_register(vr_poll, ifp); 2238 if (error != 0) 2239 break; 2240 VR_LOCK(sc); 2241 /* Disable interrupts. */ 2242 CSR_WRITE_2(sc, VR_IMR, 0x0000); 2243 ifp->if_capenable |= IFCAP_POLLING; 2244 VR_UNLOCK(sc); 2245 } else { 2246 error = ether_poll_deregister(ifp); 2247 /* Enable interrupts. */ 2248 VR_LOCK(sc); 2249 CSR_WRITE_2(sc, VR_IMR, VR_INTRS); 2250 ifp->if_capenable &= ~IFCAP_POLLING; 2251 VR_UNLOCK(sc); 2252 } 2253 } 2254 #endif /* DEVICE_POLLING */ 2255 if ((mask & IFCAP_TXCSUM) != 0 && 2256 (IFCAP_TXCSUM & ifp->if_capabilities) != 0) { 2257 ifp->if_capenable ^= IFCAP_TXCSUM; 2258 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0) 2259 ifp->if_hwassist |= VR_CSUM_FEATURES; 2260 else 2261 ifp->if_hwassist &= ~VR_CSUM_FEATURES; 2262 } 2263 if ((mask & IFCAP_RXCSUM) != 0 && 2264 (IFCAP_RXCSUM & ifp->if_capabilities) != 0) 2265 ifp->if_capenable ^= IFCAP_RXCSUM; 2266 if ((mask & IFCAP_WOL_UCAST) != 0 && 2267 (ifp->if_capabilities & IFCAP_WOL_UCAST) != 0) 2268 ifp->if_capenable ^= IFCAP_WOL_UCAST; 2269 if ((mask & IFCAP_WOL_MAGIC) != 0 && 2270 (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0) 2271 ifp->if_capenable ^= IFCAP_WOL_MAGIC; 2272 break; 2273 default: 2274 error = ether_ioctl(ifp, command, data); 2275 break; 2276 } 2277 2278 return (error); 2279 } 2280 2281 static void 2282 vr_watchdog(struct vr_softc *sc) 2283 { 2284 struct ifnet *ifp; 2285 2286 VR_LOCK_ASSERT(sc); 2287 2288 if (sc->vr_watchdog_timer == 0 || --sc->vr_watchdog_timer) 2289 return; 2290 2291 ifp = sc->vr_ifp; 2292 /* 2293 * Reclaim first as we don't request interrupt for every packets. 2294 */ 2295 vr_txeof(sc); 2296 if (sc->vr_cdata.vr_tx_cnt == 0) 2297 return; 2298 2299 if ((sc->vr_flags & VR_F_LINK) == 0) { 2300 if (bootverbose) 2301 if_printf(sc->vr_ifp, "watchdog timeout " 2302 "(missed link)\n"); 2303 ifp->if_oerrors++; 2304 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2305 vr_init_locked(sc); 2306 return; 2307 } 2308 2309 ifp->if_oerrors++; 2310 if_printf(ifp, "watchdog timeout\n"); 2311 2312 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2313 vr_init_locked(sc); 2314 2315 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2316 vr_start_locked(ifp); 2317 } 2318 2319 static void 2320 vr_tx_start(struct vr_softc *sc) 2321 { 2322 bus_addr_t addr; 2323 uint8_t cmd; 2324 2325 cmd = CSR_READ_1(sc, VR_CR0); 2326 if ((cmd & VR_CR0_TX_ON) == 0) { 2327 addr = VR_TX_RING_ADDR(sc, sc->vr_cdata.vr_tx_cons); 2328 CSR_WRITE_4(sc, VR_TXADDR, VR_ADDR_LO(addr)); 2329 cmd |= VR_CR0_TX_ON; 2330 CSR_WRITE_1(sc, VR_CR0, cmd); 2331 } 2332 if (sc->vr_cdata.vr_tx_cnt != 0) { 2333 sc->vr_watchdog_timer = 5; 2334 VR_SETBIT(sc, VR_CR0, VR_CR0_TX_GO); 2335 } 2336 } 2337 2338 static void 2339 vr_rx_start(struct vr_softc *sc) 2340 { 2341 bus_addr_t addr; 2342 uint8_t cmd; 2343 2344 cmd = CSR_READ_1(sc, VR_CR0); 2345 if ((cmd & VR_CR0_RX_ON) == 0) { 2346 addr = VR_RX_RING_ADDR(sc, sc->vr_cdata.vr_rx_cons); 2347 CSR_WRITE_4(sc, VR_RXADDR, VR_ADDR_LO(addr)); 2348 cmd |= VR_CR0_RX_ON; 2349 CSR_WRITE_1(sc, VR_CR0, cmd); 2350 } 2351 CSR_WRITE_1(sc, VR_CR0, cmd | VR_CR0_RX_GO); 2352 } 2353 2354 static int 2355 vr_tx_stop(struct vr_softc *sc) 2356 { 2357 int i; 2358 uint8_t cmd; 2359 2360 cmd = CSR_READ_1(sc, VR_CR0); 2361 if ((cmd & VR_CR0_TX_ON) != 0) { 2362 cmd &= ~VR_CR0_TX_ON; 2363 CSR_WRITE_1(sc, VR_CR0, cmd); 2364 for (i = VR_TIMEOUT; i > 0; i--) { 2365 DELAY(5); 2366 cmd = CSR_READ_1(sc, VR_CR0); 2367 if ((cmd & VR_CR0_TX_ON) == 0) 2368 break; 2369 } 2370 if (i == 0) 2371 return (ETIMEDOUT); 2372 } 2373 return (0); 2374 } 2375 2376 static int 2377 vr_rx_stop(struct vr_softc *sc) 2378 { 2379 int i; 2380 uint8_t cmd; 2381 2382 cmd = CSR_READ_1(sc, VR_CR0); 2383 if ((cmd & VR_CR0_RX_ON) != 0) { 2384 cmd &= ~VR_CR0_RX_ON; 2385 CSR_WRITE_1(sc, VR_CR0, cmd); 2386 for (i = VR_TIMEOUT; i > 0; i--) { 2387 DELAY(5); 2388 cmd = CSR_READ_1(sc, VR_CR0); 2389 if ((cmd & VR_CR0_RX_ON) == 0) 2390 break; 2391 } 2392 if (i == 0) 2393 return (ETIMEDOUT); 2394 } 2395 return (0); 2396 } 2397 2398 /* 2399 * Stop the adapter and free any mbufs allocated to the 2400 * RX and TX lists. 2401 */ 2402 static void 2403 vr_stop(struct vr_softc *sc) 2404 { 2405 struct vr_txdesc *txd; 2406 struct vr_rxdesc *rxd; 2407 struct ifnet *ifp; 2408 int i; 2409 2410 VR_LOCK_ASSERT(sc); 2411 2412 ifp = sc->vr_ifp; 2413 sc->vr_watchdog_timer = 0; 2414 2415 callout_stop(&sc->vr_stat_callout); 2416 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 2417 2418 CSR_WRITE_1(sc, VR_CR0, VR_CR0_STOP); 2419 if (vr_rx_stop(sc) != 0) 2420 device_printf(sc->vr_dev, "%s: Rx shutdown error\n", __func__); 2421 if (vr_tx_stop(sc) != 0) 2422 device_printf(sc->vr_dev, "%s: Tx shutdown error\n", __func__); 2423 /* Clear pending interrupts. */ 2424 CSR_WRITE_2(sc, VR_ISR, 0xFFFF); 2425 CSR_WRITE_2(sc, VR_IMR, 0x0000); 2426 CSR_WRITE_4(sc, VR_TXADDR, 0x00000000); 2427 CSR_WRITE_4(sc, VR_RXADDR, 0x00000000); 2428 2429 /* 2430 * Free RX and TX mbufs still in the queues. 2431 */ 2432 for (i = 0; i < VR_RX_RING_CNT; i++) { 2433 rxd = &sc->vr_cdata.vr_rxdesc[i]; 2434 if (rxd->rx_m != NULL) { 2435 bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, 2436 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 2437 bus_dmamap_unload(sc->vr_cdata.vr_rx_tag, 2438 rxd->rx_dmamap); 2439 m_freem(rxd->rx_m); 2440 rxd->rx_m = NULL; 2441 } 2442 } 2443 for (i = 0; i < VR_TX_RING_CNT; i++) { 2444 txd = &sc->vr_cdata.vr_txdesc[i]; 2445 if (txd->tx_m != NULL) { 2446 bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, 2447 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 2448 bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, 2449 txd->tx_dmamap); 2450 m_freem(txd->tx_m); 2451 txd->tx_m = NULL; 2452 } 2453 } 2454 } 2455 2456 /* 2457 * Stop all chip I/O so that the kernel's probe routines don't 2458 * get confused by errant DMAs when rebooting. 2459 */ 2460 static int 2461 vr_shutdown(device_t dev) 2462 { 2463 2464 return (vr_suspend(dev)); 2465 } 2466 2467 static int 2468 vr_suspend(device_t dev) 2469 { 2470 struct vr_softc *sc; 2471 2472 sc = device_get_softc(dev); 2473 2474 VR_LOCK(sc); 2475 vr_stop(sc); 2476 vr_setwol(sc); 2477 sc->vr_flags |= VR_F_SUSPENDED; 2478 VR_UNLOCK(sc); 2479 2480 return (0); 2481 } 2482 2483 static int 2484 vr_resume(device_t dev) 2485 { 2486 struct vr_softc *sc; 2487 struct ifnet *ifp; 2488 2489 sc = device_get_softc(dev); 2490 2491 VR_LOCK(sc); 2492 ifp = sc->vr_ifp; 2493 vr_clrwol(sc); 2494 vr_reset(sc); 2495 if (ifp->if_flags & IFF_UP) 2496 vr_init_locked(sc); 2497 2498 sc->vr_flags &= ~VR_F_SUSPENDED; 2499 VR_UNLOCK(sc); 2500 2501 return (0); 2502 } 2503 2504 static void 2505 vr_setwol(struct vr_softc *sc) 2506 { 2507 struct ifnet *ifp; 2508 int pmc; 2509 uint16_t pmstat; 2510 uint8_t v; 2511 2512 VR_LOCK_ASSERT(sc); 2513 2514 if (sc->vr_revid < REV_ID_VT6102_A || 2515 pci_find_cap(sc->vr_dev, PCIY_PMG, &pmc) != 0) 2516 return; 2517 2518 ifp = sc->vr_ifp; 2519 2520 /* Clear WOL configuration. */ 2521 CSR_WRITE_1(sc, VR_WOLCR_CLR, 0xFF); 2522 CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_SAB | VR_WOLCFG_SAM); 2523 CSR_WRITE_1(sc, VR_PWRCSR_CLR, 0xFF); 2524 CSR_WRITE_1(sc, VR_PWRCFG_CLR, VR_PWRCFG_WOLEN); 2525 if (sc->vr_revid > REV_ID_VT6105_B0) { 2526 /* Newer Rhine III supports two additional patterns. */ 2527 CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_PATTERN_PAGE); 2528 CSR_WRITE_1(sc, VR_TESTREG_CLR, 3); 2529 CSR_WRITE_1(sc, VR_PWRCSR1_CLR, 3); 2530 } 2531 if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0) 2532 CSR_WRITE_1(sc, VR_WOLCR_SET, VR_WOLCR_UCAST); 2533 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) 2534 CSR_WRITE_1(sc, VR_WOLCR_SET, VR_WOLCR_MAGIC); 2535 /* 2536 * It seems that multicast wakeup frames require programming pattern 2537 * registers and valid CRC as well as pattern mask for each pattern. 2538 * While it's possible to setup such a pattern it would complicate 2539 * WOL configuration so ignore multicast wakeup frames. 2540 */ 2541 if ((ifp->if_capenable & IFCAP_WOL) != 0) { 2542 CSR_WRITE_1(sc, VR_WOLCFG_SET, VR_WOLCFG_SAB | VR_WOLCFG_SAM); 2543 v = CSR_READ_1(sc, VR_STICKHW); 2544 CSR_WRITE_1(sc, VR_STICKHW, v | VR_STICKHW_WOL_ENB); 2545 CSR_WRITE_1(sc, VR_PWRCFG_SET, VR_PWRCFG_WOLEN); 2546 } 2547 2548 /* Put hardware into sleep. */ 2549 v = CSR_READ_1(sc, VR_STICKHW); 2550 v |= VR_STICKHW_DS0 | VR_STICKHW_DS1; 2551 CSR_WRITE_1(sc, VR_STICKHW, v); 2552 2553 /* Request PME if WOL is requested. */ 2554 pmstat = pci_read_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, 2); 2555 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 2556 if ((ifp->if_capenable & IFCAP_WOL) != 0) 2557 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 2558 pci_write_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); 2559 } 2560 2561 static void 2562 vr_clrwol(struct vr_softc *sc) 2563 { 2564 uint8_t v; 2565 2566 VR_LOCK_ASSERT(sc); 2567 2568 if (sc->vr_revid < REV_ID_VT6102_A) 2569 return; 2570 2571 /* Take hardware out of sleep. */ 2572 v = CSR_READ_1(sc, VR_STICKHW); 2573 v &= ~(VR_STICKHW_DS0 | VR_STICKHW_DS1 | VR_STICKHW_WOL_ENB); 2574 CSR_WRITE_1(sc, VR_STICKHW, v); 2575 2576 /* Clear WOL configuration as WOL may interfere normal operation. */ 2577 CSR_WRITE_1(sc, VR_WOLCR_CLR, 0xFF); 2578 CSR_WRITE_1(sc, VR_WOLCFG_CLR, 2579 VR_WOLCFG_SAB | VR_WOLCFG_SAM | VR_WOLCFG_PMEOVR); 2580 CSR_WRITE_1(sc, VR_PWRCSR_CLR, 0xFF); 2581 CSR_WRITE_1(sc, VR_PWRCFG_CLR, VR_PWRCFG_WOLEN); 2582 if (sc->vr_revid > REV_ID_VT6105_B0) { 2583 /* Newer Rhine III supports two additional patterns. */ 2584 CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_PATTERN_PAGE); 2585 CSR_WRITE_1(sc, VR_TESTREG_CLR, 3); 2586 CSR_WRITE_1(sc, VR_PWRCSR1_CLR, 3); 2587 } 2588 } 2589 2590 static int 2591 vr_sysctl_stats(SYSCTL_HANDLER_ARGS) 2592 { 2593 struct vr_softc *sc; 2594 struct vr_statistics *stat; 2595 int error; 2596 int result; 2597 2598 result = -1; 2599 error = sysctl_handle_int(oidp, &result, 0, req); 2600 2601 if (error != 0 || req->newptr == NULL) 2602 return (error); 2603 2604 if (result == 1) { 2605 sc = (struct vr_softc *)arg1; 2606 stat = &sc->vr_stat; 2607 2608 printf("%s statistics:\n", device_get_nameunit(sc->vr_dev)); 2609 printf("Outbound good frames : %ju\n", 2610 (uintmax_t)stat->tx_ok); 2611 printf("Inbound good frames : %ju\n", 2612 (uintmax_t)stat->rx_ok); 2613 printf("Outbound errors : %u\n", stat->tx_errors); 2614 printf("Inbound errors : %u\n", stat->rx_errors); 2615 printf("Inbound no buffers : %u\n", stat->rx_no_buffers); 2616 printf("Inbound no mbuf clusters: %d\n", stat->rx_no_mbufs); 2617 printf("Inbound FIFO overflows : %d\n", 2618 stat->rx_fifo_overflows); 2619 printf("Inbound CRC errors : %u\n", stat->rx_crc_errors); 2620 printf("Inbound frame alignment errors : %u\n", 2621 stat->rx_alignment); 2622 printf("Inbound giant frames : %u\n", stat->rx_giants); 2623 printf("Inbound runt frames : %u\n", stat->rx_runts); 2624 printf("Outbound aborted with excessive collisions : %u\n", 2625 stat->tx_abort); 2626 printf("Outbound collisions : %u\n", stat->tx_collisions); 2627 printf("Outbound late collisions : %u\n", 2628 stat->tx_late_collisions); 2629 printf("Outbound underrun : %u\n", stat->tx_underrun); 2630 printf("PCI bus errors : %u\n", stat->bus_errors); 2631 printf("driver restarted due to Rx/Tx shutdown failure : %u\n", 2632 stat->num_restart); 2633 } 2634 2635 return (error); 2636 } 2637