1 /*- 2 * Copyright (c) 1997, 1998 3 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. All advertising materials mentioning features or use of this software 14 * must display the following acknowledgement: 15 * This product includes software developed by Bill Paul. 16 * 4. Neither the name of the author nor the names of any co-contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 /* 37 * VIA Rhine fast ethernet PCI NIC driver 38 * 39 * Supports various network adapters based on the VIA Rhine 40 * and Rhine II PCI controllers, including the D-Link DFE530TX. 41 * Datasheets are available at http://www.via.com.tw. 42 * 43 * Written by Bill Paul <wpaul@ctr.columbia.edu> 44 * Electrical Engineering Department 45 * Columbia University, New York City 46 */ 47 48 /* 49 * The VIA Rhine controllers are similar in some respects to the 50 * the DEC tulip chips, except less complicated. The controller 51 * uses an MII bus and an external physical layer interface. The 52 * receiver has a one entry perfect filter and a 64-bit hash table 53 * multicast filter. Transmit and receive descriptors are similar 54 * to the tulip. 55 * 56 * Some Rhine chips has a serious flaw in its transmit DMA mechanism: 57 * transmit buffers must be longword aligned. Unfortunately, 58 * FreeBSD doesn't guarantee that mbufs will be filled in starting 59 * at longword boundaries, so we have to do a buffer copy before 60 * transmission. 
61 */ 62 63 #ifdef HAVE_KERNEL_OPTION_HEADERS 64 #include "opt_device_polling.h" 65 #endif 66 67 #include <sys/param.h> 68 #include <sys/systm.h> 69 #include <sys/bus.h> 70 #include <sys/endian.h> 71 #include <sys/kernel.h> 72 #include <sys/malloc.h> 73 #include <sys/mbuf.h> 74 #include <sys/module.h> 75 #include <sys/rman.h> 76 #include <sys/socket.h> 77 #include <sys/sockio.h> 78 #include <sys/sysctl.h> 79 #include <sys/taskqueue.h> 80 81 #include <net/bpf.h> 82 #include <net/if.h> 83 #include <net/ethernet.h> 84 #include <net/if_dl.h> 85 #include <net/if_media.h> 86 #include <net/if_types.h> 87 #include <net/if_vlan_var.h> 88 89 #include <dev/mii/mii.h> 90 #include <dev/mii/miivar.h> 91 92 #include <dev/pci/pcireg.h> 93 #include <dev/pci/pcivar.h> 94 95 #include <machine/bus.h> 96 97 #include <dev/vr/if_vrreg.h> 98 99 /* "device miibus" required. See GENERIC if you get errors here. */ 100 #include "miibus_if.h" 101 102 MODULE_DEPEND(vr, pci, 1, 1, 1); 103 MODULE_DEPEND(vr, ether, 1, 1, 1); 104 MODULE_DEPEND(vr, miibus, 1, 1, 1); 105 106 /* Define to show Rx/Tx error status. */ 107 #undef VR_SHOW_ERRORS 108 #define VR_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 109 110 /* 111 * Various supported device vendors/types, their names & quirks. 112 */ 113 #define VR_Q_NEEDALIGN (1<<0) 114 #define VR_Q_CSUM (1<<1) 115 #define VR_Q_CAM (1<<2) 116 117 static const struct vr_type { 118 u_int16_t vr_vid; 119 u_int16_t vr_did; 120 int vr_quirks; 121 const char *vr_name; 122 } vr_devs[] = { 123 { VIA_VENDORID, VIA_DEVICEID_RHINE, 124 VR_Q_NEEDALIGN, 125 "VIA VT3043 Rhine I 10/100BaseTX" }, 126 { VIA_VENDORID, VIA_DEVICEID_RHINE_II, 127 VR_Q_NEEDALIGN, 128 "VIA VT86C100A Rhine II 10/100BaseTX" }, 129 { VIA_VENDORID, VIA_DEVICEID_RHINE_II_2, 130 0, 131 "VIA VT6102 Rhine II 10/100BaseTX" }, 132 { VIA_VENDORID, VIA_DEVICEID_RHINE_III, 133 0, 134 "VIA VT6105 Rhine III 10/100BaseTX" }, 135 { VIA_VENDORID, VIA_DEVICEID_RHINE_III_M, 136 VR_Q_CSUM, 137 "VIA VT6105M Rhine III 10/100BaseTX" }, 138 { DELTA_VENDORID, DELTA_DEVICEID_RHINE_II, 139 VR_Q_NEEDALIGN, 140 "Delta Electronics Rhine II 10/100BaseTX" }, 141 { ADDTRON_VENDORID, ADDTRON_DEVICEID_RHINE_II, 142 VR_Q_NEEDALIGN, 143 "Addtron Technology Rhine II 10/100BaseTX" }, 144 { 0, 0, 0, NULL } 145 }; 146 147 static int vr_probe(device_t); 148 static int vr_attach(device_t); 149 static int vr_detach(device_t); 150 static int vr_shutdown(device_t); 151 static int vr_suspend(device_t); 152 static int vr_resume(device_t); 153 154 static void vr_dmamap_cb(void *, bus_dma_segment_t *, int, int); 155 static int vr_dma_alloc(struct vr_softc *); 156 static void vr_dma_free(struct vr_softc *); 157 static __inline void vr_discard_rxbuf(struct vr_rxdesc *); 158 static int vr_newbuf(struct vr_softc *, int); 159 160 #ifndef __NO_STRICT_ALIGNMENT 161 static __inline void vr_fixup_rx(struct mbuf *); 162 #endif 163 static int vr_rxeof(struct vr_softc *); 164 static void vr_txeof(struct vr_softc *); 165 static void vr_tick(void *); 166 static int vr_error(struct vr_softc *, uint16_t); 167 static void vr_tx_underrun(struct vr_softc *); 168 static int vr_intr(void *); 169 static void vr_int_task(void *, int); 170 static void vr_start(struct ifnet *); 171 static void vr_start_locked(struct ifnet *); 172 static int vr_encap(struct vr_softc *, struct mbuf **); 173 static int vr_ioctl(struct ifnet *, u_long, caddr_t); 174 static void vr_init(void *); 175 static void vr_init_locked(struct vr_softc *); 176 static void vr_tx_start(struct vr_softc *); 177 static void 
vr_rx_start(struct vr_softc *); 178 static int vr_tx_stop(struct vr_softc *); 179 static int vr_rx_stop(struct vr_softc *); 180 static void vr_stop(struct vr_softc *); 181 static void vr_watchdog(struct vr_softc *); 182 static int vr_ifmedia_upd(struct ifnet *); 183 static void vr_ifmedia_sts(struct ifnet *, struct ifmediareq *); 184 185 static int vr_miibus_readreg(device_t, int, int); 186 static int vr_miibus_writereg(device_t, int, int, int); 187 static void vr_miibus_statchg(device_t); 188 189 static void vr_cam_mask(struct vr_softc *, uint32_t, int); 190 static int vr_cam_data(struct vr_softc *, int, int, uint8_t *); 191 static void vr_set_filter(struct vr_softc *); 192 static void vr_reset(const struct vr_softc *); 193 static int vr_tx_ring_init(struct vr_softc *); 194 static int vr_rx_ring_init(struct vr_softc *); 195 static void vr_setwol(struct vr_softc *); 196 static void vr_clrwol(struct vr_softc *); 197 static int vr_sysctl_stats(SYSCTL_HANDLER_ARGS); 198 199 static const struct vr_tx_threshold_table { 200 int tx_cfg; 201 int bcr_cfg; 202 int value; 203 } vr_tx_threshold_tables[] = { 204 { VR_TXTHRESH_64BYTES, VR_BCR1_TXTHRESH64BYTES, 64 }, 205 { VR_TXTHRESH_128BYTES, VR_BCR1_TXTHRESH128BYTES, 128 }, 206 { VR_TXTHRESH_256BYTES, VR_BCR1_TXTHRESH256BYTES, 256 }, 207 { VR_TXTHRESH_512BYTES, VR_BCR1_TXTHRESH512BYTES, 512 }, 208 { VR_TXTHRESH_1024BYTES, VR_BCR1_TXTHRESH1024BYTES, 1024 }, 209 { VR_TXTHRESH_STORENFWD, VR_BCR1_TXTHRESHSTORENFWD, 2048 } 210 }; 211 212 static device_method_t vr_methods[] = { 213 /* Device interface */ 214 DEVMETHOD(device_probe, vr_probe), 215 DEVMETHOD(device_attach, vr_attach), 216 DEVMETHOD(device_detach, vr_detach), 217 DEVMETHOD(device_shutdown, vr_shutdown), 218 DEVMETHOD(device_suspend, vr_suspend), 219 DEVMETHOD(device_resume, vr_resume), 220 221 /* MII interface */ 222 DEVMETHOD(miibus_readreg, vr_miibus_readreg), 223 DEVMETHOD(miibus_writereg, vr_miibus_writereg), 224 DEVMETHOD(miibus_statchg, vr_miibus_statchg), 225 226 DEVMETHOD_END 227 }; 228 229 static driver_t vr_driver = { 230 "vr", 231 vr_methods, 232 sizeof(struct vr_softc) 233 }; 234 235 static devclass_t vr_devclass; 236 237 DRIVER_MODULE(vr, pci, vr_driver, vr_devclass, 0, 0); 238 DRIVER_MODULE(miibus, vr, miibus_driver, miibus_devclass, 0, 0); 239 240 static int 241 vr_miibus_readreg(device_t dev, int phy, int reg) 242 { 243 struct vr_softc *sc; 244 int i; 245 246 sc = device_get_softc(dev); 247 248 /* Set the register address. */ 249 CSR_WRITE_1(sc, VR_MIIADDR, reg); 250 VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB); 251 252 for (i = 0; i < VR_MII_TIMEOUT; i++) { 253 DELAY(1); 254 if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0) 255 break; 256 } 257 if (i == VR_MII_TIMEOUT) 258 device_printf(sc->vr_dev, "phy read timeout %d:%d\n", phy, reg); 259 260 return (CSR_READ_2(sc, VR_MIIDATA)); 261 } 262 263 static int 264 vr_miibus_writereg(device_t dev, int phy, int reg, int data) 265 { 266 struct vr_softc *sc; 267 int i; 268 269 sc = device_get_softc(dev); 270 271 /* Set the register address and data to write. 
*/ 272 CSR_WRITE_1(sc, VR_MIIADDR, reg); 273 CSR_WRITE_2(sc, VR_MIIDATA, data); 274 VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB); 275 276 for (i = 0; i < VR_MII_TIMEOUT; i++) { 277 DELAY(1); 278 if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0) 279 break; 280 } 281 if (i == VR_MII_TIMEOUT) 282 device_printf(sc->vr_dev, "phy write timeout %d:%d\n", phy, 283 reg); 284 285 return (0); 286 } 287 288 /* 289 * In order to fiddle with the 290 * 'full-duplex' and '100Mbps' bits in the netconfig register, we 291 * first have to put the transmit and/or receive logic in the idle state. 292 */ 293 static void 294 vr_miibus_statchg(device_t dev) 295 { 296 struct vr_softc *sc; 297 struct mii_data *mii; 298 struct ifnet *ifp; 299 int lfdx, mfdx; 300 uint8_t cr0, cr1, fc; 301 302 sc = device_get_softc(dev); 303 mii = device_get_softc(sc->vr_miibus); 304 ifp = sc->vr_ifp; 305 if (mii == NULL || ifp == NULL || 306 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 307 return; 308 309 sc->vr_flags &= ~(VR_F_LINK | VR_F_TXPAUSE); 310 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 311 (IFM_ACTIVE | IFM_AVALID)) { 312 switch (IFM_SUBTYPE(mii->mii_media_active)) { 313 case IFM_10_T: 314 case IFM_100_TX: 315 sc->vr_flags |= VR_F_LINK; 316 break; 317 default: 318 break; 319 } 320 } 321 322 if ((sc->vr_flags & VR_F_LINK) != 0) { 323 cr0 = CSR_READ_1(sc, VR_CR0); 324 cr1 = CSR_READ_1(sc, VR_CR1); 325 mfdx = (cr1 & VR_CR1_FULLDUPLEX) != 0; 326 lfdx = (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0; 327 if (mfdx != lfdx) { 328 if ((cr0 & (VR_CR0_TX_ON | VR_CR0_RX_ON)) != 0) { 329 if (vr_tx_stop(sc) != 0 || 330 vr_rx_stop(sc) != 0) { 331 device_printf(sc->vr_dev, 332 "%s: Tx/Rx shutdown error -- " 333 "resetting\n", __func__); 334 sc->vr_flags |= VR_F_RESTART; 335 VR_UNLOCK(sc); 336 return; 337 } 338 } 339 if (lfdx) 340 cr1 |= VR_CR1_FULLDUPLEX; 341 else 342 cr1 &= ~VR_CR1_FULLDUPLEX; 343 CSR_WRITE_1(sc, VR_CR1, cr1); 344 } 345 fc = 0; 346 /* Configure flow-control. */ 347 if (sc->vr_revid >= REV_ID_VT6105_A0) { 348 fc = CSR_READ_1(sc, VR_FLOWCR1); 349 fc &= ~(VR_FLOWCR1_TXPAUSE | VR_FLOWCR1_RXPAUSE); 350 if ((IFM_OPTIONS(mii->mii_media_active) & 351 IFM_ETH_RXPAUSE) != 0) 352 fc |= VR_FLOWCR1_RXPAUSE; 353 if ((IFM_OPTIONS(mii->mii_media_active) & 354 IFM_ETH_TXPAUSE) != 0) { 355 fc |= VR_FLOWCR1_TXPAUSE; 356 sc->vr_flags |= VR_F_TXPAUSE; 357 } 358 CSR_WRITE_1(sc, VR_FLOWCR1, fc); 359 } else if (sc->vr_revid >= REV_ID_VT6102_A) { 360 /* No Tx puase capability available for Rhine II. 
*/ 361 fc = CSR_READ_1(sc, VR_MISC_CR0); 362 fc &= ~VR_MISCCR0_RXPAUSE; 363 if ((IFM_OPTIONS(mii->mii_media_active) & 364 IFM_ETH_RXPAUSE) != 0) 365 fc |= VR_MISCCR0_RXPAUSE; 366 CSR_WRITE_1(sc, VR_MISC_CR0, fc); 367 } 368 vr_rx_start(sc); 369 vr_tx_start(sc); 370 } else { 371 if (vr_tx_stop(sc) != 0 || vr_rx_stop(sc) != 0) { 372 device_printf(sc->vr_dev, 373 "%s: Tx/Rx shutdown error -- resetting\n", 374 __func__); 375 sc->vr_flags |= VR_F_RESTART; 376 } 377 } 378 } 379 380 381 static void 382 vr_cam_mask(struct vr_softc *sc, uint32_t mask, int type) 383 { 384 385 if (type == VR_MCAST_CAM) 386 CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_MCAST); 387 else 388 CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_VLAN); 389 CSR_WRITE_4(sc, VR_CAMMASK, mask); 390 CSR_WRITE_1(sc, VR_CAMCTL, 0); 391 } 392 393 static int 394 vr_cam_data(struct vr_softc *sc, int type, int idx, uint8_t *mac) 395 { 396 int i; 397 398 if (type == VR_MCAST_CAM) { 399 if (idx < 0 || idx >= VR_CAM_MCAST_CNT || mac == NULL) 400 return (EINVAL); 401 CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_MCAST); 402 } else 403 CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_VLAN); 404 405 /* Set CAM entry address. */ 406 CSR_WRITE_1(sc, VR_CAMADDR, idx); 407 /* Set CAM entry data. */ 408 if (type == VR_MCAST_CAM) { 409 for (i = 0; i < ETHER_ADDR_LEN; i++) 410 CSR_WRITE_1(sc, VR_MCAM0 + i, mac[i]); 411 } else { 412 CSR_WRITE_1(sc, VR_VCAM0, mac[0]); 413 CSR_WRITE_1(sc, VR_VCAM1, mac[1]); 414 } 415 DELAY(10); 416 /* Write CAM and wait for self-clear of VR_CAMCTL_WRITE bit. */ 417 CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_WRITE); 418 for (i = 0; i < VR_TIMEOUT; i++) { 419 DELAY(1); 420 if ((CSR_READ_1(sc, VR_CAMCTL) & VR_CAMCTL_WRITE) == 0) 421 break; 422 } 423 424 if (i == VR_TIMEOUT) 425 device_printf(sc->vr_dev, "%s: setting CAM filter timeout!\n", 426 __func__); 427 CSR_WRITE_1(sc, VR_CAMCTL, 0); 428 429 return (i == VR_TIMEOUT ? ETIMEDOUT : 0); 430 } 431 432 /* 433 * Program the 64-bit multicast hash filter. 434 */ 435 static void 436 vr_set_filter(struct vr_softc *sc) 437 { 438 struct ifnet *ifp; 439 int h; 440 uint32_t hashes[2] = { 0, 0 }; 441 struct ifmultiaddr *ifma; 442 uint8_t rxfilt; 443 int error, mcnt; 444 uint32_t cam_mask; 445 446 VR_LOCK_ASSERT(sc); 447 448 ifp = sc->vr_ifp; 449 rxfilt = CSR_READ_1(sc, VR_RXCFG); 450 rxfilt &= ~(VR_RXCFG_RX_PROMISC | VR_RXCFG_RX_BROAD | 451 VR_RXCFG_RX_MULTI); 452 if (ifp->if_flags & IFF_BROADCAST) 453 rxfilt |= VR_RXCFG_RX_BROAD; 454 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 455 rxfilt |= VR_RXCFG_RX_MULTI; 456 if (ifp->if_flags & IFF_PROMISC) 457 rxfilt |= VR_RXCFG_RX_PROMISC; 458 CSR_WRITE_1(sc, VR_RXCFG, rxfilt); 459 CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF); 460 CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF); 461 return; 462 } 463 464 /* Now program new ones. */ 465 error = 0; 466 mcnt = 0; 467 if_maddr_rlock(ifp); 468 if ((sc->vr_quirks & VR_Q_CAM) != 0) { 469 /* 470 * For hardwares that have CAM capability, use 471 * 32 entries multicast perfect filter. 
472 */ 473 cam_mask = 0; 474 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 475 if (ifma->ifma_addr->sa_family != AF_LINK) 476 continue; 477 error = vr_cam_data(sc, VR_MCAST_CAM, mcnt, 478 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 479 if (error != 0) { 480 cam_mask = 0; 481 break; 482 } 483 cam_mask |= 1 << mcnt; 484 mcnt++; 485 } 486 vr_cam_mask(sc, VR_MCAST_CAM, cam_mask); 487 } 488 489 if ((sc->vr_quirks & VR_Q_CAM) == 0 || error != 0) { 490 /* 491 * If there are too many multicast addresses or 492 * setting multicast CAM filter failed, use hash 493 * table based filtering. 494 */ 495 mcnt = 0; 496 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 497 if (ifma->ifma_addr->sa_family != AF_LINK) 498 continue; 499 h = ether_crc32_be(LLADDR((struct sockaddr_dl *) 500 ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; 501 if (h < 32) 502 hashes[0] |= (1 << h); 503 else 504 hashes[1] |= (1 << (h - 32)); 505 mcnt++; 506 } 507 } 508 if_maddr_runlock(ifp); 509 510 if (mcnt > 0) 511 rxfilt |= VR_RXCFG_RX_MULTI; 512 513 CSR_WRITE_4(sc, VR_MAR0, hashes[0]); 514 CSR_WRITE_4(sc, VR_MAR1, hashes[1]); 515 CSR_WRITE_1(sc, VR_RXCFG, rxfilt); 516 } 517 518 static void 519 vr_reset(const struct vr_softc *sc) 520 { 521 int i; 522 523 /*VR_LOCK_ASSERT(sc);*/ /* XXX: Called during attach w/o lock. */ 524 525 CSR_WRITE_1(sc, VR_CR1, VR_CR1_RESET); 526 if (sc->vr_revid < REV_ID_VT6102_A) { 527 /* VT86C100A needs more delay after reset. */ 528 DELAY(100); 529 } 530 for (i = 0; i < VR_TIMEOUT; i++) { 531 DELAY(10); 532 if (!(CSR_READ_1(sc, VR_CR1) & VR_CR1_RESET)) 533 break; 534 } 535 if (i == VR_TIMEOUT) { 536 if (sc->vr_revid < REV_ID_VT6102_A) 537 device_printf(sc->vr_dev, "reset never completed!\n"); 538 else { 539 /* Use newer force reset command. */ 540 device_printf(sc->vr_dev, 541 "Using force reset command.\n"); 542 VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST); 543 /* 544 * Wait a little while for the chip to get its brains 545 * in order. 546 */ 547 DELAY(2000); 548 } 549 } 550 551 } 552 553 /* 554 * Probe for a VIA Rhine chip. Check the PCI vendor and device 555 * IDs against our list and return a match or NULL 556 */ 557 static const struct vr_type * 558 vr_match(device_t dev) 559 { 560 const struct vr_type *t = vr_devs; 561 562 for (t = vr_devs; t->vr_name != NULL; t++) 563 if ((pci_get_vendor(dev) == t->vr_vid) && 564 (pci_get_device(dev) == t->vr_did)) 565 return (t); 566 return (NULL); 567 } 568 569 /* 570 * Probe for a VIA Rhine chip. Check the PCI vendor and device 571 * IDs against our list and return a device name if we find a match. 572 */ 573 static int 574 vr_probe(device_t dev) 575 { 576 const struct vr_type *t; 577 578 t = vr_match(dev); 579 if (t != NULL) { 580 device_set_desc(dev, t->vr_name); 581 return (BUS_PROBE_DEFAULT); 582 } 583 return (ENXIO); 584 } 585 586 /* 587 * Attach the interface. Allocate softc structures, do ifmedia 588 * setup and ethernet/BPF attach. 
589 */ 590 static int 591 vr_attach(device_t dev) 592 { 593 struct vr_softc *sc; 594 struct ifnet *ifp; 595 const struct vr_type *t; 596 uint8_t eaddr[ETHER_ADDR_LEN]; 597 int error, rid; 598 int i, phy, pmc; 599 600 sc = device_get_softc(dev); 601 sc->vr_dev = dev; 602 t = vr_match(dev); 603 KASSERT(t != NULL, ("Lost if_vr device match")); 604 sc->vr_quirks = t->vr_quirks; 605 device_printf(dev, "Quirks: 0x%x\n", sc->vr_quirks); 606 607 mtx_init(&sc->vr_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 608 MTX_DEF); 609 callout_init_mtx(&sc->vr_stat_callout, &sc->vr_mtx, 0); 610 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 611 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 612 OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0, 613 vr_sysctl_stats, "I", "Statistics"); 614 615 error = 0; 616 617 /* 618 * Map control/status registers. 619 */ 620 pci_enable_busmaster(dev); 621 sc->vr_revid = pci_get_revid(dev); 622 device_printf(dev, "Revision: 0x%x\n", sc->vr_revid); 623 624 sc->vr_res_id = PCIR_BAR(0); 625 sc->vr_res_type = SYS_RES_IOPORT; 626 sc->vr_res = bus_alloc_resource_any(dev, sc->vr_res_type, 627 &sc->vr_res_id, RF_ACTIVE); 628 if (sc->vr_res == NULL) { 629 device_printf(dev, "couldn't map ports\n"); 630 error = ENXIO; 631 goto fail; 632 } 633 634 /* Allocate interrupt. */ 635 rid = 0; 636 sc->vr_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 637 RF_SHAREABLE | RF_ACTIVE); 638 639 if (sc->vr_irq == NULL) { 640 device_printf(dev, "couldn't map interrupt\n"); 641 error = ENXIO; 642 goto fail; 643 } 644 645 /* Allocate ifnet structure. */ 646 ifp = sc->vr_ifp = if_alloc(IFT_ETHER); 647 if (ifp == NULL) { 648 device_printf(dev, "couldn't allocate ifnet structure\n"); 649 error = ENOSPC; 650 goto fail; 651 } 652 ifp->if_softc = sc; 653 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 654 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 655 ifp->if_ioctl = vr_ioctl; 656 ifp->if_start = vr_start; 657 ifp->if_init = vr_init; 658 IFQ_SET_MAXLEN(&ifp->if_snd, VR_TX_RING_CNT - 1); 659 ifp->if_snd.ifq_maxlen = VR_TX_RING_CNT - 1; 660 IFQ_SET_READY(&ifp->if_snd); 661 662 TASK_INIT(&sc->vr_inttask, 0, vr_int_task, sc); 663 664 /* Configure Tx FIFO threshold. */ 665 sc->vr_txthresh = VR_TXTHRESH_MIN; 666 if (sc->vr_revid < REV_ID_VT6105_A0) { 667 /* 668 * Use store and forward mode for Rhine I/II. 669 * Otherwise they produce a lot of Tx underruns and 670 * it would take a while to get working FIFO threshold 671 * value. 672 */ 673 sc->vr_txthresh = VR_TXTHRESH_MAX; 674 } 675 if ((sc->vr_quirks & VR_Q_CSUM) != 0) { 676 ifp->if_hwassist = VR_CSUM_FEATURES; 677 ifp->if_capabilities |= IFCAP_HWCSUM; 678 /* 679 * To update checksum field the hardware may need to 680 * store entire frames into FIFO before transmitting. 681 */ 682 sc->vr_txthresh = VR_TXTHRESH_MAX; 683 } 684 685 if (sc->vr_revid >= REV_ID_VT6102_A && 686 pci_find_cap(dev, PCIY_PMG, &pmc) == 0) 687 ifp->if_capabilities |= IFCAP_WOL_UCAST | IFCAP_WOL_MAGIC; 688 689 /* Rhine supports oversized VLAN frame. */ 690 ifp->if_capabilities |= IFCAP_VLAN_MTU; 691 ifp->if_capenable = ifp->if_capabilities; 692 #ifdef DEVICE_POLLING 693 ifp->if_capabilities |= IFCAP_POLLING; 694 #endif 695 696 /* 697 * Windows may put the chip in suspend mode when it 698 * shuts down. Be sure to kick it in the head to wake it 699 * up again. 700 */ 701 if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0) 702 VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1)); 703 704 /* 705 * Get station address. 
The way the Rhine chips work, 706 * you're not allowed to directly access the EEPROM once 707 * they've been programmed a special way. Consequently, 708 * we need to read the node address from the PAR0 and PAR1 709 * registers. 710 * Reloading EEPROM also overwrites VR_CFGA, VR_CFGB, 711 * VR_CFGC and VR_CFGD such that memory mapped IO configured 712 * by driver is reset to default state. 713 */ 714 VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD); 715 for (i = VR_TIMEOUT; i > 0; i--) { 716 DELAY(1); 717 if ((CSR_READ_1(sc, VR_EECSR) & VR_EECSR_LOAD) == 0) 718 break; 719 } 720 if (i == 0) 721 device_printf(dev, "Reloading EEPROM timeout!\n"); 722 for (i = 0; i < ETHER_ADDR_LEN; i++) 723 eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i); 724 725 /* Reset the adapter. */ 726 vr_reset(sc); 727 /* Ack intr & disable further interrupts. */ 728 CSR_WRITE_2(sc, VR_ISR, 0xFFFF); 729 CSR_WRITE_2(sc, VR_IMR, 0); 730 if (sc->vr_revid >= REV_ID_VT6102_A) 731 CSR_WRITE_2(sc, VR_MII_IMR, 0); 732 733 if (sc->vr_revid < REV_ID_VT6102_A) { 734 pci_write_config(dev, VR_PCI_MODE2, 735 pci_read_config(dev, VR_PCI_MODE2, 1) | 736 VR_MODE2_MODE10T, 1); 737 } else { 738 /* Report error instead of retrying forever. */ 739 pci_write_config(dev, VR_PCI_MODE2, 740 pci_read_config(dev, VR_PCI_MODE2, 1) | 741 VR_MODE2_PCEROPT, 1); 742 /* Detect MII coding error. */ 743 pci_write_config(dev, VR_PCI_MODE3, 744 pci_read_config(dev, VR_PCI_MODE3, 1) | 745 VR_MODE3_MIION, 1); 746 if (sc->vr_revid >= REV_ID_VT6105_LOM && 747 sc->vr_revid < REV_ID_VT6105M_A0) 748 pci_write_config(dev, VR_PCI_MODE2, 749 pci_read_config(dev, VR_PCI_MODE2, 1) | 750 VR_MODE2_MODE10T, 1); 751 /* Enable Memory-Read-Multiple. */ 752 if (sc->vr_revid >= REV_ID_VT6107_A1 && 753 sc->vr_revid < REV_ID_VT6105M_A0) 754 pci_write_config(dev, VR_PCI_MODE2, 755 pci_read_config(dev, VR_PCI_MODE2, 1) | 756 VR_MODE2_MRDPL, 1); 757 } 758 /* Disable MII AUTOPOLL. */ 759 VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL); 760 761 if (vr_dma_alloc(sc) != 0) { 762 error = ENXIO; 763 goto fail; 764 } 765 766 /* Do MII setup. */ 767 if (sc->vr_revid >= REV_ID_VT6105_A0) 768 phy = 1; 769 else 770 phy = CSR_READ_1(sc, VR_PHYADDR) & VR_PHYADDR_MASK; 771 error = mii_attach(dev, &sc->vr_miibus, ifp, vr_ifmedia_upd, 772 vr_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 773 sc->vr_revid >= REV_ID_VT6102_A ? MIIF_DOPAUSE : 0); 774 if (error != 0) { 775 device_printf(dev, "attaching PHYs failed\n"); 776 goto fail; 777 } 778 779 /* Call MI attach routine. */ 780 ether_ifattach(ifp, eaddr); 781 /* 782 * Tell the upper layer(s) we support long frames. 783 * Must appear after the call to ether_ifattach() because 784 * ether_ifattach() sets ifi_hdrlen to the default value. 785 */ 786 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 787 788 /* Hook interrupt last to avoid having to lock softc. */ 789 error = bus_setup_intr(dev, sc->vr_irq, INTR_TYPE_NET | INTR_MPSAFE, 790 vr_intr, NULL, sc, &sc->vr_intrhand); 791 792 if (error) { 793 device_printf(dev, "couldn't set up irq\n"); 794 ether_ifdetach(ifp); 795 goto fail; 796 } 797 798 fail: 799 if (error) 800 vr_detach(dev); 801 802 return (error); 803 } 804 805 /* 806 * Shutdown hardware and free up resources. This can be called any 807 * time after the mutex has been initialized. It is called in both 808 * the error case in attach and the normal detach case so it needs 809 * to be careful about only freeing resources that have actually been 810 * allocated. 
811 */ 812 static int 813 vr_detach(device_t dev) 814 { 815 struct vr_softc *sc = device_get_softc(dev); 816 struct ifnet *ifp = sc->vr_ifp; 817 818 KASSERT(mtx_initialized(&sc->vr_mtx), ("vr mutex not initialized")); 819 820 #ifdef DEVICE_POLLING 821 if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING) 822 ether_poll_deregister(ifp); 823 #endif 824 825 /* These should only be active if attach succeeded. */ 826 if (device_is_attached(dev)) { 827 VR_LOCK(sc); 828 sc->vr_flags |= VR_F_DETACHED; 829 vr_stop(sc); 830 VR_UNLOCK(sc); 831 callout_drain(&sc->vr_stat_callout); 832 taskqueue_drain(taskqueue_fast, &sc->vr_inttask); 833 ether_ifdetach(ifp); 834 } 835 if (sc->vr_miibus) 836 device_delete_child(dev, sc->vr_miibus); 837 bus_generic_detach(dev); 838 839 if (sc->vr_intrhand) 840 bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand); 841 if (sc->vr_irq) 842 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq); 843 if (sc->vr_res) 844 bus_release_resource(dev, sc->vr_res_type, sc->vr_res_id, 845 sc->vr_res); 846 847 if (ifp) 848 if_free(ifp); 849 850 vr_dma_free(sc); 851 852 mtx_destroy(&sc->vr_mtx); 853 854 return (0); 855 } 856 857 struct vr_dmamap_arg { 858 bus_addr_t vr_busaddr; 859 }; 860 861 static void 862 vr_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 863 { 864 struct vr_dmamap_arg *ctx; 865 866 if (error != 0) 867 return; 868 ctx = arg; 869 ctx->vr_busaddr = segs[0].ds_addr; 870 } 871 872 static int 873 vr_dma_alloc(struct vr_softc *sc) 874 { 875 struct vr_dmamap_arg ctx; 876 struct vr_txdesc *txd; 877 struct vr_rxdesc *rxd; 878 bus_size_t tx_alignment; 879 int error, i; 880 881 /* Create parent DMA tag. */ 882 error = bus_dma_tag_create( 883 bus_get_dma_tag(sc->vr_dev), /* parent */ 884 1, 0, /* alignment, boundary */ 885 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 886 BUS_SPACE_MAXADDR, /* highaddr */ 887 NULL, NULL, /* filter, filterarg */ 888 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 889 0, /* nsegments */ 890 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 891 0, /* flags */ 892 NULL, NULL, /* lockfunc, lockarg */ 893 &sc->vr_cdata.vr_parent_tag); 894 if (error != 0) { 895 device_printf(sc->vr_dev, "failed to create parent DMA tag\n"); 896 goto fail; 897 } 898 /* Create tag for Tx ring. */ 899 error = bus_dma_tag_create( 900 sc->vr_cdata.vr_parent_tag, /* parent */ 901 VR_RING_ALIGN, 0, /* alignment, boundary */ 902 BUS_SPACE_MAXADDR, /* lowaddr */ 903 BUS_SPACE_MAXADDR, /* highaddr */ 904 NULL, NULL, /* filter, filterarg */ 905 VR_TX_RING_SIZE, /* maxsize */ 906 1, /* nsegments */ 907 VR_TX_RING_SIZE, /* maxsegsize */ 908 0, /* flags */ 909 NULL, NULL, /* lockfunc, lockarg */ 910 &sc->vr_cdata.vr_tx_ring_tag); 911 if (error != 0) { 912 device_printf(sc->vr_dev, "failed to create Tx ring DMA tag\n"); 913 goto fail; 914 } 915 916 /* Create tag for Rx ring. */ 917 error = bus_dma_tag_create( 918 sc->vr_cdata.vr_parent_tag, /* parent */ 919 VR_RING_ALIGN, 0, /* alignment, boundary */ 920 BUS_SPACE_MAXADDR, /* lowaddr */ 921 BUS_SPACE_MAXADDR, /* highaddr */ 922 NULL, NULL, /* filter, filterarg */ 923 VR_RX_RING_SIZE, /* maxsize */ 924 1, /* nsegments */ 925 VR_RX_RING_SIZE, /* maxsegsize */ 926 0, /* flags */ 927 NULL, NULL, /* lockfunc, lockarg */ 928 &sc->vr_cdata.vr_rx_ring_tag); 929 if (error != 0) { 930 device_printf(sc->vr_dev, "failed to create Rx ring DMA tag\n"); 931 goto fail; 932 } 933 934 if ((sc->vr_quirks & VR_Q_NEEDALIGN) != 0) 935 tx_alignment = sizeof(uint32_t); 936 else 937 tx_alignment = 1; 938 /* Create tag for Tx buffers. 
*/ 939 error = bus_dma_tag_create( 940 sc->vr_cdata.vr_parent_tag, /* parent */ 941 tx_alignment, 0, /* alignment, boundary */ 942 BUS_SPACE_MAXADDR, /* lowaddr */ 943 BUS_SPACE_MAXADDR, /* highaddr */ 944 NULL, NULL, /* filter, filterarg */ 945 MCLBYTES * VR_MAXFRAGS, /* maxsize */ 946 VR_MAXFRAGS, /* nsegments */ 947 MCLBYTES, /* maxsegsize */ 948 0, /* flags */ 949 NULL, NULL, /* lockfunc, lockarg */ 950 &sc->vr_cdata.vr_tx_tag); 951 if (error != 0) { 952 device_printf(sc->vr_dev, "failed to create Tx DMA tag\n"); 953 goto fail; 954 } 955 956 /* Create tag for Rx buffers. */ 957 error = bus_dma_tag_create( 958 sc->vr_cdata.vr_parent_tag, /* parent */ 959 VR_RX_ALIGN, 0, /* alignment, boundary */ 960 BUS_SPACE_MAXADDR, /* lowaddr */ 961 BUS_SPACE_MAXADDR, /* highaddr */ 962 NULL, NULL, /* filter, filterarg */ 963 MCLBYTES, /* maxsize */ 964 1, /* nsegments */ 965 MCLBYTES, /* maxsegsize */ 966 0, /* flags */ 967 NULL, NULL, /* lockfunc, lockarg */ 968 &sc->vr_cdata.vr_rx_tag); 969 if (error != 0) { 970 device_printf(sc->vr_dev, "failed to create Rx DMA tag\n"); 971 goto fail; 972 } 973 974 /* Allocate DMA'able memory and load the DMA map for Tx ring. */ 975 error = bus_dmamem_alloc(sc->vr_cdata.vr_tx_ring_tag, 976 (void **)&sc->vr_rdata.vr_tx_ring, BUS_DMA_WAITOK | 977 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->vr_cdata.vr_tx_ring_map); 978 if (error != 0) { 979 device_printf(sc->vr_dev, 980 "failed to allocate DMA'able memory for Tx ring\n"); 981 goto fail; 982 } 983 984 ctx.vr_busaddr = 0; 985 error = bus_dmamap_load(sc->vr_cdata.vr_tx_ring_tag, 986 sc->vr_cdata.vr_tx_ring_map, sc->vr_rdata.vr_tx_ring, 987 VR_TX_RING_SIZE, vr_dmamap_cb, &ctx, 0); 988 if (error != 0 || ctx.vr_busaddr == 0) { 989 device_printf(sc->vr_dev, 990 "failed to load DMA'able memory for Tx ring\n"); 991 goto fail; 992 } 993 sc->vr_rdata.vr_tx_ring_paddr = ctx.vr_busaddr; 994 995 /* Allocate DMA'able memory and load the DMA map for Rx ring. */ 996 error = bus_dmamem_alloc(sc->vr_cdata.vr_rx_ring_tag, 997 (void **)&sc->vr_rdata.vr_rx_ring, BUS_DMA_WAITOK | 998 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->vr_cdata.vr_rx_ring_map); 999 if (error != 0) { 1000 device_printf(sc->vr_dev, 1001 "failed to allocate DMA'able memory for Rx ring\n"); 1002 goto fail; 1003 } 1004 1005 ctx.vr_busaddr = 0; 1006 error = bus_dmamap_load(sc->vr_cdata.vr_rx_ring_tag, 1007 sc->vr_cdata.vr_rx_ring_map, sc->vr_rdata.vr_rx_ring, 1008 VR_RX_RING_SIZE, vr_dmamap_cb, &ctx, 0); 1009 if (error != 0 || ctx.vr_busaddr == 0) { 1010 device_printf(sc->vr_dev, 1011 "failed to load DMA'able memory for Rx ring\n"); 1012 goto fail; 1013 } 1014 sc->vr_rdata.vr_rx_ring_paddr = ctx.vr_busaddr; 1015 1016 /* Create DMA maps for Tx buffers. */ 1017 for (i = 0; i < VR_TX_RING_CNT; i++) { 1018 txd = &sc->vr_cdata.vr_txdesc[i]; 1019 txd->tx_m = NULL; 1020 txd->tx_dmamap = NULL; 1021 error = bus_dmamap_create(sc->vr_cdata.vr_tx_tag, 0, 1022 &txd->tx_dmamap); 1023 if (error != 0) { 1024 device_printf(sc->vr_dev, 1025 "failed to create Tx dmamap\n"); 1026 goto fail; 1027 } 1028 } 1029 /* Create DMA maps for Rx buffers. 
*/ 1030 if ((error = bus_dmamap_create(sc->vr_cdata.vr_rx_tag, 0, 1031 &sc->vr_cdata.vr_rx_sparemap)) != 0) { 1032 device_printf(sc->vr_dev, 1033 "failed to create spare Rx dmamap\n"); 1034 goto fail; 1035 } 1036 for (i = 0; i < VR_RX_RING_CNT; i++) { 1037 rxd = &sc->vr_cdata.vr_rxdesc[i]; 1038 rxd->rx_m = NULL; 1039 rxd->rx_dmamap = NULL; 1040 error = bus_dmamap_create(sc->vr_cdata.vr_rx_tag, 0, 1041 &rxd->rx_dmamap); 1042 if (error != 0) { 1043 device_printf(sc->vr_dev, 1044 "failed to create Rx dmamap\n"); 1045 goto fail; 1046 } 1047 } 1048 1049 fail: 1050 return (error); 1051 } 1052 1053 static void 1054 vr_dma_free(struct vr_softc *sc) 1055 { 1056 struct vr_txdesc *txd; 1057 struct vr_rxdesc *rxd; 1058 int i; 1059 1060 /* Tx ring. */ 1061 if (sc->vr_cdata.vr_tx_ring_tag) { 1062 if (sc->vr_cdata.vr_tx_ring_map) 1063 bus_dmamap_unload(sc->vr_cdata.vr_tx_ring_tag, 1064 sc->vr_cdata.vr_tx_ring_map); 1065 if (sc->vr_cdata.vr_tx_ring_map && 1066 sc->vr_rdata.vr_tx_ring) 1067 bus_dmamem_free(sc->vr_cdata.vr_tx_ring_tag, 1068 sc->vr_rdata.vr_tx_ring, 1069 sc->vr_cdata.vr_tx_ring_map); 1070 sc->vr_rdata.vr_tx_ring = NULL; 1071 sc->vr_cdata.vr_tx_ring_map = NULL; 1072 bus_dma_tag_destroy(sc->vr_cdata.vr_tx_ring_tag); 1073 sc->vr_cdata.vr_tx_ring_tag = NULL; 1074 } 1075 /* Rx ring. */ 1076 if (sc->vr_cdata.vr_rx_ring_tag) { 1077 if (sc->vr_cdata.vr_rx_ring_map) 1078 bus_dmamap_unload(sc->vr_cdata.vr_rx_ring_tag, 1079 sc->vr_cdata.vr_rx_ring_map); 1080 if (sc->vr_cdata.vr_rx_ring_map && 1081 sc->vr_rdata.vr_rx_ring) 1082 bus_dmamem_free(sc->vr_cdata.vr_rx_ring_tag, 1083 sc->vr_rdata.vr_rx_ring, 1084 sc->vr_cdata.vr_rx_ring_map); 1085 sc->vr_rdata.vr_rx_ring = NULL; 1086 sc->vr_cdata.vr_rx_ring_map = NULL; 1087 bus_dma_tag_destroy(sc->vr_cdata.vr_rx_ring_tag); 1088 sc->vr_cdata.vr_rx_ring_tag = NULL; 1089 } 1090 /* Tx buffers. */ 1091 if (sc->vr_cdata.vr_tx_tag) { 1092 for (i = 0; i < VR_TX_RING_CNT; i++) { 1093 txd = &sc->vr_cdata.vr_txdesc[i]; 1094 if (txd->tx_dmamap) { 1095 bus_dmamap_destroy(sc->vr_cdata.vr_tx_tag, 1096 txd->tx_dmamap); 1097 txd->tx_dmamap = NULL; 1098 } 1099 } 1100 bus_dma_tag_destroy(sc->vr_cdata.vr_tx_tag); 1101 sc->vr_cdata.vr_tx_tag = NULL; 1102 } 1103 /* Rx buffers. */ 1104 if (sc->vr_cdata.vr_rx_tag) { 1105 for (i = 0; i < VR_RX_RING_CNT; i++) { 1106 rxd = &sc->vr_cdata.vr_rxdesc[i]; 1107 if (rxd->rx_dmamap) { 1108 bus_dmamap_destroy(sc->vr_cdata.vr_rx_tag, 1109 rxd->rx_dmamap); 1110 rxd->rx_dmamap = NULL; 1111 } 1112 } 1113 if (sc->vr_cdata.vr_rx_sparemap) { 1114 bus_dmamap_destroy(sc->vr_cdata.vr_rx_tag, 1115 sc->vr_cdata.vr_rx_sparemap); 1116 sc->vr_cdata.vr_rx_sparemap = 0; 1117 } 1118 bus_dma_tag_destroy(sc->vr_cdata.vr_rx_tag); 1119 sc->vr_cdata.vr_rx_tag = NULL; 1120 } 1121 1122 if (sc->vr_cdata.vr_parent_tag) { 1123 bus_dma_tag_destroy(sc->vr_cdata.vr_parent_tag); 1124 sc->vr_cdata.vr_parent_tag = NULL; 1125 } 1126 } 1127 1128 /* 1129 * Initialize the transmit descriptors. 
1130 */ 1131 static int 1132 vr_tx_ring_init(struct vr_softc *sc) 1133 { 1134 struct vr_ring_data *rd; 1135 struct vr_txdesc *txd; 1136 bus_addr_t addr; 1137 int i; 1138 1139 sc->vr_cdata.vr_tx_prod = 0; 1140 sc->vr_cdata.vr_tx_cons = 0; 1141 sc->vr_cdata.vr_tx_cnt = 0; 1142 sc->vr_cdata.vr_tx_pkts = 0; 1143 1144 rd = &sc->vr_rdata; 1145 bzero(rd->vr_tx_ring, VR_TX_RING_SIZE); 1146 for (i = 0; i < VR_TX_RING_CNT; i++) { 1147 if (i == VR_TX_RING_CNT - 1) 1148 addr = VR_TX_RING_ADDR(sc, 0); 1149 else 1150 addr = VR_TX_RING_ADDR(sc, i + 1); 1151 rd->vr_tx_ring[i].vr_nextphys = htole32(VR_ADDR_LO(addr)); 1152 txd = &sc->vr_cdata.vr_txdesc[i]; 1153 txd->tx_m = NULL; 1154 } 1155 1156 bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag, 1157 sc->vr_cdata.vr_tx_ring_map, 1158 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1159 1160 return (0); 1161 } 1162 1163 /* 1164 * Initialize the RX descriptors and allocate mbufs for them. Note that 1165 * we arrange the descriptors in a closed ring, so that the last descriptor 1166 * points back to the first. 1167 */ 1168 static int 1169 vr_rx_ring_init(struct vr_softc *sc) 1170 { 1171 struct vr_ring_data *rd; 1172 struct vr_rxdesc *rxd; 1173 bus_addr_t addr; 1174 int i; 1175 1176 sc->vr_cdata.vr_rx_cons = 0; 1177 1178 rd = &sc->vr_rdata; 1179 bzero(rd->vr_rx_ring, VR_RX_RING_SIZE); 1180 for (i = 0; i < VR_RX_RING_CNT; i++) { 1181 rxd = &sc->vr_cdata.vr_rxdesc[i]; 1182 rxd->rx_m = NULL; 1183 rxd->desc = &rd->vr_rx_ring[i]; 1184 if (i == VR_RX_RING_CNT - 1) 1185 addr = VR_RX_RING_ADDR(sc, 0); 1186 else 1187 addr = VR_RX_RING_ADDR(sc, i + 1); 1188 rd->vr_rx_ring[i].vr_nextphys = htole32(VR_ADDR_LO(addr)); 1189 if (vr_newbuf(sc, i) != 0) 1190 return (ENOBUFS); 1191 } 1192 1193 bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag, 1194 sc->vr_cdata.vr_rx_ring_map, 1195 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1196 1197 return (0); 1198 } 1199 1200 static __inline void 1201 vr_discard_rxbuf(struct vr_rxdesc *rxd) 1202 { 1203 struct vr_desc *desc; 1204 1205 desc = rxd->desc; 1206 desc->vr_ctl = htole32(VR_RXCTL | (MCLBYTES - sizeof(uint64_t))); 1207 desc->vr_status = htole32(VR_RXSTAT_OWN); 1208 } 1209 1210 /* 1211 * Initialize an RX descriptor and attach an MBUF cluster. 1212 * Note: the length fields are only 11 bits wide, which means the 1213 * largest size we can specify is 2047. This is important because 1214 * MCLBYTES is 2048, so we have to subtract one otherwise we'll 1215 * overflow the field and make a mess. 
1216 */ 1217 static int 1218 vr_newbuf(struct vr_softc *sc, int idx) 1219 { 1220 struct vr_desc *desc; 1221 struct vr_rxdesc *rxd; 1222 struct mbuf *m; 1223 bus_dma_segment_t segs[1]; 1224 bus_dmamap_t map; 1225 int nsegs; 1226 1227 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 1228 if (m == NULL) 1229 return (ENOBUFS); 1230 m->m_len = m->m_pkthdr.len = MCLBYTES; 1231 m_adj(m, sizeof(uint64_t)); 1232 1233 if (bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_rx_tag, 1234 sc->vr_cdata.vr_rx_sparemap, m, segs, &nsegs, 0) != 0) { 1235 m_freem(m); 1236 return (ENOBUFS); 1237 } 1238 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1239 1240 rxd = &sc->vr_cdata.vr_rxdesc[idx]; 1241 if (rxd->rx_m != NULL) { 1242 bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap, 1243 BUS_DMASYNC_POSTREAD); 1244 bus_dmamap_unload(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap); 1245 } 1246 map = rxd->rx_dmamap; 1247 rxd->rx_dmamap = sc->vr_cdata.vr_rx_sparemap; 1248 sc->vr_cdata.vr_rx_sparemap = map; 1249 bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap, 1250 BUS_DMASYNC_PREREAD); 1251 rxd->rx_m = m; 1252 desc = rxd->desc; 1253 desc->vr_data = htole32(VR_ADDR_LO(segs[0].ds_addr)); 1254 desc->vr_ctl = htole32(VR_RXCTL | segs[0].ds_len); 1255 desc->vr_status = htole32(VR_RXSTAT_OWN); 1256 1257 return (0); 1258 } 1259 1260 #ifndef __NO_STRICT_ALIGNMENT 1261 static __inline void 1262 vr_fixup_rx(struct mbuf *m) 1263 { 1264 uint16_t *src, *dst; 1265 int i; 1266 1267 src = mtod(m, uint16_t *); 1268 dst = src - 1; 1269 1270 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) 1271 *dst++ = *src++; 1272 1273 m->m_data -= ETHER_ALIGN; 1274 } 1275 #endif 1276 1277 /* 1278 * A frame has been uploaded: pass the resulting mbuf chain up to 1279 * the higher level protocols. 1280 */ 1281 static int 1282 vr_rxeof(struct vr_softc *sc) 1283 { 1284 struct vr_rxdesc *rxd; 1285 struct mbuf *m; 1286 struct ifnet *ifp; 1287 struct vr_desc *cur_rx; 1288 int cons, prog, total_len, rx_npkts; 1289 uint32_t rxstat, rxctl; 1290 1291 VR_LOCK_ASSERT(sc); 1292 ifp = sc->vr_ifp; 1293 cons = sc->vr_cdata.vr_rx_cons; 1294 rx_npkts = 0; 1295 1296 bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag, 1297 sc->vr_cdata.vr_rx_ring_map, 1298 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1299 1300 for (prog = 0; prog < VR_RX_RING_CNT; VR_INC(cons, VR_RX_RING_CNT)) { 1301 #ifdef DEVICE_POLLING 1302 if (ifp->if_capenable & IFCAP_POLLING) { 1303 if (sc->rxcycles <= 0) 1304 break; 1305 sc->rxcycles--; 1306 } 1307 #endif 1308 cur_rx = &sc->vr_rdata.vr_rx_ring[cons]; 1309 rxstat = le32toh(cur_rx->vr_status); 1310 rxctl = le32toh(cur_rx->vr_ctl); 1311 if ((rxstat & VR_RXSTAT_OWN) == VR_RXSTAT_OWN) 1312 break; 1313 1314 prog++; 1315 rxd = &sc->vr_cdata.vr_rxdesc[cons]; 1316 m = rxd->rx_m; 1317 1318 /* 1319 * If an error occurs, update stats, clear the 1320 * status word and leave the mbuf cluster in place: 1321 * it should simply get re-used next time this descriptor 1322 * comes up in the ring. 1323 * We don't support SG in Rx path yet, so discard 1324 * partial frame. 
1325 */ 1326 if ((rxstat & VR_RXSTAT_RX_OK) == 0 || 1327 (rxstat & (VR_RXSTAT_FIRSTFRAG | VR_RXSTAT_LASTFRAG)) != 1328 (VR_RXSTAT_FIRSTFRAG | VR_RXSTAT_LASTFRAG)) { 1329 ifp->if_ierrors++; 1330 sc->vr_stat.rx_errors++; 1331 if (rxstat & VR_RXSTAT_CRCERR) 1332 sc->vr_stat.rx_crc_errors++; 1333 if (rxstat & VR_RXSTAT_FRAMEALIGNERR) 1334 sc->vr_stat.rx_alignment++; 1335 if (rxstat & VR_RXSTAT_FIFOOFLOW) 1336 sc->vr_stat.rx_fifo_overflows++; 1337 if (rxstat & VR_RXSTAT_GIANT) 1338 sc->vr_stat.rx_giants++; 1339 if (rxstat & VR_RXSTAT_RUNT) 1340 sc->vr_stat.rx_runts++; 1341 if (rxstat & VR_RXSTAT_BUFFERR) 1342 sc->vr_stat.rx_no_buffers++; 1343 #ifdef VR_SHOW_ERRORS 1344 device_printf(sc->vr_dev, "%s: receive error = 0x%b\n", 1345 __func__, rxstat & 0xff, VR_RXSTAT_ERR_BITS); 1346 #endif 1347 vr_discard_rxbuf(rxd); 1348 continue; 1349 } 1350 1351 if (vr_newbuf(sc, cons) != 0) { 1352 ifp->if_iqdrops++; 1353 sc->vr_stat.rx_errors++; 1354 sc->vr_stat.rx_no_mbufs++; 1355 vr_discard_rxbuf(rxd); 1356 continue; 1357 } 1358 1359 /* 1360 * XXX The VIA Rhine chip includes the CRC with every 1361 * received frame, and there's no way to turn this 1362 * behavior off (at least, I can't find anything in 1363 * the manual that explains how to do it) so we have 1364 * to trim off the CRC manually. 1365 */ 1366 total_len = VR_RXBYTES(rxstat); 1367 total_len -= ETHER_CRC_LEN; 1368 m->m_pkthdr.len = m->m_len = total_len; 1369 #ifndef __NO_STRICT_ALIGNMENT 1370 /* 1371 * RX buffers must be 32-bit aligned. 1372 * Ignore the alignment problems on the non-strict alignment 1373 * platform. The performance hit incurred due to unaligned 1374 * accesses is much smaller than the hit produced by forcing 1375 * buffer copies all the time. 1376 */ 1377 vr_fixup_rx(m); 1378 #endif 1379 m->m_pkthdr.rcvif = ifp; 1380 ifp->if_ipackets++; 1381 sc->vr_stat.rx_ok++; 1382 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 && 1383 (rxstat & VR_RXSTAT_FRAG) == 0 && 1384 (rxctl & VR_RXCTL_IP) != 0) { 1385 /* Checksum is valid for non-fragmented IP packets. */ 1386 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 1387 if ((rxctl & VR_RXCTL_IPOK) == VR_RXCTL_IPOK) { 1388 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 1389 if (rxctl & (VR_RXCTL_TCP | VR_RXCTL_UDP)) { 1390 m->m_pkthdr.csum_flags |= 1391 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1392 if ((rxctl & VR_RXCTL_TCPUDPOK) != 0) 1393 m->m_pkthdr.csum_data = 0xffff; 1394 } 1395 } 1396 } 1397 VR_UNLOCK(sc); 1398 (*ifp->if_input)(ifp, m); 1399 VR_LOCK(sc); 1400 rx_npkts++; 1401 } 1402 1403 if (prog > 0) { 1404 /* 1405 * Let controller know how many number of RX buffers 1406 * are posted but avoid expensive register access if 1407 * TX pause capability was not negotiated with link 1408 * partner. 1409 */ 1410 if ((sc->vr_flags & VR_F_TXPAUSE) != 0) { 1411 if (prog >= VR_RX_RING_CNT) 1412 prog = VR_RX_RING_CNT - 1; 1413 CSR_WRITE_1(sc, VR_FLOWCR0, prog); 1414 } 1415 sc->vr_cdata.vr_rx_cons = cons; 1416 bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag, 1417 sc->vr_cdata.vr_rx_ring_map, 1418 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1419 } 1420 return (rx_npkts); 1421 } 1422 1423 /* 1424 * A frame was downloaded to the chip. It's safe for us to clean up 1425 * the list buffers. 
1426 */ 1427 static void 1428 vr_txeof(struct vr_softc *sc) 1429 { 1430 struct vr_txdesc *txd; 1431 struct vr_desc *cur_tx; 1432 struct ifnet *ifp; 1433 uint32_t txctl, txstat; 1434 int cons, prod; 1435 1436 VR_LOCK_ASSERT(sc); 1437 1438 cons = sc->vr_cdata.vr_tx_cons; 1439 prod = sc->vr_cdata.vr_tx_prod; 1440 if (cons == prod) 1441 return; 1442 1443 bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag, 1444 sc->vr_cdata.vr_tx_ring_map, 1445 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1446 1447 ifp = sc->vr_ifp; 1448 /* 1449 * Go through our tx list and free mbufs for those 1450 * frames that have been transmitted. 1451 */ 1452 for (; cons != prod; VR_INC(cons, VR_TX_RING_CNT)) { 1453 cur_tx = &sc->vr_rdata.vr_tx_ring[cons]; 1454 txctl = le32toh(cur_tx->vr_ctl); 1455 txstat = le32toh(cur_tx->vr_status); 1456 if ((txstat & VR_TXSTAT_OWN) == VR_TXSTAT_OWN) 1457 break; 1458 1459 sc->vr_cdata.vr_tx_cnt--; 1460 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1461 /* Only the first descriptor in the chain is valid. */ 1462 if ((txctl & VR_TXCTL_FIRSTFRAG) == 0) 1463 continue; 1464 1465 txd = &sc->vr_cdata.vr_txdesc[cons]; 1466 KASSERT(txd->tx_m != NULL, ("%s: accessing NULL mbuf!\n", 1467 __func__)); 1468 1469 if ((txstat & VR_TXSTAT_ERRSUM) != 0) { 1470 ifp->if_oerrors++; 1471 sc->vr_stat.tx_errors++; 1472 if ((txstat & VR_TXSTAT_ABRT) != 0) { 1473 /* Give up and restart Tx. */ 1474 sc->vr_stat.tx_abort++; 1475 bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, 1476 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 1477 bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, 1478 txd->tx_dmamap); 1479 m_freem(txd->tx_m); 1480 txd->tx_m = NULL; 1481 VR_INC(cons, VR_TX_RING_CNT); 1482 sc->vr_cdata.vr_tx_cons = cons; 1483 if (vr_tx_stop(sc) != 0) { 1484 device_printf(sc->vr_dev, 1485 "%s: Tx shutdown error -- " 1486 "resetting\n", __func__); 1487 sc->vr_flags |= VR_F_RESTART; 1488 return; 1489 } 1490 vr_tx_start(sc); 1491 break; 1492 } 1493 if ((sc->vr_revid < REV_ID_VT3071_A && 1494 (txstat & VR_TXSTAT_UNDERRUN)) || 1495 (txstat & (VR_TXSTAT_UDF | VR_TXSTAT_TBUFF))) { 1496 sc->vr_stat.tx_underrun++; 1497 /* Retry and restart Tx. 
*/ 1498 sc->vr_cdata.vr_tx_cnt++; 1499 sc->vr_cdata.vr_tx_cons = cons; 1500 cur_tx->vr_status = htole32(VR_TXSTAT_OWN); 1501 bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag, 1502 sc->vr_cdata.vr_tx_ring_map, 1503 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1504 vr_tx_underrun(sc); 1505 return; 1506 } 1507 if ((txstat & VR_TXSTAT_DEFER) != 0) { 1508 ifp->if_collisions++; 1509 sc->vr_stat.tx_collisions++; 1510 } 1511 if ((txstat & VR_TXSTAT_LATECOLL) != 0) { 1512 ifp->if_collisions++; 1513 sc->vr_stat.tx_late_collisions++; 1514 } 1515 } else { 1516 sc->vr_stat.tx_ok++; 1517 ifp->if_opackets++; 1518 } 1519 1520 bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap, 1521 BUS_DMASYNC_POSTWRITE); 1522 bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap); 1523 if (sc->vr_revid < REV_ID_VT3071_A) { 1524 ifp->if_collisions += 1525 (txstat & VR_TXSTAT_COLLCNT) >> 3; 1526 sc->vr_stat.tx_collisions += 1527 (txstat & VR_TXSTAT_COLLCNT) >> 3; 1528 } else { 1529 ifp->if_collisions += (txstat & 0x0f); 1530 sc->vr_stat.tx_collisions += (txstat & 0x0f); 1531 } 1532 m_freem(txd->tx_m); 1533 txd->tx_m = NULL; 1534 } 1535 1536 sc->vr_cdata.vr_tx_cons = cons; 1537 if (sc->vr_cdata.vr_tx_cnt == 0) 1538 sc->vr_watchdog_timer = 0; 1539 } 1540 1541 static void 1542 vr_tick(void *xsc) 1543 { 1544 struct vr_softc *sc; 1545 struct mii_data *mii; 1546 1547 sc = (struct vr_softc *)xsc; 1548 1549 VR_LOCK_ASSERT(sc); 1550 1551 if ((sc->vr_flags & VR_F_RESTART) != 0) { 1552 device_printf(sc->vr_dev, "restarting\n"); 1553 sc->vr_stat.num_restart++; 1554 sc->vr_ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1555 vr_init_locked(sc); 1556 sc->vr_flags &= ~VR_F_RESTART; 1557 } 1558 1559 mii = device_get_softc(sc->vr_miibus); 1560 mii_tick(mii); 1561 if ((sc->vr_flags & VR_F_LINK) == 0) 1562 vr_miibus_statchg(sc->vr_dev); 1563 vr_watchdog(sc); 1564 callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc); 1565 } 1566 1567 #ifdef DEVICE_POLLING 1568 static poll_handler_t vr_poll; 1569 static poll_handler_t vr_poll_locked; 1570 1571 static int 1572 vr_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 1573 { 1574 struct vr_softc *sc; 1575 int rx_npkts; 1576 1577 sc = ifp->if_softc; 1578 rx_npkts = 0; 1579 1580 VR_LOCK(sc); 1581 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 1582 rx_npkts = vr_poll_locked(ifp, cmd, count); 1583 VR_UNLOCK(sc); 1584 return (rx_npkts); 1585 } 1586 1587 static int 1588 vr_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count) 1589 { 1590 struct vr_softc *sc; 1591 int rx_npkts; 1592 1593 sc = ifp->if_softc; 1594 1595 VR_LOCK_ASSERT(sc); 1596 1597 sc->rxcycles = count; 1598 rx_npkts = vr_rxeof(sc); 1599 vr_txeof(sc); 1600 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1601 vr_start_locked(ifp); 1602 1603 if (cmd == POLL_AND_CHECK_STATUS) { 1604 uint16_t status; 1605 1606 /* Also check status register. */ 1607 status = CSR_READ_2(sc, VR_ISR); 1608 if (status) 1609 CSR_WRITE_2(sc, VR_ISR, status); 1610 1611 if ((status & VR_INTRS) == 0) 1612 return (rx_npkts); 1613 1614 if ((status & (VR_ISR_BUSERR | VR_ISR_LINKSTAT2 | 1615 VR_ISR_STATSOFLOW)) != 0) { 1616 if (vr_error(sc, status) != 0) 1617 return (rx_npkts); 1618 } 1619 if ((status & (VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW)) != 0) { 1620 #ifdef VR_SHOW_ERRORS 1621 device_printf(sc->vr_dev, "%s: receive error : 0x%b\n", 1622 __func__, status, VR_ISR_ERR_BITS); 1623 #endif 1624 vr_rx_start(sc); 1625 } 1626 } 1627 return (rx_npkts); 1628 } 1629 #endif /* DEVICE_POLLING */ 1630 1631 /* Back off the transmit threshold. 
*/ 1632 static void 1633 vr_tx_underrun(struct vr_softc *sc) 1634 { 1635 int thresh; 1636 1637 device_printf(sc->vr_dev, "Tx underrun -- "); 1638 if (sc->vr_txthresh < VR_TXTHRESH_MAX) { 1639 thresh = sc->vr_txthresh; 1640 sc->vr_txthresh++; 1641 if (sc->vr_txthresh >= VR_TXTHRESH_MAX) { 1642 sc->vr_txthresh = VR_TXTHRESH_MAX; 1643 printf("using store and forward mode\n"); 1644 } else 1645 printf("increasing Tx threshold(%d -> %d)\n", 1646 vr_tx_threshold_tables[thresh].value, 1647 vr_tx_threshold_tables[thresh + 1].value); 1648 } else 1649 printf("\n"); 1650 sc->vr_stat.tx_underrun++; 1651 if (vr_tx_stop(sc) != 0) { 1652 device_printf(sc->vr_dev, "%s: Tx shutdown error -- " 1653 "resetting\n", __func__); 1654 sc->vr_flags |= VR_F_RESTART; 1655 return; 1656 } 1657 vr_tx_start(sc); 1658 } 1659 1660 static int 1661 vr_intr(void *arg) 1662 { 1663 struct vr_softc *sc; 1664 uint16_t status; 1665 1666 sc = (struct vr_softc *)arg; 1667 1668 status = CSR_READ_2(sc, VR_ISR); 1669 if (status == 0 || status == 0xffff || (status & VR_INTRS) == 0) 1670 return (FILTER_STRAY); 1671 1672 /* Disable interrupts. */ 1673 CSR_WRITE_2(sc, VR_IMR, 0x0000); 1674 1675 taskqueue_enqueue_fast(taskqueue_fast, &sc->vr_inttask); 1676 1677 return (FILTER_HANDLED); 1678 } 1679 1680 static void 1681 vr_int_task(void *arg, int npending) 1682 { 1683 struct vr_softc *sc; 1684 struct ifnet *ifp; 1685 uint16_t status; 1686 1687 sc = (struct vr_softc *)arg; 1688 1689 VR_LOCK(sc); 1690 1691 if ((sc->vr_flags & VR_F_SUSPENDED) != 0) 1692 goto done_locked; 1693 1694 status = CSR_READ_2(sc, VR_ISR); 1695 ifp = sc->vr_ifp; 1696 #ifdef DEVICE_POLLING 1697 if ((ifp->if_capenable & IFCAP_POLLING) != 0) 1698 goto done_locked; 1699 #endif 1700 1701 /* Suppress unwanted interrupts. */ 1702 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || 1703 (sc->vr_flags & VR_F_RESTART) != 0) { 1704 CSR_WRITE_2(sc, VR_IMR, 0); 1705 CSR_WRITE_2(sc, VR_ISR, status); 1706 goto done_locked; 1707 } 1708 1709 for (; (status & VR_INTRS) != 0;) { 1710 CSR_WRITE_2(sc, VR_ISR, status); 1711 if ((status & (VR_ISR_BUSERR | VR_ISR_LINKSTAT2 | 1712 VR_ISR_STATSOFLOW)) != 0) { 1713 if (vr_error(sc, status) != 0) { 1714 VR_UNLOCK(sc); 1715 return; 1716 } 1717 } 1718 vr_rxeof(sc); 1719 if ((status & (VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW)) != 0) { 1720 #ifdef VR_SHOW_ERRORS 1721 device_printf(sc->vr_dev, "%s: receive error = 0x%b\n", 1722 __func__, status, VR_ISR_ERR_BITS); 1723 #endif 1724 /* Restart Rx if RxDMA SM was stopped. */ 1725 vr_rx_start(sc); 1726 } 1727 vr_txeof(sc); 1728 1729 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1730 vr_start_locked(ifp); 1731 1732 status = CSR_READ_2(sc, VR_ISR); 1733 } 1734 1735 /* Re-enable interrupts. */ 1736 CSR_WRITE_2(sc, VR_IMR, VR_INTRS); 1737 1738 done_locked: 1739 VR_UNLOCK(sc); 1740 } 1741 1742 static int 1743 vr_error(struct vr_softc *sc, uint16_t status) 1744 { 1745 uint16_t pcis; 1746 1747 status &= VR_ISR_BUSERR | VR_ISR_LINKSTAT2 | VR_ISR_STATSOFLOW; 1748 if ((status & VR_ISR_BUSERR) != 0) { 1749 status &= ~VR_ISR_BUSERR; 1750 sc->vr_stat.bus_errors++; 1751 /* Disable further interrupts. */ 1752 CSR_WRITE_2(sc, VR_IMR, 0); 1753 pcis = pci_read_config(sc->vr_dev, PCIR_STATUS, 2); 1754 device_printf(sc->vr_dev, "PCI bus error(0x%04x) -- " 1755 "resetting\n", pcis); 1756 pci_write_config(sc->vr_dev, PCIR_STATUS, pcis, 2); 1757 sc->vr_flags |= VR_F_RESTART; 1758 return (EAGAIN); 1759 } 1760 if ((status & VR_ISR_LINKSTAT2) != 0) { 1761 /* Link state change, duplex changes etc. 
*/ 1762 status &= ~VR_ISR_LINKSTAT2; 1763 } 1764 if ((status & VR_ISR_STATSOFLOW) != 0) { 1765 status &= ~VR_ISR_STATSOFLOW; 1766 if (sc->vr_revid >= REV_ID_VT6105M_A0) { 1767 /* Update MIB counters. */ 1768 } 1769 } 1770 1771 if (status != 0) 1772 device_printf(sc->vr_dev, 1773 "unhandled interrupt, status = 0x%04x\n", status); 1774 return (0); 1775 } 1776 1777 /* 1778 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 1779 * pointers to the fragment pointers. 1780 */ 1781 static int 1782 vr_encap(struct vr_softc *sc, struct mbuf **m_head) 1783 { 1784 struct vr_txdesc *txd; 1785 struct vr_desc *desc; 1786 struct mbuf *m; 1787 bus_dma_segment_t txsegs[VR_MAXFRAGS]; 1788 uint32_t csum_flags, txctl; 1789 int error, i, nsegs, prod, si; 1790 int padlen; 1791 1792 VR_LOCK_ASSERT(sc); 1793 1794 M_ASSERTPKTHDR((*m_head)); 1795 1796 /* 1797 * Some VIA Rhine wants packet buffers to be longword 1798 * aligned, but very often our mbufs aren't. Rather than 1799 * waste time trying to decide when to copy and when not 1800 * to copy, just do it all the time. 1801 */ 1802 if ((sc->vr_quirks & VR_Q_NEEDALIGN) != 0) { 1803 m = m_defrag(*m_head, M_NOWAIT); 1804 if (m == NULL) { 1805 m_freem(*m_head); 1806 *m_head = NULL; 1807 return (ENOBUFS); 1808 } 1809 *m_head = m; 1810 } 1811 1812 /* 1813 * The Rhine chip doesn't auto-pad, so we have to make 1814 * sure to pad short frames out to the minimum frame length 1815 * ourselves. 1816 */ 1817 if ((*m_head)->m_pkthdr.len < VR_MIN_FRAMELEN) { 1818 m = *m_head; 1819 padlen = VR_MIN_FRAMELEN - m->m_pkthdr.len; 1820 if (M_WRITABLE(m) == 0) { 1821 /* Get a writable copy. */ 1822 m = m_dup(*m_head, M_NOWAIT); 1823 m_freem(*m_head); 1824 if (m == NULL) { 1825 *m_head = NULL; 1826 return (ENOBUFS); 1827 } 1828 *m_head = m; 1829 } 1830 if (m->m_next != NULL || M_TRAILINGSPACE(m) < padlen) { 1831 m = m_defrag(m, M_NOWAIT); 1832 if (m == NULL) { 1833 m_freem(*m_head); 1834 *m_head = NULL; 1835 return (ENOBUFS); 1836 } 1837 } 1838 /* 1839 * Manually pad short frames, and zero the pad space 1840 * to avoid leaking data. 1841 */ 1842 bzero(mtod(m, char *) + m->m_pkthdr.len, padlen); 1843 m->m_pkthdr.len += padlen; 1844 m->m_len = m->m_pkthdr.len; 1845 *m_head = m; 1846 } 1847 1848 prod = sc->vr_cdata.vr_tx_prod; 1849 txd = &sc->vr_cdata.vr_txdesc[prod]; 1850 error = bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap, 1851 *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); 1852 if (error == EFBIG) { 1853 m = m_collapse(*m_head, M_NOWAIT, VR_MAXFRAGS); 1854 if (m == NULL) { 1855 m_freem(*m_head); 1856 *m_head = NULL; 1857 return (ENOBUFS); 1858 } 1859 *m_head = m; 1860 error = bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_tx_tag, 1861 txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); 1862 if (error != 0) { 1863 m_freem(*m_head); 1864 *m_head = NULL; 1865 return (error); 1866 } 1867 } else if (error != 0) 1868 return (error); 1869 if (nsegs == 0) { 1870 m_freem(*m_head); 1871 *m_head = NULL; 1872 return (EIO); 1873 } 1874 1875 /* Check number of available descriptors. */ 1876 if (sc->vr_cdata.vr_tx_cnt + nsegs >= (VR_TX_RING_CNT - 1)) { 1877 bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap); 1878 return (ENOBUFS); 1879 } 1880 1881 txd->tx_m = *m_head; 1882 bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap, 1883 BUS_DMASYNC_PREWRITE); 1884 1885 /* Set checksum offload. 
*/ 1886 csum_flags = 0; 1887 if (((*m_head)->m_pkthdr.csum_flags & VR_CSUM_FEATURES) != 0) { 1888 if ((*m_head)->m_pkthdr.csum_flags & CSUM_IP) 1889 csum_flags |= VR_TXCTL_IPCSUM; 1890 if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP) 1891 csum_flags |= VR_TXCTL_TCPCSUM; 1892 if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP) 1893 csum_flags |= VR_TXCTL_UDPCSUM; 1894 } 1895 1896 /* 1897 * Quite contrary to datasheet for VIA Rhine, VR_TXCTL_TLINK bit 1898 * is required for all descriptors regardless of single or 1899 * multiple buffers. Also VR_TXSTAT_OWN bit is valid only for 1900 * the first descriptor for a multi-fragmented frames. Without 1901 * that VIA Rhine chip generates Tx underrun interrupts and can't 1902 * send any frames. 1903 */ 1904 si = prod; 1905 for (i = 0; i < nsegs; i++) { 1906 desc = &sc->vr_rdata.vr_tx_ring[prod]; 1907 desc->vr_status = 0; 1908 txctl = txsegs[i].ds_len | VR_TXCTL_TLINK | csum_flags; 1909 if (i == 0) 1910 txctl |= VR_TXCTL_FIRSTFRAG; 1911 desc->vr_ctl = htole32(txctl); 1912 desc->vr_data = htole32(VR_ADDR_LO(txsegs[i].ds_addr)); 1913 sc->vr_cdata.vr_tx_cnt++; 1914 VR_INC(prod, VR_TX_RING_CNT); 1915 } 1916 /* Update producer index. */ 1917 sc->vr_cdata.vr_tx_prod = prod; 1918 1919 prod = (prod + VR_TX_RING_CNT - 1) % VR_TX_RING_CNT; 1920 desc = &sc->vr_rdata.vr_tx_ring[prod]; 1921 1922 /* 1923 * Set EOP on the last desciptor and reuqest Tx completion 1924 * interrupt for every VR_TX_INTR_THRESH-th frames. 1925 */ 1926 VR_INC(sc->vr_cdata.vr_tx_pkts, VR_TX_INTR_THRESH); 1927 if (sc->vr_cdata.vr_tx_pkts == 0) 1928 desc->vr_ctl |= htole32(VR_TXCTL_LASTFRAG | VR_TXCTL_FINT); 1929 else 1930 desc->vr_ctl |= htole32(VR_TXCTL_LASTFRAG); 1931 1932 /* Lastly turn the first descriptor ownership to hardware. */ 1933 desc = &sc->vr_rdata.vr_tx_ring[si]; 1934 desc->vr_status |= htole32(VR_TXSTAT_OWN); 1935 1936 /* Sync descriptors. */ 1937 bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag, 1938 sc->vr_cdata.vr_tx_ring_map, 1939 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1940 1941 return (0); 1942 } 1943 1944 static void 1945 vr_start(struct ifnet *ifp) 1946 { 1947 struct vr_softc *sc; 1948 1949 sc = ifp->if_softc; 1950 VR_LOCK(sc); 1951 vr_start_locked(ifp); 1952 VR_UNLOCK(sc); 1953 } 1954 1955 static void 1956 vr_start_locked(struct ifnet *ifp) 1957 { 1958 struct vr_softc *sc; 1959 struct mbuf *m_head; 1960 int enq; 1961 1962 sc = ifp->if_softc; 1963 1964 VR_LOCK_ASSERT(sc); 1965 1966 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1967 IFF_DRV_RUNNING || (sc->vr_flags & VR_F_LINK) == 0) 1968 return; 1969 1970 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && 1971 sc->vr_cdata.vr_tx_cnt < VR_TX_RING_CNT - 2; ) { 1972 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 1973 if (m_head == NULL) 1974 break; 1975 /* 1976 * Pack the data into the transmit ring. If we 1977 * don't have room, set the OACTIVE flag and wait 1978 * for the NIC to drain the ring. 1979 */ 1980 if (vr_encap(sc, &m_head)) { 1981 if (m_head == NULL) 1982 break; 1983 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 1984 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1985 break; 1986 } 1987 1988 enq++; 1989 /* 1990 * If there's a BPF listener, bounce a copy of this frame 1991 * to him. 1992 */ 1993 ETHER_BPF_MTAP(ifp, m_head); 1994 } 1995 1996 if (enq > 0) { 1997 /* Tell the chip to start transmitting. */ 1998 VR_SETBIT(sc, VR_CR0, VR_CR0_TX_GO); 1999 /* Set a timeout in case the chip goes out to lunch. 
		sc->vr_watchdog_timer = 5;
	}
}

static void
vr_init(void *xsc)
{
	struct vr_softc *sc;

	sc = (struct vr_softc *)xsc;
	VR_LOCK(sc);
	vr_init_locked(sc);
	VR_UNLOCK(sc);
}

static void
vr_init_locked(struct vr_softc *sc)
{
	struct ifnet *ifp;
	struct mii_data *mii;
	bus_addr_t addr;
	int i;

	VR_LOCK_ASSERT(sc);

	ifp = sc->vr_ifp;
	mii = device_get_softc(sc->vr_miibus);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	/* Cancel pending I/O and free all RX/TX buffers. */
	vr_stop(sc);
	vr_reset(sc);

	/* Set our station address. */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VR_PAR0 + i, IF_LLADDR(sc->vr_ifp)[i]);

	/* Set DMA size. */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD);

	/*
	 * BCR0 and BCR1 can override the RXCFG and TXCFG registers,
	 * so we must set both.
	 */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES);

	VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH);
	VR_SETBIT(sc, VR_BCR1, vr_tx_threshold_tables[sc->vr_txthresh].bcr_cfg);

	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES);

	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
	VR_SETBIT(sc, VR_TXCFG, vr_tx_threshold_tables[sc->vr_txthresh].tx_cfg);

	/* Init circular RX list. */
	if (vr_rx_ring_init(sc) != 0) {
		device_printf(sc->vr_dev,
		    "initialization failed: no memory for rx buffers\n");
		vr_stop(sc);
		return;
	}

	/* Init tx descriptors. */
	vr_tx_ring_init(sc);

	if ((sc->vr_quirks & VR_Q_CAM) != 0) {
		uint8_t vcam[2] = { 0, 0 };

		/* Disable VLAN hardware tag insertion/stripping. */
		VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TXTAGEN | VR_TXCFG_RXTAGCTL);
		/* Disable VLAN hardware filtering. */
		VR_CLRBIT(sc, VR_BCR1, VR_BCR1_VLANFILT_ENB);
		/* Disable all CAM entries. */
		vr_cam_mask(sc, VR_MCAST_CAM, 0);
		vr_cam_mask(sc, VR_VLAN_CAM, 0);
		/* Enable the first VLAN CAM. */
		vr_cam_data(sc, VR_VLAN_CAM, 0, vcam);
		vr_cam_mask(sc, VR_VLAN_CAM, 1);
	}

	/*
	 * Set up receive filter.
	 */
	vr_set_filter(sc);

	/*
	 * Load the address of the RX ring.
	 */
	addr = VR_RX_RING_ADDR(sc, 0);
	CSR_WRITE_4(sc, VR_RXADDR, VR_ADDR_LO(addr));
	/*
	 * Load the address of the TX ring.
	 */
	addr = VR_TX_RING_ADDR(sc, 0);
	CSR_WRITE_4(sc, VR_TXADDR, VR_ADDR_LO(addr));
	/* Default: full-duplex, no Tx poll. */
	CSR_WRITE_1(sc, VR_CR1, VR_CR1_FULLDUPLEX | VR_CR1_TX_NOPOLL);

	/* Set flow-control parameters for Rhine III. */
	if (sc->vr_revid >= REV_ID_VT6105_A0) {
		/*
		 * Configure the number of Rx buffers available for
		 * incoming packets.
		 * Even though the data sheet says almost nothing about
		 * this register, it should be updated whenever the
		 * driver adds new RX buffers to the controller.
		 * Otherwise no XON frame is sent to the link partner
		 * even if the controller has enough RX buffers, and the
		 * host would be isolated from the network.
		 * The controller is not smart enough to know the number
		 * of available RX buffers, so the driver has to tell the
		 * controller how many RX buffers are posted.
		 * In other words, this register works like a residue
		 * counter for RX buffers and should be initialized to
		 * the total number of RX buffers minus 1 before enabling
		 * the RX MAC.  Note that this register is 8 bits wide,
		 * so it effectively limits the number of RX buffers the
		 * controller can be configured with to 255.
		 */
		CSR_WRITE_1(sc, VR_FLOWCR0, VR_RX_RING_CNT - 1);
		/*
		 * Tx pause low threshold : 8 free receive buffers
		 * Tx pause XON high threshold : 24 free receive buffers
		 */
		CSR_WRITE_1(sc, VR_FLOWCR1,
		    VR_FLOWCR1_TXLO8 | VR_FLOWCR1_TXHI24 | VR_FLOWCR1_XONXOFF);
		/* Set Tx pause timer. */
		CSR_WRITE_2(sc, VR_PAUSETIMER, 0xffff);
	}

	/* Enable receiver and transmitter. */
	CSR_WRITE_1(sc, VR_CR0,
	    VR_CR0_START | VR_CR0_TX_ON | VR_CR0_RX_ON | VR_CR0_RX_GO);

	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
#ifdef DEVICE_POLLING
	/*
	 * Disable interrupts if we are polling.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		CSR_WRITE_2(sc, VR_IMR, 0);
	else
#endif
	/*
	 * Enable interrupts and disable MII intrs.
	 */
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
	if (sc->vr_revid > REV_ID_VT6102_A)
		CSR_WRITE_2(sc, VR_MII_IMR, 0);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	sc->vr_flags &= ~(VR_F_LINK | VR_F_TXPAUSE);
	mii_mediachg(mii);

	callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc);
}

/*
 * Set media options.
 */
static int
vr_ifmedia_upd(struct ifnet *ifp)
{
	struct vr_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = ifp->if_softc;
	VR_LOCK(sc);
	mii = device_get_softc(sc->vr_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	sc->vr_flags &= ~(VR_F_LINK | VR_F_TXPAUSE);
	error = mii_mediachg(mii);
	VR_UNLOCK(sc);

	return (error);
}

/*
 * Report current media status.
 */
static void
vr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vr_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->vr_miibus);
	VR_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) == 0) {
		VR_UNLOCK(sc);
		return;
	}
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	VR_UNLOCK(sc);
}

static int
vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct vr_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		VR_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if ((ifp->if_flags ^ sc->vr_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					vr_set_filter(sc);
			} else {
				if ((sc->vr_flags & VR_F_DETACHED) == 0)
					vr_init_locked(sc);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				vr_stop(sc);
		}
		sc->vr_if_flags = ifp->if_flags;
		VR_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		VR_LOCK(sc);
		vr_set_filter(sc);
		VR_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->vr_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(vr_poll, ifp);
				if (error != 0)
					break;
				VR_LOCK(sc);
				/* Disable interrupts. */
				CSR_WRITE_2(sc, VR_IMR, 0x0000);
				ifp->if_capenable |= IFCAP_POLLING;
				VR_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupts. */
				VR_LOCK(sc);
				CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
				ifp->if_capenable &= ~IFCAP_POLLING;
				VR_UNLOCK(sc);
			}
		}
#endif /* DEVICE_POLLING */
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
				ifp->if_hwassist |= VR_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~VR_CSUM_FEATURES;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (IFCAP_RXCSUM & ifp->if_capabilities) != 0)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if ((mask & IFCAP_WOL_UCAST) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL_UCAST) != 0)
			ifp->if_capenable ^= IFCAP_WOL_UCAST;
		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

static void
vr_watchdog(struct vr_softc *sc)
{
	struct ifnet *ifp;

	VR_LOCK_ASSERT(sc);

	if (sc->vr_watchdog_timer == 0 || --sc->vr_watchdog_timer)
		return;

	ifp = sc->vr_ifp;
	/*
	 * Reclaim transmitted frames first, as we don't request an
	 * interrupt for every packet.
	 */
	vr_txeof(sc);
	if (sc->vr_cdata.vr_tx_cnt == 0)
		return;

	if ((sc->vr_flags & VR_F_LINK) == 0) {
		if (bootverbose)
			if_printf(sc->vr_ifp, "watchdog timeout "
			    "(missed link)\n");
		ifp->if_oerrors++;
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		vr_init_locked(sc);
		return;
	}

	ifp->if_oerrors++;
	if_printf(ifp, "watchdog timeout\n");

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	vr_init_locked(sc);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		vr_start_locked(ifp);
}

static void
vr_tx_start(struct vr_softc *sc)
{
	bus_addr_t addr;
	uint8_t cmd;

	cmd = CSR_READ_1(sc, VR_CR0);
	if ((cmd & VR_CR0_TX_ON) == 0) {
		addr = VR_TX_RING_ADDR(sc, sc->vr_cdata.vr_tx_cons);
		CSR_WRITE_4(sc, VR_TXADDR, VR_ADDR_LO(addr));
		cmd |= VR_CR0_TX_ON;
		CSR_WRITE_1(sc, VR_CR0, cmd);
	}
	if (sc->vr_cdata.vr_tx_cnt != 0) {
		sc->vr_watchdog_timer = 5;
		VR_SETBIT(sc, VR_CR0, VR_CR0_TX_GO);
	}
}

static void
vr_rx_start(struct vr_softc *sc)
{
	bus_addr_t addr;
	uint8_t cmd;

	cmd = CSR_READ_1(sc, VR_CR0);
	if ((cmd & VR_CR0_RX_ON) == 0) {
		addr = VR_RX_RING_ADDR(sc, sc->vr_cdata.vr_rx_cons);
		CSR_WRITE_4(sc, VR_RXADDR, VR_ADDR_LO(addr));
		cmd |= VR_CR0_RX_ON;
		CSR_WRITE_1(sc, VR_CR0, cmd);
	}
	CSR_WRITE_1(sc, VR_CR0, cmd | VR_CR0_RX_GO);
}

static int
vr_tx_stop(struct vr_softc *sc)
{
	int i;
	uint8_t cmd;

	cmd = CSR_READ_1(sc, VR_CR0);
	if ((cmd & VR_CR0_TX_ON) != 0) {
		cmd &= ~VR_CR0_TX_ON;
		CSR_WRITE_1(sc, VR_CR0, cmd);
		for (i = VR_TIMEOUT; i > 0; i--) {
			DELAY(5);
			cmd = CSR_READ_1(sc, VR_CR0);
			if ((cmd & VR_CR0_TX_ON) == 0)
				break;
		}
		if (i == 0)
			return (ETIMEDOUT);
	}
	return (0);
}

static int
vr_rx_stop(struct vr_softc *sc)
{
	int i;
	uint8_t cmd;

	cmd = CSR_READ_1(sc, VR_CR0);
	if ((cmd & VR_CR0_RX_ON) != 0) {
		cmd &= ~VR_CR0_RX_ON;
		CSR_WRITE_1(sc, VR_CR0, cmd);
		for (i = VR_TIMEOUT; i > 0; i--) {
			DELAY(5);
			cmd = CSR_READ_1(sc, VR_CR0);
			if ((cmd & VR_CR0_RX_ON) == 0)
				break;
		}
		if (i == 0)
			return (ETIMEDOUT);
	}
	return (0);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
vr_stop(struct vr_softc *sc)
{
	struct vr_txdesc *txd;
	struct vr_rxdesc *rxd;
	struct ifnet *ifp;
	int i;

	VR_LOCK_ASSERT(sc);

	ifp = sc->vr_ifp;
	sc->vr_watchdog_timer = 0;

	callout_stop(&sc->vr_stat_callout);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	CSR_WRITE_1(sc, VR_CR0, VR_CR0_STOP);
	if (vr_rx_stop(sc) != 0)
		device_printf(sc->vr_dev, "%s: Rx shutdown error\n", __func__);
	if (vr_tx_stop(sc) != 0)
		device_printf(sc->vr_dev, "%s: Tx shutdown error\n", __func__);
	/* Clear pending interrupts. */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
	CSR_WRITE_2(sc, VR_IMR, 0x0000);
	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);

	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (i = 0; i < VR_RX_RING_CNT; i++) {
		rxd = &sc->vr_cdata.vr_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->vr_cdata.vr_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->vr_cdata.vr_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < VR_TX_RING_CNT; i++) {
		txd = &sc->vr_cdata.vr_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->vr_cdata.vr_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->vr_cdata.vr_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
vr_shutdown(device_t dev)
{

	return (vr_suspend(dev));
}

static int
vr_suspend(device_t dev)
{
	struct vr_softc *sc;

	sc = device_get_softc(dev);

	VR_LOCK(sc);
	vr_stop(sc);
	vr_setwol(sc);
	sc->vr_flags |= VR_F_SUSPENDED;
	VR_UNLOCK(sc);

	return (0);
}

static int
vr_resume(device_t dev)
{
	struct vr_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	VR_LOCK(sc);
	ifp = sc->vr_ifp;
	vr_clrwol(sc);
	vr_reset(sc);
	if (ifp->if_flags & IFF_UP)
		vr_init_locked(sc);

	sc->vr_flags &= ~VR_F_SUSPENDED;
	VR_UNLOCK(sc);

	return (0);
}

static void
vr_setwol(struct vr_softc *sc)
{
	struct ifnet *ifp;
	int pmc;
	uint16_t pmstat;
	uint8_t v;

	VR_LOCK_ASSERT(sc);

	if (sc->vr_revid < REV_ID_VT6102_A ||
	    pci_find_cap(sc->vr_dev, PCIY_PMG, &pmc) != 0)
		return;

	ifp = sc->vr_ifp;

	/* Clear WOL configuration. */
	CSR_WRITE_1(sc, VR_WOLCR_CLR, 0xFF);
	CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_SAB | VR_WOLCFG_SAM);
	CSR_WRITE_1(sc, VR_PWRCSR_CLR, 0xFF);
	CSR_WRITE_1(sc, VR_PWRCFG_CLR, VR_PWRCFG_WOLEN);
	if (sc->vr_revid > REV_ID_VT6105_B0) {
		/* Newer Rhine III supports two additional patterns. */
		CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_PATTERN_PAGE);
		CSR_WRITE_1(sc, VR_TESTREG_CLR, 3);
		CSR_WRITE_1(sc, VR_PWRCSR1_CLR, 3);
	}
	if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0)
		CSR_WRITE_1(sc, VR_WOLCR_SET, VR_WOLCR_UCAST);
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
		CSR_WRITE_1(sc, VR_WOLCR_SET, VR_WOLCR_MAGIC);
	/*
	 * It seems that multicast wakeup frames require programming the
	 * pattern registers and a valid CRC, as well as a pattern mask
	 * for each pattern.  While it is possible to set up such a
	 * pattern, it would complicate the WOL configuration, so ignore
	 * multicast wakeup frames.
	 */
	if ((ifp->if_capenable & IFCAP_WOL) != 0) {
		CSR_WRITE_1(sc, VR_WOLCFG_SET, VR_WOLCFG_SAB | VR_WOLCFG_SAM);
		v = CSR_READ_1(sc, VR_STICKHW);
		CSR_WRITE_1(sc, VR_STICKHW, v | VR_STICKHW_WOL_ENB);
		CSR_WRITE_1(sc, VR_PWRCFG_SET, VR_PWRCFG_WOLEN);
	}

	/* Put hardware into sleep mode. */
	v = CSR_READ_1(sc, VR_STICKHW);
	v |= VR_STICKHW_DS0 | VR_STICKHW_DS1;
	CSR_WRITE_1(sc, VR_STICKHW, v);

	/* Request PME if WOL is requested. */
	pmstat = pci_read_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
}

static void
vr_clrwol(struct vr_softc *sc)
{
	uint8_t v;

	VR_LOCK_ASSERT(sc);

	if (sc->vr_revid < REV_ID_VT6102_A)
		return;

	/* Take hardware out of sleep mode. */
	v = CSR_READ_1(sc, VR_STICKHW);
	v &= ~(VR_STICKHW_DS0 | VR_STICKHW_DS1 | VR_STICKHW_WOL_ENB);
	CSR_WRITE_1(sc, VR_STICKHW, v);

	/* Clear WOL configuration as WOL may interfere with normal operation. */
	CSR_WRITE_1(sc, VR_WOLCR_CLR, 0xFF);
	CSR_WRITE_1(sc, VR_WOLCFG_CLR,
	    VR_WOLCFG_SAB | VR_WOLCFG_SAM | VR_WOLCFG_PMEOVR);
	CSR_WRITE_1(sc, VR_PWRCSR_CLR, 0xFF);
	CSR_WRITE_1(sc, VR_PWRCFG_CLR, VR_PWRCFG_WOLEN);
	if (sc->vr_revid > REV_ID_VT6105_B0) {
		/* Newer Rhine III supports two additional patterns. */
		CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_PATTERN_PAGE);
		CSR_WRITE_1(sc, VR_TESTREG_CLR, 3);
		CSR_WRITE_1(sc, VR_PWRCSR1_CLR, 3);
	}
}

static int
vr_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
	struct vr_softc *sc;
	struct vr_statistics *stat;
	int error;
	int result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error != 0 || req->newptr == NULL)
		return (error);

	if (result == 1) {
		sc = (struct vr_softc *)arg1;
		stat = &sc->vr_stat;

		printf("%s statistics:\n", device_get_nameunit(sc->vr_dev));
		printf("Outbound good frames : %ju\n",
		    (uintmax_t)stat->tx_ok);
		printf("Inbound good frames : %ju\n",
		    (uintmax_t)stat->rx_ok);
		printf("Outbound errors : %u\n", stat->tx_errors);
		printf("Inbound errors : %u\n", stat->rx_errors);
		printf("Inbound no buffers : %u\n", stat->rx_no_buffers);
		printf("Inbound no mbuf clusters: %d\n", stat->rx_no_mbufs);
		printf("Inbound FIFO overflows : %d\n",
		    stat->rx_fifo_overflows);
		printf("Inbound CRC errors : %u\n", stat->rx_crc_errors);
		printf("Inbound frame alignment errors : %u\n",
		    stat->rx_alignment);
		printf("Inbound giant frames : %u\n", stat->rx_giants);
		printf("Inbound runt frames : %u\n", stat->rx_runts);
		printf("Outbound aborted with excessive collisions : %u\n",
		    stat->tx_abort);
		printf("Outbound collisions : %u\n", stat->tx_collisions);
		printf("Outbound late collisions : %u\n",
		    stat->tx_late_collisions);
		printf("Outbound underrun : %u\n", stat->tx_underrun);
		printf("PCI bus errors : %u\n", stat->bus_errors);
		printf("driver restarted due to Rx/Tx shutdown failure : %u\n",
		    stat->num_restart);
	}

	return (error);
}
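
/*
 * Usage sketch for the handler above: assuming vr_attach() registers it
 * under the device's sysctl tree as a read/write integer node named
 * "stats" (that node name is an assumption for illustration), the
 * accumulated MAC statistics can be dumped to the console from userland
 * with:
 *
 *	# sysctl dev.vr.0.stats=1
 *
 * Writing any value other than 1 is accepted but ignored by the handler.
 */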