/*-
 * Copyright (c) 1997, 1998
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * VIA Rhine fast ethernet PCI NIC driver
 *
 * Supports various network adapters based on the VIA Rhine
 * and Rhine II PCI controllers, including the D-Link DFE530TX.
 * Datasheets are available at http://www.via.com.tw.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The VIA Rhine controllers are similar in some respects to the
 * DEC tulip chips, except less complicated.  The controller
 * uses an MII bus and an external physical layer interface.  The
 * receiver has a one-entry perfect filter and a 64-bit hash table
 * multicast filter.  Transmit and receive descriptors are similar
 * to the tulip.
 *
 * Some Rhine chips have a serious flaw in their transmit DMA mechanism:
 * transmit buffers must be longword aligned.  Unfortunately,
 * FreeBSD doesn't guarantee that mbufs will be filled in starting
 * at longword boundaries, so we have to do a buffer copy before
 * transmission.
 */
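/*
 * A note on the workaround used here: for chips flagged with the
 * VR_Q_NEEDALIGN quirk below, vr_encap() copies outgoing frames by
 * coalescing the mbuf chain with m_defrag(9), which yields a single,
 * longword-aligned cluster that the transmit DMA engine can accept.
 */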
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>

#include <dev/vr/if_vrreg.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

MODULE_DEPEND(vr, pci, 1, 1, 1);
MODULE_DEPEND(vr, ether, 1, 1, 1);
MODULE_DEPEND(vr, miibus, 1, 1, 1);

/* Define to show Rx/Tx error status. */
#undef VR_SHOW_ERRORS
#define	VR_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

/*
 * Various supported device vendors/types, their names & quirks.
 */
#define	VR_Q_NEEDALIGN		(1<<0)
#define	VR_Q_CSUM		(1<<1)
#define	VR_Q_CAM		(1<<2)

static struct vr_type {
	u_int16_t	vr_vid;
	u_int16_t	vr_did;
	int		vr_quirks;
	char		*vr_name;
} vr_devs[] = {
	{ VIA_VENDORID, VIA_DEVICEID_RHINE,
	    VR_Q_NEEDALIGN,
	    "VIA VT3043 Rhine I 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II,
	    VR_Q_NEEDALIGN,
	    "VIA VT86C100A Rhine II 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II_2,
	    0,
	    "VIA VT6102 Rhine II 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_III,
	    0,
	    "VIA VT6105 Rhine III 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_III_M,
	    VR_Q_CSUM | VR_Q_CAM,
	    "VIA VT6105M Rhine III 10/100BaseTX" },
	{ DELTA_VENDORID, DELTA_DEVICEID_RHINE_II,
	    VR_Q_NEEDALIGN,
	    "Delta Electronics Rhine II 10/100BaseTX" },
	{ ADDTRON_VENDORID, ADDTRON_DEVICEID_RHINE_II,
	    VR_Q_NEEDALIGN,
	    "Addtron Technology Rhine II 10/100BaseTX" },
	{ 0, 0, 0, NULL }
};

static int vr_probe(device_t);
static int vr_attach(device_t);
static int vr_detach(device_t);
static int vr_shutdown(device_t);
static int vr_suspend(device_t);
static int vr_resume(device_t);

static void vr_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int vr_dma_alloc(struct vr_softc *);
static void vr_dma_free(struct vr_softc *);
static __inline void vr_discard_rxbuf(struct vr_rxdesc *);
static int vr_newbuf(struct vr_softc *, int);

#ifndef __NO_STRICT_ALIGNMENT
static __inline void vr_fixup_rx(struct mbuf *);
#endif
static void vr_rxeof(struct vr_softc *);
static void vr_txeof(struct vr_softc *);
static void vr_tick(void *);
static int vr_error(struct vr_softc *, uint16_t);
static void vr_tx_underrun(struct vr_softc *);
static void vr_intr(void *);
static void vr_start(struct ifnet *);
static void vr_start_locked(struct ifnet *);
static int vr_encap(struct vr_softc *, struct mbuf **);
static int vr_ioctl(struct ifnet *, u_long, caddr_t);
static void vr_init(void *);
static void vr_init_locked(struct vr_softc *);
static void vr_tx_start(struct vr_softc *);
static void vr_rx_start(struct vr_softc *);
static int vr_tx_stop(struct vr_softc *);
static int vr_rx_stop(struct vr_softc *);
static void vr_stop(struct vr_softc *);
static void vr_watchdog(struct vr_softc *);
static int vr_ifmedia_upd(struct ifnet *);
static void vr_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static int vr_miibus_readreg(device_t, int, int);
static int vr_miibus_writereg(device_t, int, int, int);
static void vr_miibus_statchg(device_t);

static void vr_link_task(void *, int);
static int vr_setperf(struct vr_softc *, int, uint8_t *);
static void vr_set_filter(struct vr_softc *);
static void vr_reset(const struct vr_softc *);
static int vr_tx_ring_init(struct vr_softc *);
static int vr_rx_ring_init(struct vr_softc *);
static void vr_setwol(struct vr_softc *);
static void vr_clrwol(struct vr_softc *);
static int vr_sysctl_stats(SYSCTL_HANDLER_ARGS);

static struct vr_tx_threshold_table {
	int tx_cfg;
	int bcr_cfg;
	int value;
} vr_tx_threshold_tables[] = {
	{ VR_TXTHRESH_64BYTES, VR_BCR1_TXTHRESH64BYTES, 64 },
	{ VR_TXTHRESH_128BYTES, VR_BCR1_TXTHRESH128BYTES, 128 },
	{ VR_TXTHRESH_256BYTES, VR_BCR1_TXTHRESH256BYTES, 256 },
	{ VR_TXTHRESH_512BYTES, VR_BCR1_TXTHRESH512BYTES, 512 },
	{ VR_TXTHRESH_1024BYTES, VR_BCR1_TXTHRESH1024BYTES, 1024 },
	{ VR_TXTHRESH_STORENFWD, VR_BCR1_TXTHRESHSTORENFWD, 2048 }
};

static device_method_t vr_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		vr_probe),
	DEVMETHOD(device_attach,	vr_attach),
	DEVMETHOD(device_detach,	vr_detach),
	DEVMETHOD(device_shutdown,	vr_shutdown),
	DEVMETHOD(device_suspend,	vr_suspend),
	DEVMETHOD(device_resume,	vr_resume),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	vr_miibus_readreg),
	DEVMETHOD(miibus_writereg,	vr_miibus_writereg),
	DEVMETHOD(miibus_statchg,	vr_miibus_statchg),
	DEVMETHOD(miibus_linkchg,	vr_miibus_statchg),

	{ NULL, NULL }
};

static driver_t vr_driver = {
	"vr",
	vr_methods,
	sizeof(struct vr_softc)
};

static devclass_t vr_devclass;

DRIVER_MODULE(vr, pci, vr_driver, vr_devclass, 0, 0);
DRIVER_MODULE(miibus, vr, miibus_driver, miibus_devclass, 0, 0);

static int
vr_miibus_readreg(device_t dev, int phy, int reg)
{
	struct vr_softc		*sc;
	int			i;

	sc = device_get_softc(dev);
	if (sc->vr_phyaddr != phy)
		return (0);

	/* Set the register address. */
	CSR_WRITE_1(sc, VR_MIIADDR, reg);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB);

	for (i = 0; i < VR_MII_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0)
			break;
	}
	if (i == VR_MII_TIMEOUT)
		device_printf(sc->vr_dev, "phy read timeout %d:%d\n", phy, reg);

	return (CSR_READ_2(sc, VR_MIIDATA));
}
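/*
 * PHY register access is indirect and follows the same pattern in
 * both directions: program VR_MIIADDR (and VR_MIIDATA for a write),
 * set the READ_ENB/WRITE_ENB bit in VR_MIICMD, and poll until the
 * chip clears that bit again, giving up after roughly VR_MII_TIMEOUT
 * microseconds.
 */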
static int
vr_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct vr_softc		*sc;
	int			i;

	sc = device_get_softc(dev);
	if (sc->vr_phyaddr != phy)
		return (0);

	/* Set the register address and data to write. */
	CSR_WRITE_1(sc, VR_MIIADDR, reg);
	CSR_WRITE_2(sc, VR_MIIDATA, data);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB);

	for (i = 0; i < VR_MII_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0)
			break;
	}
	if (i == VR_MII_TIMEOUT)
		device_printf(sc->vr_dev, "phy write timeout %d:%d\n", phy,
		    reg);

	return (0);
}

static void
vr_miibus_statchg(device_t dev)
{
	struct vr_softc		*sc;

	sc = device_get_softc(dev);
	taskqueue_enqueue(taskqueue_swi, &sc->vr_link_task);
}

/*
 * In order to fiddle with the 'full-duplex' and '100Mbps' bits in
 * the netconfig register, we first have to put the transmit and/or
 * receive logic in the idle state.
 */
static void
vr_link_task(void *arg, int pending)
{
	struct vr_softc		*sc;
	struct mii_data		*mii;
	struct ifnet		*ifp;
	int			lfdx, mfdx;
	uint8_t			cr0, cr1, fc;

	sc = (struct vr_softc *)arg;

	VR_LOCK(sc);
	mii = device_get_softc(sc->vr_miibus);
	ifp = sc->vr_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		VR_UNLOCK(sc);
		return;
	}

	if (mii->mii_media_status & IFM_ACTIVE) {
		if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
			sc->vr_link = 1;
	} else
		sc->vr_link = 0;

	if (sc->vr_link != 0) {
		cr0 = CSR_READ_1(sc, VR_CR0);
		cr1 = CSR_READ_1(sc, VR_CR1);
		mfdx = (cr1 & VR_CR1_FULLDUPLEX) != 0;
		lfdx = (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0;
		if (mfdx != lfdx) {
			if ((cr0 & (VR_CR0_TX_ON | VR_CR0_RX_ON)) != 0) {
				if (vr_tx_stop(sc) != 0 ||
				    vr_rx_stop(sc) != 0) {
					device_printf(sc->vr_dev,
					    "%s: Tx/Rx shutdown error -- "
					    "resetting\n", __func__);
					sc->vr_flags |= VR_F_RESTART;
					VR_UNLOCK(sc);
					return;
				}
			}
			if (lfdx)
				cr1 |= VR_CR1_FULLDUPLEX;
			else
				cr1 &= ~VR_CR1_FULLDUPLEX;
			CSR_WRITE_1(sc, VR_CR1, cr1);
		}
		fc = 0;
#ifdef notyet
		/* Configure flow-control. */
		if (sc->vr_revid >= REV_ID_VT6105_A0) {
			fc = CSR_READ_1(sc, VR_FLOWCR1);
			fc &= ~(VR_FLOWCR1_TXPAUSE | VR_FLOWCR1_RXPAUSE);
			if ((IFM_OPTIONS(mii->mii_media_active) &
			    IFM_ETH_RXPAUSE) != 0)
				fc |= VR_FLOWCR1_RXPAUSE;
			if ((IFM_OPTIONS(mii->mii_media_active) &
			    IFM_ETH_TXPAUSE) != 0)
				fc |= VR_FLOWCR1_TXPAUSE;
			CSR_WRITE_1(sc, VR_FLOWCR1, fc);
		} else if (sc->vr_revid >= REV_ID_VT6102_A) {
			/* No Tx pause capability available for Rhine II. */
			fc = CSR_READ_1(sc, VR_MISC_CR0);
			fc &= ~VR_MISCCR0_RXPAUSE;
			if ((IFM_OPTIONS(mii->mii_media_active) &
			    IFM_ETH_RXPAUSE) != 0)
				fc |= VR_MISCCR0_RXPAUSE;
			CSR_WRITE_1(sc, VR_MISC_CR0, fc);
		}
#endif
		vr_rx_start(sc);
		vr_tx_start(sc);
	} else {
		if (vr_tx_stop(sc) != 0 || vr_rx_stop(sc) != 0) {
			device_printf(sc->vr_dev,
			    "%s: Tx/Rx shutdown error -- resetting\n",
			    __func__);
			sc->vr_flags |= VR_F_RESTART;
			VR_UNLOCK(sc);
			return;
		}
	}
	VR_UNLOCK(sc);
}
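/*
 * Receive filter programming, for reference: chips with the VR_Q_CAM
 * quirk (the VT6105M in the table above) have their multicast
 * addresses loaded into the 32-entry CAM perfect filter through
 * vr_setperf(); all other chips, or a failed CAM write, fall back to
 * the 64-bit hash table programmed in vr_set_filter().
 */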
/*
 * Copy the address 'mac' into the perfect RX filter entry at
 * offset 'idx'.  The perfect filter only has 32 entries so do
 * some sanity tests.
 */
static int
vr_setperf(struct vr_softc *sc, int idx, uint8_t *mac)
{
	int	i;

	if (idx < 0 || idx >= VR_CAM_MCAST_CNT || mac == NULL)
		return (EINVAL);

	/* Set CAM entry address. */
	CSR_WRITE_1(sc, VR_CAMADDR, idx);
	/* Set CAM entry data. */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VR_MAR0 + i, mac[i]);
	/* Write CAM and wait for self-clear of VR_CAMCTL_WRITE bit. */
	CSR_WRITE_1(sc, VR_CAMCTL,
	    VR_CAMCTL_ENA | VR_CAMCTL_MCAST | VR_CAMCTL_WRITE);
	for (i = 0; i < VR_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VR_CAMCTL) & VR_CAMCTL_WRITE) == 0)
			break;
	}

	if (i == VR_TIMEOUT)
		device_printf(sc->vr_dev, "%s: setting CAM filter timeout!\n",
		    __func__);

	return (i == VR_TIMEOUT ? ETIMEDOUT : 0);
}

/*
 * Program the 64-bit multicast hash filter.
 */
static void
vr_set_filter(struct vr_softc *sc)
{
	struct ifnet		*ifp;
	int			h;
	uint32_t		hashes[2] = { 0, 0 };
	struct ifmultiaddr	*ifma;
	uint8_t			rxfilt;
	int			error, mcnt;
	uint32_t		cam_mask;

	VR_LOCK_ASSERT(sc);

	ifp = sc->vr_ifp;
	rxfilt = CSR_READ_1(sc, VR_RXCFG);
	rxfilt &= ~(VR_RXCFG_RX_PROMISC | VR_RXCFG_RX_BROAD |
	    VR_RXCFG_RX_MULTI);
	if (ifp->if_flags & IFF_BROADCAST)
		rxfilt |= VR_RXCFG_RX_BROAD;
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		rxfilt |= VR_RXCFG_RX_MULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxfilt |= VR_RXCFG_RX_PROMISC;
		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
		return;
	}

	/* Now program new ones. */
	error = 0;
	IF_ADDR_LOCK(ifp);
	if ((sc->vr_quirks & VR_Q_CAM) != 0) {
		/*
		 * For hardware that has CAM capability, use
		 * the 32-entry multicast perfect filter.
		 */
		cam_mask = 0;
		mcnt = 0;
		CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_MCAST);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			error = vr_setperf(sc, mcnt,
			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
			if (error != 0) {
				cam_mask = 0;
				break;
			}
			cam_mask |= 1 << mcnt;
			mcnt++;
		}
		/* Enable multicast CAM entries depending on mask. */
		CSR_WRITE_1(sc, VR_CAMMASK, cam_mask);
		/* Accessing CAM done. */
		CSR_WRITE_1(sc, VR_CAMCTL, 0);
	}

	mcnt = 0;
	if ((sc->vr_quirks & VR_Q_CAM) == 0 || error != 0) {
		/*
		 * If there are too many multicast addresses or
		 * setting the multicast CAM filter failed, use hash
		 * table based filtering.
		 */
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));
			mcnt++;
		}
	}
	IF_ADDR_UNLOCK(ifp);

	if (mcnt > 0)
		rxfilt |= VR_RXCFG_RX_MULTI;

	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
}
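/*
 * The hash arithmetic above, worked through: the upper 6 bits of the
 * big-endian CRC32 of the address select one of the 64 bits in the
 * VR_MAR0/VR_MAR1 pair.  For example, a CRC of 0xE8330000 gives
 * h = 0xE8330000 >> 26 = 58, which sets bit 58 - 32 = 26 of VR_MAR1.
 */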
static void
vr_reset(const struct vr_softc *sc)
{
	int	i;

	/*VR_LOCK_ASSERT(sc);*/ /* XXX: Called during attach w/o lock. */

	CSR_WRITE_1(sc, VR_CR1, VR_CR1_RESET);
	if (sc->vr_revid < REV_ID_VT6102_A) {
		/* VT86C100A needs more delay after reset. */
		DELAY(100);
	}
	for (i = 0; i < VR_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_1(sc, VR_CR1) & VR_CR1_RESET))
			break;
	}
	if (i == VR_TIMEOUT) {
		if (sc->vr_revid < REV_ID_VT6102_A)
			device_printf(sc->vr_dev, "reset never completed!\n");
		else {
			/* Use newer force reset command. */
			device_printf(sc->vr_dev,
			    "Using force reset command.\n");
			VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST);
			/*
			 * Wait a little while for the chip to get its brains
			 * in order.
			 */
			DELAY(2000);
		}
	}
}

/*
 * Probe for a VIA Rhine chip.  Check the PCI vendor and device
 * IDs against our list and return a match or NULL.
 */
static struct vr_type *
vr_match(device_t dev)
{
	struct vr_type	*t;

	for (t = vr_devs; t->vr_name != NULL; t++)
		if ((pci_get_vendor(dev) == t->vr_vid) &&
		    (pci_get_device(dev) == t->vr_did))
			return (t);
	return (NULL);
}

/*
 * Probe for a VIA Rhine chip.  Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
vr_probe(device_t dev)
{
	struct vr_type	*t;

	t = vr_match(dev);
	if (t != NULL) {
		device_set_desc(dev, t->vr_name);
		return (BUS_PROBE_DEFAULT);
	}
	return (ENXIO);
}

/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
vr_attach(device_t dev)
{
	struct vr_softc		*sc;
	struct ifnet		*ifp;
	struct vr_type		*t;
	uint8_t			eaddr[ETHER_ADDR_LEN];
	int			error, rid;
	int			i, pmc;

	sc = device_get_softc(dev);
	sc->vr_dev = dev;
	t = vr_match(dev);
	KASSERT(t != NULL, ("Lost if_vr device match"));
	sc->vr_quirks = t->vr_quirks;
	device_printf(dev, "Quirks: 0x%x\n", sc->vr_quirks);

	mtx_init(&sc->vr_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->vr_stat_callout, &sc->vr_mtx, 0);
	TASK_INIT(&sc->vr_link_task, 0, vr_link_task, sc);
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
	    vr_sysctl_stats, "I", "Statistics");

	error = 0;

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);
	sc->vr_revid = pci_get_revid(dev);
	device_printf(dev, "Revision: 0x%x\n", sc->vr_revid);

	sc->vr_res_id = PCIR_BAR(0);
	sc->vr_res_type = SYS_RES_IOPORT;
	sc->vr_res = bus_alloc_resource_any(dev, sc->vr_res_type,
	    &sc->vr_res_id, RF_ACTIVE);
	if (sc->vr_res == NULL) {
		device_printf(dev, "couldn't map ports\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate interrupt. */
	rid = 0;
	sc->vr_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->vr_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate ifnet structure. */
	ifp = sc->vr_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "couldn't allocate ifnet structure\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vr_ioctl;
	ifp->if_start = vr_start;
	ifp->if_init = vr_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, VR_TX_RING_CNT - 1);
	ifp->if_snd.ifq_maxlen = VR_TX_RING_CNT - 1;
	IFQ_SET_READY(&ifp->if_snd);

	/* Configure Tx FIFO threshold. */
	sc->vr_txthresh = VR_TXTHRESH_MIN;
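	/*
	 * The threshold starts at the minimum and is only ever raised:
	 * vr_tx_underrun() steps through vr_tx_threshold_tables[] one
	 * entry per underrun until store-and-forward mode is reached.
	 */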
	if (sc->vr_revid < REV_ID_VT6105_A0) {
		/*
		 * Use store and forward mode for Rhine I/II.
		 * Otherwise they produce a lot of Tx underruns and
		 * it would take a while to arrive at a working FIFO
		 * threshold value.
		 */
		sc->vr_txthresh = VR_TXTHRESH_MAX;
	}
	if ((sc->vr_quirks & VR_Q_CSUM) != 0) {
		ifp->if_hwassist = VR_CSUM_FEATURES;
		ifp->if_capabilities |= IFCAP_HWCSUM;
		/*
		 * To update the checksum field the hardware may need to
		 * store entire frames into the FIFO before transmitting.
		 */
		sc->vr_txthresh = VR_TXTHRESH_MAX;
	}

	if (sc->vr_revid >= REV_ID_VT6102_A &&
	    pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
		ifp->if_capabilities |= IFCAP_WOL_UCAST | IFCAP_WOL_MAGIC;

	/* Rhine supports oversized VLAN frames. */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/*
	 * Windows may put the chip in suspend mode when it
	 * shuts down.  Be sure to kick it in the head to wake it
	 * up again.
	 */
	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0)
		VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));

	/*
	 * Get station address.  The way the Rhine chips work,
	 * you're not allowed to directly access the EEPROM once
	 * they've been programmed a special way.  Consequently,
	 * we need to read the node address from the PAR0 and PAR1
	 * registers.
	 * Reloading the EEPROM also overwrites VR_CFGA, VR_CFGB,
	 * VR_CFGC and VR_CFGD, such that the memory-mapped I/O
	 * configured by the driver is reset to its default state.
	 */
	VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
	for (i = VR_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_1(sc, VR_EECSR) & VR_EECSR_LOAD) == 0)
			break;
	}
	if (i == 0)
		device_printf(dev, "Reloading EEPROM timeout!\n");
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);

	/* Reset the adapter. */
	vr_reset(sc);
	/* Ack intr & disable further interrupts. */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
	CSR_WRITE_2(sc, VR_IMR, 0);
	if (sc->vr_revid >= REV_ID_VT6102_A)
		CSR_WRITE_2(sc, VR_MII_IMR, 0);

	if (sc->vr_revid < REV_ID_VT6102_A) {
		pci_write_config(dev, VR_PCI_MODE2,
		    pci_read_config(dev, VR_PCI_MODE2, 1) |
		    VR_MODE2_MODE10T, 1);
	} else {
		/* Report error instead of retrying forever. */
		pci_write_config(dev, VR_PCI_MODE2,
		    pci_read_config(dev, VR_PCI_MODE2, 1) |
		    VR_MODE2_PCEROPT, 1);
		/* Detect MII coding error. */
		pci_write_config(dev, VR_PCI_MODE3,
		    pci_read_config(dev, VR_PCI_MODE3, 1) |
		    VR_MODE3_MIION, 1);
		if (sc->vr_revid >= REV_ID_VT6105_LOM &&
		    sc->vr_revid < REV_ID_VT6105M_A0)
			pci_write_config(dev, VR_PCI_MODE2,
			    pci_read_config(dev, VR_PCI_MODE2, 1) |
			    VR_MODE2_MODE10T, 1);
		/* Enable Memory-Read-Multiple. */
		if (sc->vr_revid >= REV_ID_VT6107_A1 &&
		    sc->vr_revid < REV_ID_VT6105M_A0)
			pci_write_config(dev, VR_PCI_MODE2,
			    pci_read_config(dev, VR_PCI_MODE2, 1) |
			    VR_MODE2_MRDPL, 1);
	}
	/* Disable MII AUTOPOLL. */
	VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL);

	if (vr_dma_alloc(sc) != 0) {
		error = ENXIO;
		goto fail;
	}

	/* Save PHY address. */
	if (sc->vr_revid >= REV_ID_VT6105_A0)
		sc->vr_phyaddr = 1;
	else
		sc->vr_phyaddr = CSR_READ_1(sc, VR_PHYADDR) & VR_PHYADDR_MASK;

	/* Do MII setup. */
	if (mii_phy_probe(dev, &sc->vr_miibus,
	    vr_ifmedia_upd, vr_ifmedia_sts)) {
		device_printf(dev, "MII without any phy!\n");
		error = ENXIO;
		goto fail;
	}

	/* Call MI attach routine. */
	ether_ifattach(ifp, eaddr);
	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/* Hook interrupt last to avoid having to lock softc. */
	error = bus_setup_intr(dev, sc->vr_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, vr_intr, sc, &sc->vr_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		vr_detach(dev);

	return (error);
}

/*
 * Shutdown hardware and free up resources.  This can be called any
 * time after the mutex has been initialized.  It is called in both
 * the error case in attach and the normal detach case, so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
vr_detach(device_t dev)
{
	struct vr_softc		*sc = device_get_softc(dev);
	struct ifnet		*ifp = sc->vr_ifp;

	KASSERT(mtx_initialized(&sc->vr_mtx), ("vr mutex not initialized"));

#ifdef DEVICE_POLLING
	if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/* These should only be active if attach succeeded. */
	if (device_is_attached(dev)) {
		VR_LOCK(sc);
		sc->vr_detach = 1;
		vr_stop(sc);
		VR_UNLOCK(sc);
		callout_drain(&sc->vr_stat_callout);
		taskqueue_drain(taskqueue_swi, &sc->vr_link_task);
		ether_ifdetach(ifp);
	}
	if (sc->vr_miibus)
		device_delete_child(dev, sc->vr_miibus);
	bus_generic_detach(dev);

	if (sc->vr_intrhand)
		bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
	if (sc->vr_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
	if (sc->vr_res)
		bus_release_resource(dev, sc->vr_res_type, sc->vr_res_id,
		    sc->vr_res);

	if (ifp)
		if_free(ifp);

	vr_dma_free(sc);

	mtx_destroy(&sc->vr_mtx);

	return (0);
}

struct vr_dmamap_arg {
	bus_addr_t	vr_busaddr;
};

static void
vr_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct vr_dmamap_arg	*ctx;

	if (error != 0)
		return;
	ctx = arg;
	ctx->vr_busaddr = segs[0].ds_addr;
}

static int
vr_dma_alloc(struct vr_softc *sc)
{
	struct vr_dmamap_arg	ctx;
	struct vr_txdesc	*txd;
	struct vr_rxdesc	*rxd;
	bus_size_t		tx_alignment;
	int			error, i;

	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->vr_dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vr_cdata.vr_parent_tag);
	if (error != 0) {
		device_printf(sc->vr_dev, "failed to create parent DMA tag\n");
		goto fail;
	}
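	/*
	 * All of the tags below inherit from this parent tag, so its
	 * BUS_SPACE_MAXADDR_32BIT lowaddr restricts every ring and
	 * buffer to the 32-bit address space that the chip's descriptor
	 * pointers can express.
	 */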
	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(
	    sc->vr_cdata.vr_parent_tag,	/* parent */
	    VR_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    VR_TX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    VR_TX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vr_cdata.vr_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->vr_dev, "failed to create Tx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(
	    sc->vr_cdata.vr_parent_tag,	/* parent */
	    VR_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    VR_RX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    VR_RX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vr_cdata.vr_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->vr_dev, "failed to create Rx ring DMA tag\n");
		goto fail;
	}

	if ((sc->vr_quirks & VR_Q_NEEDALIGN) != 0)
		tx_alignment = sizeof(uint32_t);
	else
		tx_alignment = 1;
	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(
	    sc->vr_cdata.vr_parent_tag,	/* parent */
	    tx_alignment, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * VR_MAXFRAGS,	/* maxsize */
	    VR_MAXFRAGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vr_cdata.vr_tx_tag);
	if (error != 0) {
		device_printf(sc->vr_dev, "failed to create Tx DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(
	    sc->vr_cdata.vr_parent_tag,	/* parent */
	    VR_RX_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vr_cdata.vr_rx_tag);
	if (error != 0) {
		device_printf(sc->vr_dev, "failed to create Rx DMA tag\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->vr_cdata.vr_tx_ring_tag,
	    (void **)&sc->vr_rdata.vr_tx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->vr_cdata.vr_tx_ring_map);
	if (error != 0) {
		device_printf(sc->vr_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}

	ctx.vr_busaddr = 0;
	error = bus_dmamap_load(sc->vr_cdata.vr_tx_ring_tag,
	    sc->vr_cdata.vr_tx_ring_map, sc->vr_rdata.vr_tx_ring,
	    VR_TX_RING_SIZE, vr_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.vr_busaddr == 0) {
		device_printf(sc->vr_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	sc->vr_rdata.vr_tx_ring_paddr = ctx.vr_busaddr;
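	/*
	 * As with the Tx ring above, bus_dmamap_load() reports the bus
	 * address through vr_dmamap_cb(), which stashes segs[0].ds_addr
	 * in the vr_dmamap_arg cookie; a cookie left at zero indicates
	 * the load failed.
	 */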
	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->vr_cdata.vr_rx_ring_tag,
	    (void **)&sc->vr_rdata.vr_rx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->vr_cdata.vr_rx_ring_map);
	if (error != 0) {
		device_printf(sc->vr_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}

	ctx.vr_busaddr = 0;
	error = bus_dmamap_load(sc->vr_cdata.vr_rx_ring_tag,
	    sc->vr_cdata.vr_rx_ring_map, sc->vr_rdata.vr_rx_ring,
	    VR_RX_RING_SIZE, vr_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.vr_busaddr == 0) {
		device_printf(sc->vr_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc->vr_rdata.vr_rx_ring_paddr = ctx.vr_busaddr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < VR_TX_RING_CNT; i++) {
		txd = &sc->vr_cdata.vr_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->vr_cdata.vr_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->vr_dev,
			    "failed to create Tx dmamap\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->vr_cdata.vr_rx_tag, 0,
	    &sc->vr_cdata.vr_rx_sparemap)) != 0) {
		device_printf(sc->vr_dev,
		    "failed to create spare Rx dmamap\n");
		goto fail;
	}
	for (i = 0; i < VR_RX_RING_CNT; i++) {
		rxd = &sc->vr_cdata.vr_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->vr_cdata.vr_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->vr_dev,
			    "failed to create Rx dmamap\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
vr_dma_free(struct vr_softc *sc)
{
	struct vr_txdesc	*txd;
	struct vr_rxdesc	*rxd;
	int			i;

	/* Tx ring. */
	if (sc->vr_cdata.vr_tx_ring_tag) {
		if (sc->vr_cdata.vr_tx_ring_map)
			bus_dmamap_unload(sc->vr_cdata.vr_tx_ring_tag,
			    sc->vr_cdata.vr_tx_ring_map);
		if (sc->vr_cdata.vr_tx_ring_map &&
		    sc->vr_rdata.vr_tx_ring)
			bus_dmamem_free(sc->vr_cdata.vr_tx_ring_tag,
			    sc->vr_rdata.vr_tx_ring,
			    sc->vr_cdata.vr_tx_ring_map);
		sc->vr_rdata.vr_tx_ring = NULL;
		sc->vr_cdata.vr_tx_ring_map = NULL;
		bus_dma_tag_destroy(sc->vr_cdata.vr_tx_ring_tag);
		sc->vr_cdata.vr_tx_ring_tag = NULL;
	}
	/* Rx ring. */
	if (sc->vr_cdata.vr_rx_ring_tag) {
		if (sc->vr_cdata.vr_rx_ring_map)
			bus_dmamap_unload(sc->vr_cdata.vr_rx_ring_tag,
			    sc->vr_cdata.vr_rx_ring_map);
		if (sc->vr_cdata.vr_rx_ring_map &&
		    sc->vr_rdata.vr_rx_ring)
			bus_dmamem_free(sc->vr_cdata.vr_rx_ring_tag,
			    sc->vr_rdata.vr_rx_ring,
			    sc->vr_cdata.vr_rx_ring_map);
		sc->vr_rdata.vr_rx_ring = NULL;
		sc->vr_cdata.vr_rx_ring_map = NULL;
		bus_dma_tag_destroy(sc->vr_cdata.vr_rx_ring_tag);
		sc->vr_cdata.vr_rx_ring_tag = NULL;
	}
	/* Tx buffers. */
	if (sc->vr_cdata.vr_tx_tag) {
		for (i = 0; i < VR_TX_RING_CNT; i++) {
			txd = &sc->vr_cdata.vr_txdesc[i];
			if (txd->tx_dmamap) {
				bus_dmamap_destroy(sc->vr_cdata.vr_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->vr_cdata.vr_tx_tag);
		sc->vr_cdata.vr_tx_tag = NULL;
	}
	/* Rx buffers. */
	if (sc->vr_cdata.vr_rx_tag) {
		for (i = 0; i < VR_RX_RING_CNT; i++) {
			rxd = &sc->vr_cdata.vr_rxdesc[i];
			if (rxd->rx_dmamap) {
				bus_dmamap_destroy(sc->vr_cdata.vr_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->vr_cdata.vr_rx_sparemap) {
			bus_dmamap_destroy(sc->vr_cdata.vr_rx_tag,
			    sc->vr_cdata.vr_rx_sparemap);
			sc->vr_cdata.vr_rx_sparemap = 0;
		}
		bus_dma_tag_destroy(sc->vr_cdata.vr_rx_tag);
		sc->vr_cdata.vr_rx_tag = NULL;
	}

	if (sc->vr_cdata.vr_parent_tag) {
		bus_dma_tag_destroy(sc->vr_cdata.vr_parent_tag);
		sc->vr_cdata.vr_parent_tag = NULL;
	}
}

/*
 * Initialize the transmit descriptors.
 */
static int
vr_tx_ring_init(struct vr_softc *sc)
{
	struct vr_ring_data	*rd;
	struct vr_txdesc	*txd;
	bus_addr_t		addr;
	int			i;

	sc->vr_cdata.vr_tx_prod = 0;
	sc->vr_cdata.vr_tx_cons = 0;
	sc->vr_cdata.vr_tx_cnt = 0;
	sc->vr_cdata.vr_tx_pkts = 0;

	rd = &sc->vr_rdata;
	bzero(rd->vr_tx_ring, VR_TX_RING_SIZE);
	for (i = 0; i < VR_TX_RING_CNT; i++) {
		if (i == VR_TX_RING_CNT - 1)
			addr = VR_TX_RING_ADDR(sc, 0);
		else
			addr = VR_TX_RING_ADDR(sc, i + 1);
		rd->vr_tx_ring[i].vr_nextphys = htole32(VR_ADDR_LO(addr));
		txd = &sc->vr_cdata.vr_txdesc[i];
		txd->tx_m = NULL;
	}

	bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
	    sc->vr_cdata.vr_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Initialize the RX descriptors and allocate mbufs for them.  Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int
vr_rx_ring_init(struct vr_softc *sc)
{
	struct vr_ring_data	*rd;
	struct vr_rxdesc	*rxd;
	bus_addr_t		addr;
	int			i;

	sc->vr_cdata.vr_rx_cons = 0;

	rd = &sc->vr_rdata;
	bzero(rd->vr_rx_ring, VR_RX_RING_SIZE);
	for (i = 0; i < VR_RX_RING_CNT; i++) {
		rxd = &sc->vr_cdata.vr_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->desc = &rd->vr_rx_ring[i];
		if (i == VR_RX_RING_CNT - 1)
			addr = VR_RX_RING_ADDR(sc, 0);
		else
			addr = VR_RX_RING_ADDR(sc, i + 1);
		rd->vr_rx_ring[i].vr_nextphys = htole32(VR_ADDR_LO(addr));
		if (vr_newbuf(sc, i) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag,
	    sc->vr_cdata.vr_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

static __inline void
vr_discard_rxbuf(struct vr_rxdesc *rxd)
{
	struct vr_desc	*desc;

	desc = rxd->desc;
	desc->vr_ctl = htole32(VR_RXCTL | (MCLBYTES - sizeof(uint64_t)));
	desc->vr_status = htole32(VR_RXSTAT_OWN);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 * Note: the length fields are only 11 bits wide, which means the
 * largest size we can specify is 2047.  This is important because
 * MCLBYTES is 2048, so we have to subtract one, otherwise we'll
 * overflow the field and make a mess.
 */
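/*
 * Concretely: with MCLBYTES at 2048, the cluster is first trimmed by
 * m_adj(m, sizeof(uint64_t)) below, so the length programmed into the
 * descriptor is 2040, safely below the 2047 limit.  The 8 bytes of
 * headroom this leaves are also what allow vr_fixup_rx() to shift a
 * received frame backwards on strict-alignment machines.
 */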
static int
vr_newbuf(struct vr_softc *sc, int idx)
{
	struct vr_desc		*desc;
	struct vr_rxdesc	*rxd;
	struct mbuf		*m;
	bus_dma_segment_t	segs[1];
	bus_dmamap_t		map;
	int			nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, sizeof(uint64_t));

	if (bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_rx_tag,
	    sc->vr_cdata.vr_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc->vr_cdata.vr_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->vr_cdata.vr_rx_sparemap;
	sc->vr_cdata.vr_rx_sparemap = map;
	bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	desc = rxd->desc;
	desc->vr_data = htole32(VR_ADDR_LO(segs[0].ds_addr));
	desc->vr_ctl = htole32(VR_RXCTL | segs[0].ds_len);
	desc->vr_status = htole32(VR_RXSTAT_OWN);

	return (0);
}

#ifndef __NO_STRICT_ALIGNMENT
static __inline void
vr_fixup_rx(struct mbuf *m)
{
	uint16_t	*src, *dst;
	int		i;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= ETHER_ALIGN;
}
#endif
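/*
 * vr_fixup_rx() slides the received frame back by ETHER_ALIGN (2)
 * bytes with an overlapping 16-bit copy, so that the IP header that
 * follows the 14-byte Ethernet header ends up 32-bit aligned on
 * strict-alignment machines.
 */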
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static void
vr_rxeof(struct vr_softc *sc)
{
	struct vr_rxdesc	*rxd;
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct vr_desc		*cur_rx;
	int			cons, prog, total_len;
	uint32_t		rxstat, rxctl;

	VR_LOCK_ASSERT(sc);
	ifp = sc->vr_ifp;
	cons = sc->vr_cdata.vr_rx_cons;

	bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag,
	    sc->vr_cdata.vr_rx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (prog = 0; prog < VR_RX_RING_CNT; VR_INC(cons, VR_RX_RING_CNT)) {
#ifdef DEVICE_POLLING
		if (ifp->if_capenable & IFCAP_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif
		cur_rx = &sc->vr_rdata.vr_rx_ring[cons];
		rxstat = le32toh(cur_rx->vr_status);
		rxctl = le32toh(cur_rx->vr_ctl);
		if ((rxstat & VR_RXSTAT_OWN) == VR_RXSTAT_OWN)
			break;

		prog++;
		rxd = &sc->vr_cdata.vr_rxdesc[cons];
		m = rxd->rx_m;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 * We don't support SG in the Rx path yet, so discard
		 * partial frames.
		 */
		if ((rxstat & (VR_RXSTAT_RXERR | VR_RXSTAT_FIRSTFRAG |
		    VR_RXSTAT_LASTFRAG)) !=
		    (VR_RXSTAT_FIRSTFRAG | VR_RXSTAT_LASTFRAG)) {
			ifp->if_ierrors++;
			sc->vr_stat.rx_errors++;
			if (rxstat & VR_RXSTAT_CRCERR)
				sc->vr_stat.rx_crc_errors++;
			if (rxstat & VR_RXSTAT_FRAMEALIGNERR)
				sc->vr_stat.rx_alignment++;
			if (rxstat & VR_RXSTAT_FIFOOFLOW)
				sc->vr_stat.rx_fifo_overflows++;
			if (rxstat & VR_RXSTAT_GIANT)
				sc->vr_stat.rx_giants++;
			if (rxstat & VR_RXSTAT_RUNT)
				sc->vr_stat.rx_runts++;
			if (rxstat & VR_RXSTAT_BUFFERR)
				sc->vr_stat.rx_no_buffers++;
#ifdef VR_SHOW_ERRORS
			device_printf(sc->vr_dev, "%s: receive error = 0x%b\n",
			    __func__, rxstat & 0xff, VR_RXSTAT_ERR_BITS);
#endif
			vr_discard_rxbuf(rxd);
			continue;
		}

		if (vr_newbuf(sc, cons) != 0) {
			ifp->if_iqdrops++;
			sc->vr_stat.rx_errors++;
			sc->vr_stat.rx_no_mbufs++;
			vr_discard_rxbuf(rxd);
			continue;
		}

		/*
		 * XXX The VIA Rhine chip includes the CRC with every
		 * received frame, and there's no way to turn this
		 * behavior off (at least, I can't find anything in
		 * the manual that explains how to do it) so we have
		 * to trim off the CRC manually.
		 */
		total_len = VR_RXBYTES(rxstat);
		total_len -= ETHER_CRC_LEN;
		m->m_pkthdr.len = m->m_len = total_len;
#ifndef __NO_STRICT_ALIGNMENT
		/*
		 * RX buffers must be 32-bit aligned.
		 * Ignore the alignment problems on non-strict-alignment
		 * platforms.  The performance hit incurred due to unaligned
		 * accesses is much smaller than the hit produced by forcing
		 * buffer copies all the time.
		 */
		vr_fixup_rx(m);
#endif
		m->m_pkthdr.rcvif = ifp;
		ifp->if_ipackets++;
		sc->vr_stat.rx_ok++;
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
		    (rxstat & VR_RXSTAT_FRAG) == 0 &&
		    (rxctl & VR_RXCTL_IP) != 0) {
			/* Checksum is valid for non-fragmented IP packets. */
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if ((rxctl & VR_RXCTL_IPOK) == VR_RXCTL_IPOK) {
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				if (rxctl & (VR_RXCTL_TCP | VR_RXCTL_UDP)) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
					if ((rxctl & VR_RXCTL_TCPUDPOK) != 0)
						m->m_pkthdr.csum_data = 0xffff;
				}
			}
		}
		VR_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		VR_LOCK(sc);
	}

	if (prog > 0) {
		sc->vr_cdata.vr_rx_cons = cons;
		bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag,
		    sc->vr_cdata.vr_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}
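/*
 * Both rings are walked with VR_INC(), which advances an index
 * modulo the ring size; VR_INC(cons, VR_TX_RING_CNT) behaves like
 * cons = (cons + 1) % VR_TX_RING_CNT.
 */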
/*
 * A frame was downloaded to the chip.  It's safe for us to clean up
 * the list buffers.
 */
static void
vr_txeof(struct vr_softc *sc)
{
	struct vr_txdesc	*txd;
	struct vr_desc		*cur_tx;
	struct ifnet		*ifp;
	uint32_t		txctl, txstat;
	int			cons, prod;

	VR_LOCK_ASSERT(sc);

	cons = sc->vr_cdata.vr_tx_cons;
	prod = sc->vr_cdata.vr_tx_prod;
	if (cons == prod)
		return;

	bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
	    sc->vr_cdata.vr_tx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	ifp = sc->vr_ifp;
	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (; cons != prod; VR_INC(cons, VR_TX_RING_CNT)) {
		cur_tx = &sc->vr_rdata.vr_tx_ring[cons];
		txctl = le32toh(cur_tx->vr_ctl);
		txstat = le32toh(cur_tx->vr_status);
		if ((txstat & VR_TXSTAT_OWN) == VR_TXSTAT_OWN)
			break;

		sc->vr_cdata.vr_tx_cnt--;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		/* Only the first descriptor in the chain is valid. */
		if ((txctl & VR_TXCTL_FIRSTFRAG) == 0)
			continue;

		txd = &sc->vr_cdata.vr_txdesc[cons];
		KASSERT(txd->tx_m != NULL, ("%s: accessing NULL mbuf!\n",
		    __func__));

		if ((txstat & VR_TXSTAT_ERRSUM) != 0) {
			ifp->if_oerrors++;
			sc->vr_stat.tx_errors++;
			if ((txstat & VR_TXSTAT_ABRT) != 0) {
				/* Give up and restart Tx. */
				sc->vr_stat.tx_abort++;
				bus_dmamap_sync(sc->vr_cdata.vr_tx_tag,
				    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->vr_cdata.vr_tx_tag,
				    txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				VR_INC(cons, VR_TX_RING_CNT);
				sc->vr_cdata.vr_tx_cons = cons;
				if (vr_tx_stop(sc) != 0) {
					device_printf(sc->vr_dev,
					    "%s: Tx shutdown error -- "
					    "resetting\n", __func__);
					sc->vr_flags |= VR_F_RESTART;
					return;
				}
				vr_tx_start(sc);
				break;
			}
			if ((sc->vr_revid < REV_ID_VT3071_A &&
			    (txstat & VR_TXSTAT_UNDERRUN)) ||
			    (txstat & (VR_TXSTAT_UDF | VR_TXSTAT_TBUFF))) {
				sc->vr_stat.tx_underrun++;
				/* Retry and restart Tx. */
				sc->vr_cdata.vr_tx_cnt++;
				sc->vr_cdata.vr_tx_cons = cons;
				cur_tx->vr_status = htole32(VR_TXSTAT_OWN);
				bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
				    sc->vr_cdata.vr_tx_ring_map,
				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
				vr_tx_underrun(sc);
				return;
			}
			if ((txstat & VR_TXSTAT_DEFER) != 0) {
				ifp->if_collisions++;
				sc->vr_stat.tx_collisions++;
			}
			if ((txstat & VR_TXSTAT_LATECOLL) != 0) {
				ifp->if_collisions++;
				sc->vr_stat.tx_late_collisions++;
			}
		} else {
			sc->vr_stat.tx_ok++;
			ifp->if_opackets++;
		}

		bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap);
		if (sc->vr_revid < REV_ID_VT3071_A) {
			ifp->if_collisions +=
			    (txstat & VR_TXSTAT_COLLCNT) >> 3;
			sc->vr_stat.tx_collisions +=
			    (txstat & VR_TXSTAT_COLLCNT) >> 3;
		} else {
			ifp->if_collisions += (txstat & 0x0f);
			sc->vr_stat.tx_collisions += (txstat & 0x0f);
		}
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
	}

	sc->vr_cdata.vr_tx_cons = cons;
	if (sc->vr_cdata.vr_tx_cnt == 0)
		sc->vr_watchdog_timer = 0;
}

static void
vr_tick(void *xsc)
{
	struct vr_softc		*sc;
	struct mii_data		*mii;

	sc = (struct vr_softc *)xsc;

	VR_LOCK_ASSERT(sc);

	if ((sc->vr_flags & VR_F_RESTART) != 0) {
		device_printf(sc->vr_dev, "restarting\n");
		sc->vr_stat.num_restart++;
		vr_stop(sc);
		vr_reset(sc);
		vr_init_locked(sc);
		sc->vr_flags &= ~VR_F_RESTART;
	}

	mii = device_get_softc(sc->vr_miibus);
	mii_tick(mii);
	vr_watchdog(sc);
	callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc);
}
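/*
 * In polling mode, vr_poll_locked() below loads sc->rxcycles with the
 * poll budget; vr_rxeof() decrements it at the top of its Rx loop and
 * stops once it reaches zero, bounding the work done per poll pass.
 */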
#ifdef DEVICE_POLLING
static poll_handler_t vr_poll;
static poll_handler_t vr_poll_locked;

static void
vr_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct vr_softc *sc;

	sc = ifp->if_softc;

	VR_LOCK(sc);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		vr_poll_locked(ifp, cmd, count);
	VR_UNLOCK(sc);
}

static void
vr_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct vr_softc *sc;

	sc = ifp->if_softc;

	VR_LOCK_ASSERT(sc);

	sc->rxcycles = count;
	vr_rxeof(sc);
	vr_txeof(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		vr_start_locked(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) {
		uint16_t status;

		/* Also check status register. */
		status = CSR_READ_2(sc, VR_ISR);
		if (status)
			CSR_WRITE_2(sc, VR_ISR, status);

		if ((status & VR_INTRS) == 0)
			return;

		if ((status & (VR_ISR_BUSERR | VR_ISR_LINKSTAT2 |
		    VR_ISR_STATSOFLOW)) != 0) {
			if (vr_error(sc, status) != 0)
				return;
		}
		if ((status & (VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW)) != 0) {
#ifdef VR_SHOW_ERRORS
			device_printf(sc->vr_dev, "%s: receive error : 0x%b\n",
			    __func__, status, VR_ISR_ERR_BITS);
#endif
			vr_rx_start(sc);
		}
	}
}
#endif /* DEVICE_POLLING */

/* Back off the transmit threshold. */
static void
vr_tx_underrun(struct vr_softc *sc)
{
	int	thresh;

	device_printf(sc->vr_dev, "Tx underrun -- ");
	if (sc->vr_txthresh < VR_TXTHRESH_MAX) {
		thresh = sc->vr_txthresh;
		sc->vr_txthresh++;
		if (sc->vr_txthresh >= VR_TXTHRESH_MAX) {
			sc->vr_txthresh = VR_TXTHRESH_MAX;
			printf("using store and forward mode\n");
		} else
			printf("increasing Tx threshold (%d -> %d)\n",
			    vr_tx_threshold_tables[thresh].value,
			    vr_tx_threshold_tables[thresh + 1].value);
	} else
		printf("\n");
	sc->vr_stat.tx_underrun++;
	if (vr_tx_stop(sc) != 0) {
		device_printf(sc->vr_dev, "%s: Tx shutdown error -- "
		    "resetting\n", __func__);
		sc->vr_flags |= VR_F_RESTART;
		return;
	}
	vr_tx_start(sc);
}

static void
vr_intr(void *arg)
{
	struct vr_softc		*sc;
	struct ifnet		*ifp;
	uint16_t		status;

	sc = (struct vr_softc *)arg;

	VR_LOCK(sc);

	if (sc->vr_suspended != 0)
		goto done_locked;

	status = CSR_READ_2(sc, VR_ISR);
	if (status == 0 || status == 0xffff || (status & VR_INTRS) == 0)
		goto done_locked;

	ifp = sc->vr_ifp;
#ifdef DEVICE_POLLING
	if ((ifp->if_capenable & IFCAP_POLLING) != 0)
		goto done_locked;
#endif

	/* Suppress unwanted interrupts. */
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
	    (sc->vr_flags & VR_F_RESTART) != 0) {
		CSR_WRITE_2(sc, VR_IMR, 0);
		CSR_WRITE_2(sc, VR_ISR, status);
		goto done_locked;
	}

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, 0x0000);

	for (; (status & VR_INTRS) != 0;) {
		CSR_WRITE_2(sc, VR_ISR, status);
		if ((status & (VR_ISR_BUSERR | VR_ISR_LINKSTAT2 |
		    VR_ISR_STATSOFLOW)) != 0) {
			if (vr_error(sc, status) != 0) {
				VR_UNLOCK(sc);
				return;
			}
		}
		vr_rxeof(sc);
		if ((status & (VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW)) != 0) {
#ifdef VR_SHOW_ERRORS
			device_printf(sc->vr_dev, "%s: receive error = 0x%b\n",
			    __func__, status, VR_ISR_ERR_BITS);
#endif
			/* Restart Rx if the RxDMA state machine was stopped. */
			vr_rx_start(sc);
		}
		vr_txeof(sc);
		status = CSR_READ_2(sc, VR_ISR);
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		vr_start_locked(ifp);

done_locked:
	VR_UNLOCK(sc);
}

static int
vr_error(struct vr_softc *sc, uint16_t status)
{
	uint16_t pcis;

	status &= VR_ISR_BUSERR | VR_ISR_LINKSTAT2 | VR_ISR_STATSOFLOW;
	if ((status & VR_ISR_BUSERR) != 0) {
		status &= ~VR_ISR_BUSERR;
		sc->vr_stat.bus_errors++;
		/* Disable further interrupts. */
		CSR_WRITE_2(sc, VR_IMR, 0);
		pcis = pci_read_config(sc->vr_dev, PCIR_STATUS, 2);
		device_printf(sc->vr_dev, "PCI bus error (0x%04x) -- "
		    "resetting\n", pcis);
		pci_write_config(sc->vr_dev, PCIR_STATUS, pcis, 2);
		sc->vr_flags |= VR_F_RESTART;
		return (EAGAIN);
	}
	if ((status & VR_ISR_LINKSTAT2) != 0) {
		/* Link state change, duplex changes etc. */
		status &= ~VR_ISR_LINKSTAT2;
	}
	if ((status & VR_ISR_STATSOFLOW) != 0) {
		status &= ~VR_ISR_STATSOFLOW;
		if (sc->vr_revid >= REV_ID_VT6105M_A0) {
			/* Update MIB counters. */
		}
	}

	if (status != 0)
		device_printf(sc->vr_dev,
		    "unhandled interrupt, status = 0x%04x\n", status);
	return (0);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
vr_encap(struct vr_softc *sc, struct mbuf **m_head)
{
	struct vr_txdesc	*txd;
	struct vr_desc		*desc;
	struct mbuf		*m;
	bus_dma_segment_t	txsegs[VR_MAXFRAGS];
	uint32_t		csum_flags, txctl;
	int			error, i, nsegs, prod, si;
	int			padlen;

	VR_LOCK_ASSERT(sc);

	M_ASSERTPKTHDR((*m_head));

	/*
	 * Some VIA Rhine chips want packet buffers to be longword
	 * aligned, but very often our mbufs aren't.  Rather than
	 * waste time trying to decide when to copy and when not
	 * to copy, just do it all the time.
	 */
	if ((sc->vr_quirks & VR_Q_NEEDALIGN) != 0) {
		m = m_defrag(*m_head, M_DONTWAIT);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
	}
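	/*
	 * An example of the padding below, assuming VR_MIN_FRAMELEN is
	 * the usual 60-byte Ethernet minimum (excluding CRC): a 42-byte
	 * ARP request gets 18 zeroed bytes appended before it is handed
	 * to the chip.
	 */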
	/*
	 * The Rhine chip doesn't auto-pad, so we have to make
	 * sure to pad short frames out to the minimum frame length
	 * ourselves.
	 */
	if ((*m_head)->m_pkthdr.len < VR_MIN_FRAMELEN) {
		m = *m_head;
		padlen = VR_MIN_FRAMELEN - m->m_pkthdr.len;
		if (M_WRITABLE(m) == 0) {
			/* Get a writable copy. */
			m = m_dup(*m_head, M_DONTWAIT);
			m_freem(*m_head);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			*m_head = m;
		}
		if (m->m_next != NULL || M_TRAILINGSPACE(m) < padlen) {
			m = m_defrag(m, M_DONTWAIT);
			if (m == NULL) {
				m_freem(*m_head);
				*m_head = NULL;
				return (ENOBUFS);
			}
		}
		/*
		 * Manually pad short frames, and zero the pad space
		 * to avoid leaking data.
		 */
		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
		m->m_pkthdr.len += padlen;
		m->m_len = m->m_pkthdr.len;
		*m_head = m;
	}

	prod = sc->vr_cdata.vr_tx_prod;
	txd = &sc->vr_cdata.vr_txdesc[prod];
	error = bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap,
	    *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_DONTWAIT, VR_MAXFRAGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_tx_tag,
		    txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check number of available descriptors. */
	if (sc->vr_cdata.vr_tx_cnt + nsegs >= (VR_TX_RING_CNT - 1)) {
		bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap);
		return (ENOBUFS);
	}

	txd->tx_m = *m_head;
	bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	/* Set checksum offload. */
	csum_flags = 0;
	if (((*m_head)->m_pkthdr.csum_flags & VR_CSUM_FEATURES) != 0) {
		if ((*m_head)->m_pkthdr.csum_flags & CSUM_IP)
			csum_flags |= VR_TXCTL_IPCSUM;
		if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP)
			csum_flags |= VR_TXCTL_TCPCSUM;
		if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP)
			csum_flags |= VR_TXCTL_UDPCSUM;
	}

	/*
	 * Quite contrary to the VIA Rhine datasheet, the VR_TXCTL_TLINK
	 * bit is required in all descriptors, regardless of whether a
	 * frame uses a single buffer or multiple buffers.  Also, the
	 * VR_TXSTAT_OWN bit is valid only in the first descriptor of a
	 * multi-fragment frame.  Without this, the VIA Rhine chip
	 * generates Tx underrun interrupts and can't send any frames.
	 */
	si = prod;
	for (i = 0; i < nsegs; i++) {
		desc = &sc->vr_rdata.vr_tx_ring[prod];
		desc->vr_status = 0;
		txctl = txsegs[i].ds_len | VR_TXCTL_TLINK | csum_flags;
		if (i == 0)
			txctl |= VR_TXCTL_FIRSTFRAG;
		desc->vr_ctl = htole32(txctl);
		desc->vr_data = htole32(VR_ADDR_LO(txsegs[i].ds_addr));
		sc->vr_cdata.vr_tx_cnt++;
		VR_INC(prod, VR_TX_RING_CNT);
	}
	/* Update producer index. */
	sc->vr_cdata.vr_tx_prod = prod;

	prod = (prod + VR_TX_RING_CNT - 1) % VR_TX_RING_CNT;
	desc = &sc->vr_rdata.vr_tx_ring[prod];

	/*
	 * Set EOP on the last descriptor and request a Tx completion
	 * interrupt for every VR_TX_INTR_THRESH-th frame.
	 */
	VR_INC(sc->vr_cdata.vr_tx_pkts, VR_TX_INTR_THRESH);
	if (sc->vr_cdata.vr_tx_pkts == 0)
		desc->vr_ctl |= htole32(VR_TXCTL_LASTFRAG | VR_TXCTL_FINT);
	else
		desc->vr_ctl |= htole32(VR_TXCTL_LASTFRAG);

	/* Lastly turn the first descriptor ownership to hardware. */
	desc = &sc->vr_rdata.vr_tx_ring[si];
	desc->vr_status |= htole32(VR_TXSTAT_OWN);
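	/*
	 * At this point a three-fragment frame, for example, occupies
	 * descriptors si, si + 1 and si + 2 (modulo VR_TX_RING_CNT):
	 * FIRSTFRAG is set on si, LASTFRAG (plus FINT on every
	 * VR_TX_INTR_THRESH-th frame) on si + 2, TLINK and the checksum
	 * bits on all three, and OWN only on si.
	 */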
	/* Sync descriptors. */
	bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
	    sc->vr_cdata.vr_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

static void
vr_start(struct ifnet *ifp)
{
	struct vr_softc	*sc;

	sc = ifp->if_softc;
	VR_LOCK(sc);
	vr_start_locked(ifp);
	VR_UNLOCK(sc);
}

static void
vr_start_locked(struct ifnet *ifp)
{
	struct vr_softc	*sc;
	struct mbuf	*m_head;
	int		enq;

	sc = ifp->if_softc;

	VR_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || sc->vr_link == 0)
		return;

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
	    sc->vr_cdata.vr_tx_cnt < VR_TX_RING_CNT - 2; ) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring.  If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (vr_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/* Tell the chip to start transmitting. */
		VR_SETBIT(sc, VR_CR0, VR_CR0_TX_GO);
		/* Set a timeout in case the chip goes out to lunch. */
		sc->vr_watchdog_timer = 5;
	}
}

static void
vr_init(void *xsc)
{
	struct vr_softc	*sc;

	sc = (struct vr_softc *)xsc;
	VR_LOCK(sc);
	vr_init_locked(sc);
	VR_UNLOCK(sc);
}

static void
vr_init_locked(struct vr_softc *sc)
{
	struct ifnet		*ifp;
	struct mii_data		*mii;
	bus_addr_t		addr;
	int			i;

	VR_LOCK_ASSERT(sc);

	ifp = sc->vr_ifp;
	mii = device_get_softc(sc->vr_miibus);

	/* Cancel pending I/O and free all RX/TX buffers. */
	vr_stop(sc);
	vr_reset(sc);

	/* Set our station address. */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VR_PAR0 + i, IF_LLADDR(sc->vr_ifp)[i]);

	/* Set DMA size. */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD);

	/*
	 * BCR0 and BCR1 can override the RXCFG and TXCFG registers,
	 * so we must set both.
	 */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES);

	VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH);
	VR_SETBIT(sc, VR_BCR1, vr_tx_threshold_tables[sc->vr_txthresh].bcr_cfg);

	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES);

	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
	VR_SETBIT(sc, VR_TXCFG, vr_tx_threshold_tables[sc->vr_txthresh].tx_cfg);

	/* Init circular RX list. */
	if (vr_rx_ring_init(sc) != 0) {
		device_printf(sc->vr_dev,
		    "initialization failed: no memory for rx buffers\n");
		vr_stop(sc);
		return;
	}

	/* Init tx descriptors. */
	vr_tx_ring_init(sc);

	/* Disable all VLAN CAM entries. */
	if ((sc->vr_quirks & VR_Q_CAM) != 0) {
		CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_VLAN);
		CSR_WRITE_1(sc, VR_CAMMASK, 0);
		CSR_WRITE_1(sc, VR_CAMCTL, 0);
	}

	/* Set up the receive filter. */
2043 */ 2044 vr_set_filter(sc); 2045 2046 /* 2047 * Load the address of the RX ring. 2048 */ 2049 addr = VR_RX_RING_ADDR(sc, 0); 2050 CSR_WRITE_4(sc, VR_RXADDR, VR_ADDR_LO(addr)); 2051 /* 2052 * Load the address of the TX ring. 2053 */ 2054 addr = VR_TX_RING_ADDR(sc, 0); 2055 CSR_WRITE_4(sc, VR_TXADDR, VR_ADDR_LO(addr)); 2056 /* Default : full-duplex, no Tx poll. */ 2057 CSR_WRITE_1(sc, VR_CR1, VR_CR1_FULLDUPLEX | VR_CR1_TX_NOPOLL); 2058 2059 /* Set flow-control parameters for Rhine III. */ 2060 if (sc->vr_revid >= REV_ID_VT6105_A0) { 2061 /* Rx buffer count available for incoming packet. */ 2062 CSR_WRITE_1(sc, VR_FLOWCR0, VR_RX_RING_CNT); 2063 /* 2064 * Tx pause low threshold : 16 free receive buffers 2065 * Tx pause XON high threshold : 48 free receive buffers 2066 */ 2067 CSR_WRITE_1(sc, VR_FLOWCR1, 2068 VR_FLOWCR1_TXLO16 | VR_FLOWCR1_TXHI48 | VR_FLOWCR1_XONXOFF); 2069 /* Set Tx pause timer. */ 2070 CSR_WRITE_2(sc, VR_PAUSETIMER, 0xffff); 2071 } 2072 2073 /* Enable receiver and transmitter. */ 2074 CSR_WRITE_1(sc, VR_CR0, 2075 VR_CR0_START | VR_CR0_TX_ON | VR_CR0_RX_ON | VR_CR0_RX_GO); 2076 2077 CSR_WRITE_2(sc, VR_ISR, 0xFFFF); 2078 #ifdef DEVICE_POLLING 2079 /* 2080 * Disable interrupts if we are polling. 2081 */ 2082 if (ifp->if_capenable & IFCAP_POLLING) 2083 CSR_WRITE_2(sc, VR_IMR, 0); 2084 else 2085 #endif 2086 /* 2087 * Enable interrupts and disable MII intrs. 2088 */ 2089 CSR_WRITE_2(sc, VR_IMR, VR_INTRS); 2090 if (sc->vr_revid > REV_ID_VT6102_A) 2091 CSR_WRITE_2(sc, VR_MII_IMR, 0); 2092 2093 sc->vr_link = 0; 2094 mii_mediachg(mii); 2095 2096 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2097 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2098 2099 callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc); 2100 } 2101 2102 /* 2103 * Set media options. 2104 */ 2105 static int 2106 vr_ifmedia_upd(struct ifnet *ifp) 2107 { 2108 struct vr_softc *sc; 2109 struct mii_data *mii; 2110 struct mii_softc *miisc; 2111 int error; 2112 2113 sc = ifp->if_softc; 2114 VR_LOCK(sc); 2115 mii = device_get_softc(sc->vr_miibus); 2116 if (mii->mii_instance) { 2117 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 2118 mii_phy_reset(miisc); 2119 } 2120 error = mii_mediachg(mii); 2121 VR_UNLOCK(sc); 2122 2123 return (error); 2124 } 2125 2126 /* 2127 * Report current media status. 
2128 */ 2129 static void 2130 vr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2131 { 2132 struct vr_softc *sc; 2133 struct mii_data *mii; 2134 2135 sc = ifp->if_softc; 2136 mii = device_get_softc(sc->vr_miibus); 2137 VR_LOCK(sc); 2138 mii_pollstat(mii); 2139 VR_UNLOCK(sc); 2140 ifmr->ifm_active = mii->mii_media_active; 2141 ifmr->ifm_status = mii->mii_media_status; 2142 } 2143 2144 static int 2145 vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 2146 { 2147 struct vr_softc *sc; 2148 struct ifreq *ifr; 2149 struct mii_data *mii; 2150 int error, mask; 2151 2152 sc = ifp->if_softc; 2153 ifr = (struct ifreq *)data; 2154 error = 0; 2155 2156 switch (command) { 2157 case SIOCSIFFLAGS: 2158 VR_LOCK(sc); 2159 if (ifp->if_flags & IFF_UP) { 2160 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2161 if ((ifp->if_flags ^ sc->vr_if_flags) & 2162 (IFF_PROMISC | IFF_ALLMULTI)) 2163 vr_set_filter(sc); 2164 } else { 2165 if (sc->vr_detach == 0) 2166 vr_init_locked(sc); 2167 } 2168 } else { 2169 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2170 vr_stop(sc); 2171 } 2172 sc->vr_if_flags = ifp->if_flags; 2173 VR_UNLOCK(sc); 2174 break; 2175 case SIOCADDMULTI: 2176 case SIOCDELMULTI: 2177 VR_LOCK(sc); 2178 vr_set_filter(sc); 2179 VR_UNLOCK(sc); 2180 break; 2181 case SIOCGIFMEDIA: 2182 case SIOCSIFMEDIA: 2183 mii = device_get_softc(sc->vr_miibus); 2184 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 2185 break; 2186 case SIOCSIFCAP: 2187 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 2188 #ifdef DEVICE_POLLING 2189 if (mask & IFCAP_POLLING) { 2190 if (ifr->ifr_reqcap & IFCAP_POLLING) { 2191 error = ether_poll_register(vr_poll, ifp); 2192 if (error != 0) 2193 break; 2194 VR_LOCK(sc); 2195 /* Disable interrupts. */ 2196 CSR_WRITE_2(sc, VR_IMR, 0x0000); 2197 ifp->if_capenable |= IFCAP_POLLING; 2198 VR_UNLOCK(sc); 2199 } else { 2200 error = ether_poll_deregister(ifp); 2201 /* Enable interrupts. */ 2202 VR_LOCK(sc); 2203 CSR_WRITE_2(sc, VR_IMR, VR_INTRS); 2204 ifp->if_capenable &= ~IFCAP_POLLING; 2205 VR_UNLOCK(sc); 2206 } 2207 } 2208 #endif /* DEVICE_POLLING */ 2209 if ((mask & IFCAP_TXCSUM) != 0 && 2210 (IFCAP_TXCSUM & ifp->if_capabilities) != 0) { 2211 ifp->if_capenable ^= IFCAP_TXCSUM; 2212 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0) 2213 ifp->if_hwassist |= VR_CSUM_FEATURES; 2214 else 2215 ifp->if_hwassist &= ~VR_CSUM_FEATURES; 2216 } 2217 if ((mask & IFCAP_RXCSUM) != 0 && 2218 (IFCAP_RXCSUM & ifp->if_capabilities) != 0) 2219 ifp->if_capenable ^= IFCAP_RXCSUM; 2220 if ((mask & IFCAP_WOL_UCAST) != 0 && 2221 (ifp->if_capabilities & IFCAP_WOL_UCAST) != 0) 2222 ifp->if_capenable ^= IFCAP_WOL_UCAST; 2223 if ((mask & IFCAP_WOL_MAGIC) != 0 && 2224 (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0) 2225 ifp->if_capenable ^= IFCAP_WOL_MAGIC; 2226 break; 2227 default: 2228 error = ether_ioctl(ifp, command, data); 2229 break; 2230 } 2231 2232 return (error); 2233 } 2234 2235 static void 2236 vr_watchdog(struct vr_softc *sc) 2237 { 2238 struct ifnet *ifp; 2239 2240 VR_LOCK_ASSERT(sc); 2241 2242 if (sc->vr_watchdog_timer == 0 || --sc->vr_watchdog_timer) 2243 return; 2244 2245 ifp = sc->vr_ifp; 2246 /* 2247 * Reclaim first as we don't request interrupt for every packets. 
2248 */ 2249 vr_txeof(sc); 2250 if (sc->vr_cdata.vr_tx_cnt == 0) 2251 return; 2252 2253 if (sc->vr_link == 0) { 2254 if (bootverbose) 2255 if_printf(sc->vr_ifp, "watchdog timeout " 2256 "(missed link)\n"); 2257 ifp->if_oerrors++; 2258 vr_init_locked(sc); 2259 return; 2260 } 2261 2262 ifp->if_oerrors++; 2263 if_printf(ifp, "watchdog timeout\n"); 2264 2265 vr_stop(sc); 2266 vr_reset(sc); 2267 vr_init_locked(sc); 2268 2269 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2270 vr_start_locked(ifp); 2271 } 2272 2273 static void 2274 vr_tx_start(struct vr_softc *sc) 2275 { 2276 bus_addr_t addr; 2277 uint8_t cmd; 2278 2279 cmd = CSR_READ_1(sc, VR_CR0); 2280 if ((cmd & VR_CR0_TX_ON) == 0) { 2281 addr = VR_TX_RING_ADDR(sc, sc->vr_cdata.vr_tx_cons); 2282 CSR_WRITE_4(sc, VR_TXADDR, VR_ADDR_LO(addr)); 2283 cmd |= VR_CR0_TX_ON; 2284 CSR_WRITE_1(sc, VR_CR0, cmd); 2285 } 2286 if (sc->vr_cdata.vr_tx_cnt != 0) { 2287 sc->vr_watchdog_timer = 5; 2288 VR_SETBIT(sc, VR_CR0, VR_CR0_TX_GO); 2289 } 2290 } 2291 2292 static void 2293 vr_rx_start(struct vr_softc *sc) 2294 { 2295 bus_addr_t addr; 2296 uint8_t cmd; 2297 2298 cmd = CSR_READ_1(sc, VR_CR0); 2299 if ((cmd & VR_CR0_RX_ON) == 0) { 2300 addr = VR_RX_RING_ADDR(sc, sc->vr_cdata.vr_rx_cons); 2301 CSR_WRITE_4(sc, VR_RXADDR, VR_ADDR_LO(addr)); 2302 cmd |= VR_CR0_RX_ON; 2303 CSR_WRITE_1(sc, VR_CR0, cmd); 2304 } 2305 CSR_WRITE_1(sc, VR_CR0, cmd | VR_CR0_RX_GO); 2306 } 2307 2308 static int 2309 vr_tx_stop(struct vr_softc *sc) 2310 { 2311 int i; 2312 uint8_t cmd; 2313 2314 cmd = CSR_READ_1(sc, VR_CR0); 2315 if ((cmd & VR_CR0_TX_ON) != 0) { 2316 cmd &= ~VR_CR0_TX_ON; 2317 CSR_WRITE_1(sc, VR_CR0, cmd); 2318 for (i = VR_TIMEOUT; i > 0; i--) { 2319 DELAY(5); 2320 cmd = CSR_READ_1(sc, VR_CR0); 2321 if ((cmd & VR_CR0_TX_ON) == 0) 2322 break; 2323 } 2324 if (i == 0) 2325 return (ETIMEDOUT); 2326 } 2327 return (0); 2328 } 2329 2330 static int 2331 vr_rx_stop(struct vr_softc *sc) 2332 { 2333 int i; 2334 uint8_t cmd; 2335 2336 cmd = CSR_READ_1(sc, VR_CR0); 2337 if ((cmd & VR_CR0_RX_ON) != 0) { 2338 cmd &= ~VR_CR0_RX_ON; 2339 CSR_WRITE_1(sc, VR_CR0, cmd); 2340 for (i = VR_TIMEOUT; i > 0; i--) { 2341 DELAY(5); 2342 cmd = CSR_READ_1(sc, VR_CR0); 2343 if ((cmd & VR_CR0_RX_ON) == 0) 2344 break; 2345 } 2346 if (i == 0) 2347 return (ETIMEDOUT); 2348 } 2349 return (0); 2350 } 2351 2352 /* 2353 * Stop the adapter and free any mbufs allocated to the 2354 * RX and TX lists. 2355 */ 2356 static void 2357 vr_stop(struct vr_softc *sc) 2358 { 2359 struct vr_txdesc *txd; 2360 struct vr_rxdesc *rxd; 2361 struct ifnet *ifp; 2362 int i; 2363 2364 VR_LOCK_ASSERT(sc); 2365 2366 ifp = sc->vr_ifp; 2367 sc->vr_watchdog_timer = 0; 2368 2369 callout_stop(&sc->vr_stat_callout); 2370 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 2371 2372 CSR_WRITE_1(sc, VR_CR0, VR_CR0_STOP); 2373 if (vr_rx_stop(sc) != 0) 2374 device_printf(sc->vr_dev, "%s: Rx shutdown error\n", __func__); 2375 if (vr_tx_stop(sc) != 0) 2376 device_printf(sc->vr_dev, "%s: Tx shutdown error\n", __func__); 2377 /* Clear pending interrupts. */ 2378 CSR_WRITE_2(sc, VR_ISR, 0xFFFF); 2379 CSR_WRITE_2(sc, VR_IMR, 0x0000); 2380 CSR_WRITE_4(sc, VR_TXADDR, 0x00000000); 2381 CSR_WRITE_4(sc, VR_RXADDR, 0x00000000); 2382 2383 /* 2384 * Free RX and TX mbufs still in the queues. 
2385 */ 2386 for (i = 0; i < VR_RX_RING_CNT; i++) { 2387 rxd = &sc->vr_cdata.vr_rxdesc[i]; 2388 if (rxd->rx_m != NULL) { 2389 bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, 2390 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 2391 bus_dmamap_unload(sc->vr_cdata.vr_rx_tag, 2392 rxd->rx_dmamap); 2393 m_freem(rxd->rx_m); 2394 rxd->rx_m = NULL; 2395 } 2396 } 2397 for (i = 0; i < VR_TX_RING_CNT; i++) { 2398 txd = &sc->vr_cdata.vr_txdesc[i]; 2399 if (txd->tx_m != NULL) { 2400 bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, 2401 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 2402 bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, 2403 txd->tx_dmamap); 2404 m_freem(txd->tx_m); 2405 txd->tx_m = NULL; 2406 } 2407 } 2408 } 2409 2410 /* 2411 * Stop all chip I/O so that the kernel's probe routines don't 2412 * get confused by errant DMAs when rebooting. 2413 */ 2414 static int 2415 vr_shutdown(device_t dev) 2416 { 2417 2418 return (vr_suspend(dev)); 2419 } 2420 2421 static int 2422 vr_suspend(device_t dev) 2423 { 2424 struct vr_softc *sc; 2425 2426 sc = device_get_softc(dev); 2427 2428 VR_LOCK(sc); 2429 vr_stop(sc); 2430 vr_setwol(sc); 2431 sc->vr_suspended = 1; 2432 VR_UNLOCK(sc); 2433 2434 return (0); 2435 } 2436 2437 static int 2438 vr_resume(device_t dev) 2439 { 2440 struct vr_softc *sc; 2441 struct ifnet *ifp; 2442 2443 sc = device_get_softc(dev); 2444 2445 VR_LOCK(sc); 2446 ifp = sc->vr_ifp; 2447 vr_clrwol(sc); 2448 vr_reset(sc); 2449 if (ifp->if_flags & IFF_UP) 2450 vr_init_locked(sc); 2451 2452 sc->vr_suspended = 0; 2453 VR_UNLOCK(sc); 2454 2455 return (0); 2456 } 2457 2458 static void 2459 vr_setwol(struct vr_softc *sc) 2460 { 2461 struct ifnet *ifp; 2462 int pmc; 2463 uint16_t pmstat; 2464 uint8_t v; 2465 2466 VR_LOCK_ASSERT(sc); 2467 2468 if (sc->vr_revid < REV_ID_VT6102_A || 2469 pci_find_extcap(sc->vr_dev, PCIY_PMG, &pmc) != 0) 2470 return; 2471 2472 ifp = sc->vr_ifp; 2473 2474 /* Clear WOL configuration. */ 2475 CSR_WRITE_1(sc, VR_WOLCR_CLR, 0xFF); 2476 CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_SAB | VR_WOLCFG_SAM); 2477 CSR_WRITE_1(sc, VR_PWRCSR_CLR, 0xFF); 2478 CSR_WRITE_1(sc, VR_PWRCFG_CLR, VR_PWRCFG_WOLEN); 2479 if (sc->vr_revid > REV_ID_VT6105_B0) { 2480 /* Newer Rhine III supports two additional patterns. */ 2481 CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_PATTERN_PAGE); 2482 CSR_WRITE_1(sc, VR_TESTREG_CLR, 3); 2483 CSR_WRITE_1(sc, VR_PWRCSR1_CLR, 3); 2484 } 2485 if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0) 2486 CSR_WRITE_1(sc, VR_WOLCR_SET, VR_WOLCR_UCAST); 2487 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) 2488 CSR_WRITE_1(sc, VR_WOLCR_SET, VR_WOLCR_MAGIC); 2489 /* 2490 * It seems that multicast wakeup frames require programming pattern 2491 * registers and valid CRC as well as pattern mask for each pattern. 2492 * While it's possible to setup such a pattern it would complicate 2493 * WOL configuration so ignore multicast wakeup frames. 2494 */ 2495 if ((ifp->if_capenable & IFCAP_WOL) != 0) { 2496 CSR_WRITE_1(sc, VR_WOLCFG_SET, VR_WOLCFG_SAB | VR_WOLCFG_SAM); 2497 v = CSR_READ_1(sc, VR_STICKHW); 2498 CSR_WRITE_1(sc, VR_STICKHW, v | VR_STICKHW_WOL_ENB); 2499 CSR_WRITE_1(sc, VR_PWRCFG_SET, VR_PWRCFG_WOLEN); 2500 } 2501 2502 /* Put hardware into sleep. */ 2503 v = CSR_READ_1(sc, VR_STICKHW); 2504 v |= VR_STICKHW_DS0 | VR_STICKHW_DS1; 2505 CSR_WRITE_1(sc, VR_STICKHW, v); 2506 2507 /* Request PME if WOL is requested. 
*/ 2508 pmstat = pci_read_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, 2); 2509 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 2510 if ((ifp->if_capenable & IFCAP_WOL) != 0) 2511 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 2512 pci_write_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); 2513 } 2514 2515 static void 2516 vr_clrwol(struct vr_softc *sc) 2517 { 2518 uint8_t v; 2519 2520 VR_LOCK_ASSERT(sc); 2521 2522 if (sc->vr_revid < REV_ID_VT6102_A) 2523 return; 2524 2525 /* Take hardware out of sleep. */ 2526 v = CSR_READ_1(sc, VR_STICKHW); 2527 v &= ~(VR_STICKHW_DS0 | VR_STICKHW_DS1 | VR_STICKHW_WOL_ENB); 2528 CSR_WRITE_1(sc, VR_STICKHW, v); 2529 2530 /* Clear the WOL configuration, as WOL may interfere with normal operation. */ 2531 CSR_WRITE_1(sc, VR_WOLCR_CLR, 0xFF); 2532 CSR_WRITE_1(sc, VR_WOLCFG_CLR, 2533 VR_WOLCFG_SAB | VR_WOLCFG_SAM | VR_WOLCFG_PMEOVR); 2534 CSR_WRITE_1(sc, VR_PWRCSR_CLR, 0xFF); 2535 CSR_WRITE_1(sc, VR_PWRCFG_CLR, VR_PWRCFG_WOLEN); 2536 if (sc->vr_revid > REV_ID_VT6105_B0) { 2537 /* Newer Rhine III supports two additional patterns. */ 2538 CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_PATTERN_PAGE); 2539 CSR_WRITE_1(sc, VR_TESTREG_CLR, 3); 2540 CSR_WRITE_1(sc, VR_PWRCSR1_CLR, 3); 2541 } 2542 } 2543 2544 static int 2545 vr_sysctl_stats(SYSCTL_HANDLER_ARGS) 2546 { 2547 struct vr_softc *sc; 2548 struct vr_statistics *stat; 2549 int error; 2550 int result; 2551 2552 result = -1; 2553 error = sysctl_handle_int(oidp, &result, 0, req); 2554 2555 if (error != 0 || req->newptr == NULL) 2556 return (error); 2557 2558 if (result == 1) { 2559 sc = (struct vr_softc *)arg1; 2560 stat = &sc->vr_stat; 2561 2562 printf("%s statistics:\n", device_get_nameunit(sc->vr_dev)); 2563 printf("Outbound good frames : %ju\n", 2564 (uintmax_t)stat->tx_ok); 2565 printf("Inbound good frames : %ju\n", 2566 (uintmax_t)stat->rx_ok); 2567 printf("Outbound errors : %u\n", stat->tx_errors); 2568 printf("Inbound errors : %u\n", stat->rx_errors); 2569 printf("Inbound no buffers : %u\n", stat->rx_no_buffers); 2570 printf("Inbound no mbuf clusters: %u\n", stat->rx_no_mbufs); 2571 printf("Inbound FIFO overflows : %u\n", 2572 stat->rx_fifo_overflows); 2573 printf("Inbound CRC errors : %u\n", stat->rx_crc_errors); 2574 printf("Inbound frame alignment errors : %u\n", 2575 stat->rx_alignment); 2576 printf("Inbound giant frames : %u\n", stat->rx_giants); 2577 printf("Inbound runt frames : %u\n", stat->rx_runts); 2578 printf("Outbound aborted with excessive collisions : %u\n", 2579 stat->tx_abort); 2580 printf("Outbound collisions : %u\n", stat->tx_collisions); 2581 printf("Outbound late collisions : %u\n", 2582 stat->tx_late_collisions); 2583 printf("Outbound underrun : %u\n", stat->tx_underrun); 2584 printf("PCI bus errors : %u\n", stat->bus_errors); 2585 printf("Driver restarted due to Rx/Tx shutdown failure : %u\n", 2586 stat->num_restart); 2587 } 2588 2589 return (error); 2590 } 2591
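/*
 * A minimal registration sketch for the handler above (hypothetical;
 * the actual hookup lives in vr_attach(), which is outside this
 * section, and may differ):
 *
 *	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->vr_dev),
 *	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vr_dev)),
 *	    OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
 *	    vr_sysctl_stats, "I", "Rhine statistics");
 *
 * With such a node, "sysctl dev.vr.0.stats=1" dumps the counters to
 * the console; any other value is accepted but ignored, since the
 * handler only prints when the new value is 1.
 */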