/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1997, 1998
 *	Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * VIA Rhine fast ethernet PCI NIC driver
 *
 * Supports various network adapters based on the VIA Rhine
 * and Rhine II PCI controllers, including the D-Link DFE530TX.
 * Datasheets are available at http://www.via.com.tw.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The VIA Rhine controllers are similar in some respects to the
 * DEC tulip chips, except less complicated. The controller
 * uses an MII bus and an external physical layer interface. The
 * receiver has a one entry perfect filter and a 64-bit hash table
 * multicast filter. Transmit and receive descriptors are similar
 * to the tulip.
 *
 * Some Rhine chips have a serious flaw in their transmit DMA
 * mechanism: transmit buffers must be longword aligned. Unfortunately,
 * FreeBSD doesn't guarantee that mbufs will be filled in starting
 * at longword boundaries, so we have to do a buffer copy before
 * transmission.
 */
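/*
 * The copy workaround lives in vr_encap(): adapters flagged with
 * VR_Q_NEEDALIGN below have their outgoing mbuf chains compacted
 * with m_defrag(), which copies the data into fresh, aligned storage.
 */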

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>

#include <dev/vr/if_vrreg.h>

/* "device miibus" required. See GENERIC if you get errors here. */
#include "miibus_if.h"

MODULE_DEPEND(vr, pci, 1, 1, 1);
MODULE_DEPEND(vr, ether, 1, 1, 1);
MODULE_DEPEND(vr, miibus, 1, 1, 1);

/* Define to show Rx/Tx error status. */
#undef VR_SHOW_ERRORS
#define VR_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

/*
 * Various supported device vendors/types, their names & quirks.
 */
#define VR_Q_NEEDALIGN		(1<<0)
#define VR_Q_CSUM		(1<<1)
#define VR_Q_CAM		(1<<2)

static const struct vr_type {
        u_int16_t vr_vid;
        u_int16_t vr_did;
        int vr_quirks;
        const char *vr_name;
} vr_devs[] = {
        { VIA_VENDORID, VIA_DEVICEID_RHINE,
            VR_Q_NEEDALIGN,
            "VIA VT3043 Rhine I 10/100BaseTX" },
        { VIA_VENDORID, VIA_DEVICEID_RHINE_II,
            VR_Q_NEEDALIGN,
            "VIA VT86C100A Rhine II 10/100BaseTX" },
        { VIA_VENDORID, VIA_DEVICEID_RHINE_II_2,
            0,
            "VIA VT6102 Rhine II 10/100BaseTX" },
        { VIA_VENDORID, VIA_DEVICEID_RHINE_III,
            0,
            "VIA VT6105 Rhine III 10/100BaseTX" },
        { VIA_VENDORID, VIA_DEVICEID_RHINE_III_M,
            VR_Q_CSUM,
            "VIA VT6105M Rhine III 10/100BaseTX" },
        { DELTA_VENDORID, DELTA_DEVICEID_RHINE_II,
            VR_Q_NEEDALIGN,
            "Delta Electronics Rhine II 10/100BaseTX" },
        { ADDTRON_VENDORID, ADDTRON_DEVICEID_RHINE_II,
            VR_Q_NEEDALIGN,
            "Addtron Technology Rhine II 10/100BaseTX" },
        { 0, 0, 0, NULL }
};

static int vr_probe(device_t);
static int vr_attach(device_t);
static int vr_detach(device_t);
static int vr_shutdown(device_t);
static int vr_suspend(device_t);
static int vr_resume(device_t);

static void vr_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int vr_dma_alloc(struct vr_softc *);
static void vr_dma_free(struct vr_softc *);
static __inline void vr_discard_rxbuf(struct vr_rxdesc *);
static int vr_newbuf(struct vr_softc *, int);

#ifndef __NO_STRICT_ALIGNMENT
static __inline void vr_fixup_rx(struct mbuf *);
#endif
static int vr_rxeof(struct vr_softc *);
static void vr_txeof(struct vr_softc *);
static void vr_tick(void *);
static int vr_error(struct vr_softc *, uint16_t);
static void vr_tx_underrun(struct vr_softc *);
static int vr_intr(void *);
static void vr_int_task(void *, int);
static void vr_start(struct ifnet *);
static void vr_start_locked(struct ifnet *);
static int vr_encap(struct vr_softc *, struct mbuf **);
static int vr_ioctl(struct ifnet *, u_long, caddr_t);
static void vr_init(void *);
static void vr_init_locked(struct vr_softc *);
static void vr_tx_start(struct vr_softc *);
static void vr_rx_start(struct vr_softc *);
static int vr_tx_stop(struct vr_softc *);
static int vr_rx_stop(struct vr_softc *);
static void vr_stop(struct vr_softc *);
static void vr_watchdog(struct vr_softc *);
static int vr_ifmedia_upd(struct ifnet *);
static void vr_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static int vr_miibus_readreg(device_t, int, int);
static int vr_miibus_writereg(device_t, int, int, int);
static void vr_miibus_statchg(device_t);

static void vr_cam_mask(struct vr_softc *, uint32_t, int);
static int vr_cam_data(struct vr_softc *, int, int, uint8_t *);
static void vr_set_filter(struct vr_softc *);
static void vr_reset(const struct vr_softc *);
static int vr_tx_ring_init(struct vr_softc *);
static int vr_rx_ring_init(struct vr_softc *);
static void vr_setwol(struct vr_softc *);
static void vr_clrwol(struct vr_softc *);
static int vr_sysctl_stats(SYSCTL_HANDLER_ARGS);

static const struct vr_tx_threshold_table {
        int tx_cfg;
        int bcr_cfg;
        int value;
} vr_tx_threshold_tables[] = {
        { VR_TXTHRESH_64BYTES, VR_BCR1_TXTHRESH64BYTES, 64 },
        { VR_TXTHRESH_128BYTES, VR_BCR1_TXTHRESH128BYTES, 128 },
        { VR_TXTHRESH_256BYTES, VR_BCR1_TXTHRESH256BYTES, 256 },
        { VR_TXTHRESH_512BYTES, VR_BCR1_TXTHRESH512BYTES, 512 },
        { VR_TXTHRESH_1024BYTES, VR_BCR1_TXTHRESH1024BYTES, 1024 },
        { VR_TXTHRESH_STORENFWD, VR_BCR1_TXTHRESHSTORENFWD, 2048 }
};

static device_method_t vr_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,		vr_probe),
        DEVMETHOD(device_attach,	vr_attach),
        DEVMETHOD(device_detach,	vr_detach),
        DEVMETHOD(device_shutdown,	vr_shutdown),
        DEVMETHOD(device_suspend,	vr_suspend),
        DEVMETHOD(device_resume,	vr_resume),

        /* MII interface */
        DEVMETHOD(miibus_readreg,	vr_miibus_readreg),
        DEVMETHOD(miibus_writereg,	vr_miibus_writereg),
        DEVMETHOD(miibus_statchg,	vr_miibus_statchg),

        DEVMETHOD_END
};

static driver_t vr_driver = {
        "vr",
        vr_methods,
        sizeof(struct vr_softc)
};

static devclass_t vr_devclass;

DRIVER_MODULE(vr, pci, vr_driver, vr_devclass, 0, 0);
DRIVER_MODULE(miibus, vr, miibus_driver, miibus_devclass, 0, 0);

static int
vr_miibus_readreg(device_t dev, int phy, int reg)
{
        struct vr_softc *sc;
        int i;

        sc = device_get_softc(dev);

        /* Set the register address. */
        CSR_WRITE_1(sc, VR_MIIADDR, reg);
        VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB);

        for (i = 0; i < VR_MII_TIMEOUT; i++) {
                DELAY(1);
                if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0)
                        break;
        }
        if (i == VR_MII_TIMEOUT)
                device_printf(sc->vr_dev, "phy read timeout %d:%d\n", phy, reg);

        return (CSR_READ_2(sc, VR_MIIDATA));
}

static int
vr_miibus_writereg(device_t dev, int phy, int reg, int data)
{
        struct vr_softc *sc;
        int i;

        sc = device_get_softc(dev);

        /* Set the register address and data to write. */
        CSR_WRITE_1(sc, VR_MIIADDR, reg);
        CSR_WRITE_2(sc, VR_MIIDATA, data);
        VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB);

        for (i = 0; i < VR_MII_TIMEOUT; i++) {
                DELAY(1);
                if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0)
                        break;
        }
        if (i == VR_MII_TIMEOUT)
                device_printf(sc->vr_dev, "phy write timeout %d:%d\n", phy,
                    reg);

        return (0);
}

/*
 * In order to fiddle with the 'full-duplex' and '100Mbps' bits in
 * the netconfig register, we first have to put the transmit and/or
 * receive logic in the idle state.
 */
static void
vr_miibus_statchg(device_t dev)
{
        struct vr_softc *sc;
        struct mii_data *mii;
        struct ifnet *ifp;
        int lfdx, mfdx;
        uint8_t cr0, cr1, fc;

        sc = device_get_softc(dev);
        mii = device_get_softc(sc->vr_miibus);
        ifp = sc->vr_ifp;
        if (mii == NULL || ifp == NULL ||
            (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                return;

        sc->vr_flags &= ~(VR_F_LINK | VR_F_TXPAUSE);
        if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
            (IFM_ACTIVE | IFM_AVALID)) {
                switch (IFM_SUBTYPE(mii->mii_media_active)) {
                case IFM_10_T:
                case IFM_100_TX:
                        sc->vr_flags |= VR_F_LINK;
                        break;
                default:
                        break;
                }
        }

        if ((sc->vr_flags & VR_F_LINK) != 0) {
                cr0 = CSR_READ_1(sc, VR_CR0);
                cr1 = CSR_READ_1(sc, VR_CR1);
                mfdx = (cr1 & VR_CR1_FULLDUPLEX) != 0;
                lfdx = (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0;
                if (mfdx != lfdx) {
                        if ((cr0 & (VR_CR0_TX_ON | VR_CR0_RX_ON)) != 0) {
                                if (vr_tx_stop(sc) != 0 ||
                                    vr_rx_stop(sc) != 0) {
                                        device_printf(sc->vr_dev,
                                            "%s: Tx/Rx shutdown error -- "
                                            "resetting\n", __func__);
                                        sc->vr_flags |= VR_F_RESTART;
                                        VR_UNLOCK(sc);
                                        return;
                                }
                        }
                        if (lfdx)
                                cr1 |= VR_CR1_FULLDUPLEX;
                        else
                                cr1 &= ~VR_CR1_FULLDUPLEX;
                        CSR_WRITE_1(sc, VR_CR1, cr1);
                }
                fc = 0;
                /* Configure flow-control. */
                if (sc->vr_revid >= REV_ID_VT6105_A0) {
                        fc = CSR_READ_1(sc, VR_FLOWCR1);
                        fc &= ~(VR_FLOWCR1_TXPAUSE | VR_FLOWCR1_RXPAUSE);
                        if ((IFM_OPTIONS(mii->mii_media_active) &
                            IFM_ETH_RXPAUSE) != 0)
                                fc |= VR_FLOWCR1_RXPAUSE;
                        if ((IFM_OPTIONS(mii->mii_media_active) &
                            IFM_ETH_TXPAUSE) != 0) {
                                fc |= VR_FLOWCR1_TXPAUSE;
                                sc->vr_flags |= VR_F_TXPAUSE;
                        }
                        CSR_WRITE_1(sc, VR_FLOWCR1, fc);
                } else if (sc->vr_revid >= REV_ID_VT6102_A) {
                        /* No Tx pause capability available for Rhine II. */
                        fc = CSR_READ_1(sc, VR_MISC_CR0);
                        fc &= ~VR_MISCCR0_RXPAUSE;
                        if ((IFM_OPTIONS(mii->mii_media_active) &
                            IFM_ETH_RXPAUSE) != 0)
                                fc |= VR_MISCCR0_RXPAUSE;
                        CSR_WRITE_1(sc, VR_MISC_CR0, fc);
                }
                vr_rx_start(sc);
                vr_tx_start(sc);
        } else {
                if (vr_tx_stop(sc) != 0 || vr_rx_stop(sc) != 0) {
                        device_printf(sc->vr_dev,
                            "%s: Tx/Rx shutdown error -- resetting\n",
                            __func__);
                        sc->vr_flags |= VR_F_RESTART;
                }
        }
}

static void
vr_cam_mask(struct vr_softc *sc, uint32_t mask, int type)
{

        if (type == VR_MCAST_CAM)
                CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_MCAST);
        else
                CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_VLAN);
        CSR_WRITE_4(sc, VR_CAMMASK, mask);
        CSR_WRITE_1(sc, VR_CAMCTL, 0);
}

static int
vr_cam_data(struct vr_softc *sc, int type, int idx, uint8_t *mac)
{
        int i;

        if (type == VR_MCAST_CAM) {
                if (idx < 0 || idx >= VR_CAM_MCAST_CNT || mac == NULL)
                        return (EINVAL);
                CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_MCAST);
        } else
                CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_VLAN);

        /* Set CAM entry address. */
        CSR_WRITE_1(sc, VR_CAMADDR, idx);
        /* Set CAM entry data. */
        if (type == VR_MCAST_CAM) {
                for (i = 0; i < ETHER_ADDR_LEN; i++)
                        CSR_WRITE_1(sc, VR_MCAM0 + i, mac[i]);
        } else {
                CSR_WRITE_1(sc, VR_VCAM0, mac[0]);
                CSR_WRITE_1(sc, VR_VCAM1, mac[1]);
        }
        DELAY(10);
        /* Write CAM and wait for self-clear of VR_CAMCTL_WRITE bit. */
        CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_WRITE);
        for (i = 0; i < VR_TIMEOUT; i++) {
                DELAY(1);
                if ((CSR_READ_1(sc, VR_CAMCTL) & VR_CAMCTL_WRITE) == 0)
                        break;
        }

        if (i == VR_TIMEOUT)
                device_printf(sc->vr_dev, "%s: setting CAM filter timeout!\n",
                    __func__);
        CSR_WRITE_1(sc, VR_CAMCTL, 0);

        return (i == VR_TIMEOUT ? ETIMEDOUT : 0);
}

struct vr_hash_maddr_cam_ctx {
        struct vr_softc *sc;
        uint32_t mask;
        int error;
};

static u_int
vr_hash_maddr_cam(void *arg, struct sockaddr_dl *sdl, u_int mcnt)
{
        struct vr_hash_maddr_cam_ctx *ctx = arg;

        if (ctx->error != 0)
                return (0);
        ctx->error = vr_cam_data(ctx->sc, VR_MCAST_CAM, mcnt, LLADDR(sdl));
        if (ctx->error != 0) {
                ctx->mask = 0;
                return (0);
        }
        ctx->mask |= 1 << mcnt;

        return (1);
}

static u_int
vr_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
        uint32_t *hashes = arg;
        int h;

        h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26;
        if (h < 32)
                hashes[0] |= (1 << h);
        else
                hashes[1] |= (1 << (h - 32));

        return (1);
}
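/*
 * The hash filter above works tulip-style: the top six bits of the
 * big-endian CRC32 of the station address select one of 64 bits,
 * split across the two 32-bit registers VR_MAR0 (bits 0-31) and
 * VR_MAR1 (bits 32-63).
 */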
/*
 * Program the 64-bit multicast hash filter.
 */
static void
vr_set_filter(struct vr_softc *sc)
{
        struct ifnet *ifp;
        uint32_t hashes[2] = { 0, 0 };
        uint8_t rxfilt;
        int error, mcnt;

        VR_LOCK_ASSERT(sc);

        ifp = sc->vr_ifp;
        rxfilt = CSR_READ_1(sc, VR_RXCFG);
        rxfilt &= ~(VR_RXCFG_RX_PROMISC | VR_RXCFG_RX_BROAD |
            VR_RXCFG_RX_MULTI);
        if (ifp->if_flags & IFF_BROADCAST)
                rxfilt |= VR_RXCFG_RX_BROAD;
        if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
                rxfilt |= VR_RXCFG_RX_MULTI;
                if (ifp->if_flags & IFF_PROMISC)
                        rxfilt |= VR_RXCFG_RX_PROMISC;
                CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
                CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
                CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
                return;
        }

        /* Now program new ones. */
        error = 0;
        mcnt = 0;
        if ((sc->vr_quirks & VR_Q_CAM) != 0) {
                struct vr_hash_maddr_cam_ctx ctx;

                /*
                 * For hardware that has CAM capability, use
                 * the 32-entry multicast perfect filter.
                 */
                ctx.sc = sc;
                ctx.mask = 0;
                ctx.error = 0;
                mcnt = if_foreach_llmaddr(ifp, vr_hash_maddr_cam, &ctx);
                vr_cam_mask(sc, ctx.mask, VR_MCAST_CAM);
                error = ctx.error;
        }

        if ((sc->vr_quirks & VR_Q_CAM) == 0 || error != 0) {
                /*
                 * If there are too many multicast addresses or
                 * setting the multicast CAM filter failed, use hash
                 * table based filtering.
                 */
                mcnt = if_foreach_llmaddr(ifp, vr_hash_maddr, hashes);
        }

        if (mcnt > 0)
                rxfilt |= VR_RXCFG_RX_MULTI;

        CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
        CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
        CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
}

static void
vr_reset(const struct vr_softc *sc)
{
        int i;

        /*VR_LOCK_ASSERT(sc);*/ /* XXX: Called during attach w/o lock. */

        CSR_WRITE_1(sc, VR_CR1, VR_CR1_RESET);
        if (sc->vr_revid < REV_ID_VT6102_A) {
                /* VT86C100A needs more delay after reset. */
                DELAY(100);
        }
        for (i = 0; i < VR_TIMEOUT; i++) {
                DELAY(10);
                if (!(CSR_READ_1(sc, VR_CR1) & VR_CR1_RESET))
                        break;
        }
        if (i == VR_TIMEOUT) {
                if (sc->vr_revid < REV_ID_VT6102_A)
                        device_printf(sc->vr_dev, "reset never completed!\n");
                else {
                        /* Use newer force reset command. */
                        device_printf(sc->vr_dev,
                            "Using force reset command.\n");
                        VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST);
                        /*
                         * Wait a little while for the chip to get its brains
                         * in order.
                         */
                        DELAY(2000);
                }
        }
}

/*
 * Probe for a VIA Rhine chip. Check the PCI vendor and device
 * IDs against our list and return a match or NULL.
 */
static const struct vr_type *
vr_match(device_t dev)
{
        const struct vr_type *t = vr_devs;

        for (t = vr_devs; t->vr_name != NULL; t++)
                if ((pci_get_vendor(dev) == t->vr_vid) &&
                    (pci_get_device(dev) == t->vr_did))
                        return (t);
        return (NULL);
}

/*
 * Probe for a VIA Rhine chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
vr_probe(device_t dev)
{
        const struct vr_type *t;

        t = vr_match(dev);
        if (t != NULL) {
                device_set_desc(dev, t->vr_name);
                return (BUS_PROBE_DEFAULT);
        }
        return (ENXIO);
}
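/*
 * Returning BUS_PROBE_DEFAULT rather than 0 leaves room for a more
 * specific driver to claim the device; ENXIO tells newbus this device
 * is not ours.
 */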
/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
vr_attach(device_t dev)
{
        struct vr_softc *sc;
        struct ifnet *ifp;
        const struct vr_type *t;
        uint8_t eaddr[ETHER_ADDR_LEN];
        int error, rid;
        int i, phy, pmc;

        sc = device_get_softc(dev);
        sc->vr_dev = dev;
        t = vr_match(dev);
        KASSERT(t != NULL, ("Lost if_vr device match"));
        sc->vr_quirks = t->vr_quirks;
        device_printf(dev, "Quirks: 0x%x\n", sc->vr_quirks);

        mtx_init(&sc->vr_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
            MTX_DEF);
        callout_init_mtx(&sc->vr_stat_callout, &sc->vr_mtx, 0);
        SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
            SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
            OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
            sc, 0, vr_sysctl_stats, "I", "Statistics");

        error = 0;

        /*
         * Map control/status registers.
         */
        pci_enable_busmaster(dev);
        sc->vr_revid = pci_get_revid(dev);
        device_printf(dev, "Revision: 0x%x\n", sc->vr_revid);

        sc->vr_res_id = PCIR_BAR(0);
        sc->vr_res_type = SYS_RES_IOPORT;
        sc->vr_res = bus_alloc_resource_any(dev, sc->vr_res_type,
            &sc->vr_res_id, RF_ACTIVE);
        if (sc->vr_res == NULL) {
                device_printf(dev, "couldn't map ports\n");
                error = ENXIO;
                goto fail;
        }

        /* Allocate interrupt. */
        rid = 0;
        sc->vr_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
            RF_SHAREABLE | RF_ACTIVE);

        if (sc->vr_irq == NULL) {
                device_printf(dev, "couldn't map interrupt\n");
                error = ENXIO;
                goto fail;
        }

        /* Allocate ifnet structure. */
        ifp = sc->vr_ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL) {
                device_printf(dev, "couldn't allocate ifnet structure\n");
                error = ENOSPC;
                goto fail;
        }
        ifp->if_softc = sc;
        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = vr_ioctl;
        ifp->if_start = vr_start;
        ifp->if_init = vr_init;
        IFQ_SET_MAXLEN(&ifp->if_snd, VR_TX_RING_CNT - 1);
        ifp->if_snd.ifq_maxlen = VR_TX_RING_CNT - 1;
        IFQ_SET_READY(&ifp->if_snd);

        NET_TASK_INIT(&sc->vr_inttask, 0, vr_int_task, sc);

        /* Configure Tx FIFO threshold. */
        sc->vr_txthresh = VR_TXTHRESH_MIN;
        if (sc->vr_revid < REV_ID_VT6105_A0) {
                /*
                 * Use store and forward mode for Rhine I/II.
                 * Otherwise they produce a lot of Tx underruns and
                 * it would take a while to arrive at a working FIFO
                 * threshold value.
                 */
                sc->vr_txthresh = VR_TXTHRESH_MAX;
        }
        if ((sc->vr_quirks & VR_Q_CSUM) != 0) {
                ifp->if_hwassist = VR_CSUM_FEATURES;
                ifp->if_capabilities |= IFCAP_HWCSUM;
                /*
                 * To update the checksum field the hardware may need to
                 * store entire frames in the FIFO before transmitting.
                 */
                sc->vr_txthresh = VR_TXTHRESH_MAX;
        }

        if (sc->vr_revid >= REV_ID_VT6102_A &&
            pci_find_cap(dev, PCIY_PMG, &pmc) == 0)
                ifp->if_capabilities |= IFCAP_WOL_UCAST | IFCAP_WOL_MAGIC;

        /* Rhine supports oversized VLAN frames. */
        ifp->if_capabilities |= IFCAP_VLAN_MTU;
        ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
        ifp->if_capabilities |= IFCAP_POLLING;
#endif

        /*
         * Windows may put the chip in suspend mode when it
         * shuts down. Be sure to kick it in the head to wake it
         * up again.
         */
        if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0)
                VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));
        /*
         * Get station address. The way the Rhine chips work,
         * you're not allowed to directly access the EEPROM once
         * they've been programmed a special way. Consequently,
         * we need to read the node address from the PAR0 and PAR1
         * registers.
         * Reloading the EEPROM also overwrites VR_CFGA, VR_CFGB,
         * VR_CFGC and VR_CFGD, so any memory-mapped I/O configured
         * by the driver is reset to its default state.
         */
        VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
        for (i = VR_TIMEOUT; i > 0; i--) {
                DELAY(1);
                if ((CSR_READ_1(sc, VR_EECSR) & VR_EECSR_LOAD) == 0)
                        break;
        }
        if (i == 0)
                device_printf(dev, "Reloading EEPROM timeout!\n");
        for (i = 0; i < ETHER_ADDR_LEN; i++)
                eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);

        /* Reset the adapter. */
        vr_reset(sc);
        /* Ack intr & disable further interrupts. */
        CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
        CSR_WRITE_2(sc, VR_IMR, 0);
        if (sc->vr_revid >= REV_ID_VT6102_A)
                CSR_WRITE_2(sc, VR_MII_IMR, 0);

        if (sc->vr_revid < REV_ID_VT6102_A) {
                pci_write_config(dev, VR_PCI_MODE2,
                    pci_read_config(dev, VR_PCI_MODE2, 1) |
                    VR_MODE2_MODE10T, 1);
        } else {
                /* Report error instead of retrying forever. */
                pci_write_config(dev, VR_PCI_MODE2,
                    pci_read_config(dev, VR_PCI_MODE2, 1) |
                    VR_MODE2_PCEROPT, 1);
                /* Detect MII coding error. */
                pci_write_config(dev, VR_PCI_MODE3,
                    pci_read_config(dev, VR_PCI_MODE3, 1) |
                    VR_MODE3_MIION, 1);
                if (sc->vr_revid >= REV_ID_VT6105_LOM &&
                    sc->vr_revid < REV_ID_VT6105M_A0)
                        pci_write_config(dev, VR_PCI_MODE2,
                            pci_read_config(dev, VR_PCI_MODE2, 1) |
                            VR_MODE2_MODE10T, 1);
                /* Enable Memory-Read-Multiple. */
                if (sc->vr_revid >= REV_ID_VT6107_A1 &&
                    sc->vr_revid < REV_ID_VT6105M_A0)
                        pci_write_config(dev, VR_PCI_MODE2,
                            pci_read_config(dev, VR_PCI_MODE2, 1) |
                            VR_MODE2_MRDPL, 1);
        }
        /* Disable MII AUTOPOLL. */
        VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL);

        if (vr_dma_alloc(sc) != 0) {
                error = ENXIO;
                goto fail;
        }

        /* Do MII setup. */
        if (sc->vr_revid >= REV_ID_VT6105_A0)
                phy = 1;
        else
                phy = CSR_READ_1(sc, VR_PHYADDR) & VR_PHYADDR_MASK;
        error = mii_attach(dev, &sc->vr_miibus, ifp, vr_ifmedia_upd,
            vr_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY,
            sc->vr_revid >= REV_ID_VT6102_A ? MIIF_DOPAUSE : 0);
        if (error != 0) {
                device_printf(dev, "attaching PHYs failed\n");
                goto fail;
        }

        /* Call MI attach routine. */
        ether_ifattach(ifp, eaddr);
        /*
         * Tell the upper layer(s) we support long frames.
         * Must appear after the call to ether_ifattach() because
         * ether_ifattach() sets ifi_hdrlen to the default value.
         */
        ifp->if_hdrlen = sizeof(struct ether_vlan_header);

        /* Hook interrupt last to avoid having to lock softc. */
        error = bus_setup_intr(dev, sc->vr_irq, INTR_TYPE_NET | INTR_MPSAFE,
            vr_intr, NULL, sc, &sc->vr_intrhand);

        if (error) {
                device_printf(dev, "couldn't set up irq\n");
                ether_ifdetach(ifp);
                goto fail;
        }

fail:
        if (error)
                vr_detach(dev);

        return (error);
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
vr_detach(device_t dev)
{
        struct vr_softc *sc = device_get_softc(dev);
        struct ifnet *ifp = sc->vr_ifp;

        KASSERT(mtx_initialized(&sc->vr_mtx), ("vr mutex not initialized"));

#ifdef DEVICE_POLLING
        if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING)
                ether_poll_deregister(ifp);
#endif

        /* These should only be active if attach succeeded. */
        if (device_is_attached(dev)) {
                VR_LOCK(sc);
                sc->vr_flags |= VR_F_DETACHED;
                vr_stop(sc);
                VR_UNLOCK(sc);
                callout_drain(&sc->vr_stat_callout);
                taskqueue_drain(taskqueue_fast, &sc->vr_inttask);
                ether_ifdetach(ifp);
        }
        if (sc->vr_miibus)
                device_delete_child(dev, sc->vr_miibus);
        bus_generic_detach(dev);

        if (sc->vr_intrhand)
                bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
        if (sc->vr_irq)
                bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
        if (sc->vr_res)
                bus_release_resource(dev, sc->vr_res_type, sc->vr_res_id,
                    sc->vr_res);

        if (ifp)
                if_free(ifp);

        vr_dma_free(sc);

        mtx_destroy(&sc->vr_mtx);

        return (0);
}

struct vr_dmamap_arg {
        bus_addr_t vr_busaddr;
};

static void
vr_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
        struct vr_dmamap_arg *ctx;

        if (error != 0)
                return;
        ctx = arg;
        ctx->vr_busaddr = segs[0].ds_addr;
}

static int
vr_dma_alloc(struct vr_softc *sc)
{
        struct vr_dmamap_arg ctx;
        struct vr_txdesc *txd;
        struct vr_rxdesc *rxd;
        bus_size_t tx_alignment;
        int error, i;

        /* Create parent DMA tag. */
        error = bus_dma_tag_create(
            bus_get_dma_tag(sc->vr_dev),	/* parent */
            1, 0,			/* alignment, boundary */
            BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
            BUS_SPACE_MAXADDR,		/* highaddr */
            NULL, NULL,			/* filter, filterarg */
            BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
            0,				/* nsegments */
            BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
            0,				/* flags */
            NULL, NULL,			/* lockfunc, lockarg */
            &sc->vr_cdata.vr_parent_tag);
        if (error != 0) {
                device_printf(sc->vr_dev, "failed to create parent DMA tag\n");
                goto fail;
        }
        /* Create tag for Tx ring. */
        error = bus_dma_tag_create(
            sc->vr_cdata.vr_parent_tag,	/* parent */
            VR_RING_ALIGN, 0,		/* alignment, boundary */
            BUS_SPACE_MAXADDR,		/* lowaddr */
            BUS_SPACE_MAXADDR,		/* highaddr */
            NULL, NULL,			/* filter, filterarg */
            VR_TX_RING_SIZE,		/* maxsize */
            1,				/* nsegments */
            VR_TX_RING_SIZE,		/* maxsegsize */
            0,				/* flags */
            NULL, NULL,			/* lockfunc, lockarg */
            &sc->vr_cdata.vr_tx_ring_tag);
        if (error != 0) {
                device_printf(sc->vr_dev, "failed to create Tx ring DMA tag\n");
                goto fail;
        }

        /* Create tag for Rx ring. */
        error = bus_dma_tag_create(
            sc->vr_cdata.vr_parent_tag,	/* parent */
            VR_RING_ALIGN, 0,		/* alignment, boundary */
            BUS_SPACE_MAXADDR,		/* lowaddr */
            BUS_SPACE_MAXADDR,		/* highaddr */
            NULL, NULL,			/* filter, filterarg */
            VR_RX_RING_SIZE,		/* maxsize */
            1,				/* nsegments */
            VR_RX_RING_SIZE,		/* maxsegsize */
            0,				/* flags */
            NULL, NULL,			/* lockfunc, lockarg */
            &sc->vr_cdata.vr_rx_ring_tag);
        if (error != 0) {
                device_printf(sc->vr_dev, "failed to create Rx ring DMA tag\n");
                goto fail;
        }

        if ((sc->vr_quirks & VR_Q_NEEDALIGN) != 0)
                tx_alignment = sizeof(uint32_t);
        else
                tx_alignment = 1;
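        /*
         * The stricter Tx alignment for VR_Q_NEEDALIGN parts mirrors
         * the m_defrag() copy done in vr_encap(); busdma can then
         * bounce any segment that still violates the constraint.
         */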
        /* Create tag for Tx buffers. */
        error = bus_dma_tag_create(
            sc->vr_cdata.vr_parent_tag,	/* parent */
            tx_alignment, 0,		/* alignment, boundary */
            BUS_SPACE_MAXADDR,		/* lowaddr */
            BUS_SPACE_MAXADDR,		/* highaddr */
            NULL, NULL,			/* filter, filterarg */
            MCLBYTES * VR_MAXFRAGS,	/* maxsize */
            VR_MAXFRAGS,		/* nsegments */
            MCLBYTES,			/* maxsegsize */
            0,				/* flags */
            NULL, NULL,			/* lockfunc, lockarg */
            &sc->vr_cdata.vr_tx_tag);
        if (error != 0) {
                device_printf(sc->vr_dev, "failed to create Tx DMA tag\n");
                goto fail;
        }

        /* Create tag for Rx buffers. */
        error = bus_dma_tag_create(
            sc->vr_cdata.vr_parent_tag,	/* parent */
            VR_RX_ALIGN, 0,		/* alignment, boundary */
            BUS_SPACE_MAXADDR,		/* lowaddr */
            BUS_SPACE_MAXADDR,		/* highaddr */
            NULL, NULL,			/* filter, filterarg */
            MCLBYTES,			/* maxsize */
            1,				/* nsegments */
            MCLBYTES,			/* maxsegsize */
            0,				/* flags */
            NULL, NULL,			/* lockfunc, lockarg */
            &sc->vr_cdata.vr_rx_tag);
        if (error != 0) {
                device_printf(sc->vr_dev, "failed to create Rx DMA tag\n");
                goto fail;
        }

        /* Allocate DMA'able memory and load the DMA map for Tx ring. */
        error = bus_dmamem_alloc(sc->vr_cdata.vr_tx_ring_tag,
            (void **)&sc->vr_rdata.vr_tx_ring, BUS_DMA_WAITOK |
            BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->vr_cdata.vr_tx_ring_map);
        if (error != 0) {
                device_printf(sc->vr_dev,
                    "failed to allocate DMA'able memory for Tx ring\n");
                goto fail;
        }

        ctx.vr_busaddr = 0;
        error = bus_dmamap_load(sc->vr_cdata.vr_tx_ring_tag,
            sc->vr_cdata.vr_tx_ring_map, sc->vr_rdata.vr_tx_ring,
            VR_TX_RING_SIZE, vr_dmamap_cb, &ctx, 0);
        if (error != 0 || ctx.vr_busaddr == 0) {
                device_printf(sc->vr_dev,
                    "failed to load DMA'able memory for Tx ring\n");
                goto fail;
        }
        sc->vr_rdata.vr_tx_ring_paddr = ctx.vr_busaddr;

        /* Allocate DMA'able memory and load the DMA map for Rx ring. */
        error = bus_dmamem_alloc(sc->vr_cdata.vr_rx_ring_tag,
            (void **)&sc->vr_rdata.vr_rx_ring, BUS_DMA_WAITOK |
            BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->vr_cdata.vr_rx_ring_map);
        if (error != 0) {
                device_printf(sc->vr_dev,
                    "failed to allocate DMA'able memory for Rx ring\n");
                goto fail;
        }

        ctx.vr_busaddr = 0;
        error = bus_dmamap_load(sc->vr_cdata.vr_rx_ring_tag,
            sc->vr_cdata.vr_rx_ring_map, sc->vr_rdata.vr_rx_ring,
            VR_RX_RING_SIZE, vr_dmamap_cb, &ctx, 0);
        if (error != 0 || ctx.vr_busaddr == 0) {
                device_printf(sc->vr_dev,
                    "failed to load DMA'able memory for Rx ring\n");
                goto fail;
        }
        sc->vr_rdata.vr_rx_ring_paddr = ctx.vr_busaddr;

        /* Create DMA maps for Tx buffers. */
        for (i = 0; i < VR_TX_RING_CNT; i++) {
                txd = &sc->vr_cdata.vr_txdesc[i];
                txd->tx_m = NULL;
                txd->tx_dmamap = NULL;
                error = bus_dmamap_create(sc->vr_cdata.vr_tx_tag, 0,
                    &txd->tx_dmamap);
                if (error != 0) {
                        device_printf(sc->vr_dev,
                            "failed to create Tx dmamap\n");
                        goto fail;
                }
        }
        /* Create DMA maps for Rx buffers. */
        if ((error = bus_dmamap_create(sc->vr_cdata.vr_rx_tag, 0,
            &sc->vr_cdata.vr_rx_sparemap)) != 0) {
                device_printf(sc->vr_dev,
                    "failed to create spare Rx dmamap\n");
                goto fail;
        }
        for (i = 0; i < VR_RX_RING_CNT; i++) {
                rxd = &sc->vr_cdata.vr_rxdesc[i];
                rxd->rx_m = NULL;
                rxd->rx_dmamap = NULL;
                error = bus_dmamap_create(sc->vr_cdata.vr_rx_tag, 0,
                    &rxd->rx_dmamap);
                if (error != 0) {
                        device_printf(sc->vr_dev,
                            "failed to create Rx dmamap\n");
                        goto fail;
                }
        }

fail:
        return (error);
}

static void
vr_dma_free(struct vr_softc *sc)
{
        struct vr_txdesc *txd;
        struct vr_rxdesc *rxd;
        int i;

        /* Tx ring. */
        if (sc->vr_cdata.vr_tx_ring_tag) {
                if (sc->vr_rdata.vr_tx_ring_paddr)
                        bus_dmamap_unload(sc->vr_cdata.vr_tx_ring_tag,
                            sc->vr_cdata.vr_tx_ring_map);
                if (sc->vr_rdata.vr_tx_ring)
                        bus_dmamem_free(sc->vr_cdata.vr_tx_ring_tag,
                            sc->vr_rdata.vr_tx_ring,
                            sc->vr_cdata.vr_tx_ring_map);
                sc->vr_rdata.vr_tx_ring = NULL;
                sc->vr_rdata.vr_tx_ring_paddr = 0;
                bus_dma_tag_destroy(sc->vr_cdata.vr_tx_ring_tag);
                sc->vr_cdata.vr_tx_ring_tag = NULL;
        }
        /* Rx ring. */
        if (sc->vr_cdata.vr_rx_ring_tag) {
                if (sc->vr_rdata.vr_rx_ring_paddr)
                        bus_dmamap_unload(sc->vr_cdata.vr_rx_ring_tag,
                            sc->vr_cdata.vr_rx_ring_map);
                if (sc->vr_rdata.vr_rx_ring)
                        bus_dmamem_free(sc->vr_cdata.vr_rx_ring_tag,
                            sc->vr_rdata.vr_rx_ring,
                            sc->vr_cdata.vr_rx_ring_map);
                sc->vr_rdata.vr_rx_ring = NULL;
                sc->vr_rdata.vr_rx_ring_paddr = 0;
                bus_dma_tag_destroy(sc->vr_cdata.vr_rx_ring_tag);
                sc->vr_cdata.vr_rx_ring_tag = NULL;
        }
        /* Tx buffers. */
        if (sc->vr_cdata.vr_tx_tag) {
                for (i = 0; i < VR_TX_RING_CNT; i++) {
                        txd = &sc->vr_cdata.vr_txdesc[i];
                        if (txd->tx_dmamap) {
                                bus_dmamap_destroy(sc->vr_cdata.vr_tx_tag,
                                    txd->tx_dmamap);
                                txd->tx_dmamap = NULL;
                        }
                }
                bus_dma_tag_destroy(sc->vr_cdata.vr_tx_tag);
                sc->vr_cdata.vr_tx_tag = NULL;
        }
        /* Rx buffers. */
        if (sc->vr_cdata.vr_rx_tag) {
                for (i = 0; i < VR_RX_RING_CNT; i++) {
                        rxd = &sc->vr_cdata.vr_rxdesc[i];
                        if (rxd->rx_dmamap) {
                                bus_dmamap_destroy(sc->vr_cdata.vr_rx_tag,
                                    rxd->rx_dmamap);
                                rxd->rx_dmamap = NULL;
                        }
                }
                if (sc->vr_cdata.vr_rx_sparemap) {
                        bus_dmamap_destroy(sc->vr_cdata.vr_rx_tag,
                            sc->vr_cdata.vr_rx_sparemap);
                        sc->vr_cdata.vr_rx_sparemap = 0;
                }
                bus_dma_tag_destroy(sc->vr_cdata.vr_rx_tag);
                sc->vr_cdata.vr_rx_tag = NULL;
        }

        if (sc->vr_cdata.vr_parent_tag) {
                bus_dma_tag_destroy(sc->vr_cdata.vr_parent_tag);
                sc->vr_cdata.vr_parent_tag = NULL;
        }
}
/*
 * Initialize the transmit descriptors.
 */
static int
vr_tx_ring_init(struct vr_softc *sc)
{
        struct vr_ring_data *rd;
        struct vr_txdesc *txd;
        bus_addr_t addr;
        int i;

        sc->vr_cdata.vr_tx_prod = 0;
        sc->vr_cdata.vr_tx_cons = 0;
        sc->vr_cdata.vr_tx_cnt = 0;
        sc->vr_cdata.vr_tx_pkts = 0;

        rd = &sc->vr_rdata;
        bzero(rd->vr_tx_ring, VR_TX_RING_SIZE);
        for (i = 0; i < VR_TX_RING_CNT; i++) {
                if (i == VR_TX_RING_CNT - 1)
                        addr = VR_TX_RING_ADDR(sc, 0);
                else
                        addr = VR_TX_RING_ADDR(sc, i + 1);
                rd->vr_tx_ring[i].vr_nextphys = htole32(VR_ADDR_LO(addr));
                txd = &sc->vr_cdata.vr_txdesc[i];
                txd->tx_m = NULL;
        }

        bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
            sc->vr_cdata.vr_tx_ring_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        return (0);
}

/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int
vr_rx_ring_init(struct vr_softc *sc)
{
        struct vr_ring_data *rd;
        struct vr_rxdesc *rxd;
        bus_addr_t addr;
        int i;

        sc->vr_cdata.vr_rx_cons = 0;

        rd = &sc->vr_rdata;
        bzero(rd->vr_rx_ring, VR_RX_RING_SIZE);
        for (i = 0; i < VR_RX_RING_CNT; i++) {
                rxd = &sc->vr_cdata.vr_rxdesc[i];
                rxd->rx_m = NULL;
                rxd->desc = &rd->vr_rx_ring[i];
                if (i == VR_RX_RING_CNT - 1)
                        addr = VR_RX_RING_ADDR(sc, 0);
                else
                        addr = VR_RX_RING_ADDR(sc, i + 1);
                rd->vr_rx_ring[i].vr_nextphys = htole32(VR_ADDR_LO(addr));
                if (vr_newbuf(sc, i) != 0)
                        return (ENOBUFS);
        }

        bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag,
            sc->vr_cdata.vr_rx_ring_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        return (0);
}

static __inline void
vr_discard_rxbuf(struct vr_rxdesc *rxd)
{
        struct vr_desc *desc;

        desc = rxd->desc;
        desc->vr_ctl = htole32(VR_RXCTL | (MCLBYTES - sizeof(uint64_t)));
        desc->vr_status = htole32(VR_RXSTAT_OWN);
}
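/*
 * With the usual 2 KB clusters the length programmed above is
 * MCLBYTES - sizeof(uint64_t) = 2040 bytes, which matches the 8 bytes
 * vr_newbuf() trims with m_adj() and stays within the 2047-byte limit
 * of the descriptor's 11-bit length field described below.
 */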
/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 * Note: the length fields are only 11 bits wide, which means the
 * largest size we can specify is 2047. This is important because
 * MCLBYTES is 2048, so we have to subtract one otherwise we'll
 * overflow the field and make a mess.
 */
static int
vr_newbuf(struct vr_softc *sc, int idx)
{
        struct vr_desc *desc;
        struct vr_rxdesc *rxd;
        struct mbuf *m;
        bus_dma_segment_t segs[1];
        bus_dmamap_t map;
        int nsegs;

        m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
        if (m == NULL)
                return (ENOBUFS);
        m->m_len = m->m_pkthdr.len = MCLBYTES;
        m_adj(m, sizeof(uint64_t));

        if (bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_rx_tag,
            sc->vr_cdata.vr_rx_sparemap, m, segs, &nsegs, 0) != 0) {
                m_freem(m);
                return (ENOBUFS);
        }
        KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

        rxd = &sc->vr_cdata.vr_rxdesc[idx];
        if (rxd->rx_m != NULL) {
                bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap,
                    BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap);
        }
        map = rxd->rx_dmamap;
        rxd->rx_dmamap = sc->vr_cdata.vr_rx_sparemap;
        sc->vr_cdata.vr_rx_sparemap = map;
        bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap,
            BUS_DMASYNC_PREREAD);
        rxd->rx_m = m;
        desc = rxd->desc;
        desc->vr_data = htole32(VR_ADDR_LO(segs[0].ds_addr));
        desc->vr_ctl = htole32(VR_RXCTL | segs[0].ds_len);
        desc->vr_status = htole32(VR_RXSTAT_OWN);

        return (0);
}

#ifndef __NO_STRICT_ALIGNMENT
static __inline void
vr_fixup_rx(struct mbuf *m)
{
        uint16_t *src, *dst;
        int i;

        src = mtod(m, uint16_t *);
        dst = src - 1;

        for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
                *dst++ = *src++;

        m->m_data -= ETHER_ALIGN;
}
#endif
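/*
 * vr_newbuf() starts each Rx buffer on an 8-byte boundary, so on
 * strict-alignment machines the 14-byte Ethernet header leaves the
 * IP header that follows it misaligned. vr_fixup_rx() slides the
 * whole frame down by ETHER_ALIGN (2) bytes using 16-bit moves and
 * adjusts m_data to match, realigning the payload.
 */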
/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static int
vr_rxeof(struct vr_softc *sc)
{
        struct vr_rxdesc *rxd;
        struct mbuf *m;
        struct ifnet *ifp;
        struct vr_desc *cur_rx;
        int cons, prog, total_len, rx_npkts;
        uint32_t rxstat, rxctl;

        VR_LOCK_ASSERT(sc);
        ifp = sc->vr_ifp;
        cons = sc->vr_cdata.vr_rx_cons;
        rx_npkts = 0;

        bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag,
            sc->vr_cdata.vr_rx_ring_map,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

        for (prog = 0; prog < VR_RX_RING_CNT; VR_INC(cons, VR_RX_RING_CNT)) {
#ifdef DEVICE_POLLING
                if (ifp->if_capenable & IFCAP_POLLING) {
                        if (sc->rxcycles <= 0)
                                break;
                        sc->rxcycles--;
                }
#endif
                cur_rx = &sc->vr_rdata.vr_rx_ring[cons];
                rxstat = le32toh(cur_rx->vr_status);
                rxctl = le32toh(cur_rx->vr_ctl);
                if ((rxstat & VR_RXSTAT_OWN) == VR_RXSTAT_OWN)
                        break;

                prog++;
                rxd = &sc->vr_cdata.vr_rxdesc[cons];
                m = rxd->rx_m;

                /*
                 * If an error occurs, update stats, clear the
                 * status word and leave the mbuf cluster in place:
                 * it should simply get re-used next time this descriptor
                 * comes up in the ring.
                 * We don't support scatter/gather in the Rx path yet,
                 * so partial frames are discarded as well.
                 */
                if ((rxstat & VR_RXSTAT_RX_OK) == 0 ||
                    (rxstat & (VR_RXSTAT_FIRSTFRAG | VR_RXSTAT_LASTFRAG)) !=
                    (VR_RXSTAT_FIRSTFRAG | VR_RXSTAT_LASTFRAG)) {
                        if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
                        sc->vr_stat.rx_errors++;
                        if (rxstat & VR_RXSTAT_CRCERR)
                                sc->vr_stat.rx_crc_errors++;
                        if (rxstat & VR_RXSTAT_FRAMEALIGNERR)
                                sc->vr_stat.rx_alignment++;
                        if (rxstat & VR_RXSTAT_FIFOOFLOW)
                                sc->vr_stat.rx_fifo_overflows++;
                        if (rxstat & VR_RXSTAT_GIANT)
                                sc->vr_stat.rx_giants++;
                        if (rxstat & VR_RXSTAT_RUNT)
                                sc->vr_stat.rx_runts++;
                        if (rxstat & VR_RXSTAT_BUFFERR)
                                sc->vr_stat.rx_no_buffers++;
#ifdef VR_SHOW_ERRORS
                        device_printf(sc->vr_dev, "%s: receive error = 0x%b\n",
                            __func__, rxstat & 0xff, VR_RXSTAT_ERR_BITS);
#endif
                        vr_discard_rxbuf(rxd);
                        continue;
                }

                if (vr_newbuf(sc, cons) != 0) {
                        if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
                        sc->vr_stat.rx_errors++;
                        sc->vr_stat.rx_no_mbufs++;
                        vr_discard_rxbuf(rxd);
                        continue;
                }

                /*
                 * XXX The VIA Rhine chip includes the CRC with every
                 * received frame, and there's no way to turn this
                 * behavior off (at least, I can't find anything in
                 * the manual that explains how to do it) so we have
                 * to trim off the CRC manually.
                 */
                total_len = VR_RXBYTES(rxstat);
                total_len -= ETHER_CRC_LEN;
                m->m_pkthdr.len = m->m_len = total_len;
#ifndef __NO_STRICT_ALIGNMENT
                /*
                 * RX buffers must be 32-bit aligned.
                 * Ignore the alignment problems on the non-strict alignment
                 * platform. The performance hit incurred due to unaligned
                 * accesses is much smaller than the hit produced by forcing
                 * buffer copies all the time.
                 */
                vr_fixup_rx(m);
#endif
                m->m_pkthdr.rcvif = ifp;
                if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
                sc->vr_stat.rx_ok++;
                if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 &&
                    (rxstat & VR_RXSTAT_FRAG) == 0 &&
                    (rxctl & VR_RXCTL_IP) != 0) {
                        /* Checksum is valid for non-fragmented IP packets. */
                        m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
                        if ((rxctl & VR_RXCTL_IPOK) == VR_RXCTL_IPOK) {
                                m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
                                if (rxctl & (VR_RXCTL_TCP | VR_RXCTL_UDP)) {
                                        m->m_pkthdr.csum_flags |=
                                            CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
                                        if ((rxctl & VR_RXCTL_TCPUDPOK) != 0)
                                                m->m_pkthdr.csum_data = 0xffff;
                                }
                        }
                }
                VR_UNLOCK(sc);
                (*ifp->if_input)(ifp, m);
                VR_LOCK(sc);
                rx_npkts++;
        }

        if (prog > 0) {
                /*
                 * Let the controller know how many Rx buffers have
                 * been posted, but avoid the expensive register access
                 * if Tx pause capability was not negotiated with the
                 * link partner.
                 */
                if ((sc->vr_flags & VR_F_TXPAUSE) != 0) {
                        if (prog >= VR_RX_RING_CNT)
                                prog = VR_RX_RING_CNT - 1;
                        CSR_WRITE_1(sc, VR_FLOWCR0, prog);
                }
                sc->vr_cdata.vr_rx_cons = cons;
                bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag,
                    sc->vr_cdata.vr_rx_ring_map,
                    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        }
        return (rx_npkts);
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
static void
vr_txeof(struct vr_softc *sc)
{
        struct vr_txdesc *txd;
        struct vr_desc *cur_tx;
        struct ifnet *ifp;
        uint32_t txctl, txstat;
        int cons, prod;

        VR_LOCK_ASSERT(sc);

        cons = sc->vr_cdata.vr_tx_cons;
        prod = sc->vr_cdata.vr_tx_prod;
        if (cons == prod)
                return;

        bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
            sc->vr_cdata.vr_tx_ring_map,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

        ifp = sc->vr_ifp;
        /*
         * Go through our tx list and free mbufs for those
         * frames that have been transmitted.
         */
        for (; cons != prod; VR_INC(cons, VR_TX_RING_CNT)) {
                cur_tx = &sc->vr_rdata.vr_tx_ring[cons];
                txctl = le32toh(cur_tx->vr_ctl);
                txstat = le32toh(cur_tx->vr_status);
                if ((txstat & VR_TXSTAT_OWN) == VR_TXSTAT_OWN)
                        break;

                sc->vr_cdata.vr_tx_cnt--;
                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
                /* Only the first descriptor in the chain is valid. */
                if ((txctl & VR_TXCTL_FIRSTFRAG) == 0)
                        continue;

                txd = &sc->vr_cdata.vr_txdesc[cons];
                KASSERT(txd->tx_m != NULL, ("%s: accessing NULL mbuf!\n",
                    __func__));

                if ((txstat & VR_TXSTAT_ERRSUM) != 0) {
                        if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
                        sc->vr_stat.tx_errors++;
                        if ((txstat & VR_TXSTAT_ABRT) != 0) {
                                /* Give up and restart Tx. */
                                sc->vr_stat.tx_abort++;
                                bus_dmamap_sync(sc->vr_cdata.vr_tx_tag,
                                    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
                                bus_dmamap_unload(sc->vr_cdata.vr_tx_tag,
                                    txd->tx_dmamap);
                                m_freem(txd->tx_m);
                                txd->tx_m = NULL;
                                VR_INC(cons, VR_TX_RING_CNT);
                                sc->vr_cdata.vr_tx_cons = cons;
                                if (vr_tx_stop(sc) != 0) {
                                        device_printf(sc->vr_dev,
                                            "%s: Tx shutdown error -- "
                                            "resetting\n", __func__);
                                        sc->vr_flags |= VR_F_RESTART;
                                        return;
                                }
                                vr_tx_start(sc);
                                break;
                        }
                        if ((sc->vr_revid < REV_ID_VT3071_A &&
                            (txstat & VR_TXSTAT_UNDERRUN)) ||
                            (txstat & (VR_TXSTAT_UDF | VR_TXSTAT_TBUFF))) {
                                sc->vr_stat.tx_underrun++;
                                /* Retry and restart Tx. */
                                sc->vr_cdata.vr_tx_cnt++;
                                sc->vr_cdata.vr_tx_cons = cons;
                                cur_tx->vr_status = htole32(VR_TXSTAT_OWN);
                                bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
                                    sc->vr_cdata.vr_tx_ring_map,
                                    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
                                vr_tx_underrun(sc);
                                return;
                        }
                        if ((txstat & VR_TXSTAT_DEFER) != 0) {
                                if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
                                sc->vr_stat.tx_collisions++;
                        }
                        if ((txstat & VR_TXSTAT_LATECOLL) != 0) {
                                if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
                                sc->vr_stat.tx_late_collisions++;
                        }
                } else {
                        sc->vr_stat.tx_ok++;
                        if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
                }

                bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap,
                    BUS_DMASYNC_POSTWRITE);
                bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap);
                if (sc->vr_revid < REV_ID_VT3071_A) {
                        if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
                            (txstat & VR_TXSTAT_COLLCNT) >> 3);
                        sc->vr_stat.tx_collisions +=
                            (txstat & VR_TXSTAT_COLLCNT) >> 3;
                } else {
                        if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
                            (txstat & 0x0f));
                        sc->vr_stat.tx_collisions += (txstat & 0x0f);
                }
                m_freem(txd->tx_m);
                txd->tx_m = NULL;
        }

        sc->vr_cdata.vr_tx_cons = cons;
        if (sc->vr_cdata.vr_tx_cnt == 0)
                sc->vr_watchdog_timer = 0;
}

static void
vr_tick(void *xsc)
{
        struct vr_softc *sc;
        struct mii_data *mii;

        sc = (struct vr_softc *)xsc;

        VR_LOCK_ASSERT(sc);

        if ((sc->vr_flags & VR_F_RESTART) != 0) {
                device_printf(sc->vr_dev, "restarting\n");
                sc->vr_stat.num_restart++;
                sc->vr_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
                vr_init_locked(sc);
                sc->vr_flags &= ~VR_F_RESTART;
        }

        mii = device_get_softc(sc->vr_miibus);
        mii_tick(mii);
        if ((sc->vr_flags & VR_F_LINK) == 0)
                vr_miibus_statchg(sc->vr_dev);
        vr_watchdog(sc);
        callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc);
}
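/*
 * vr_tick() runs once a second with the softc lock held. Besides
 * driving mii_tick() and the transmit watchdog, it is where fatal
 * errors flagged with VR_F_RESTART from interrupt context are
 * finally serviced with a full reinitialization.
 */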
#ifdef DEVICE_POLLING
static poll_handler_t vr_poll;
static poll_handler_t vr_poll_locked;

static int
vr_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
        struct vr_softc *sc;
        int rx_npkts;

        sc = ifp->if_softc;
        rx_npkts = 0;

        VR_LOCK(sc);
        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
                rx_npkts = vr_poll_locked(ifp, cmd, count);
        VR_UNLOCK(sc);
        return (rx_npkts);
}

static int
vr_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
        struct vr_softc *sc;
        int rx_npkts;

        sc = ifp->if_softc;

        VR_LOCK_ASSERT(sc);

        sc->rxcycles = count;
        rx_npkts = vr_rxeof(sc);
        vr_txeof(sc);
        if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
                vr_start_locked(ifp);

        if (cmd == POLL_AND_CHECK_STATUS) {
                uint16_t status;

                /* Also check status register. */
                status = CSR_READ_2(sc, VR_ISR);
                if (status)
                        CSR_WRITE_2(sc, VR_ISR, status);

                if ((status & VR_INTRS) == 0)
                        return (rx_npkts);

                if ((status & (VR_ISR_BUSERR | VR_ISR_LINKSTAT2 |
                    VR_ISR_STATSOFLOW)) != 0) {
                        if (vr_error(sc, status) != 0)
                                return (rx_npkts);
                }
                if ((status & (VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW)) != 0) {
#ifdef VR_SHOW_ERRORS
                        device_printf(sc->vr_dev, "%s: receive error : 0x%b\n",
                            __func__, status, VR_ISR_ERR_BITS);
#endif
                        vr_rx_start(sc);
                }
        }
        return (rx_npkts);
}
#endif /* DEVICE_POLLING */

/* Back off the transmit threshold. */
static void
vr_tx_underrun(struct vr_softc *sc)
{
        int thresh;

        device_printf(sc->vr_dev, "Tx underrun -- ");
        if (sc->vr_txthresh < VR_TXTHRESH_MAX) {
                thresh = sc->vr_txthresh;
                sc->vr_txthresh++;
                if (sc->vr_txthresh >= VR_TXTHRESH_MAX) {
                        sc->vr_txthresh = VR_TXTHRESH_MAX;
                        printf("using store and forward mode\n");
                } else
                        printf("increasing Tx threshold(%d -> %d)\n",
                            vr_tx_threshold_tables[thresh].value,
                            vr_tx_threshold_tables[thresh + 1].value);
        } else
                printf("\n");
        sc->vr_stat.tx_underrun++;
        if (vr_tx_stop(sc) != 0) {
                device_printf(sc->vr_dev, "%s: Tx shutdown error -- "
                    "resetting\n", __func__);
                sc->vr_flags |= VR_F_RESTART;
                return;
        }
        vr_tx_start(sc);
}

static int
vr_intr(void *arg)
{
        struct vr_softc *sc;
        uint16_t status;

        sc = (struct vr_softc *)arg;

        status = CSR_READ_2(sc, VR_ISR);
        if (status == 0 || status == 0xffff || (status & VR_INTRS) == 0)
                return (FILTER_STRAY);

        /* Disable interrupts. */
        CSR_WRITE_2(sc, VR_IMR, 0x0000);

        taskqueue_enqueue(taskqueue_fast, &sc->vr_inttask);

        return (FILTER_HANDLED);
}
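/*
 * vr_intr() is registered as an interrupt filter: it runs in primary
 * interrupt context, so it only checks that the interrupt is ours,
 * masks further interrupts via VR_IMR, and defers the real work to
 * vr_int_task() on the fast taskqueue.
 */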
static void
vr_int_task(void *arg, int npending)
{
        struct vr_softc *sc;
        struct ifnet *ifp;
        uint16_t status;

        sc = (struct vr_softc *)arg;

        VR_LOCK(sc);

        if ((sc->vr_flags & VR_F_SUSPENDED) != 0)
                goto done_locked;

        status = CSR_READ_2(sc, VR_ISR);
        ifp = sc->vr_ifp;
#ifdef DEVICE_POLLING
        if ((ifp->if_capenable & IFCAP_POLLING) != 0)
                goto done_locked;
#endif

        /* Suppress unwanted interrupts. */
        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
            (sc->vr_flags & VR_F_RESTART) != 0) {
                CSR_WRITE_2(sc, VR_IMR, 0);
                CSR_WRITE_2(sc, VR_ISR, status);
                goto done_locked;
        }

        for (; (status & VR_INTRS) != 0;) {
                CSR_WRITE_2(sc, VR_ISR, status);
                if ((status & (VR_ISR_BUSERR | VR_ISR_LINKSTAT2 |
                    VR_ISR_STATSOFLOW)) != 0) {
                        if (vr_error(sc, status) != 0) {
                                VR_UNLOCK(sc);
                                return;
                        }
                }
                vr_rxeof(sc);
                if ((status & (VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW)) != 0) {
#ifdef VR_SHOW_ERRORS
                        device_printf(sc->vr_dev, "%s: receive error = 0x%b\n",
                            __func__, status, VR_ISR_ERR_BITS);
#endif
                        /* Restart Rx if the RxDMA state machine stopped. */
                        vr_rx_start(sc);
                }
                vr_txeof(sc);

                if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
                        vr_start_locked(ifp);

                status = CSR_READ_2(sc, VR_ISR);
        }

        /* Re-enable interrupts. */
        CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

done_locked:
        VR_UNLOCK(sc);
}

static int
vr_error(struct vr_softc *sc, uint16_t status)
{
        uint16_t pcis;

        status &= VR_ISR_BUSERR | VR_ISR_LINKSTAT2 | VR_ISR_STATSOFLOW;
        if ((status & VR_ISR_BUSERR) != 0) {
                status &= ~VR_ISR_BUSERR;
                sc->vr_stat.bus_errors++;
                /* Disable further interrupts. */
                CSR_WRITE_2(sc, VR_IMR, 0);
                pcis = pci_read_config(sc->vr_dev, PCIR_STATUS, 2);
                device_printf(sc->vr_dev, "PCI bus error(0x%04x) -- "
                    "resetting\n", pcis);
                pci_write_config(sc->vr_dev, PCIR_STATUS, pcis, 2);
                sc->vr_flags |= VR_F_RESTART;
                return (EAGAIN);
        }
        if ((status & VR_ISR_LINKSTAT2) != 0) {
                /* Link state change, duplex changes etc. */
                status &= ~VR_ISR_LINKSTAT2;
        }
        if ((status & VR_ISR_STATSOFLOW) != 0) {
                status &= ~VR_ISR_STATSOFLOW;
                if (sc->vr_revid >= REV_ID_VT6105M_A0) {
                        /* Update MIB counters. */
                }
        }

        if (status != 0)
                device_printf(sc->vr_dev,
                    "unhandled interrupt, status = 0x%04x\n", status);
        return (0);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
vr_encap(struct vr_softc *sc, struct mbuf **m_head)
{
        struct vr_txdesc *txd;
        struct vr_desc *desc;
        struct mbuf *m;
        bus_dma_segment_t txsegs[VR_MAXFRAGS];
        uint32_t csum_flags, txctl;
        int error, i, nsegs, prod, si;
        int padlen;

        VR_LOCK_ASSERT(sc);

        M_ASSERTPKTHDR((*m_head));

        /*
         * Some VIA Rhine chips want packet buffers to be longword
         * aligned, but very often our mbufs aren't. Rather than
         * waste time trying to decide when to copy and when not
         * to copy, just do it all the time.
         */
        if ((sc->vr_quirks & VR_Q_NEEDALIGN) != 0) {
                m = m_defrag(*m_head, M_NOWAIT);
                if (m == NULL) {
                        m_freem(*m_head);
                        *m_head = NULL;
                        return (ENOBUFS);
                }
                *m_head = m;
        }

        /*
         * The Rhine chip doesn't auto-pad, so we have to make
         * sure to pad short frames out to the minimum frame length
         * ourselves.
         */
        if ((*m_head)->m_pkthdr.len < VR_MIN_FRAMELEN) {
                m = *m_head;
                padlen = VR_MIN_FRAMELEN - m->m_pkthdr.len;
                if (M_WRITABLE(m) == 0) {
                        /* Get a writable copy. */
                        m = m_dup(*m_head, M_NOWAIT);
                        m_freem(*m_head);
                        if (m == NULL) {
                                *m_head = NULL;
                                return (ENOBUFS);
                        }
                        *m_head = m;
                }
                if (m->m_next != NULL || M_TRAILINGSPACE(m) < padlen) {
                        m = m_defrag(m, M_NOWAIT);
                        if (m == NULL) {
                                m_freem(*m_head);
                                *m_head = NULL;
                                return (ENOBUFS);
                        }
                }
                /*
                 * Manually pad short frames, and zero the pad space
                 * to avoid leaking data.
                 */
                bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
                m->m_pkthdr.len += padlen;
                m->m_len = m->m_pkthdr.len;
                *m_head = m;
        }

        prod = sc->vr_cdata.vr_tx_prod;
        txd = &sc->vr_cdata.vr_txdesc[prod];
        error = bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap,
            *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
        if (error == EFBIG) {
                m = m_collapse(*m_head, M_NOWAIT, VR_MAXFRAGS);
                if (m == NULL) {
                        m_freem(*m_head);
                        *m_head = NULL;
                        return (ENOBUFS);
                }
                *m_head = m;
                error = bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_tx_tag,
                    txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
                if (error != 0) {
                        m_freem(*m_head);
                        *m_head = NULL;
                        return (error);
                }
        } else if (error != 0)
                return (error);
        if (nsegs == 0) {
                m_freem(*m_head);
                *m_head = NULL;
                return (EIO);
        }

        /* Check number of available descriptors. */
        if (sc->vr_cdata.vr_tx_cnt + nsegs >= (VR_TX_RING_CNT - 1)) {
                bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap);
                return (ENOBUFS);
        }

        txd->tx_m = *m_head;
        bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap,
            BUS_DMASYNC_PREWRITE);

        /* Set checksum offload. */
        csum_flags = 0;
        if (((*m_head)->m_pkthdr.csum_flags & VR_CSUM_FEATURES) != 0) {
                if ((*m_head)->m_pkthdr.csum_flags & CSUM_IP)
                        csum_flags |= VR_TXCTL_IPCSUM;
                if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP)
                        csum_flags |= VR_TXCTL_TCPCSUM;
                if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP)
                        csum_flags |= VR_TXCTL_UDPCSUM;
        }

        /*
         * Quite contrary to the VIA Rhine datasheet, the VR_TXCTL_TLINK
         * bit is required in all descriptors, whether a frame uses a
         * single buffer or multiple buffers. Likewise, the VR_TXSTAT_OWN
         * bit is valid only in the first descriptor of a multi-fragment
         * frame. Without this, the VIA Rhine chip generates Tx underrun
         * interrupts and can't send any frames.
         */
        si = prod;
        for (i = 0; i < nsegs; i++) {
                desc = &sc->vr_rdata.vr_tx_ring[prod];
                desc->vr_status = 0;
                txctl = txsegs[i].ds_len | VR_TXCTL_TLINK | csum_flags;
                if (i == 0)
                        txctl |= VR_TXCTL_FIRSTFRAG;
                desc->vr_ctl = htole32(txctl);
                desc->vr_data = htole32(VR_ADDR_LO(txsegs[i].ds_addr));
                sc->vr_cdata.vr_tx_cnt++;
                VR_INC(prod, VR_TX_RING_CNT);
        }
        /* Update producer index. */
        sc->vr_cdata.vr_tx_prod = prod;

        /* Step back to the last descriptor we filled in. */
        prod = (prod + VR_TX_RING_CNT - 1) % VR_TX_RING_CNT;
        desc = &sc->vr_rdata.vr_tx_ring[prod];

        /*
         * Set EOP on the last descriptor and request a Tx completion
         * interrupt for every VR_TX_INTR_THRESH-th frame.
         */
        VR_INC(sc->vr_cdata.vr_tx_pkts, VR_TX_INTR_THRESH);
        if (sc->vr_cdata.vr_tx_pkts == 0)
                desc->vr_ctl |= htole32(VR_TXCTL_LASTFRAG | VR_TXCTL_FINT);
        else
                desc->vr_ctl |= htole32(VR_TXCTL_LASTFRAG);

        /* Lastly, turn ownership of the first descriptor over to hardware. */
        desc = &sc->vr_rdata.vr_tx_ring[si];
        desc->vr_status |= htole32(VR_TXSTAT_OWN);
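        /*
         * Granting VR_TXSTAT_OWN on the first descriptor only after
         * the rest of the chain has been filled in guarantees the chip
         * never sees a half-built frame; the bus_dmamap_sync() below
         * then publishes the descriptors before the caller nudges
         * VR_CR0_TX_GO.
         */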
	bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
	    sc->vr_cdata.vr_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

static void
vr_start(struct ifnet *ifp)
{
	struct vr_softc *sc;

	sc = ifp->if_softc;
	VR_LOCK(sc);
	vr_start_locked(ifp);
	VR_UNLOCK(sc);
}

static void
vr_start_locked(struct ifnet *ifp)
{
	struct vr_softc *sc;
	struct mbuf *m_head;
	int enq;

	sc = ifp->if_softc;

	VR_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->vr_flags & VR_F_LINK) == 0)
		return;

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
	    sc->vr_cdata.vr_tx_cnt < VR_TX_RING_CNT - 2; ) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (vr_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/* Tell the chip to start transmitting. */
		VR_SETBIT(sc, VR_CR0, VR_CR0_TX_GO);
		/* Set a timeout in case the chip goes out to lunch. */
		sc->vr_watchdog_timer = 5;
	}
}

static void
vr_init(void *xsc)
{
	struct vr_softc *sc;

	sc = (struct vr_softc *)xsc;
	VR_LOCK(sc);
	vr_init_locked(sc);
	VR_UNLOCK(sc);
}

static void
vr_init_locked(struct vr_softc *sc)
{
	struct ifnet *ifp;
	struct mii_data *mii;
	bus_addr_t addr;
	int i;

	VR_LOCK_ASSERT(sc);

	ifp = sc->vr_ifp;
	mii = device_get_softc(sc->vr_miibus);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	/* Cancel pending I/O and free all RX/TX buffers. */
	vr_stop(sc);
	vr_reset(sc);

	/* Set our station address. */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VR_PAR0 + i, IF_LLADDR(sc->vr_ifp)[i]);

	/* Set DMA size. */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD);

	/*
	 * BCR0 and BCR1 can override the RXCFG and TXCFG registers,
	 * so we must set both.
	 */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES);

	VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH);
	VR_SETBIT(sc, VR_BCR1, vr_tx_threshold_tables[sc->vr_txthresh].bcr_cfg);

	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES);

	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
	VR_SETBIT(sc, VR_TXCFG, vr_tx_threshold_tables[sc->vr_txthresh].tx_cfg);

	/* Init circular RX list. */
	if (vr_rx_ring_init(sc) != 0) {
		device_printf(sc->vr_dev,
		    "initialization failed: no memory for rx buffers\n");
		vr_stop(sc);
		return;
	}

	/* Init tx descriptors. */
	vr_tx_ring_init(sc);

	if ((sc->vr_quirks & VR_Q_CAM) != 0) {
		uint8_t vcam[2] = { 0, 0 };

		/* Disable VLAN hardware tag insertion/stripping. */
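		/*
		 * A note on this VR_Q_CAM block: the VT6105M keeps
		 * content-addressable memories for both multicast and
		 * VLAN filtering.  The code below masks off every CAM
		 * entry, then loads VLAN CAM entry 0 with VLAN id 0 and
		 * re-enables just that entry, presumably so the filter
		 * always holds one valid entry.  This is a reading of
		 * the code; the datasheet says little about it.
		 */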
		VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TXTAGEN | VR_TXCFG_RXTAGCTL);
		/* Disable VLAN hardware filtering. */
		VR_CLRBIT(sc, VR_BCR1, VR_BCR1_VLANFILT_ENB);
		/* Disable all CAM entries. */
		vr_cam_mask(sc, VR_MCAST_CAM, 0);
		vr_cam_mask(sc, VR_VLAN_CAM, 0);
		/* Enable the first VLAN CAM. */
		vr_cam_data(sc, VR_VLAN_CAM, 0, vcam);
		vr_cam_mask(sc, VR_VLAN_CAM, 1);
	}

	/*
	 * Set up receive filter.
	 */
	vr_set_filter(sc);

	/* Load the address of the RX ring. */
	addr = VR_RX_RING_ADDR(sc, 0);
	CSR_WRITE_4(sc, VR_RXADDR, VR_ADDR_LO(addr));
	/* Load the address of the TX ring. */
	addr = VR_TX_RING_ADDR(sc, 0);
	CSR_WRITE_4(sc, VR_TXADDR, VR_ADDR_LO(addr));
	/* Default: full-duplex, no Tx poll. */
	CSR_WRITE_1(sc, VR_CR1, VR_CR1_FULLDUPLEX | VR_CR1_TX_NOPOLL);

	/* Set flow-control parameters for Rhine III. */
	if (sc->vr_revid >= REV_ID_VT6105_A0) {
		/*
		 * Configure the number of Rx buffers available for
		 * incoming packets.
		 * Even though the data sheet says almost nothing about
		 * this register, it must be updated whenever the driver
		 * posts new RX buffers to the controller.  Otherwise no
		 * XON frame is sent to the link partner even when the
		 * controller has enough RX buffers, and the interface
		 * would be isolated from the network.
		 * The controller is not smart enough to track the number
		 * of available RX buffers on its own, so the driver has
		 * to tell it how many are posted.  In other words, this
		 * register works like a residue counter for RX buffers
		 * and should be initialized to the total number of RX
		 * buffers minus 1 before enabling the RX MAC.  Note that
		 * the register is 8 bits wide, which effectively limits
		 * the number of RX buffers the controller can be told
		 * about to 255.
		 */
		CSR_WRITE_1(sc, VR_FLOWCR0, VR_RX_RING_CNT - 1);
		/*
		 * Tx pause low threshold : 8 free receive buffers
		 * Tx pause XON high threshold : 24 free receive buffers
		 */
		CSR_WRITE_1(sc, VR_FLOWCR1,
		    VR_FLOWCR1_TXLO8 | VR_FLOWCR1_TXHI24 | VR_FLOWCR1_XONXOFF);
		/* Set Tx pause timer. */
		CSR_WRITE_2(sc, VR_PAUSETIMER, 0xffff);
	}

	/* Enable receiver and transmitter. */
	CSR_WRITE_1(sc, VR_CR0,
	    VR_CR0_START | VR_CR0_TX_ON | VR_CR0_RX_ON | VR_CR0_RX_GO);

	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
#ifdef DEVICE_POLLING
	/*
	 * Disable interrupts if we are polling.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		CSR_WRITE_2(sc, VR_IMR, 0);
	else
#endif
	/*
	 * Enable interrupts and disable MII intrs.
	 */
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
	if (sc->vr_revid > REV_ID_VT6102_A)
		CSR_WRITE_2(sc, VR_MII_IMR, 0);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	sc->vr_flags &= ~(VR_F_LINK | VR_F_TXPAUSE);
	mii_mediachg(mii);

	callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc);
}

/*
 * Set media options.
 */
static int
vr_ifmedia_upd(struct ifnet *ifp)
{
	struct vr_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = ifp->if_softc;
	VR_LOCK(sc);
	mii = device_get_softc(sc->vr_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	sc->vr_flags &= ~(VR_F_LINK | VR_F_TXPAUSE);
	error = mii_mediachg(mii);
	VR_UNLOCK(sc);

	return (error);
}

/*
 * Report current media status.
 */
static void
vr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vr_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->vr_miibus);
	VR_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) == 0) {
		VR_UNLOCK(sc);
		return;
	}
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	VR_UNLOCK(sc);
}

static int
vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct vr_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		VR_LOCK(sc);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if ((ifp->if_flags ^ sc->vr_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					vr_set_filter(sc);
			} else {
				if ((sc->vr_flags & VR_F_DETACHED) == 0)
					vr_init_locked(sc);
			}
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				vr_stop(sc);
		}
		sc->vr_if_flags = ifp->if_flags;
		VR_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		VR_LOCK(sc);
		vr_set_filter(sc);
		VR_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->vr_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(vr_poll, ifp);
				if (error != 0)
					break;
				VR_LOCK(sc);
				/* Disable interrupts. */
				CSR_WRITE_2(sc, VR_IMR, 0x0000);
				ifp->if_capenable |= IFCAP_POLLING;
				VR_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupts. */
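				/*
				 * Interrupts are re-armed even if
				 * ether_poll_deregister() reported an
				 * error; by this point the interface is
				 * off the kernel's polling list, so
				 * interrupt-driven operation is the only
				 * way it can keep making progress (an
				 * observation about this code, not a
				 * documented contract).
				 */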
				VR_LOCK(sc);
				CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
				ifp->if_capenable &= ~IFCAP_POLLING;
				VR_UNLOCK(sc);
			}
		}
#endif /* DEVICE_POLLING */
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (IFCAP_TXCSUM & ifp->if_capabilities) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((IFCAP_TXCSUM & ifp->if_capenable) != 0)
				ifp->if_hwassist |= VR_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~VR_CSUM_FEATURES;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (IFCAP_RXCSUM & ifp->if_capabilities) != 0)
			ifp->if_capenable ^= IFCAP_RXCSUM;
		if ((mask & IFCAP_WOL_UCAST) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL_UCAST) != 0)
			ifp->if_capenable ^= IFCAP_WOL_UCAST;
		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

static void
vr_watchdog(struct vr_softc *sc)
{
	struct ifnet *ifp;

	VR_LOCK_ASSERT(sc);

	if (sc->vr_watchdog_timer == 0 || --sc->vr_watchdog_timer)
		return;

	ifp = sc->vr_ifp;
	/*
	 * Reclaim transmitted frames first, as we don't request an
	 * interrupt for every packet.
	 */
	vr_txeof(sc);
	if (sc->vr_cdata.vr_tx_cnt == 0)
		return;

	if ((sc->vr_flags & VR_F_LINK) == 0) {
		if (bootverbose)
			if_printf(sc->vr_ifp, "watchdog timeout "
			    "(missed link)\n");
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		vr_init_locked(sc);
		return;
	}

	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	if_printf(ifp, "watchdog timeout\n");

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	vr_init_locked(sc);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		vr_start_locked(ifp);
}

static void
vr_tx_start(struct vr_softc *sc)
{
	bus_addr_t addr;
	uint8_t cmd;

	cmd = CSR_READ_1(sc, VR_CR0);
	if ((cmd & VR_CR0_TX_ON) == 0) {
		addr = VR_TX_RING_ADDR(sc, sc->vr_cdata.vr_tx_cons);
		CSR_WRITE_4(sc, VR_TXADDR, VR_ADDR_LO(addr));
		cmd |= VR_CR0_TX_ON;
		CSR_WRITE_1(sc, VR_CR0, cmd);
	}
	if (sc->vr_cdata.vr_tx_cnt != 0) {
		sc->vr_watchdog_timer = 5;
		VR_SETBIT(sc, VR_CR0, VR_CR0_TX_GO);
	}
}

static void
vr_rx_start(struct vr_softc *sc)
{
	bus_addr_t addr;
	uint8_t cmd;

	cmd = CSR_READ_1(sc, VR_CR0);
	if ((cmd & VR_CR0_RX_ON) == 0) {
		addr = VR_RX_RING_ADDR(sc, sc->vr_cdata.vr_rx_cons);
		CSR_WRITE_4(sc, VR_RXADDR, VR_ADDR_LO(addr));
		cmd |= VR_CR0_RX_ON;
		CSR_WRITE_1(sc, VR_CR0, cmd);
	}
	CSR_WRITE_1(sc, VR_CR0, cmd | VR_CR0_RX_GO);
}

static int
vr_tx_stop(struct vr_softc *sc)
{
	int i;
	uint8_t cmd;

	cmd = CSR_READ_1(sc, VR_CR0);
	if ((cmd & VR_CR0_TX_ON) != 0) {
		cmd &= ~VR_CR0_TX_ON;
		CSR_WRITE_1(sc, VR_CR0, cmd);
		for (i = VR_TIMEOUT; i > 0; i--) {
			DELAY(5);
			cmd = CSR_READ_1(sc, VR_CR0);
			if ((cmd & VR_CR0_TX_ON) == 0)
				break;
		}
		if (i == 0)
			return (ETIMEDOUT);
	}
	return (0);
}

static int
vr_rx_stop(struct vr_softc *sc)
{
	int i;
	uint8_t cmd;

	cmd = CSR_READ_1(sc, VR_CR0);
	if ((cmd & VR_CR0_RX_ON) != 0) {
		cmd &= ~VR_CR0_RX_ON;
		CSR_WRITE_1(sc, VR_CR0, cmd);
		for (i = VR_TIMEOUT; i > 0; i--) {
			DELAY(5);
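			/*
			 * Re-read CR0 after each 5us pause; the receiver
			 * may need time to drain its FIFO before it
			 * clears VR_CR0_RX_ON.  The total wait is bounded
			 * by VR_TIMEOUT polls (see if_vrreg.h for the
			 * actual budget).
			 */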
			cmd = CSR_READ_1(sc, VR_CR0);
			if ((cmd & VR_CR0_RX_ON) == 0)
				break;
		}
		if (i == 0)
			return (ETIMEDOUT);
	}
	return (0);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
vr_stop(struct vr_softc *sc)
{
	struct vr_txdesc *txd;
	struct vr_rxdesc *rxd;
	struct ifnet *ifp;
	int i;

	VR_LOCK_ASSERT(sc);

	ifp = sc->vr_ifp;
	sc->vr_watchdog_timer = 0;

	callout_stop(&sc->vr_stat_callout);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	CSR_WRITE_1(sc, VR_CR0, VR_CR0_STOP);
	if (vr_rx_stop(sc) != 0)
		device_printf(sc->vr_dev, "%s: Rx shutdown error\n", __func__);
	if (vr_tx_stop(sc) != 0)
		device_printf(sc->vr_dev, "%s: Tx shutdown error\n", __func__);
	/* Clear pending interrupts. */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
	CSR_WRITE_2(sc, VR_IMR, 0x0000);
	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);

	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (i = 0; i < VR_RX_RING_CNT; i++) {
		rxd = &sc->vr_cdata.vr_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->vr_cdata.vr_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->vr_cdata.vr_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < VR_TX_RING_CNT; i++) {
		txd = &sc->vr_cdata.vr_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->vr_cdata.vr_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->vr_cdata.vr_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
vr_shutdown(device_t dev)
{

	return (vr_suspend(dev));
}

static int
vr_suspend(device_t dev)
{
	struct vr_softc *sc;

	sc = device_get_softc(dev);

	VR_LOCK(sc);
	vr_stop(sc);
	vr_setwol(sc);
	sc->vr_flags |= VR_F_SUSPENDED;
	VR_UNLOCK(sc);

	return (0);
}

static int
vr_resume(device_t dev)
{
	struct vr_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	VR_LOCK(sc);
	ifp = sc->vr_ifp;
	vr_clrwol(sc);
	vr_reset(sc);
	if (ifp->if_flags & IFF_UP)
		vr_init_locked(sc);

	sc->vr_flags &= ~VR_F_SUSPENDED;
	VR_UNLOCK(sc);

	return (0);
}

static void
vr_setwol(struct vr_softc *sc)
{
	struct ifnet *ifp;
	int pmc;
	uint16_t pmstat;
	uint8_t v;

	VR_LOCK_ASSERT(sc);

	if (sc->vr_revid < REV_ID_VT6102_A ||
	    pci_find_cap(sc->vr_dev, PCIY_PMG, &pmc) != 0)
		return;

	ifp = sc->vr_ifp;

	/* Clear WOL configuration. */
	CSR_WRITE_1(sc, VR_WOLCR_CLR, 0xFF);
	CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_SAB | VR_WOLCFG_SAM);
	CSR_WRITE_1(sc, VR_PWRCSR_CLR, 0xFF);
	CSR_WRITE_1(sc, VR_PWRCFG_CLR, VR_PWRCFG_WOLEN);
	if (sc->vr_revid > REV_ID_VT6105_B0) {
		/* Newer Rhine III chips support two additional patterns. */
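		/*
		 * The two writes of 3 below clear both extra pattern
		 * slots; 3 is read here as a two-bit mask covering the
		 * pair, an inference from this driver rather than from
		 * the sparsely documented TESTREG/PWRCSR1 registers.
		 */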
		CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_PATTERN_PAGE);
		CSR_WRITE_1(sc, VR_TESTREG_CLR, 3);
		CSR_WRITE_1(sc, VR_PWRCSR1_CLR, 3);
	}
	if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0)
		CSR_WRITE_1(sc, VR_WOLCR_SET, VR_WOLCR_UCAST);
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
		CSR_WRITE_1(sc, VR_WOLCR_SET, VR_WOLCR_MAGIC);
	/*
	 * It seems that multicast wakeup frames require programming
	 * pattern registers and a valid CRC as well as a pattern mask
	 * for each pattern.  While it's possible to set up such a
	 * pattern, it would complicate the WOL configuration, so ignore
	 * multicast wakeup frames.
	 */
	if ((ifp->if_capenable & IFCAP_WOL) != 0) {
		CSR_WRITE_1(sc, VR_WOLCFG_SET, VR_WOLCFG_SAB | VR_WOLCFG_SAM);
		v = CSR_READ_1(sc, VR_STICKHW);
		CSR_WRITE_1(sc, VR_STICKHW, v | VR_STICKHW_WOL_ENB);
		CSR_WRITE_1(sc, VR_PWRCFG_SET, VR_PWRCFG_WOLEN);
	}

	/* Put the hardware to sleep. */
	v = CSR_READ_1(sc, VR_STICKHW);
	v |= VR_STICKHW_DS0 | VR_STICKHW_DS1;
	CSR_WRITE_1(sc, VR_STICKHW, v);

	/* Request PME if WOL is requested. */
	pmstat = pci_read_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
}

static void
vr_clrwol(struct vr_softc *sc)
{
	uint8_t v;

	VR_LOCK_ASSERT(sc);

	if (sc->vr_revid < REV_ID_VT6102_A)
		return;

	/* Take the hardware out of sleep. */
	v = CSR_READ_1(sc, VR_STICKHW);
	v &= ~(VR_STICKHW_DS0 | VR_STICKHW_DS1 | VR_STICKHW_WOL_ENB);
	CSR_WRITE_1(sc, VR_STICKHW, v);

	/*
	 * Clear the WOL configuration, as WOL may interfere with normal
	 * operation.
	 */
	CSR_WRITE_1(sc, VR_WOLCR_CLR, 0xFF);
	CSR_WRITE_1(sc, VR_WOLCFG_CLR,
	    VR_WOLCFG_SAB | VR_WOLCFG_SAM | VR_WOLCFG_PMEOVR);
	CSR_WRITE_1(sc, VR_PWRCSR_CLR, 0xFF);
	CSR_WRITE_1(sc, VR_PWRCFG_CLR, VR_PWRCFG_WOLEN);
	if (sc->vr_revid > REV_ID_VT6105_B0) {
		/* Newer Rhine III chips support two additional patterns. */
		CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_PATTERN_PAGE);
		CSR_WRITE_1(sc, VR_TESTREG_CLR, 3);
		CSR_WRITE_1(sc, VR_PWRCSR1_CLR, 3);
	}
}

static int
vr_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
	struct vr_softc *sc;
	struct vr_statistics *stat;
	int error;
	int result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error != 0 || req->newptr == NULL)
		return (error);

	if (result == 1) {
		sc = (struct vr_softc *)arg1;
		stat = &sc->vr_stat;

		printf("%s statistics:\n", device_get_nameunit(sc->vr_dev));
		printf("Outbound good frames : %ju\n",
		    (uintmax_t)stat->tx_ok);
		printf("Inbound good frames : %ju\n",
		    (uintmax_t)stat->rx_ok);
		printf("Outbound errors : %u\n", stat->tx_errors);
		printf("Inbound errors : %u\n", stat->rx_errors);
		printf("Inbound no buffers : %u\n", stat->rx_no_buffers);
		printf("Inbound no mbuf clusters: %u\n", stat->rx_no_mbufs);
		printf("Inbound FIFO overflows : %u\n",
		    stat->rx_fifo_overflows);
		printf("Inbound CRC errors : %u\n", stat->rx_crc_errors);
		printf("Inbound frame alignment errors : %u\n",
		    stat->rx_alignment);
		printf("Inbound giant frames : %u\n", stat->rx_giants);
		printf("Inbound runt frames : %u\n", stat->rx_runts);
		printf("Outbound aborted with excessive collisions : %u\n",
		    stat->tx_abort);
		printf("Outbound collisions : %u\n", stat->tx_collisions);
		printf("Outbound late collisions : %u\n",
		    stat->tx_late_collisions);
		printf("Outbound underrun : %u\n", stat->tx_underrun);
		printf("PCI bus errors : %u\n", stat->bus_errors);
		printf("driver restarted due to Rx/Tx shutdown failure : %u\n",
		    stat->num_restart);
	}

	return (error);
}
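/*
 * Usage sketch for the statistics handler above (assuming it is hooked
 * up as a "stats" node under dev.vr.N, as is conventional for handlers
 * of this kind; check the attach routine for the actual OID name):
 *
 *	# sysctl dev.vr.0.stats=1
 *
 * Writing 1 dumps the accumulated counters to the console; a plain read,
 * or writing any other value, leaves everything untouched.
 */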