1 /*- 2 * SPDX-License-Identifier: BSD-4-Clause 3 * 4 * Copyright (c) 1997, 1998 5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 33 */ 34 35 #include <sys/cdefs.h> 36 /* 37 * VIA Rhine fast ethernet PCI NIC driver 38 * 39 * Supports various network adapters based on the VIA Rhine 40 * and Rhine II PCI controllers, including the D-Link DFE530TX. 41 * Datasheets are available at http://www.via.com.tw. 42 * 43 * Written by Bill Paul <wpaul@ctr.columbia.edu> 44 * Electrical Engineering Department 45 * Columbia University, New York City 46 */ 47 48 /* 49 * The VIA Rhine controllers are similar in some respects to the 50 * the DEC tulip chips, except less complicated. The controller 51 * uses an MII bus and an external physical layer interface. The 52 * receiver has a one entry perfect filter and a 64-bit hash table 53 * multicast filter. Transmit and receive descriptors are similar 54 * to the tulip. 55 * 56 * Some Rhine chips has a serious flaw in its transmit DMA mechanism: 57 * transmit buffers must be longword aligned. Unfortunately, 58 * FreeBSD doesn't guarantee that mbufs will be filled in starting 59 * at longword boundaries, so we have to do a buffer copy before 60 * transmission. 61 */ 62 63 #ifdef HAVE_KERNEL_OPTION_HEADERS 64 #include "opt_device_polling.h" 65 #endif 66 67 #include <sys/param.h> 68 #include <sys/systm.h> 69 #include <sys/bus.h> 70 #include <sys/endian.h> 71 #include <sys/kernel.h> 72 #include <sys/malloc.h> 73 #include <sys/mbuf.h> 74 #include <sys/module.h> 75 #include <sys/rman.h> 76 #include <sys/socket.h> 77 #include <sys/sockio.h> 78 #include <sys/sysctl.h> 79 #include <sys/taskqueue.h> 80 81 #include <net/bpf.h> 82 #include <net/if.h> 83 #include <net/if_var.h> 84 #include <net/ethernet.h> 85 #include <net/if_dl.h> 86 #include <net/if_media.h> 87 #include <net/if_types.h> 88 #include <net/if_vlan_var.h> 89 90 #include <dev/mii/mii.h> 91 #include <dev/mii/miivar.h> 92 93 #include <dev/pci/pcireg.h> 94 #include <dev/pci/pcivar.h> 95 96 #include <machine/bus.h> 97 98 #include <dev/vr/if_vrreg.h> 99 100 /* "device miibus" required. See GENERIC if you get errors here. */ 101 #include "miibus_if.h" 102 103 MODULE_DEPEND(vr, pci, 1, 1, 1); 104 MODULE_DEPEND(vr, ether, 1, 1, 1); 105 MODULE_DEPEND(vr, miibus, 1, 1, 1); 106 107 /* Define to show Rx/Tx error status. */ 108 #undef VR_SHOW_ERRORS 109 #define VR_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 110 111 /* 112 * Various supported device vendors/types, their names & quirks. 113 */ 114 #define VR_Q_NEEDALIGN (1<<0) 115 #define VR_Q_CSUM (1<<1) 116 #define VR_Q_CAM (1<<2) 117 118 static const struct vr_type { 119 u_int16_t vr_vid; 120 u_int16_t vr_did; 121 int vr_quirks; 122 const char *vr_name; 123 } vr_devs[] = { 124 { VIA_VENDORID, VIA_DEVICEID_RHINE, 125 VR_Q_NEEDALIGN, 126 "VIA VT3043 Rhine I 10/100BaseTX" }, 127 { VIA_VENDORID, VIA_DEVICEID_RHINE_II, 128 VR_Q_NEEDALIGN, 129 "VIA VT86C100A Rhine II 10/100BaseTX" }, 130 { VIA_VENDORID, VIA_DEVICEID_RHINE_II_2, 131 0, 132 "VIA VT6102 Rhine II 10/100BaseTX" }, 133 { VIA_VENDORID, VIA_DEVICEID_RHINE_III, 134 0, 135 "VIA VT6105 Rhine III 10/100BaseTX" }, 136 { VIA_VENDORID, VIA_DEVICEID_RHINE_III_M, 137 VR_Q_CSUM, 138 "VIA VT6105M Rhine III 10/100BaseTX" }, 139 { DELTA_VENDORID, DELTA_DEVICEID_RHINE_II, 140 VR_Q_NEEDALIGN, 141 "Delta Electronics Rhine II 10/100BaseTX" }, 142 { ADDTRON_VENDORID, ADDTRON_DEVICEID_RHINE_II, 143 VR_Q_NEEDALIGN, 144 "Addtron Technology Rhine II 10/100BaseTX" }, 145 { 0, 0, 0, NULL } 146 }; 147 148 static int vr_probe(device_t); 149 static int vr_attach(device_t); 150 static int vr_detach(device_t); 151 static int vr_shutdown(device_t); 152 static int vr_suspend(device_t); 153 static int vr_resume(device_t); 154 155 static void vr_dmamap_cb(void *, bus_dma_segment_t *, int, int); 156 static int vr_dma_alloc(struct vr_softc *); 157 static void vr_dma_free(struct vr_softc *); 158 static __inline void vr_discard_rxbuf(struct vr_rxdesc *); 159 static int vr_newbuf(struct vr_softc *, int); 160 161 #ifndef __NO_STRICT_ALIGNMENT 162 static __inline void vr_fixup_rx(struct mbuf *); 163 #endif 164 static int vr_rxeof(struct vr_softc *); 165 static void vr_txeof(struct vr_softc *); 166 static void vr_tick(void *); 167 static int vr_error(struct vr_softc *, uint16_t); 168 static void vr_tx_underrun(struct vr_softc *); 169 static int vr_intr(void *); 170 static void vr_int_task(void *, int); 171 static void vr_start(if_t); 172 static void vr_start_locked(if_t); 173 static int vr_encap(struct vr_softc *, struct mbuf **); 174 static int vr_ioctl(if_t, u_long, caddr_t); 175 static void vr_init(void *); 176 static void vr_init_locked(struct vr_softc *); 177 static void vr_tx_start(struct vr_softc *); 178 static void vr_rx_start(struct vr_softc *); 179 static int vr_tx_stop(struct vr_softc *); 180 static int vr_rx_stop(struct vr_softc *); 181 static void vr_stop(struct vr_softc *); 182 static void vr_watchdog(struct vr_softc *); 183 static int vr_ifmedia_upd(if_t); 184 static void vr_ifmedia_sts(if_t, struct ifmediareq *); 185 186 static int vr_miibus_readreg(device_t, int, int); 187 static int vr_miibus_writereg(device_t, int, int, int); 188 static void vr_miibus_statchg(device_t); 189 190 static void vr_cam_mask(struct vr_softc *, uint32_t, int); 191 static int vr_cam_data(struct vr_softc *, int, int, uint8_t *); 192 static void vr_set_filter(struct vr_softc *); 193 static void vr_reset(const struct vr_softc *); 194 static int vr_tx_ring_init(struct vr_softc *); 195 static int vr_rx_ring_init(struct vr_softc *); 196 static void vr_setwol(struct vr_softc *); 197 static void vr_clrwol(struct vr_softc *); 198 static int vr_sysctl_stats(SYSCTL_HANDLER_ARGS); 199 200 static const struct vr_tx_threshold_table { 201 int tx_cfg; 202 int bcr_cfg; 203 int value; 204 } vr_tx_threshold_tables[] = { 205 { VR_TXTHRESH_64BYTES, VR_BCR1_TXTHRESH64BYTES, 64 }, 206 { VR_TXTHRESH_128BYTES, VR_BCR1_TXTHRESH128BYTES, 128 }, 207 { VR_TXTHRESH_256BYTES, VR_BCR1_TXTHRESH256BYTES, 256 }, 208 { VR_TXTHRESH_512BYTES, VR_BCR1_TXTHRESH512BYTES, 512 }, 209 { VR_TXTHRESH_1024BYTES, VR_BCR1_TXTHRESH1024BYTES, 1024 }, 210 { VR_TXTHRESH_STORENFWD, VR_BCR1_TXTHRESHSTORENFWD, 2048 } 211 }; 212 213 static device_method_t vr_methods[] = { 214 /* Device interface */ 215 DEVMETHOD(device_probe, vr_probe), 216 DEVMETHOD(device_attach, vr_attach), 217 DEVMETHOD(device_detach, vr_detach), 218 DEVMETHOD(device_shutdown, vr_shutdown), 219 DEVMETHOD(device_suspend, vr_suspend), 220 DEVMETHOD(device_resume, vr_resume), 221 222 /* MII interface */ 223 DEVMETHOD(miibus_readreg, vr_miibus_readreg), 224 DEVMETHOD(miibus_writereg, vr_miibus_writereg), 225 DEVMETHOD(miibus_statchg, vr_miibus_statchg), 226 227 DEVMETHOD_END 228 }; 229 230 static driver_t vr_driver = { 231 "vr", 232 vr_methods, 233 sizeof(struct vr_softc) 234 }; 235 236 DRIVER_MODULE(vr, pci, vr_driver, 0, 0); 237 DRIVER_MODULE(miibus, vr, miibus_driver, 0, 0); 238 239 static int 240 vr_miibus_readreg(device_t dev, int phy, int reg) 241 { 242 struct vr_softc *sc; 243 int i; 244 245 sc = device_get_softc(dev); 246 247 /* Set the register address. */ 248 CSR_WRITE_1(sc, VR_MIIADDR, reg); 249 VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB); 250 251 for (i = 0; i < VR_MII_TIMEOUT; i++) { 252 DELAY(1); 253 if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0) 254 break; 255 } 256 if (i == VR_MII_TIMEOUT) 257 device_printf(sc->vr_dev, "phy read timeout %d:%d\n", phy, reg); 258 259 return (CSR_READ_2(sc, VR_MIIDATA)); 260 } 261 262 static int 263 vr_miibus_writereg(device_t dev, int phy, int reg, int data) 264 { 265 struct vr_softc *sc; 266 int i; 267 268 sc = device_get_softc(dev); 269 270 /* Set the register address and data to write. */ 271 CSR_WRITE_1(sc, VR_MIIADDR, reg); 272 CSR_WRITE_2(sc, VR_MIIDATA, data); 273 VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB); 274 275 for (i = 0; i < VR_MII_TIMEOUT; i++) { 276 DELAY(1); 277 if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0) 278 break; 279 } 280 if (i == VR_MII_TIMEOUT) 281 device_printf(sc->vr_dev, "phy write timeout %d:%d\n", phy, 282 reg); 283 284 return (0); 285 } 286 287 /* 288 * In order to fiddle with the 289 * 'full-duplex' and '100Mbps' bits in the netconfig register, we 290 * first have to put the transmit and/or receive logic in the idle state. 291 */ 292 static void 293 vr_miibus_statchg(device_t dev) 294 { 295 struct vr_softc *sc; 296 struct mii_data *mii; 297 if_t ifp; 298 int lfdx, mfdx; 299 uint8_t cr0, cr1, fc; 300 301 sc = device_get_softc(dev); 302 mii = device_get_softc(sc->vr_miibus); 303 ifp = sc->vr_ifp; 304 if (mii == NULL || ifp == NULL || 305 (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) 306 return; 307 308 sc->vr_flags &= ~(VR_F_LINK | VR_F_TXPAUSE); 309 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 310 (IFM_ACTIVE | IFM_AVALID)) { 311 switch (IFM_SUBTYPE(mii->mii_media_active)) { 312 case IFM_10_T: 313 case IFM_100_TX: 314 sc->vr_flags |= VR_F_LINK; 315 break; 316 default: 317 break; 318 } 319 } 320 321 if ((sc->vr_flags & VR_F_LINK) != 0) { 322 cr0 = CSR_READ_1(sc, VR_CR0); 323 cr1 = CSR_READ_1(sc, VR_CR1); 324 mfdx = (cr1 & VR_CR1_FULLDUPLEX) != 0; 325 lfdx = (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0; 326 if (mfdx != lfdx) { 327 if ((cr0 & (VR_CR0_TX_ON | VR_CR0_RX_ON)) != 0) { 328 if (vr_tx_stop(sc) != 0 || 329 vr_rx_stop(sc) != 0) { 330 device_printf(sc->vr_dev, 331 "%s: Tx/Rx shutdown error -- " 332 "resetting\n", __func__); 333 sc->vr_flags |= VR_F_RESTART; 334 VR_UNLOCK(sc); 335 return; 336 } 337 } 338 if (lfdx) 339 cr1 |= VR_CR1_FULLDUPLEX; 340 else 341 cr1 &= ~VR_CR1_FULLDUPLEX; 342 CSR_WRITE_1(sc, VR_CR1, cr1); 343 } 344 fc = 0; 345 /* Configure flow-control. */ 346 if (sc->vr_revid >= REV_ID_VT6105_A0) { 347 fc = CSR_READ_1(sc, VR_FLOWCR1); 348 fc &= ~(VR_FLOWCR1_TXPAUSE | VR_FLOWCR1_RXPAUSE); 349 if ((IFM_OPTIONS(mii->mii_media_active) & 350 IFM_ETH_RXPAUSE) != 0) 351 fc |= VR_FLOWCR1_RXPAUSE; 352 if ((IFM_OPTIONS(mii->mii_media_active) & 353 IFM_ETH_TXPAUSE) != 0) { 354 fc |= VR_FLOWCR1_TXPAUSE; 355 sc->vr_flags |= VR_F_TXPAUSE; 356 } 357 CSR_WRITE_1(sc, VR_FLOWCR1, fc); 358 } else if (sc->vr_revid >= REV_ID_VT6102_A) { 359 /* No Tx puase capability available for Rhine II. */ 360 fc = CSR_READ_1(sc, VR_MISC_CR0); 361 fc &= ~VR_MISCCR0_RXPAUSE; 362 if ((IFM_OPTIONS(mii->mii_media_active) & 363 IFM_ETH_RXPAUSE) != 0) 364 fc |= VR_MISCCR0_RXPAUSE; 365 CSR_WRITE_1(sc, VR_MISC_CR0, fc); 366 } 367 vr_rx_start(sc); 368 vr_tx_start(sc); 369 } else { 370 if (vr_tx_stop(sc) != 0 || vr_rx_stop(sc) != 0) { 371 device_printf(sc->vr_dev, 372 "%s: Tx/Rx shutdown error -- resetting\n", 373 __func__); 374 sc->vr_flags |= VR_F_RESTART; 375 } 376 } 377 } 378 379 static void 380 vr_cam_mask(struct vr_softc *sc, uint32_t mask, int type) 381 { 382 383 if (type == VR_MCAST_CAM) 384 CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_MCAST); 385 else 386 CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_VLAN); 387 CSR_WRITE_4(sc, VR_CAMMASK, mask); 388 CSR_WRITE_1(sc, VR_CAMCTL, 0); 389 } 390 391 static int 392 vr_cam_data(struct vr_softc *sc, int type, int idx, uint8_t *mac) 393 { 394 int i; 395 396 if (type == VR_MCAST_CAM) { 397 if (idx < 0 || idx >= VR_CAM_MCAST_CNT || mac == NULL) 398 return (EINVAL); 399 CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_MCAST); 400 } else 401 CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_VLAN); 402 403 /* Set CAM entry address. */ 404 CSR_WRITE_1(sc, VR_CAMADDR, idx); 405 /* Set CAM entry data. */ 406 if (type == VR_MCAST_CAM) { 407 for (i = 0; i < ETHER_ADDR_LEN; i++) 408 CSR_WRITE_1(sc, VR_MCAM0 + i, mac[i]); 409 } else { 410 CSR_WRITE_1(sc, VR_VCAM0, mac[0]); 411 CSR_WRITE_1(sc, VR_VCAM1, mac[1]); 412 } 413 DELAY(10); 414 /* Write CAM and wait for self-clear of VR_CAMCTL_WRITE bit. */ 415 CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_WRITE); 416 for (i = 0; i < VR_TIMEOUT; i++) { 417 DELAY(1); 418 if ((CSR_READ_1(sc, VR_CAMCTL) & VR_CAMCTL_WRITE) == 0) 419 break; 420 } 421 422 if (i == VR_TIMEOUT) 423 device_printf(sc->vr_dev, "%s: setting CAM filter timeout!\n", 424 __func__); 425 CSR_WRITE_1(sc, VR_CAMCTL, 0); 426 427 return (i == VR_TIMEOUT ? ETIMEDOUT : 0); 428 } 429 430 struct vr_hash_maddr_cam_ctx { 431 struct vr_softc *sc; 432 uint32_t mask; 433 int error; 434 }; 435 436 static u_int 437 vr_hash_maddr_cam(void *arg, struct sockaddr_dl *sdl, u_int mcnt) 438 { 439 struct vr_hash_maddr_cam_ctx *ctx = arg; 440 441 if (ctx->error != 0) 442 return (0); 443 ctx->error = vr_cam_data(ctx->sc, VR_MCAST_CAM, mcnt, LLADDR(sdl)); 444 if (ctx->error != 0) { 445 ctx->mask = 0; 446 return (0); 447 } 448 ctx->mask |= 1 << mcnt; 449 450 return (1); 451 } 452 453 static u_int 454 vr_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) 455 { 456 uint32_t *hashes = arg; 457 int h; 458 459 h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26; 460 if (h < 32) 461 hashes[0] |= (1 << h); 462 else 463 hashes[1] |= (1 << (h - 32)); 464 465 return (1); 466 } 467 468 /* 469 * Program the 64-bit multicast hash filter. 470 */ 471 static void 472 vr_set_filter(struct vr_softc *sc) 473 { 474 if_t ifp; 475 uint32_t hashes[2] = { 0, 0 }; 476 uint8_t rxfilt; 477 int error, mcnt; 478 479 VR_LOCK_ASSERT(sc); 480 481 ifp = sc->vr_ifp; 482 rxfilt = CSR_READ_1(sc, VR_RXCFG); 483 rxfilt &= ~(VR_RXCFG_RX_PROMISC | VR_RXCFG_RX_BROAD | 484 VR_RXCFG_RX_MULTI); 485 if (if_getflags(ifp) & IFF_BROADCAST) 486 rxfilt |= VR_RXCFG_RX_BROAD; 487 if (if_getflags(ifp) & IFF_ALLMULTI || if_getflags(ifp) & IFF_PROMISC) { 488 rxfilt |= VR_RXCFG_RX_MULTI; 489 if (if_getflags(ifp) & IFF_PROMISC) 490 rxfilt |= VR_RXCFG_RX_PROMISC; 491 CSR_WRITE_1(sc, VR_RXCFG, rxfilt); 492 CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF); 493 CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF); 494 return; 495 } 496 497 /* Now program new ones. */ 498 error = 0; 499 if ((sc->vr_quirks & VR_Q_CAM) != 0) { 500 struct vr_hash_maddr_cam_ctx ctx; 501 502 /* 503 * For hardwares that have CAM capability, use 504 * 32 entries multicast perfect filter. 505 */ 506 ctx.sc = sc; 507 ctx.mask = 0; 508 ctx.error = 0; 509 mcnt = if_foreach_llmaddr(ifp, vr_hash_maddr_cam, &ctx); 510 vr_cam_mask(sc, VR_MCAST_CAM, ctx.mask); 511 } 512 513 if ((sc->vr_quirks & VR_Q_CAM) == 0 || error != 0) { 514 /* 515 * If there are too many multicast addresses or 516 * setting multicast CAM filter failed, use hash 517 * table based filtering. 518 */ 519 mcnt = if_foreach_llmaddr(ifp, vr_hash_maddr, hashes); 520 } 521 522 if (mcnt > 0) 523 rxfilt |= VR_RXCFG_RX_MULTI; 524 525 CSR_WRITE_4(sc, VR_MAR0, hashes[0]); 526 CSR_WRITE_4(sc, VR_MAR1, hashes[1]); 527 CSR_WRITE_1(sc, VR_RXCFG, rxfilt); 528 } 529 530 static void 531 vr_reset(const struct vr_softc *sc) 532 { 533 int i; 534 535 /*VR_LOCK_ASSERT(sc);*/ /* XXX: Called during attach w/o lock. */ 536 537 CSR_WRITE_1(sc, VR_CR1, VR_CR1_RESET); 538 if (sc->vr_revid < REV_ID_VT6102_A) { 539 /* VT86C100A needs more delay after reset. */ 540 DELAY(100); 541 } 542 for (i = 0; i < VR_TIMEOUT; i++) { 543 DELAY(10); 544 if (!(CSR_READ_1(sc, VR_CR1) & VR_CR1_RESET)) 545 break; 546 } 547 if (i == VR_TIMEOUT) { 548 if (sc->vr_revid < REV_ID_VT6102_A) 549 device_printf(sc->vr_dev, "reset never completed!\n"); 550 else { 551 /* Use newer force reset command. */ 552 device_printf(sc->vr_dev, 553 "Using force reset command.\n"); 554 VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST); 555 /* 556 * Wait a little while for the chip to get its brains 557 * in order. 558 */ 559 DELAY(2000); 560 } 561 } 562 563 } 564 565 /* 566 * Probe for a VIA Rhine chip. Check the PCI vendor and device 567 * IDs against our list and return a match or NULL 568 */ 569 static const struct vr_type * 570 vr_match(device_t dev) 571 { 572 const struct vr_type *t = vr_devs; 573 574 for (t = vr_devs; t->vr_name != NULL; t++) 575 if ((pci_get_vendor(dev) == t->vr_vid) && 576 (pci_get_device(dev) == t->vr_did)) 577 return (t); 578 return (NULL); 579 } 580 581 /* 582 * Probe for a VIA Rhine chip. Check the PCI vendor and device 583 * IDs against our list and return a device name if we find a match. 584 */ 585 static int 586 vr_probe(device_t dev) 587 { 588 const struct vr_type *t; 589 590 t = vr_match(dev); 591 if (t != NULL) { 592 device_set_desc(dev, t->vr_name); 593 return (BUS_PROBE_DEFAULT); 594 } 595 return (ENXIO); 596 } 597 598 /* 599 * Attach the interface. Allocate softc structures, do ifmedia 600 * setup and ethernet/BPF attach. 601 */ 602 static int 603 vr_attach(device_t dev) 604 { 605 struct vr_softc *sc; 606 if_t ifp; 607 const struct vr_type *t; 608 uint8_t eaddr[ETHER_ADDR_LEN]; 609 int error, rid; 610 int i, phy, pmc; 611 612 sc = device_get_softc(dev); 613 sc->vr_dev = dev; 614 t = vr_match(dev); 615 KASSERT(t != NULL, ("Lost if_vr device match")); 616 sc->vr_quirks = t->vr_quirks; 617 device_printf(dev, "Quirks: 0x%x\n", sc->vr_quirks); 618 619 mtx_init(&sc->vr_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 620 MTX_DEF); 621 callout_init_mtx(&sc->vr_stat_callout, &sc->vr_mtx, 0); 622 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), 623 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), 624 OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 625 sc, 0, vr_sysctl_stats, "I", "Statistics"); 626 627 error = 0; 628 629 /* 630 * Map control/status registers. 631 */ 632 pci_enable_busmaster(dev); 633 sc->vr_revid = pci_get_revid(dev); 634 device_printf(dev, "Revision: 0x%x\n", sc->vr_revid); 635 636 sc->vr_res_id = PCIR_BAR(0); 637 sc->vr_res_type = SYS_RES_IOPORT; 638 sc->vr_res = bus_alloc_resource_any(dev, sc->vr_res_type, 639 &sc->vr_res_id, RF_ACTIVE); 640 if (sc->vr_res == NULL) { 641 device_printf(dev, "couldn't map ports\n"); 642 error = ENXIO; 643 goto fail; 644 } 645 646 /* Allocate interrupt. */ 647 rid = 0; 648 sc->vr_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 649 RF_SHAREABLE | RF_ACTIVE); 650 651 if (sc->vr_irq == NULL) { 652 device_printf(dev, "couldn't map interrupt\n"); 653 error = ENXIO; 654 goto fail; 655 } 656 657 /* Allocate ifnet structure. */ 658 ifp = sc->vr_ifp = if_alloc(IFT_ETHER); 659 if_setsoftc(ifp, sc); 660 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 661 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); 662 if_setioctlfn(ifp, vr_ioctl); 663 if_setstartfn(ifp, vr_start); 664 if_setinitfn(ifp, vr_init); 665 if_setsendqlen(ifp, VR_TX_RING_CNT - 1); 666 if_setsendqready(ifp); 667 668 NET_TASK_INIT(&sc->vr_inttask, 0, vr_int_task, sc); 669 670 /* Configure Tx FIFO threshold. */ 671 sc->vr_txthresh = VR_TXTHRESH_MIN; 672 if (sc->vr_revid < REV_ID_VT6105_A0) { 673 /* 674 * Use store and forward mode for Rhine I/II. 675 * Otherwise they produce a lot of Tx underruns and 676 * it would take a while to get working FIFO threshold 677 * value. 678 */ 679 sc->vr_txthresh = VR_TXTHRESH_MAX; 680 } 681 if ((sc->vr_quirks & VR_Q_CSUM) != 0) { 682 if_sethwassist(ifp, VR_CSUM_FEATURES); 683 if_setcapabilitiesbit(ifp, IFCAP_HWCSUM, 0); 684 /* 685 * To update checksum field the hardware may need to 686 * store entire frames into FIFO before transmitting. 687 */ 688 sc->vr_txthresh = VR_TXTHRESH_MAX; 689 } 690 691 if (sc->vr_revid >= REV_ID_VT6102_A && 692 pci_find_cap(dev, PCIY_PMG, &pmc) == 0) 693 if_setcapabilitiesbit(ifp, IFCAP_WOL_UCAST | IFCAP_WOL_MAGIC, 0); 694 695 /* Rhine supports oversized VLAN frame. */ 696 if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0); 697 if_setcapenable(ifp, if_getcapabilities(ifp)); 698 #ifdef DEVICE_POLLING 699 if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0); 700 #endif 701 702 /* 703 * Windows may put the chip in suspend mode when it 704 * shuts down. Be sure to kick it in the head to wake it 705 * up again. 706 */ 707 if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0) 708 VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1)); 709 710 /* 711 * Get station address. The way the Rhine chips work, 712 * you're not allowed to directly access the EEPROM once 713 * they've been programmed a special way. Consequently, 714 * we need to read the node address from the PAR0 and PAR1 715 * registers. 716 * Reloading EEPROM also overwrites VR_CFGA, VR_CFGB, 717 * VR_CFGC and VR_CFGD such that memory mapped IO configured 718 * by driver is reset to default state. 719 */ 720 VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD); 721 for (i = VR_TIMEOUT; i > 0; i--) { 722 DELAY(1); 723 if ((CSR_READ_1(sc, VR_EECSR) & VR_EECSR_LOAD) == 0) 724 break; 725 } 726 if (i == 0) 727 device_printf(dev, "Reloading EEPROM timeout!\n"); 728 for (i = 0; i < ETHER_ADDR_LEN; i++) 729 eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i); 730 731 /* Reset the adapter. */ 732 vr_reset(sc); 733 /* Ack intr & disable further interrupts. */ 734 CSR_WRITE_2(sc, VR_ISR, 0xFFFF); 735 CSR_WRITE_2(sc, VR_IMR, 0); 736 if (sc->vr_revid >= REV_ID_VT6102_A) 737 CSR_WRITE_2(sc, VR_MII_IMR, 0); 738 739 if (sc->vr_revid < REV_ID_VT6102_A) { 740 pci_write_config(dev, VR_PCI_MODE2, 741 pci_read_config(dev, VR_PCI_MODE2, 1) | 742 VR_MODE2_MODE10T, 1); 743 } else { 744 /* Report error instead of retrying forever. */ 745 pci_write_config(dev, VR_PCI_MODE2, 746 pci_read_config(dev, VR_PCI_MODE2, 1) | 747 VR_MODE2_PCEROPT, 1); 748 /* Detect MII coding error. */ 749 pci_write_config(dev, VR_PCI_MODE3, 750 pci_read_config(dev, VR_PCI_MODE3, 1) | 751 VR_MODE3_MIION, 1); 752 if (sc->vr_revid >= REV_ID_VT6105_LOM && 753 sc->vr_revid < REV_ID_VT6105M_A0) 754 pci_write_config(dev, VR_PCI_MODE2, 755 pci_read_config(dev, VR_PCI_MODE2, 1) | 756 VR_MODE2_MODE10T, 1); 757 /* Enable Memory-Read-Multiple. */ 758 if (sc->vr_revid >= REV_ID_VT6107_A1 && 759 sc->vr_revid < REV_ID_VT6105M_A0) 760 pci_write_config(dev, VR_PCI_MODE2, 761 pci_read_config(dev, VR_PCI_MODE2, 1) | 762 VR_MODE2_MRDPL, 1); 763 } 764 /* Disable MII AUTOPOLL. */ 765 VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL); 766 767 if (vr_dma_alloc(sc) != 0) { 768 error = ENXIO; 769 goto fail; 770 } 771 772 /* Do MII setup. */ 773 if (sc->vr_revid >= REV_ID_VT6105_A0) 774 phy = 1; 775 else 776 phy = CSR_READ_1(sc, VR_PHYADDR) & VR_PHYADDR_MASK; 777 error = mii_attach(dev, &sc->vr_miibus, ifp, vr_ifmedia_upd, 778 vr_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 779 sc->vr_revid >= REV_ID_VT6102_A ? MIIF_DOPAUSE : 0); 780 if (error != 0) { 781 device_printf(dev, "attaching PHYs failed\n"); 782 goto fail; 783 } 784 785 /* Call MI attach routine. */ 786 ether_ifattach(ifp, eaddr); 787 /* 788 * Tell the upper layer(s) we support long frames. 789 * Must appear after the call to ether_ifattach() because 790 * ether_ifattach() sets ifi_hdrlen to the default value. 791 */ 792 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); 793 794 /* Hook interrupt last to avoid having to lock softc. */ 795 error = bus_setup_intr(dev, sc->vr_irq, INTR_TYPE_NET | INTR_MPSAFE, 796 vr_intr, NULL, sc, &sc->vr_intrhand); 797 798 if (error) { 799 device_printf(dev, "couldn't set up irq\n"); 800 ether_ifdetach(ifp); 801 goto fail; 802 } 803 804 fail: 805 if (error) 806 vr_detach(dev); 807 808 return (error); 809 } 810 811 /* 812 * Shutdown hardware and free up resources. This can be called any 813 * time after the mutex has been initialized. It is called in both 814 * the error case in attach and the normal detach case so it needs 815 * to be careful about only freeing resources that have actually been 816 * allocated. 817 */ 818 static int 819 vr_detach(device_t dev) 820 { 821 struct vr_softc *sc = device_get_softc(dev); 822 if_t ifp = sc->vr_ifp; 823 824 KASSERT(mtx_initialized(&sc->vr_mtx), ("vr mutex not initialized")); 825 826 #ifdef DEVICE_POLLING 827 if (ifp != NULL && if_getcapenable(ifp) & IFCAP_POLLING) 828 ether_poll_deregister(ifp); 829 #endif 830 831 /* These should only be active if attach succeeded. */ 832 if (device_is_attached(dev)) { 833 VR_LOCK(sc); 834 sc->vr_flags |= VR_F_DETACHED; 835 vr_stop(sc); 836 VR_UNLOCK(sc); 837 callout_drain(&sc->vr_stat_callout); 838 taskqueue_drain(taskqueue_fast, &sc->vr_inttask); 839 ether_ifdetach(ifp); 840 } 841 if (sc->vr_miibus) 842 device_delete_child(dev, sc->vr_miibus); 843 bus_generic_detach(dev); 844 845 if (sc->vr_intrhand) 846 bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand); 847 if (sc->vr_irq) 848 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq); 849 if (sc->vr_res) 850 bus_release_resource(dev, sc->vr_res_type, sc->vr_res_id, 851 sc->vr_res); 852 853 if (ifp) 854 if_free(ifp); 855 856 vr_dma_free(sc); 857 858 mtx_destroy(&sc->vr_mtx); 859 860 return (0); 861 } 862 863 struct vr_dmamap_arg { 864 bus_addr_t vr_busaddr; 865 }; 866 867 static void 868 vr_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 869 { 870 struct vr_dmamap_arg *ctx; 871 872 if (error != 0) 873 return; 874 ctx = arg; 875 ctx->vr_busaddr = segs[0].ds_addr; 876 } 877 878 static int 879 vr_dma_alloc(struct vr_softc *sc) 880 { 881 struct vr_dmamap_arg ctx; 882 struct vr_txdesc *txd; 883 struct vr_rxdesc *rxd; 884 bus_size_t tx_alignment; 885 int error, i; 886 887 /* Create parent DMA tag. */ 888 error = bus_dma_tag_create( 889 bus_get_dma_tag(sc->vr_dev), /* parent */ 890 1, 0, /* alignment, boundary */ 891 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 892 BUS_SPACE_MAXADDR, /* highaddr */ 893 NULL, NULL, /* filter, filterarg */ 894 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 895 0, /* nsegments */ 896 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 897 0, /* flags */ 898 NULL, NULL, /* lockfunc, lockarg */ 899 &sc->vr_cdata.vr_parent_tag); 900 if (error != 0) { 901 device_printf(sc->vr_dev, "failed to create parent DMA tag\n"); 902 goto fail; 903 } 904 /* Create tag for Tx ring. */ 905 error = bus_dma_tag_create( 906 sc->vr_cdata.vr_parent_tag, /* parent */ 907 VR_RING_ALIGN, 0, /* alignment, boundary */ 908 BUS_SPACE_MAXADDR, /* lowaddr */ 909 BUS_SPACE_MAXADDR, /* highaddr */ 910 NULL, NULL, /* filter, filterarg */ 911 VR_TX_RING_SIZE, /* maxsize */ 912 1, /* nsegments */ 913 VR_TX_RING_SIZE, /* maxsegsize */ 914 0, /* flags */ 915 NULL, NULL, /* lockfunc, lockarg */ 916 &sc->vr_cdata.vr_tx_ring_tag); 917 if (error != 0) { 918 device_printf(sc->vr_dev, "failed to create Tx ring DMA tag\n"); 919 goto fail; 920 } 921 922 /* Create tag for Rx ring. */ 923 error = bus_dma_tag_create( 924 sc->vr_cdata.vr_parent_tag, /* parent */ 925 VR_RING_ALIGN, 0, /* alignment, boundary */ 926 BUS_SPACE_MAXADDR, /* lowaddr */ 927 BUS_SPACE_MAXADDR, /* highaddr */ 928 NULL, NULL, /* filter, filterarg */ 929 VR_RX_RING_SIZE, /* maxsize */ 930 1, /* nsegments */ 931 VR_RX_RING_SIZE, /* maxsegsize */ 932 0, /* flags */ 933 NULL, NULL, /* lockfunc, lockarg */ 934 &sc->vr_cdata.vr_rx_ring_tag); 935 if (error != 0) { 936 device_printf(sc->vr_dev, "failed to create Rx ring DMA tag\n"); 937 goto fail; 938 } 939 940 if ((sc->vr_quirks & VR_Q_NEEDALIGN) != 0) 941 tx_alignment = sizeof(uint32_t); 942 else 943 tx_alignment = 1; 944 /* Create tag for Tx buffers. */ 945 error = bus_dma_tag_create( 946 sc->vr_cdata.vr_parent_tag, /* parent */ 947 tx_alignment, 0, /* alignment, boundary */ 948 BUS_SPACE_MAXADDR, /* lowaddr */ 949 BUS_SPACE_MAXADDR, /* highaddr */ 950 NULL, NULL, /* filter, filterarg */ 951 MCLBYTES * VR_MAXFRAGS, /* maxsize */ 952 VR_MAXFRAGS, /* nsegments */ 953 MCLBYTES, /* maxsegsize */ 954 0, /* flags */ 955 NULL, NULL, /* lockfunc, lockarg */ 956 &sc->vr_cdata.vr_tx_tag); 957 if (error != 0) { 958 device_printf(sc->vr_dev, "failed to create Tx DMA tag\n"); 959 goto fail; 960 } 961 962 /* Create tag for Rx buffers. */ 963 error = bus_dma_tag_create( 964 sc->vr_cdata.vr_parent_tag, /* parent */ 965 VR_RX_ALIGN, 0, /* alignment, boundary */ 966 BUS_SPACE_MAXADDR, /* lowaddr */ 967 BUS_SPACE_MAXADDR, /* highaddr */ 968 NULL, NULL, /* filter, filterarg */ 969 MCLBYTES, /* maxsize */ 970 1, /* nsegments */ 971 MCLBYTES, /* maxsegsize */ 972 0, /* flags */ 973 NULL, NULL, /* lockfunc, lockarg */ 974 &sc->vr_cdata.vr_rx_tag); 975 if (error != 0) { 976 device_printf(sc->vr_dev, "failed to create Rx DMA tag\n"); 977 goto fail; 978 } 979 980 /* Allocate DMA'able memory and load the DMA map for Tx ring. */ 981 error = bus_dmamem_alloc(sc->vr_cdata.vr_tx_ring_tag, 982 (void **)&sc->vr_rdata.vr_tx_ring, BUS_DMA_WAITOK | 983 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->vr_cdata.vr_tx_ring_map); 984 if (error != 0) { 985 device_printf(sc->vr_dev, 986 "failed to allocate DMA'able memory for Tx ring\n"); 987 goto fail; 988 } 989 990 ctx.vr_busaddr = 0; 991 error = bus_dmamap_load(sc->vr_cdata.vr_tx_ring_tag, 992 sc->vr_cdata.vr_tx_ring_map, sc->vr_rdata.vr_tx_ring, 993 VR_TX_RING_SIZE, vr_dmamap_cb, &ctx, 0); 994 if (error != 0 || ctx.vr_busaddr == 0) { 995 device_printf(sc->vr_dev, 996 "failed to load DMA'able memory for Tx ring\n"); 997 goto fail; 998 } 999 sc->vr_rdata.vr_tx_ring_paddr = ctx.vr_busaddr; 1000 1001 /* Allocate DMA'able memory and load the DMA map for Rx ring. */ 1002 error = bus_dmamem_alloc(sc->vr_cdata.vr_rx_ring_tag, 1003 (void **)&sc->vr_rdata.vr_rx_ring, BUS_DMA_WAITOK | 1004 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->vr_cdata.vr_rx_ring_map); 1005 if (error != 0) { 1006 device_printf(sc->vr_dev, 1007 "failed to allocate DMA'able memory for Rx ring\n"); 1008 goto fail; 1009 } 1010 1011 ctx.vr_busaddr = 0; 1012 error = bus_dmamap_load(sc->vr_cdata.vr_rx_ring_tag, 1013 sc->vr_cdata.vr_rx_ring_map, sc->vr_rdata.vr_rx_ring, 1014 VR_RX_RING_SIZE, vr_dmamap_cb, &ctx, 0); 1015 if (error != 0 || ctx.vr_busaddr == 0) { 1016 device_printf(sc->vr_dev, 1017 "failed to load DMA'able memory for Rx ring\n"); 1018 goto fail; 1019 } 1020 sc->vr_rdata.vr_rx_ring_paddr = ctx.vr_busaddr; 1021 1022 /* Create DMA maps for Tx buffers. */ 1023 for (i = 0; i < VR_TX_RING_CNT; i++) { 1024 txd = &sc->vr_cdata.vr_txdesc[i]; 1025 txd->tx_m = NULL; 1026 txd->tx_dmamap = NULL; 1027 error = bus_dmamap_create(sc->vr_cdata.vr_tx_tag, 0, 1028 &txd->tx_dmamap); 1029 if (error != 0) { 1030 device_printf(sc->vr_dev, 1031 "failed to create Tx dmamap\n"); 1032 goto fail; 1033 } 1034 } 1035 /* Create DMA maps for Rx buffers. */ 1036 if ((error = bus_dmamap_create(sc->vr_cdata.vr_rx_tag, 0, 1037 &sc->vr_cdata.vr_rx_sparemap)) != 0) { 1038 device_printf(sc->vr_dev, 1039 "failed to create spare Rx dmamap\n"); 1040 goto fail; 1041 } 1042 for (i = 0; i < VR_RX_RING_CNT; i++) { 1043 rxd = &sc->vr_cdata.vr_rxdesc[i]; 1044 rxd->rx_m = NULL; 1045 rxd->rx_dmamap = NULL; 1046 error = bus_dmamap_create(sc->vr_cdata.vr_rx_tag, 0, 1047 &rxd->rx_dmamap); 1048 if (error != 0) { 1049 device_printf(sc->vr_dev, 1050 "failed to create Rx dmamap\n"); 1051 goto fail; 1052 } 1053 } 1054 1055 fail: 1056 return (error); 1057 } 1058 1059 static void 1060 vr_dma_free(struct vr_softc *sc) 1061 { 1062 struct vr_txdesc *txd; 1063 struct vr_rxdesc *rxd; 1064 int i; 1065 1066 /* Tx ring. */ 1067 if (sc->vr_cdata.vr_tx_ring_tag) { 1068 if (sc->vr_rdata.vr_tx_ring_paddr) 1069 bus_dmamap_unload(sc->vr_cdata.vr_tx_ring_tag, 1070 sc->vr_cdata.vr_tx_ring_map); 1071 if (sc->vr_rdata.vr_tx_ring) 1072 bus_dmamem_free(sc->vr_cdata.vr_tx_ring_tag, 1073 sc->vr_rdata.vr_tx_ring, 1074 sc->vr_cdata.vr_tx_ring_map); 1075 sc->vr_rdata.vr_tx_ring = NULL; 1076 sc->vr_rdata.vr_tx_ring_paddr = 0; 1077 bus_dma_tag_destroy(sc->vr_cdata.vr_tx_ring_tag); 1078 sc->vr_cdata.vr_tx_ring_tag = NULL; 1079 } 1080 /* Rx ring. */ 1081 if (sc->vr_cdata.vr_rx_ring_tag) { 1082 if (sc->vr_rdata.vr_rx_ring_paddr) 1083 bus_dmamap_unload(sc->vr_cdata.vr_rx_ring_tag, 1084 sc->vr_cdata.vr_rx_ring_map); 1085 if (sc->vr_rdata.vr_rx_ring) 1086 bus_dmamem_free(sc->vr_cdata.vr_rx_ring_tag, 1087 sc->vr_rdata.vr_rx_ring, 1088 sc->vr_cdata.vr_rx_ring_map); 1089 sc->vr_rdata.vr_rx_ring = NULL; 1090 sc->vr_rdata.vr_rx_ring_paddr = 0; 1091 bus_dma_tag_destroy(sc->vr_cdata.vr_rx_ring_tag); 1092 sc->vr_cdata.vr_rx_ring_tag = NULL; 1093 } 1094 /* Tx buffers. */ 1095 if (sc->vr_cdata.vr_tx_tag) { 1096 for (i = 0; i < VR_TX_RING_CNT; i++) { 1097 txd = &sc->vr_cdata.vr_txdesc[i]; 1098 if (txd->tx_dmamap) { 1099 bus_dmamap_destroy(sc->vr_cdata.vr_tx_tag, 1100 txd->tx_dmamap); 1101 txd->tx_dmamap = NULL; 1102 } 1103 } 1104 bus_dma_tag_destroy(sc->vr_cdata.vr_tx_tag); 1105 sc->vr_cdata.vr_tx_tag = NULL; 1106 } 1107 /* Rx buffers. */ 1108 if (sc->vr_cdata.vr_rx_tag) { 1109 for (i = 0; i < VR_RX_RING_CNT; i++) { 1110 rxd = &sc->vr_cdata.vr_rxdesc[i]; 1111 if (rxd->rx_dmamap) { 1112 bus_dmamap_destroy(sc->vr_cdata.vr_rx_tag, 1113 rxd->rx_dmamap); 1114 rxd->rx_dmamap = NULL; 1115 } 1116 } 1117 if (sc->vr_cdata.vr_rx_sparemap) { 1118 bus_dmamap_destroy(sc->vr_cdata.vr_rx_tag, 1119 sc->vr_cdata.vr_rx_sparemap); 1120 sc->vr_cdata.vr_rx_sparemap = 0; 1121 } 1122 bus_dma_tag_destroy(sc->vr_cdata.vr_rx_tag); 1123 sc->vr_cdata.vr_rx_tag = NULL; 1124 } 1125 1126 if (sc->vr_cdata.vr_parent_tag) { 1127 bus_dma_tag_destroy(sc->vr_cdata.vr_parent_tag); 1128 sc->vr_cdata.vr_parent_tag = NULL; 1129 } 1130 } 1131 1132 /* 1133 * Initialize the transmit descriptors. 1134 */ 1135 static int 1136 vr_tx_ring_init(struct vr_softc *sc) 1137 { 1138 struct vr_ring_data *rd; 1139 struct vr_txdesc *txd; 1140 bus_addr_t addr; 1141 int i; 1142 1143 sc->vr_cdata.vr_tx_prod = 0; 1144 sc->vr_cdata.vr_tx_cons = 0; 1145 sc->vr_cdata.vr_tx_cnt = 0; 1146 sc->vr_cdata.vr_tx_pkts = 0; 1147 1148 rd = &sc->vr_rdata; 1149 bzero(rd->vr_tx_ring, VR_TX_RING_SIZE); 1150 for (i = 0; i < VR_TX_RING_CNT; i++) { 1151 if (i == VR_TX_RING_CNT - 1) 1152 addr = VR_TX_RING_ADDR(sc, 0); 1153 else 1154 addr = VR_TX_RING_ADDR(sc, i + 1); 1155 rd->vr_tx_ring[i].vr_nextphys = htole32(VR_ADDR_LO(addr)); 1156 txd = &sc->vr_cdata.vr_txdesc[i]; 1157 txd->tx_m = NULL; 1158 } 1159 1160 bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag, 1161 sc->vr_cdata.vr_tx_ring_map, 1162 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1163 1164 return (0); 1165 } 1166 1167 /* 1168 * Initialize the RX descriptors and allocate mbufs for them. Note that 1169 * we arrange the descriptors in a closed ring, so that the last descriptor 1170 * points back to the first. 1171 */ 1172 static int 1173 vr_rx_ring_init(struct vr_softc *sc) 1174 { 1175 struct vr_ring_data *rd; 1176 struct vr_rxdesc *rxd; 1177 bus_addr_t addr; 1178 int i; 1179 1180 sc->vr_cdata.vr_rx_cons = 0; 1181 1182 rd = &sc->vr_rdata; 1183 bzero(rd->vr_rx_ring, VR_RX_RING_SIZE); 1184 for (i = 0; i < VR_RX_RING_CNT; i++) { 1185 rxd = &sc->vr_cdata.vr_rxdesc[i]; 1186 rxd->rx_m = NULL; 1187 rxd->desc = &rd->vr_rx_ring[i]; 1188 if (i == VR_RX_RING_CNT - 1) 1189 addr = VR_RX_RING_ADDR(sc, 0); 1190 else 1191 addr = VR_RX_RING_ADDR(sc, i + 1); 1192 rd->vr_rx_ring[i].vr_nextphys = htole32(VR_ADDR_LO(addr)); 1193 if (vr_newbuf(sc, i) != 0) 1194 return (ENOBUFS); 1195 } 1196 1197 bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag, 1198 sc->vr_cdata.vr_rx_ring_map, 1199 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1200 1201 return (0); 1202 } 1203 1204 static __inline void 1205 vr_discard_rxbuf(struct vr_rxdesc *rxd) 1206 { 1207 struct vr_desc *desc; 1208 1209 desc = rxd->desc; 1210 desc->vr_ctl = htole32(VR_RXCTL | (MCLBYTES - sizeof(uint64_t))); 1211 desc->vr_status = htole32(VR_RXSTAT_OWN); 1212 } 1213 1214 /* 1215 * Initialize an RX descriptor and attach an MBUF cluster. 1216 * Note: the length fields are only 11 bits wide, which means the 1217 * largest size we can specify is 2047. This is important because 1218 * MCLBYTES is 2048, so we have to subtract one otherwise we'll 1219 * overflow the field and make a mess. 1220 */ 1221 static int 1222 vr_newbuf(struct vr_softc *sc, int idx) 1223 { 1224 struct vr_desc *desc; 1225 struct vr_rxdesc *rxd; 1226 struct mbuf *m; 1227 bus_dma_segment_t segs[1]; 1228 bus_dmamap_t map; 1229 int nsegs; 1230 1231 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 1232 if (m == NULL) 1233 return (ENOBUFS); 1234 m->m_len = m->m_pkthdr.len = MCLBYTES; 1235 m_adj(m, sizeof(uint64_t)); 1236 1237 if (bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_rx_tag, 1238 sc->vr_cdata.vr_rx_sparemap, m, segs, &nsegs, 0) != 0) { 1239 m_freem(m); 1240 return (ENOBUFS); 1241 } 1242 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1243 1244 rxd = &sc->vr_cdata.vr_rxdesc[idx]; 1245 if (rxd->rx_m != NULL) { 1246 bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap, 1247 BUS_DMASYNC_POSTREAD); 1248 bus_dmamap_unload(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap); 1249 } 1250 map = rxd->rx_dmamap; 1251 rxd->rx_dmamap = sc->vr_cdata.vr_rx_sparemap; 1252 sc->vr_cdata.vr_rx_sparemap = map; 1253 bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap, 1254 BUS_DMASYNC_PREREAD); 1255 rxd->rx_m = m; 1256 desc = rxd->desc; 1257 desc->vr_data = htole32(VR_ADDR_LO(segs[0].ds_addr)); 1258 desc->vr_ctl = htole32(VR_RXCTL | segs[0].ds_len); 1259 desc->vr_status = htole32(VR_RXSTAT_OWN); 1260 1261 return (0); 1262 } 1263 1264 #ifndef __NO_STRICT_ALIGNMENT 1265 static __inline void 1266 vr_fixup_rx(struct mbuf *m) 1267 { 1268 uint16_t *src, *dst; 1269 int i; 1270 1271 src = mtod(m, uint16_t *); 1272 dst = src - 1; 1273 1274 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) 1275 *dst++ = *src++; 1276 1277 m->m_data -= ETHER_ALIGN; 1278 } 1279 #endif 1280 1281 /* 1282 * A frame has been uploaded: pass the resulting mbuf chain up to 1283 * the higher level protocols. 1284 */ 1285 static int 1286 vr_rxeof(struct vr_softc *sc) 1287 { 1288 struct vr_rxdesc *rxd; 1289 struct mbuf *m; 1290 if_t ifp; 1291 struct vr_desc *cur_rx; 1292 int cons, prog, total_len, rx_npkts; 1293 uint32_t rxstat, rxctl; 1294 1295 VR_LOCK_ASSERT(sc); 1296 ifp = sc->vr_ifp; 1297 cons = sc->vr_cdata.vr_rx_cons; 1298 rx_npkts = 0; 1299 1300 bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag, 1301 sc->vr_cdata.vr_rx_ring_map, 1302 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1303 1304 for (prog = 0; prog < VR_RX_RING_CNT; VR_INC(cons, VR_RX_RING_CNT)) { 1305 #ifdef DEVICE_POLLING 1306 if (if_getcapenable(ifp) & IFCAP_POLLING) { 1307 if (sc->rxcycles <= 0) 1308 break; 1309 sc->rxcycles--; 1310 } 1311 #endif 1312 cur_rx = &sc->vr_rdata.vr_rx_ring[cons]; 1313 rxstat = le32toh(cur_rx->vr_status); 1314 rxctl = le32toh(cur_rx->vr_ctl); 1315 if ((rxstat & VR_RXSTAT_OWN) == VR_RXSTAT_OWN) 1316 break; 1317 1318 prog++; 1319 rxd = &sc->vr_cdata.vr_rxdesc[cons]; 1320 m = rxd->rx_m; 1321 1322 /* 1323 * If an error occurs, update stats, clear the 1324 * status word and leave the mbuf cluster in place: 1325 * it should simply get re-used next time this descriptor 1326 * comes up in the ring. 1327 * We don't support SG in Rx path yet, so discard 1328 * partial frame. 1329 */ 1330 if ((rxstat & VR_RXSTAT_RX_OK) == 0 || 1331 (rxstat & (VR_RXSTAT_FIRSTFRAG | VR_RXSTAT_LASTFRAG)) != 1332 (VR_RXSTAT_FIRSTFRAG | VR_RXSTAT_LASTFRAG)) { 1333 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 1334 sc->vr_stat.rx_errors++; 1335 if (rxstat & VR_RXSTAT_CRCERR) 1336 sc->vr_stat.rx_crc_errors++; 1337 if (rxstat & VR_RXSTAT_FRAMEALIGNERR) 1338 sc->vr_stat.rx_alignment++; 1339 if (rxstat & VR_RXSTAT_FIFOOFLOW) 1340 sc->vr_stat.rx_fifo_overflows++; 1341 if (rxstat & VR_RXSTAT_GIANT) 1342 sc->vr_stat.rx_giants++; 1343 if (rxstat & VR_RXSTAT_RUNT) 1344 sc->vr_stat.rx_runts++; 1345 if (rxstat & VR_RXSTAT_BUFFERR) 1346 sc->vr_stat.rx_no_buffers++; 1347 #ifdef VR_SHOW_ERRORS 1348 device_printf(sc->vr_dev, "%s: receive error = 0x%b\n", 1349 __func__, rxstat & 0xff, VR_RXSTAT_ERR_BITS); 1350 #endif 1351 vr_discard_rxbuf(rxd); 1352 continue; 1353 } 1354 1355 if (vr_newbuf(sc, cons) != 0) { 1356 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 1357 sc->vr_stat.rx_errors++; 1358 sc->vr_stat.rx_no_mbufs++; 1359 vr_discard_rxbuf(rxd); 1360 continue; 1361 } 1362 1363 /* 1364 * XXX The VIA Rhine chip includes the CRC with every 1365 * received frame, and there's no way to turn this 1366 * behavior off (at least, I can't find anything in 1367 * the manual that explains how to do it) so we have 1368 * to trim off the CRC manually. 1369 */ 1370 total_len = VR_RXBYTES(rxstat); 1371 total_len -= ETHER_CRC_LEN; 1372 m->m_pkthdr.len = m->m_len = total_len; 1373 #ifndef __NO_STRICT_ALIGNMENT 1374 /* 1375 * RX buffers must be 32-bit aligned. 1376 * Ignore the alignment problems on the non-strict alignment 1377 * platform. The performance hit incurred due to unaligned 1378 * accesses is much smaller than the hit produced by forcing 1379 * buffer copies all the time. 1380 */ 1381 vr_fixup_rx(m); 1382 #endif 1383 m->m_pkthdr.rcvif = ifp; 1384 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 1385 sc->vr_stat.rx_ok++; 1386 if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0 && 1387 (rxstat & VR_RXSTAT_FRAG) == 0 && 1388 (rxctl & VR_RXCTL_IP) != 0) { 1389 /* Checksum is valid for non-fragmented IP packets. */ 1390 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 1391 if ((rxctl & VR_RXCTL_IPOK) == VR_RXCTL_IPOK) { 1392 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 1393 if (rxctl & (VR_RXCTL_TCP | VR_RXCTL_UDP)) { 1394 m->m_pkthdr.csum_flags |= 1395 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1396 if ((rxctl & VR_RXCTL_TCPUDPOK) != 0) 1397 m->m_pkthdr.csum_data = 0xffff; 1398 } 1399 } 1400 } 1401 VR_UNLOCK(sc); 1402 if_input(ifp, m); 1403 VR_LOCK(sc); 1404 rx_npkts++; 1405 } 1406 1407 if (prog > 0) { 1408 /* 1409 * Let controller know how many number of RX buffers 1410 * are posted but avoid expensive register access if 1411 * TX pause capability was not negotiated with link 1412 * partner. 1413 */ 1414 if ((sc->vr_flags & VR_F_TXPAUSE) != 0) { 1415 if (prog >= VR_RX_RING_CNT) 1416 prog = VR_RX_RING_CNT - 1; 1417 CSR_WRITE_1(sc, VR_FLOWCR0, prog); 1418 } 1419 sc->vr_cdata.vr_rx_cons = cons; 1420 bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag, 1421 sc->vr_cdata.vr_rx_ring_map, 1422 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1423 } 1424 return (rx_npkts); 1425 } 1426 1427 /* 1428 * A frame was downloaded to the chip. It's safe for us to clean up 1429 * the list buffers. 1430 */ 1431 static void 1432 vr_txeof(struct vr_softc *sc) 1433 { 1434 struct vr_txdesc *txd; 1435 struct vr_desc *cur_tx; 1436 if_t ifp; 1437 uint32_t txctl, txstat; 1438 int cons, prod; 1439 1440 VR_LOCK_ASSERT(sc); 1441 1442 cons = sc->vr_cdata.vr_tx_cons; 1443 prod = sc->vr_cdata.vr_tx_prod; 1444 if (cons == prod) 1445 return; 1446 1447 bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag, 1448 sc->vr_cdata.vr_tx_ring_map, 1449 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1450 1451 ifp = sc->vr_ifp; 1452 /* 1453 * Go through our tx list and free mbufs for those 1454 * frames that have been transmitted. 1455 */ 1456 for (; cons != prod; VR_INC(cons, VR_TX_RING_CNT)) { 1457 cur_tx = &sc->vr_rdata.vr_tx_ring[cons]; 1458 txctl = le32toh(cur_tx->vr_ctl); 1459 txstat = le32toh(cur_tx->vr_status); 1460 if ((txstat & VR_TXSTAT_OWN) == VR_TXSTAT_OWN) 1461 break; 1462 1463 sc->vr_cdata.vr_tx_cnt--; 1464 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); 1465 /* Only the first descriptor in the chain is valid. */ 1466 if ((txctl & VR_TXCTL_FIRSTFRAG) == 0) 1467 continue; 1468 1469 txd = &sc->vr_cdata.vr_txdesc[cons]; 1470 KASSERT(txd->tx_m != NULL, ("%s: accessing NULL mbuf!\n", 1471 __func__)); 1472 1473 if ((txstat & VR_TXSTAT_ERRSUM) != 0) { 1474 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1475 sc->vr_stat.tx_errors++; 1476 if ((txstat & VR_TXSTAT_ABRT) != 0) { 1477 /* Give up and restart Tx. */ 1478 sc->vr_stat.tx_abort++; 1479 bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, 1480 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 1481 bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, 1482 txd->tx_dmamap); 1483 m_freem(txd->tx_m); 1484 txd->tx_m = NULL; 1485 VR_INC(cons, VR_TX_RING_CNT); 1486 sc->vr_cdata.vr_tx_cons = cons; 1487 if (vr_tx_stop(sc) != 0) { 1488 device_printf(sc->vr_dev, 1489 "%s: Tx shutdown error -- " 1490 "resetting\n", __func__); 1491 sc->vr_flags |= VR_F_RESTART; 1492 return; 1493 } 1494 vr_tx_start(sc); 1495 break; 1496 } 1497 if ((sc->vr_revid < REV_ID_VT3071_A && 1498 (txstat & VR_TXSTAT_UNDERRUN)) || 1499 (txstat & (VR_TXSTAT_UDF | VR_TXSTAT_TBUFF))) { 1500 sc->vr_stat.tx_underrun++; 1501 /* Retry and restart Tx. */ 1502 sc->vr_cdata.vr_tx_cnt++; 1503 sc->vr_cdata.vr_tx_cons = cons; 1504 cur_tx->vr_status = htole32(VR_TXSTAT_OWN); 1505 bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag, 1506 sc->vr_cdata.vr_tx_ring_map, 1507 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1508 vr_tx_underrun(sc); 1509 return; 1510 } 1511 if ((txstat & VR_TXSTAT_DEFER) != 0) { 1512 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); 1513 sc->vr_stat.tx_collisions++; 1514 } 1515 if ((txstat & VR_TXSTAT_LATECOLL) != 0) { 1516 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); 1517 sc->vr_stat.tx_late_collisions++; 1518 } 1519 } else { 1520 sc->vr_stat.tx_ok++; 1521 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); 1522 } 1523 1524 bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap, 1525 BUS_DMASYNC_POSTWRITE); 1526 bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap); 1527 if (sc->vr_revid < REV_ID_VT3071_A) { 1528 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1529 (txstat & VR_TXSTAT_COLLCNT) >> 3); 1530 sc->vr_stat.tx_collisions += 1531 (txstat & VR_TXSTAT_COLLCNT) >> 3; 1532 } else { 1533 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (txstat & 0x0f)); 1534 sc->vr_stat.tx_collisions += (txstat & 0x0f); 1535 } 1536 m_freem(txd->tx_m); 1537 txd->tx_m = NULL; 1538 } 1539 1540 sc->vr_cdata.vr_tx_cons = cons; 1541 if (sc->vr_cdata.vr_tx_cnt == 0) 1542 sc->vr_watchdog_timer = 0; 1543 } 1544 1545 static void 1546 vr_tick(void *xsc) 1547 { 1548 struct vr_softc *sc; 1549 struct mii_data *mii; 1550 1551 sc = (struct vr_softc *)xsc; 1552 1553 VR_LOCK_ASSERT(sc); 1554 1555 if ((sc->vr_flags & VR_F_RESTART) != 0) { 1556 device_printf(sc->vr_dev, "restarting\n"); 1557 sc->vr_stat.num_restart++; 1558 if_setdrvflagbits(sc->vr_ifp, 0, IFF_DRV_RUNNING); 1559 vr_init_locked(sc); 1560 sc->vr_flags &= ~VR_F_RESTART; 1561 } 1562 1563 mii = device_get_softc(sc->vr_miibus); 1564 mii_tick(mii); 1565 if ((sc->vr_flags & VR_F_LINK) == 0) 1566 vr_miibus_statchg(sc->vr_dev); 1567 vr_watchdog(sc); 1568 callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc); 1569 } 1570 1571 #ifdef DEVICE_POLLING 1572 static poll_handler_t vr_poll; 1573 static poll_handler_t vr_poll_locked; 1574 1575 static int 1576 vr_poll(if_t ifp, enum poll_cmd cmd, int count) 1577 { 1578 struct vr_softc *sc; 1579 int rx_npkts; 1580 1581 sc = if_getsoftc(ifp); 1582 rx_npkts = 0; 1583 1584 VR_LOCK(sc); 1585 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) 1586 rx_npkts = vr_poll_locked(ifp, cmd, count); 1587 VR_UNLOCK(sc); 1588 return (rx_npkts); 1589 } 1590 1591 static int 1592 vr_poll_locked(if_t ifp, enum poll_cmd cmd, int count) 1593 { 1594 struct vr_softc *sc; 1595 int rx_npkts; 1596 1597 sc = if_getsoftc(ifp); 1598 1599 VR_LOCK_ASSERT(sc); 1600 1601 sc->rxcycles = count; 1602 rx_npkts = vr_rxeof(sc); 1603 vr_txeof(sc); 1604 if (!if_sendq_empty(ifp)) 1605 vr_start_locked(ifp); 1606 1607 if (cmd == POLL_AND_CHECK_STATUS) { 1608 uint16_t status; 1609 1610 /* Also check status register. */ 1611 status = CSR_READ_2(sc, VR_ISR); 1612 if (status) 1613 CSR_WRITE_2(sc, VR_ISR, status); 1614 1615 if ((status & VR_INTRS) == 0) 1616 return (rx_npkts); 1617 1618 if ((status & (VR_ISR_BUSERR | VR_ISR_LINKSTAT2 | 1619 VR_ISR_STATSOFLOW)) != 0) { 1620 if (vr_error(sc, status) != 0) 1621 return (rx_npkts); 1622 } 1623 if ((status & (VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW)) != 0) { 1624 #ifdef VR_SHOW_ERRORS 1625 device_printf(sc->vr_dev, "%s: receive error : 0x%b\n", 1626 __func__, status, VR_ISR_ERR_BITS); 1627 #endif 1628 vr_rx_start(sc); 1629 } 1630 } 1631 return (rx_npkts); 1632 } 1633 #endif /* DEVICE_POLLING */ 1634 1635 /* Back off the transmit threshold. */ 1636 static void 1637 vr_tx_underrun(struct vr_softc *sc) 1638 { 1639 int thresh; 1640 1641 device_printf(sc->vr_dev, "Tx underrun -- "); 1642 if (sc->vr_txthresh < VR_TXTHRESH_MAX) { 1643 thresh = sc->vr_txthresh; 1644 sc->vr_txthresh++; 1645 if (sc->vr_txthresh >= VR_TXTHRESH_MAX) { 1646 sc->vr_txthresh = VR_TXTHRESH_MAX; 1647 printf("using store and forward mode\n"); 1648 } else 1649 printf("increasing Tx threshold(%d -> %d)\n", 1650 vr_tx_threshold_tables[thresh].value, 1651 vr_tx_threshold_tables[thresh + 1].value); 1652 } else 1653 printf("\n"); 1654 sc->vr_stat.tx_underrun++; 1655 if (vr_tx_stop(sc) != 0) { 1656 device_printf(sc->vr_dev, "%s: Tx shutdown error -- " 1657 "resetting\n", __func__); 1658 sc->vr_flags |= VR_F_RESTART; 1659 return; 1660 } 1661 vr_tx_start(sc); 1662 } 1663 1664 static int 1665 vr_intr(void *arg) 1666 { 1667 struct vr_softc *sc; 1668 uint16_t status; 1669 1670 sc = (struct vr_softc *)arg; 1671 1672 status = CSR_READ_2(sc, VR_ISR); 1673 if (status == 0 || status == 0xffff || (status & VR_INTRS) == 0) 1674 return (FILTER_STRAY); 1675 1676 /* Disable interrupts. */ 1677 CSR_WRITE_2(sc, VR_IMR, 0x0000); 1678 1679 taskqueue_enqueue(taskqueue_fast, &sc->vr_inttask); 1680 1681 return (FILTER_HANDLED); 1682 } 1683 1684 static void 1685 vr_int_task(void *arg, int npending) 1686 { 1687 struct vr_softc *sc; 1688 if_t ifp; 1689 uint16_t status; 1690 1691 sc = (struct vr_softc *)arg; 1692 1693 VR_LOCK(sc); 1694 1695 if ((sc->vr_flags & VR_F_SUSPENDED) != 0) 1696 goto done_locked; 1697 1698 status = CSR_READ_2(sc, VR_ISR); 1699 ifp = sc->vr_ifp; 1700 #ifdef DEVICE_POLLING 1701 if ((if_getcapenable(ifp) & IFCAP_POLLING) != 0) 1702 goto done_locked; 1703 #endif 1704 1705 /* Suppress unwanted interrupts. */ 1706 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 || 1707 (sc->vr_flags & VR_F_RESTART) != 0) { 1708 CSR_WRITE_2(sc, VR_IMR, 0); 1709 CSR_WRITE_2(sc, VR_ISR, status); 1710 goto done_locked; 1711 } 1712 1713 for (; (status & VR_INTRS) != 0;) { 1714 CSR_WRITE_2(sc, VR_ISR, status); 1715 if ((status & (VR_ISR_BUSERR | VR_ISR_LINKSTAT2 | 1716 VR_ISR_STATSOFLOW)) != 0) { 1717 if (vr_error(sc, status) != 0) { 1718 VR_UNLOCK(sc); 1719 return; 1720 } 1721 } 1722 vr_rxeof(sc); 1723 if ((status & (VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW)) != 0) { 1724 #ifdef VR_SHOW_ERRORS 1725 device_printf(sc->vr_dev, "%s: receive error = 0x%b\n", 1726 __func__, status, VR_ISR_ERR_BITS); 1727 #endif 1728 /* Restart Rx if RxDMA SM was stopped. */ 1729 vr_rx_start(sc); 1730 } 1731 vr_txeof(sc); 1732 1733 if (!if_sendq_empty(ifp)) 1734 vr_start_locked(ifp); 1735 1736 status = CSR_READ_2(sc, VR_ISR); 1737 } 1738 1739 /* Re-enable interrupts. */ 1740 CSR_WRITE_2(sc, VR_IMR, VR_INTRS); 1741 1742 done_locked: 1743 VR_UNLOCK(sc); 1744 } 1745 1746 static int 1747 vr_error(struct vr_softc *sc, uint16_t status) 1748 { 1749 uint16_t pcis; 1750 1751 status &= VR_ISR_BUSERR | VR_ISR_LINKSTAT2 | VR_ISR_STATSOFLOW; 1752 if ((status & VR_ISR_BUSERR) != 0) { 1753 status &= ~VR_ISR_BUSERR; 1754 sc->vr_stat.bus_errors++; 1755 /* Disable further interrupts. */ 1756 CSR_WRITE_2(sc, VR_IMR, 0); 1757 pcis = pci_read_config(sc->vr_dev, PCIR_STATUS, 2); 1758 device_printf(sc->vr_dev, "PCI bus error(0x%04x) -- " 1759 "resetting\n", pcis); 1760 pci_write_config(sc->vr_dev, PCIR_STATUS, pcis, 2); 1761 sc->vr_flags |= VR_F_RESTART; 1762 return (EAGAIN); 1763 } 1764 if ((status & VR_ISR_LINKSTAT2) != 0) { 1765 /* Link state change, duplex changes etc. */ 1766 status &= ~VR_ISR_LINKSTAT2; 1767 } 1768 if ((status & VR_ISR_STATSOFLOW) != 0) { 1769 status &= ~VR_ISR_STATSOFLOW; 1770 if (sc->vr_revid >= REV_ID_VT6105M_A0) { 1771 /* Update MIB counters. */ 1772 } 1773 } 1774 1775 if (status != 0) 1776 device_printf(sc->vr_dev, 1777 "unhandled interrupt, status = 0x%04x\n", status); 1778 return (0); 1779 } 1780 1781 /* 1782 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 1783 * pointers to the fragment pointers. 1784 */ 1785 static int 1786 vr_encap(struct vr_softc *sc, struct mbuf **m_head) 1787 { 1788 struct vr_txdesc *txd; 1789 struct vr_desc *desc; 1790 struct mbuf *m; 1791 bus_dma_segment_t txsegs[VR_MAXFRAGS]; 1792 uint32_t csum_flags, txctl; 1793 int error, i, nsegs, prod, si; 1794 int padlen; 1795 1796 VR_LOCK_ASSERT(sc); 1797 1798 M_ASSERTPKTHDR((*m_head)); 1799 1800 /* 1801 * Some VIA Rhine wants packet buffers to be longword 1802 * aligned, but very often our mbufs aren't. Rather than 1803 * waste time trying to decide when to copy and when not 1804 * to copy, just do it all the time. 1805 */ 1806 if ((sc->vr_quirks & VR_Q_NEEDALIGN) != 0) { 1807 m = m_defrag(*m_head, M_NOWAIT); 1808 if (m == NULL) { 1809 m_freem(*m_head); 1810 *m_head = NULL; 1811 return (ENOBUFS); 1812 } 1813 *m_head = m; 1814 } 1815 1816 /* 1817 * The Rhine chip doesn't auto-pad, so we have to make 1818 * sure to pad short frames out to the minimum frame length 1819 * ourselves. 1820 */ 1821 if ((*m_head)->m_pkthdr.len < VR_MIN_FRAMELEN) { 1822 m = *m_head; 1823 padlen = VR_MIN_FRAMELEN - m->m_pkthdr.len; 1824 if (M_WRITABLE(m) == 0) { 1825 /* Get a writable copy. */ 1826 m = m_dup(*m_head, M_NOWAIT); 1827 m_freem(*m_head); 1828 if (m == NULL) { 1829 *m_head = NULL; 1830 return (ENOBUFS); 1831 } 1832 *m_head = m; 1833 } 1834 if (m->m_next != NULL || M_TRAILINGSPACE(m) < padlen) { 1835 m = m_defrag(m, M_NOWAIT); 1836 if (m == NULL) { 1837 m_freem(*m_head); 1838 *m_head = NULL; 1839 return (ENOBUFS); 1840 } 1841 } 1842 /* 1843 * Manually pad short frames, and zero the pad space 1844 * to avoid leaking data. 1845 */ 1846 bzero(mtod(m, char *) + m->m_pkthdr.len, padlen); 1847 m->m_pkthdr.len += padlen; 1848 m->m_len = m->m_pkthdr.len; 1849 *m_head = m; 1850 } 1851 1852 prod = sc->vr_cdata.vr_tx_prod; 1853 txd = &sc->vr_cdata.vr_txdesc[prod]; 1854 error = bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap, 1855 *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); 1856 if (error == EFBIG) { 1857 m = m_collapse(*m_head, M_NOWAIT, VR_MAXFRAGS); 1858 if (m == NULL) { 1859 m_freem(*m_head); 1860 *m_head = NULL; 1861 return (ENOBUFS); 1862 } 1863 *m_head = m; 1864 error = bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_tx_tag, 1865 txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); 1866 if (error != 0) { 1867 m_freem(*m_head); 1868 *m_head = NULL; 1869 return (error); 1870 } 1871 } else if (error != 0) 1872 return (error); 1873 if (nsegs == 0) { 1874 m_freem(*m_head); 1875 *m_head = NULL; 1876 return (EIO); 1877 } 1878 1879 /* Check number of available descriptors. */ 1880 if (sc->vr_cdata.vr_tx_cnt + nsegs >= (VR_TX_RING_CNT - 1)) { 1881 bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap); 1882 return (ENOBUFS); 1883 } 1884 1885 txd->tx_m = *m_head; 1886 bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap, 1887 BUS_DMASYNC_PREWRITE); 1888 1889 /* Set checksum offload. */ 1890 csum_flags = 0; 1891 if (((*m_head)->m_pkthdr.csum_flags & VR_CSUM_FEATURES) != 0) { 1892 if ((*m_head)->m_pkthdr.csum_flags & CSUM_IP) 1893 csum_flags |= VR_TXCTL_IPCSUM; 1894 if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP) 1895 csum_flags |= VR_TXCTL_TCPCSUM; 1896 if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP) 1897 csum_flags |= VR_TXCTL_UDPCSUM; 1898 } 1899 1900 /* 1901 * Quite contrary to datasheet for VIA Rhine, VR_TXCTL_TLINK bit 1902 * is required for all descriptors regardless of single or 1903 * multiple buffers. Also VR_TXSTAT_OWN bit is valid only for 1904 * the first descriptor for a multi-fragmented frames. Without 1905 * that VIA Rhine chip generates Tx underrun interrupts and can't 1906 * send any frames. 1907 */ 1908 si = prod; 1909 for (i = 0; i < nsegs; i++) { 1910 desc = &sc->vr_rdata.vr_tx_ring[prod]; 1911 desc->vr_status = 0; 1912 txctl = txsegs[i].ds_len | VR_TXCTL_TLINK | csum_flags; 1913 if (i == 0) 1914 txctl |= VR_TXCTL_FIRSTFRAG; 1915 desc->vr_ctl = htole32(txctl); 1916 desc->vr_data = htole32(VR_ADDR_LO(txsegs[i].ds_addr)); 1917 sc->vr_cdata.vr_tx_cnt++; 1918 VR_INC(prod, VR_TX_RING_CNT); 1919 } 1920 /* Update producer index. */ 1921 sc->vr_cdata.vr_tx_prod = prod; 1922 1923 prod = (prod + VR_TX_RING_CNT - 1) % VR_TX_RING_CNT; 1924 desc = &sc->vr_rdata.vr_tx_ring[prod]; 1925 1926 /* 1927 * Set EOP on the last descriptor and request Tx completion 1928 * interrupt for every VR_TX_INTR_THRESH-th frames. 1929 */ 1930 VR_INC(sc->vr_cdata.vr_tx_pkts, VR_TX_INTR_THRESH); 1931 if (sc->vr_cdata.vr_tx_pkts == 0) 1932 desc->vr_ctl |= htole32(VR_TXCTL_LASTFRAG | VR_TXCTL_FINT); 1933 else 1934 desc->vr_ctl |= htole32(VR_TXCTL_LASTFRAG); 1935 1936 /* Lastly turn the first descriptor ownership to hardware. */ 1937 desc = &sc->vr_rdata.vr_tx_ring[si]; 1938 desc->vr_status |= htole32(VR_TXSTAT_OWN); 1939 1940 /* Sync descriptors. */ 1941 bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag, 1942 sc->vr_cdata.vr_tx_ring_map, 1943 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1944 1945 return (0); 1946 } 1947 1948 static void 1949 vr_start(if_t ifp) 1950 { 1951 struct vr_softc *sc; 1952 1953 sc = if_getsoftc(ifp); 1954 VR_LOCK(sc); 1955 vr_start_locked(ifp); 1956 VR_UNLOCK(sc); 1957 } 1958 1959 static void 1960 vr_start_locked(if_t ifp) 1961 { 1962 struct vr_softc *sc; 1963 struct mbuf *m_head; 1964 int enq; 1965 1966 sc = if_getsoftc(ifp); 1967 1968 VR_LOCK_ASSERT(sc); 1969 1970 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1971 IFF_DRV_RUNNING || (sc->vr_flags & VR_F_LINK) == 0) 1972 return; 1973 1974 for (enq = 0; !if_sendq_empty(ifp) && 1975 sc->vr_cdata.vr_tx_cnt < VR_TX_RING_CNT - 2; ) { 1976 m_head = if_dequeue(ifp); 1977 if (m_head == NULL) 1978 break; 1979 /* 1980 * Pack the data into the transmit ring. If we 1981 * don't have room, set the OACTIVE flag and wait 1982 * for the NIC to drain the ring. 1983 */ 1984 if (vr_encap(sc, &m_head)) { 1985 if (m_head == NULL) 1986 break; 1987 if_sendq_prepend(ifp, m_head); 1988 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); 1989 break; 1990 } 1991 1992 enq++; 1993 /* 1994 * If there's a BPF listener, bounce a copy of this frame 1995 * to him. 1996 */ 1997 ETHER_BPF_MTAP(ifp, m_head); 1998 } 1999 2000 if (enq > 0) { 2001 /* Tell the chip to start transmitting. */ 2002 VR_SETBIT(sc, VR_CR0, VR_CR0_TX_GO); 2003 /* Set a timeout in case the chip goes out to lunch. */ 2004 sc->vr_watchdog_timer = 5; 2005 } 2006 } 2007 2008 static void 2009 vr_init(void *xsc) 2010 { 2011 struct vr_softc *sc; 2012 2013 sc = (struct vr_softc *)xsc; 2014 VR_LOCK(sc); 2015 vr_init_locked(sc); 2016 VR_UNLOCK(sc); 2017 } 2018 2019 static void 2020 vr_init_locked(struct vr_softc *sc) 2021 { 2022 if_t ifp; 2023 struct mii_data *mii; 2024 bus_addr_t addr; 2025 int i; 2026 2027 VR_LOCK_ASSERT(sc); 2028 2029 ifp = sc->vr_ifp; 2030 mii = device_get_softc(sc->vr_miibus); 2031 2032 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) 2033 return; 2034 2035 /* Cancel pending I/O and free all RX/TX buffers. */ 2036 vr_stop(sc); 2037 vr_reset(sc); 2038 2039 /* Set our station address. */ 2040 for (i = 0; i < ETHER_ADDR_LEN; i++) 2041 CSR_WRITE_1(sc, VR_PAR0 + i, if_getlladdr(sc->vr_ifp)[i]); 2042 2043 /* Set DMA size. */ 2044 VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH); 2045 VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD); 2046 2047 /* 2048 * BCR0 and BCR1 can override the RXCFG and TXCFG registers, 2049 * so we must set both. 2050 */ 2051 VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH); 2052 VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES); 2053 2054 VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH); 2055 VR_SETBIT(sc, VR_BCR1, vr_tx_threshold_tables[sc->vr_txthresh].bcr_cfg); 2056 2057 VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH); 2058 VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES); 2059 2060 VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH); 2061 VR_SETBIT(sc, VR_TXCFG, vr_tx_threshold_tables[sc->vr_txthresh].tx_cfg); 2062 2063 /* Init circular RX list. */ 2064 if (vr_rx_ring_init(sc) != 0) { 2065 device_printf(sc->vr_dev, 2066 "initialization failed: no memory for rx buffers\n"); 2067 vr_stop(sc); 2068 return; 2069 } 2070 2071 /* Init tx descriptors. */ 2072 vr_tx_ring_init(sc); 2073 2074 if ((sc->vr_quirks & VR_Q_CAM) != 0) { 2075 uint8_t vcam[2] = { 0, 0 }; 2076 2077 /* Disable VLAN hardware tag insertion/stripping. */ 2078 VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TXTAGEN | VR_TXCFG_RXTAGCTL); 2079 /* Disable VLAN hardware filtering. */ 2080 VR_CLRBIT(sc, VR_BCR1, VR_BCR1_VLANFILT_ENB); 2081 /* Disable all CAM entries. */ 2082 vr_cam_mask(sc, VR_MCAST_CAM, 0); 2083 vr_cam_mask(sc, VR_VLAN_CAM, 0); 2084 /* Enable the first VLAN CAM. */ 2085 vr_cam_data(sc, VR_VLAN_CAM, 0, vcam); 2086 vr_cam_mask(sc, VR_VLAN_CAM, 1); 2087 } 2088 2089 /* 2090 * Set up receive filter. 2091 */ 2092 vr_set_filter(sc); 2093 2094 /* 2095 * Load the address of the RX ring. 2096 */ 2097 addr = VR_RX_RING_ADDR(sc, 0); 2098 CSR_WRITE_4(sc, VR_RXADDR, VR_ADDR_LO(addr)); 2099 /* 2100 * Load the address of the TX ring. 2101 */ 2102 addr = VR_TX_RING_ADDR(sc, 0); 2103 CSR_WRITE_4(sc, VR_TXADDR, VR_ADDR_LO(addr)); 2104 /* Default : full-duplex, no Tx poll. */ 2105 CSR_WRITE_1(sc, VR_CR1, VR_CR1_FULLDUPLEX | VR_CR1_TX_NOPOLL); 2106 2107 /* Set flow-control parameters for Rhine III. */ 2108 if (sc->vr_revid >= REV_ID_VT6105_A0) { 2109 /* 2110 * Configure Rx buffer count available for incoming 2111 * packet. 2112 * Even though data sheet says almost nothing about 2113 * this register, this register should be updated 2114 * whenever driver adds new RX buffers to controller. 2115 * Otherwise, XON frame is not sent to link partner 2116 * even if controller has enough RX buffers and you 2117 * would be isolated from network. 2118 * The controller is not smart enough to know number 2119 * of available RX buffers so driver have to let 2120 * controller know how many RX buffers are posted. 2121 * In other words, this register works like a residue 2122 * counter for RX buffers and should be initialized 2123 * to the number of total RX buffers - 1 before 2124 * enabling RX MAC. Note, this register is 8bits so 2125 * it effectively limits the maximum number of RX 2126 * buffer to be configured by controller is 255. 2127 */ 2128 CSR_WRITE_1(sc, VR_FLOWCR0, VR_RX_RING_CNT - 1); 2129 /* 2130 * Tx pause low threshold : 8 free receive buffers 2131 * Tx pause XON high threshold : 24 free receive buffers 2132 */ 2133 CSR_WRITE_1(sc, VR_FLOWCR1, 2134 VR_FLOWCR1_TXLO8 | VR_FLOWCR1_TXHI24 | VR_FLOWCR1_XONXOFF); 2135 /* Set Tx pause timer. */ 2136 CSR_WRITE_2(sc, VR_PAUSETIMER, 0xffff); 2137 } 2138 2139 /* Enable receiver and transmitter. */ 2140 CSR_WRITE_1(sc, VR_CR0, 2141 VR_CR0_START | VR_CR0_TX_ON | VR_CR0_RX_ON | VR_CR0_RX_GO); 2142 2143 CSR_WRITE_2(sc, VR_ISR, 0xFFFF); 2144 #ifdef DEVICE_POLLING 2145 /* 2146 * Disable interrupts if we are polling. 2147 */ 2148 if (if_getcapenable(ifp) & IFCAP_POLLING) 2149 CSR_WRITE_2(sc, VR_IMR, 0); 2150 else 2151 #endif 2152 /* 2153 * Enable interrupts and disable MII intrs. 2154 */ 2155 CSR_WRITE_2(sc, VR_IMR, VR_INTRS); 2156 if (sc->vr_revid > REV_ID_VT6102_A) 2157 CSR_WRITE_2(sc, VR_MII_IMR, 0); 2158 2159 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); 2160 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); 2161 2162 sc->vr_flags &= ~(VR_F_LINK | VR_F_TXPAUSE); 2163 mii_mediachg(mii); 2164 2165 callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc); 2166 } 2167 2168 /* 2169 * Set media options. 2170 */ 2171 static int 2172 vr_ifmedia_upd(if_t ifp) 2173 { 2174 struct vr_softc *sc; 2175 struct mii_data *mii; 2176 struct mii_softc *miisc; 2177 int error; 2178 2179 sc = if_getsoftc(ifp); 2180 VR_LOCK(sc); 2181 mii = device_get_softc(sc->vr_miibus); 2182 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 2183 PHY_RESET(miisc); 2184 sc->vr_flags &= ~(VR_F_LINK | VR_F_TXPAUSE); 2185 error = mii_mediachg(mii); 2186 VR_UNLOCK(sc); 2187 2188 return (error); 2189 } 2190 2191 /* 2192 * Report current media status. 2193 */ 2194 static void 2195 vr_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) 2196 { 2197 struct vr_softc *sc; 2198 struct mii_data *mii; 2199 2200 sc = if_getsoftc(ifp); 2201 mii = device_get_softc(sc->vr_miibus); 2202 VR_LOCK(sc); 2203 if ((if_getflags(ifp) & IFF_UP) == 0) { 2204 VR_UNLOCK(sc); 2205 return; 2206 } 2207 mii_pollstat(mii); 2208 ifmr->ifm_active = mii->mii_media_active; 2209 ifmr->ifm_status = mii->mii_media_status; 2210 VR_UNLOCK(sc); 2211 } 2212 2213 static int 2214 vr_ioctl(if_t ifp, u_long command, caddr_t data) 2215 { 2216 struct vr_softc *sc; 2217 struct ifreq *ifr; 2218 struct mii_data *mii; 2219 int error, mask; 2220 2221 sc = if_getsoftc(ifp); 2222 ifr = (struct ifreq *)data; 2223 error = 0; 2224 2225 switch (command) { 2226 case SIOCSIFFLAGS: 2227 VR_LOCK(sc); 2228 if (if_getflags(ifp) & IFF_UP) { 2229 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 2230 if ((if_getflags(ifp) ^ sc->vr_if_flags) & 2231 (IFF_PROMISC | IFF_ALLMULTI)) 2232 vr_set_filter(sc); 2233 } else { 2234 if ((sc->vr_flags & VR_F_DETACHED) == 0) 2235 vr_init_locked(sc); 2236 } 2237 } else { 2238 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) 2239 vr_stop(sc); 2240 } 2241 sc->vr_if_flags = if_getflags(ifp); 2242 VR_UNLOCK(sc); 2243 break; 2244 case SIOCADDMULTI: 2245 case SIOCDELMULTI: 2246 VR_LOCK(sc); 2247 vr_set_filter(sc); 2248 VR_UNLOCK(sc); 2249 break; 2250 case SIOCGIFMEDIA: 2251 case SIOCSIFMEDIA: 2252 mii = device_get_softc(sc->vr_miibus); 2253 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); 2254 break; 2255 case SIOCSIFCAP: 2256 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); 2257 #ifdef DEVICE_POLLING 2258 if (mask & IFCAP_POLLING) { 2259 if (ifr->ifr_reqcap & IFCAP_POLLING) { 2260 error = ether_poll_register(vr_poll, ifp); 2261 if (error != 0) 2262 break; 2263 VR_LOCK(sc); 2264 /* Disable interrupts. */ 2265 CSR_WRITE_2(sc, VR_IMR, 0x0000); 2266 if_setcapenablebit(ifp, IFCAP_POLLING, 0); 2267 VR_UNLOCK(sc); 2268 } else { 2269 error = ether_poll_deregister(ifp); 2270 /* Enable interrupts. */ 2271 VR_LOCK(sc); 2272 CSR_WRITE_2(sc, VR_IMR, VR_INTRS); 2273 if_setcapenablebit(ifp, 0, IFCAP_POLLING); 2274 VR_UNLOCK(sc); 2275 } 2276 } 2277 #endif /* DEVICE_POLLING */ 2278 if ((mask & IFCAP_TXCSUM) != 0 && 2279 (IFCAP_TXCSUM & if_getcapabilities(ifp)) != 0) { 2280 if_togglecapenable(ifp, IFCAP_TXCSUM); 2281 if ((IFCAP_TXCSUM & if_getcapenable(ifp)) != 0) 2282 if_sethwassistbits(ifp, VR_CSUM_FEATURES, 0); 2283 else 2284 if_sethwassistbits(ifp, 0, VR_CSUM_FEATURES); 2285 } 2286 if ((mask & IFCAP_RXCSUM) != 0 && 2287 (IFCAP_RXCSUM & if_getcapabilities(ifp)) != 0) 2288 if_togglecapenable(ifp, IFCAP_RXCSUM); 2289 if ((mask & IFCAP_WOL_UCAST) != 0 && 2290 (if_getcapabilities(ifp) & IFCAP_WOL_UCAST) != 0) 2291 if_togglecapenable(ifp, IFCAP_WOL_UCAST); 2292 if ((mask & IFCAP_WOL_MAGIC) != 0 && 2293 (if_getcapabilities(ifp) & IFCAP_WOL_MAGIC) != 0) 2294 if_togglecapenable(ifp, IFCAP_WOL_MAGIC); 2295 break; 2296 default: 2297 error = ether_ioctl(ifp, command, data); 2298 break; 2299 } 2300 2301 return (error); 2302 } 2303 2304 static void 2305 vr_watchdog(struct vr_softc *sc) 2306 { 2307 if_t ifp; 2308 2309 VR_LOCK_ASSERT(sc); 2310 2311 if (sc->vr_watchdog_timer == 0 || --sc->vr_watchdog_timer) 2312 return; 2313 2314 ifp = sc->vr_ifp; 2315 /* 2316 * Reclaim first as we don't request interrupt for every packets. 2317 */ 2318 vr_txeof(sc); 2319 if (sc->vr_cdata.vr_tx_cnt == 0) 2320 return; 2321 2322 if ((sc->vr_flags & VR_F_LINK) == 0) { 2323 if (bootverbose) 2324 if_printf(sc->vr_ifp, "watchdog timeout " 2325 "(missed link)\n"); 2326 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2327 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); 2328 vr_init_locked(sc); 2329 return; 2330 } 2331 2332 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2333 if_printf(ifp, "watchdog timeout\n"); 2334 2335 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); 2336 vr_init_locked(sc); 2337 2338 if (!if_sendq_empty(ifp)) 2339 vr_start_locked(ifp); 2340 } 2341 2342 static void 2343 vr_tx_start(struct vr_softc *sc) 2344 { 2345 bus_addr_t addr; 2346 uint8_t cmd; 2347 2348 cmd = CSR_READ_1(sc, VR_CR0); 2349 if ((cmd & VR_CR0_TX_ON) == 0) { 2350 addr = VR_TX_RING_ADDR(sc, sc->vr_cdata.vr_tx_cons); 2351 CSR_WRITE_4(sc, VR_TXADDR, VR_ADDR_LO(addr)); 2352 cmd |= VR_CR0_TX_ON; 2353 CSR_WRITE_1(sc, VR_CR0, cmd); 2354 } 2355 if (sc->vr_cdata.vr_tx_cnt != 0) { 2356 sc->vr_watchdog_timer = 5; 2357 VR_SETBIT(sc, VR_CR0, VR_CR0_TX_GO); 2358 } 2359 } 2360 2361 static void 2362 vr_rx_start(struct vr_softc *sc) 2363 { 2364 bus_addr_t addr; 2365 uint8_t cmd; 2366 2367 cmd = CSR_READ_1(sc, VR_CR0); 2368 if ((cmd & VR_CR0_RX_ON) == 0) { 2369 addr = VR_RX_RING_ADDR(sc, sc->vr_cdata.vr_rx_cons); 2370 CSR_WRITE_4(sc, VR_RXADDR, VR_ADDR_LO(addr)); 2371 cmd |= VR_CR0_RX_ON; 2372 CSR_WRITE_1(sc, VR_CR0, cmd); 2373 } 2374 CSR_WRITE_1(sc, VR_CR0, cmd | VR_CR0_RX_GO); 2375 } 2376 2377 static int 2378 vr_tx_stop(struct vr_softc *sc) 2379 { 2380 int i; 2381 uint8_t cmd; 2382 2383 cmd = CSR_READ_1(sc, VR_CR0); 2384 if ((cmd & VR_CR0_TX_ON) != 0) { 2385 cmd &= ~VR_CR0_TX_ON; 2386 CSR_WRITE_1(sc, VR_CR0, cmd); 2387 for (i = VR_TIMEOUT; i > 0; i--) { 2388 DELAY(5); 2389 cmd = CSR_READ_1(sc, VR_CR0); 2390 if ((cmd & VR_CR0_TX_ON) == 0) 2391 break; 2392 } 2393 if (i == 0) 2394 return (ETIMEDOUT); 2395 } 2396 return (0); 2397 } 2398 2399 static int 2400 vr_rx_stop(struct vr_softc *sc) 2401 { 2402 int i; 2403 uint8_t cmd; 2404 2405 cmd = CSR_READ_1(sc, VR_CR0); 2406 if ((cmd & VR_CR0_RX_ON) != 0) { 2407 cmd &= ~VR_CR0_RX_ON; 2408 CSR_WRITE_1(sc, VR_CR0, cmd); 2409 for (i = VR_TIMEOUT; i > 0; i--) { 2410 DELAY(5); 2411 cmd = CSR_READ_1(sc, VR_CR0); 2412 if ((cmd & VR_CR0_RX_ON) == 0) 2413 break; 2414 } 2415 if (i == 0) 2416 return (ETIMEDOUT); 2417 } 2418 return (0); 2419 } 2420 2421 /* 2422 * Stop the adapter and free any mbufs allocated to the 2423 * RX and TX lists. 2424 */ 2425 static void 2426 vr_stop(struct vr_softc *sc) 2427 { 2428 struct vr_txdesc *txd; 2429 struct vr_rxdesc *rxd; 2430 if_t ifp; 2431 int i; 2432 2433 VR_LOCK_ASSERT(sc); 2434 2435 ifp = sc->vr_ifp; 2436 sc->vr_watchdog_timer = 0; 2437 2438 callout_stop(&sc->vr_stat_callout); 2439 if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)); 2440 2441 CSR_WRITE_1(sc, VR_CR0, VR_CR0_STOP); 2442 if (vr_rx_stop(sc) != 0) 2443 device_printf(sc->vr_dev, "%s: Rx shutdown error\n", __func__); 2444 if (vr_tx_stop(sc) != 0) 2445 device_printf(sc->vr_dev, "%s: Tx shutdown error\n", __func__); 2446 /* Clear pending interrupts. */ 2447 CSR_WRITE_2(sc, VR_ISR, 0xFFFF); 2448 CSR_WRITE_2(sc, VR_IMR, 0x0000); 2449 CSR_WRITE_4(sc, VR_TXADDR, 0x00000000); 2450 CSR_WRITE_4(sc, VR_RXADDR, 0x00000000); 2451 2452 /* 2453 * Free RX and TX mbufs still in the queues. 2454 */ 2455 for (i = 0; i < VR_RX_RING_CNT; i++) { 2456 rxd = &sc->vr_cdata.vr_rxdesc[i]; 2457 if (rxd->rx_m != NULL) { 2458 bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, 2459 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 2460 bus_dmamap_unload(sc->vr_cdata.vr_rx_tag, 2461 rxd->rx_dmamap); 2462 m_freem(rxd->rx_m); 2463 rxd->rx_m = NULL; 2464 } 2465 } 2466 for (i = 0; i < VR_TX_RING_CNT; i++) { 2467 txd = &sc->vr_cdata.vr_txdesc[i]; 2468 if (txd->tx_m != NULL) { 2469 bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, 2470 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 2471 bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, 2472 txd->tx_dmamap); 2473 m_freem(txd->tx_m); 2474 txd->tx_m = NULL; 2475 } 2476 } 2477 } 2478 2479 /* 2480 * Stop all chip I/O so that the kernel's probe routines don't 2481 * get confused by errant DMAs when rebooting. 2482 */ 2483 static int 2484 vr_shutdown(device_t dev) 2485 { 2486 2487 return (vr_suspend(dev)); 2488 } 2489 2490 static int 2491 vr_suspend(device_t dev) 2492 { 2493 struct vr_softc *sc; 2494 2495 sc = device_get_softc(dev); 2496 2497 VR_LOCK(sc); 2498 vr_stop(sc); 2499 vr_setwol(sc); 2500 sc->vr_flags |= VR_F_SUSPENDED; 2501 VR_UNLOCK(sc); 2502 2503 return (0); 2504 } 2505 2506 static int 2507 vr_resume(device_t dev) 2508 { 2509 struct vr_softc *sc; 2510 if_t ifp; 2511 2512 sc = device_get_softc(dev); 2513 2514 VR_LOCK(sc); 2515 ifp = sc->vr_ifp; 2516 vr_clrwol(sc); 2517 vr_reset(sc); 2518 if (if_getflags(ifp) & IFF_UP) 2519 vr_init_locked(sc); 2520 2521 sc->vr_flags &= ~VR_F_SUSPENDED; 2522 VR_UNLOCK(sc); 2523 2524 return (0); 2525 } 2526 2527 static void 2528 vr_setwol(struct vr_softc *sc) 2529 { 2530 if_t ifp; 2531 int pmc; 2532 uint16_t pmstat; 2533 uint8_t v; 2534 2535 VR_LOCK_ASSERT(sc); 2536 2537 if (sc->vr_revid < REV_ID_VT6102_A || 2538 pci_find_cap(sc->vr_dev, PCIY_PMG, &pmc) != 0) 2539 return; 2540 2541 ifp = sc->vr_ifp; 2542 2543 /* Clear WOL configuration. */ 2544 CSR_WRITE_1(sc, VR_WOLCR_CLR, 0xFF); 2545 CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_SAB | VR_WOLCFG_SAM); 2546 CSR_WRITE_1(sc, VR_PWRCSR_CLR, 0xFF); 2547 CSR_WRITE_1(sc, VR_PWRCFG_CLR, VR_PWRCFG_WOLEN); 2548 if (sc->vr_revid > REV_ID_VT6105_B0) { 2549 /* Newer Rhine III supports two additional patterns. */ 2550 CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_PATTERN_PAGE); 2551 CSR_WRITE_1(sc, VR_TESTREG_CLR, 3); 2552 CSR_WRITE_1(sc, VR_PWRCSR1_CLR, 3); 2553 } 2554 if ((if_getcapenable(ifp) & IFCAP_WOL_UCAST) != 0) 2555 CSR_WRITE_1(sc, VR_WOLCR_SET, VR_WOLCR_UCAST); 2556 if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0) 2557 CSR_WRITE_1(sc, VR_WOLCR_SET, VR_WOLCR_MAGIC); 2558 /* 2559 * It seems that multicast wakeup frames require programming pattern 2560 * registers and valid CRC as well as pattern mask for each pattern. 2561 * While it's possible to setup such a pattern it would complicate 2562 * WOL configuration so ignore multicast wakeup frames. 2563 */ 2564 if ((if_getcapenable(ifp) & IFCAP_WOL) != 0) { 2565 CSR_WRITE_1(sc, VR_WOLCFG_SET, VR_WOLCFG_SAB | VR_WOLCFG_SAM); 2566 v = CSR_READ_1(sc, VR_STICKHW); 2567 CSR_WRITE_1(sc, VR_STICKHW, v | VR_STICKHW_WOL_ENB); 2568 CSR_WRITE_1(sc, VR_PWRCFG_SET, VR_PWRCFG_WOLEN); 2569 } 2570 2571 /* Put hardware into sleep. */ 2572 v = CSR_READ_1(sc, VR_STICKHW); 2573 v |= VR_STICKHW_DS0 | VR_STICKHW_DS1; 2574 CSR_WRITE_1(sc, VR_STICKHW, v); 2575 2576 /* Request PME if WOL is requested. */ 2577 pmstat = pci_read_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, 2); 2578 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 2579 if ((if_getcapenable(ifp) & IFCAP_WOL) != 0) 2580 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 2581 pci_write_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); 2582 } 2583 2584 static void 2585 vr_clrwol(struct vr_softc *sc) 2586 { 2587 uint8_t v; 2588 2589 VR_LOCK_ASSERT(sc); 2590 2591 if (sc->vr_revid < REV_ID_VT6102_A) 2592 return; 2593 2594 /* Take hardware out of sleep. */ 2595 v = CSR_READ_1(sc, VR_STICKHW); 2596 v &= ~(VR_STICKHW_DS0 | VR_STICKHW_DS1 | VR_STICKHW_WOL_ENB); 2597 CSR_WRITE_1(sc, VR_STICKHW, v); 2598 2599 /* Clear WOL configuration as WOL may interfere normal operation. */ 2600 CSR_WRITE_1(sc, VR_WOLCR_CLR, 0xFF); 2601 CSR_WRITE_1(sc, VR_WOLCFG_CLR, 2602 VR_WOLCFG_SAB | VR_WOLCFG_SAM | VR_WOLCFG_PMEOVR); 2603 CSR_WRITE_1(sc, VR_PWRCSR_CLR, 0xFF); 2604 CSR_WRITE_1(sc, VR_PWRCFG_CLR, VR_PWRCFG_WOLEN); 2605 if (sc->vr_revid > REV_ID_VT6105_B0) { 2606 /* Newer Rhine III supports two additional patterns. */ 2607 CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_PATTERN_PAGE); 2608 CSR_WRITE_1(sc, VR_TESTREG_CLR, 3); 2609 CSR_WRITE_1(sc, VR_PWRCSR1_CLR, 3); 2610 } 2611 } 2612 2613 static int 2614 vr_sysctl_stats(SYSCTL_HANDLER_ARGS) 2615 { 2616 struct vr_softc *sc; 2617 struct vr_statistics *stat; 2618 int error; 2619 int result; 2620 2621 result = -1; 2622 error = sysctl_handle_int(oidp, &result, 0, req); 2623 2624 if (error != 0 || req->newptr == NULL) 2625 return (error); 2626 2627 if (result == 1) { 2628 sc = (struct vr_softc *)arg1; 2629 stat = &sc->vr_stat; 2630 2631 printf("%s statistics:\n", device_get_nameunit(sc->vr_dev)); 2632 printf("Outbound good frames : %ju\n", 2633 (uintmax_t)stat->tx_ok); 2634 printf("Inbound good frames : %ju\n", 2635 (uintmax_t)stat->rx_ok); 2636 printf("Outbound errors : %u\n", stat->tx_errors); 2637 printf("Inbound errors : %u\n", stat->rx_errors); 2638 printf("Inbound no buffers : %u\n", stat->rx_no_buffers); 2639 printf("Inbound no mbuf clusters: %d\n", stat->rx_no_mbufs); 2640 printf("Inbound FIFO overflows : %d\n", 2641 stat->rx_fifo_overflows); 2642 printf("Inbound CRC errors : %u\n", stat->rx_crc_errors); 2643 printf("Inbound frame alignment errors : %u\n", 2644 stat->rx_alignment); 2645 printf("Inbound giant frames : %u\n", stat->rx_giants); 2646 printf("Inbound runt frames : %u\n", stat->rx_runts); 2647 printf("Outbound aborted with excessive collisions : %u\n", 2648 stat->tx_abort); 2649 printf("Outbound collisions : %u\n", stat->tx_collisions); 2650 printf("Outbound late collisions : %u\n", 2651 stat->tx_late_collisions); 2652 printf("Outbound underrun : %u\n", stat->tx_underrun); 2653 printf("PCI bus errors : %u\n", stat->bus_errors); 2654 printf("driver restarted due to Rx/Tx shutdown failure : %u\n", 2655 stat->num_restart); 2656 } 2657 2658 return (error); 2659 } 2660