/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1997, 1998
 *	Bill Paul <wpaul@ctr.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * VIA Rhine fast ethernet PCI NIC driver
 *
 * Supports various network adapters based on the VIA Rhine
 * and Rhine II PCI controllers, including the D-Link DFE530TX.
 * Datasheets are available at http://www.via.com.tw.
 *
 * Written by Bill Paul <wpaul@ctr.columbia.edu>
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The VIA Rhine controllers are similar in some respects to the
 * DEC tulip chips, except less complicated.  The controller
 * uses an MII bus and an external physical layer interface.  The
 * receiver has a one-entry perfect filter and a 64-bit hash table
 * multicast filter.  Transmit and receive descriptors are similar
 * to the tulip.
 *
 * Some Rhine chips have a serious flaw in their transmit DMA mechanism:
 * transmit buffers must be longword aligned.  Unfortunately,
 * FreeBSD doesn't guarantee that mbufs will be filled in starting
 * at longword boundaries, so we have to do a buffer copy before
 * transmission.
 */
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>

#include <dev/vr/if_vrreg.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

MODULE_DEPEND(vr, pci, 1, 1, 1);
MODULE_DEPEND(vr, ether, 1, 1, 1);
MODULE_DEPEND(vr, miibus, 1, 1, 1);

/* Define to show Rx/Tx error status. */
#undef VR_SHOW_ERRORS
#define	VR_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)

/*
 * Various supported device vendors/types, their names & quirks.
 */
#define	VR_Q_NEEDALIGN		(1<<0)
#define	VR_Q_CSUM		(1<<1)
#define	VR_Q_CAM		(1<<2)

static const struct vr_type {
	u_int16_t	vr_vid;
	u_int16_t	vr_did;
	int		vr_quirks;
	const char	*vr_name;
} vr_devs[] = {
	{ VIA_VENDORID, VIA_DEVICEID_RHINE,
	    VR_Q_NEEDALIGN,
	    "VIA VT3043 Rhine I 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II,
	    VR_Q_NEEDALIGN,
	    "VIA VT86C100A Rhine II 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II_2,
	    0,
	    "VIA VT6102 Rhine II 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_III,
	    0,
	    "VIA VT6105 Rhine III 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_III_M,
	    VR_Q_CSUM,
	    "VIA VT6105M Rhine III 10/100BaseTX" },
	{ DELTA_VENDORID, DELTA_DEVICEID_RHINE_II,
	    VR_Q_NEEDALIGN,
	    "Delta Electronics Rhine II 10/100BaseTX" },
	{ ADDTRON_VENDORID, ADDTRON_DEVICEID_RHINE_II,
	    VR_Q_NEEDALIGN,
	    "Addtron Technology Rhine II 10/100BaseTX" },
	{ 0, 0, 0, NULL }
};

static int vr_probe(device_t);
static int vr_attach(device_t);
static int vr_detach(device_t);
static int vr_shutdown(device_t);
static int vr_suspend(device_t);
static int vr_resume(device_t);

static void vr_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int vr_dma_alloc(struct vr_softc *);
static void vr_dma_free(struct vr_softc *);
static __inline void vr_discard_rxbuf(struct vr_rxdesc *);
static int vr_newbuf(struct vr_softc *, int);

#ifndef __NO_STRICT_ALIGNMENT
static __inline void vr_fixup_rx(struct mbuf *);
#endif
static int vr_rxeof(struct vr_softc *);
static void vr_txeof(struct vr_softc *);
static void vr_tick(void *);
static int vr_error(struct vr_softc *, uint16_t);
static void vr_tx_underrun(struct vr_softc *);
static int vr_intr(void *);
static void vr_int_task(void *, int);
static void vr_start(if_t);
static void vr_start_locked(if_t);
static int vr_encap(struct vr_softc *, struct mbuf **);
static int vr_ioctl(if_t, u_long, caddr_t);
static void vr_init(void *);
static void vr_init_locked(struct vr_softc *);
static void vr_tx_start(struct vr_softc *);
static void vr_rx_start(struct vr_softc *);
static int vr_tx_stop(struct vr_softc *);
static int vr_rx_stop(struct vr_softc *);
static void vr_stop(struct vr_softc *);
static void vr_watchdog(struct vr_softc *);
static int vr_ifmedia_upd(if_t);
static void vr_ifmedia_sts(if_t, struct ifmediareq *);

static int vr_miibus_readreg(device_t, int, int);
static int vr_miibus_writereg(device_t, int, int, int);
static void vr_miibus_statchg(device_t);

static void vr_cam_mask(struct vr_softc *, uint32_t, int);
static int vr_cam_data(struct vr_softc *, int, int, uint8_t *);
static void vr_set_filter(struct vr_softc *);
static void vr_reset(const struct vr_softc *);
static int vr_tx_ring_init(struct vr_softc *);
static int vr_rx_ring_init(struct vr_softc *);
static void vr_setwol(struct vr_softc *);
static void vr_clrwol(struct vr_softc *);
static int vr_sysctl_stats(SYSCTL_HANDLER_ARGS);

static const struct vr_tx_threshold_table {
	int tx_cfg;
	int bcr_cfg;
	int value;
} vr_tx_threshold_tables[] = {
	{ VR_TXTHRESH_64BYTES, VR_BCR1_TXTHRESH64BYTES, 64 },
	{ VR_TXTHRESH_128BYTES, VR_BCR1_TXTHRESH128BYTES, 128 },
	{ VR_TXTHRESH_256BYTES, VR_BCR1_TXTHRESH256BYTES, 256 },
	{ VR_TXTHRESH_512BYTES, VR_BCR1_TXTHRESH512BYTES, 512 },
	{ VR_TXTHRESH_1024BYTES, VR_BCR1_TXTHRESH1024BYTES, 1024 },
	{ VR_TXTHRESH_STORENFWD, VR_BCR1_TXTHRESHSTORENFWD, 2048 }
};

static device_method_t vr_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		vr_probe),
	DEVMETHOD(device_attach,	vr_attach),
	DEVMETHOD(device_detach,	vr_detach),
	DEVMETHOD(device_shutdown,	vr_shutdown),
	DEVMETHOD(device_suspend,	vr_suspend),
	DEVMETHOD(device_resume,	vr_resume),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	vr_miibus_readreg),
	DEVMETHOD(miibus_writereg,	vr_miibus_writereg),
	DEVMETHOD(miibus_statchg,	vr_miibus_statchg),

	DEVMETHOD_END
};

static driver_t vr_driver = {
	"vr",
	vr_methods,
	sizeof(struct vr_softc)
};

DRIVER_MODULE(vr, pci, vr_driver, 0, 0);
DRIVER_MODULE(miibus, vr, miibus_driver, 0, 0);

static int
vr_miibus_readreg(device_t dev, int phy, int reg)
{
	struct vr_softc *sc;
	int i;

	sc = device_get_softc(dev);

	/* Set the register address. */
	CSR_WRITE_1(sc, VR_MIIADDR, reg);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB);

	for (i = 0; i < VR_MII_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0)
			break;
	}
	if (i == VR_MII_TIMEOUT)
		device_printf(sc->vr_dev, "phy read timeout %d:%d\n", phy, reg);

	return (CSR_READ_2(sc, VR_MIIDATA));
}
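/*
 * Usage sketch (informational, not part of the original sources): the
 * MII layer invokes vr_miibus_readreg() above and vr_miibus_writereg()
 * below through the DEVMETHODs registered in vr_methods, e.g.
 * MIIBUS_READREG(dev, phy, MII_BMSR) to poll link status.  Both
 * accessors busy-wait up to VR_MII_TIMEOUT microseconds for the
 * command bit to self-clear; on timeout they only log, so a wedged MII
 * bus degrades to garbage reads rather than a hang.
 */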
static int
vr_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct vr_softc *sc;
	int i;

	sc = device_get_softc(dev);

	/* Set the register address and data to write. */
	CSR_WRITE_1(sc, VR_MIIADDR, reg);
	CSR_WRITE_2(sc, VR_MIIDATA, data);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB);

	for (i = 0; i < VR_MII_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0)
			break;
	}
	if (i == VR_MII_TIMEOUT)
		device_printf(sc->vr_dev, "phy write timeout %d:%d\n", phy,
		    reg);

	return (0);
}

/*
 * In order to fiddle with the 'full-duplex' and '100Mbps' bits in
 * the netconfig register, we first have to put the transmit and/or
 * receive logic in the idle state.
 */
static void
vr_miibus_statchg(device_t dev)
{
	struct vr_softc *sc;
	struct mii_data *mii;
	if_t ifp;
	int lfdx, mfdx;
	uint8_t cr0, cr1, fc;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->vr_miibus);
	ifp = sc->vr_ifp;
	if (mii == NULL || ifp == NULL ||
	    (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	sc->vr_flags &= ~(VR_F_LINK | VR_F_TXPAUSE);
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->vr_flags |= VR_F_LINK;
			break;
		default:
			break;
		}
	}

	if ((sc->vr_flags & VR_F_LINK) != 0) {
		cr0 = CSR_READ_1(sc, VR_CR0);
		cr1 = CSR_READ_1(sc, VR_CR1);
		mfdx = (cr1 & VR_CR1_FULLDUPLEX) != 0;
		lfdx = (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0;
		if (mfdx != lfdx) {
			if ((cr0 & (VR_CR0_TX_ON | VR_CR0_RX_ON)) != 0) {
				if (vr_tx_stop(sc) != 0 ||
				    vr_rx_stop(sc) != 0) {
					device_printf(sc->vr_dev,
					    "%s: Tx/Rx shutdown error -- "
					    "resetting\n", __func__);
					sc->vr_flags |= VR_F_RESTART;
					VR_UNLOCK(sc);
					return;
				}
			}
			if (lfdx)
				cr1 |= VR_CR1_FULLDUPLEX;
			else
				cr1 &= ~VR_CR1_FULLDUPLEX;
			CSR_WRITE_1(sc, VR_CR1, cr1);
		}
		fc = 0;
		/* Configure flow-control. */
		if (sc->vr_revid >= REV_ID_VT6105_A0) {
			fc = CSR_READ_1(sc, VR_FLOWCR1);
			fc &= ~(VR_FLOWCR1_TXPAUSE | VR_FLOWCR1_RXPAUSE);
			if ((IFM_OPTIONS(mii->mii_media_active) &
			    IFM_ETH_RXPAUSE) != 0)
				fc |= VR_FLOWCR1_RXPAUSE;
			if ((IFM_OPTIONS(mii->mii_media_active) &
			    IFM_ETH_TXPAUSE) != 0) {
				fc |= VR_FLOWCR1_TXPAUSE;
				sc->vr_flags |= VR_F_TXPAUSE;
			}
			CSR_WRITE_1(sc, VR_FLOWCR1, fc);
		} else if (sc->vr_revid >= REV_ID_VT6102_A) {
			/* No Tx pause capability available for Rhine II. */
			fc = CSR_READ_1(sc, VR_MISC_CR0);
			fc &= ~VR_MISCCR0_RXPAUSE;
			if ((IFM_OPTIONS(mii->mii_media_active) &
			    IFM_ETH_RXPAUSE) != 0)
				fc |= VR_MISCCR0_RXPAUSE;
			CSR_WRITE_1(sc, VR_MISC_CR0, fc);
		}
		vr_rx_start(sc);
		vr_tx_start(sc);
	} else {
		if (vr_tx_stop(sc) != 0 || vr_rx_stop(sc) != 0) {
			device_printf(sc->vr_dev,
			    "%s: Tx/Rx shutdown error -- resetting\n",
			    __func__);
			sc->vr_flags |= VR_F_RESTART;
		}
	}
}

static void
vr_cam_mask(struct vr_softc *sc, uint32_t mask, int type)
{

	if (type == VR_MCAST_CAM)
		CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_MCAST);
	else
		CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_VLAN);
	CSR_WRITE_4(sc, VR_CAMMASK, mask);
	CSR_WRITE_1(sc, VR_CAMCTL, 0);
}

static int
vr_cam_data(struct vr_softc *sc, int type, int idx, uint8_t *mac)
{
	int i;

	if (type == VR_MCAST_CAM) {
		if (idx < 0 || idx >= VR_CAM_MCAST_CNT || mac == NULL)
			return (EINVAL);
		CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_MCAST);
	} else
		CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_VLAN);

	/* Set CAM entry address. */
	CSR_WRITE_1(sc, VR_CAMADDR, idx);
	/* Set CAM entry data. */
	if (type == VR_MCAST_CAM) {
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			CSR_WRITE_1(sc, VR_MCAM0 + i, mac[i]);
	} else {
		CSR_WRITE_1(sc, VR_VCAM0, mac[0]);
		CSR_WRITE_1(sc, VR_VCAM1, mac[1]);
	}
	DELAY(10);
	/* Write CAM and wait for self-clear of VR_CAMCTL_WRITE bit. */
	CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_WRITE);
	for (i = 0; i < VR_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VR_CAMCTL) & VR_CAMCTL_WRITE) == 0)
			break;
	}

	if (i == VR_TIMEOUT)
		device_printf(sc->vr_dev, "%s: setting CAM filter timeout!\n",
		    __func__);
	CSR_WRITE_1(sc, VR_CAMCTL, 0);

	return (i == VR_TIMEOUT ? ETIMEDOUT : 0);
}

struct vr_hash_maddr_cam_ctx {
	struct vr_softc *sc;
	uint32_t mask;
	int error;
};

static u_int
vr_hash_maddr_cam(void *arg, struct sockaddr_dl *sdl, u_int mcnt)
{
	struct vr_hash_maddr_cam_ctx *ctx = arg;

	if (ctx->error != 0)
		return (0);
	ctx->error = vr_cam_data(ctx->sc, VR_MCAST_CAM, mcnt, LLADDR(sdl));
	if (ctx->error != 0) {
		ctx->mask = 0;
		return (0);
	}
	ctx->mask |= 1 << mcnt;

	return (1);
}

static u_int
vr_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t *hashes = arg;
	int h;

	h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN) >> 26;
	if (h < 32)
		hashes[0] |= (1 << h);
	else
		hashes[1] |= (1 << (h - 32));

	return (1);
}
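/*
 * Hash-filter sketch (informational): ether_crc32_be() over the 6-byte
 * address yields a 32-bit CRC whose top 6 bits (>> 26) select one of
 * 64 bits split across the MAR0/MAR1 registers.  A multicast frame is
 * accepted when the bit picked by its destination address is set, so
 * unrelated addresses that collide in the hash are accepted too; the
 * stack is expected to filter those out.
 */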
/*
 * Program the 64-bit multicast hash filter.
 */
static void
vr_set_filter(struct vr_softc *sc)
{
	if_t ifp;
	uint32_t hashes[2] = { 0, 0 };
	uint8_t rxfilt;
	int error, mcnt;

	VR_LOCK_ASSERT(sc);

	ifp = sc->vr_ifp;
	rxfilt = CSR_READ_1(sc, VR_RXCFG);
	rxfilt &= ~(VR_RXCFG_RX_PROMISC | VR_RXCFG_RX_BROAD |
	    VR_RXCFG_RX_MULTI);
	if (if_getflags(ifp) & IFF_BROADCAST)
		rxfilt |= VR_RXCFG_RX_BROAD;
	if (if_getflags(ifp) & IFF_ALLMULTI || if_getflags(ifp) & IFF_PROMISC) {
		rxfilt |= VR_RXCFG_RX_MULTI;
		if (if_getflags(ifp) & IFF_PROMISC)
			rxfilt |= VR_RXCFG_RX_PROMISC;
		CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
		CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF);
		return;
	}

	/* Now program new ones. */
	error = 0;
	if ((sc->vr_quirks & VR_Q_CAM) != 0) {
		struct vr_hash_maddr_cam_ctx ctx;

		/*
		 * For hardware with CAM capability, use the 32-entry
		 * multicast perfect filter.
		 */
		ctx.sc = sc;
		ctx.mask = 0;
		ctx.error = 0;
		mcnt = if_foreach_llmaddr(ifp, vr_hash_maddr_cam, &ctx);
		vr_cam_mask(sc, VR_MCAST_CAM, ctx.mask);
		error = ctx.error;
	}

	if ((sc->vr_quirks & VR_Q_CAM) == 0 || error != 0) {
		/*
		 * If there are too many multicast addresses or
		 * setting the multicast CAM filter failed, use hash
		 * table based filtering.
		 */
		mcnt = if_foreach_llmaddr(ifp, vr_hash_maddr, hashes);
	}

	if (mcnt > 0)
		rxfilt |= VR_RXCFG_RX_MULTI;

	CSR_WRITE_4(sc, VR_MAR0, hashes[0]);
	CSR_WRITE_4(sc, VR_MAR1, hashes[1]);
	CSR_WRITE_1(sc, VR_RXCFG, rxfilt);
}

static void
vr_reset(const struct vr_softc *sc)
{
	int i;

	/*VR_LOCK_ASSERT(sc);*/ /* XXX: Called during attach w/o lock. */

	CSR_WRITE_1(sc, VR_CR1, VR_CR1_RESET);
	if (sc->vr_revid < REV_ID_VT6102_A) {
		/* VT86C100A needs more delay after reset. */
		DELAY(100);
	}
	for (i = 0; i < VR_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_1(sc, VR_CR1) & VR_CR1_RESET))
			break;
	}
	if (i == VR_TIMEOUT) {
		if (sc->vr_revid < REV_ID_VT6102_A)
			device_printf(sc->vr_dev, "reset never completed!\n");
		else {
			/* Use newer force reset command. */
			device_printf(sc->vr_dev,
			    "Using force reset command.\n");
			VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST);
			/*
			 * Wait a little while for the chip to get its brains
			 * in order.
			 */
			DELAY(2000);
		}
	}
}

/*
 * Probe for a VIA Rhine chip.  Check the PCI vendor and device
 * IDs against our list and return a match or NULL.
 */
static const struct vr_type *
vr_match(device_t dev)
{
	const struct vr_type *t = vr_devs;

	for (t = vr_devs; t->vr_name != NULL; t++)
		if ((pci_get_vendor(dev) == t->vr_vid) &&
		    (pci_get_device(dev) == t->vr_did))
			return (t);
	return (NULL);
}

/*
 * Probe for a VIA Rhine chip.  Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
vr_probe(device_t dev)
{
	const struct vr_type *t;

	t = vr_match(dev);
	if (t != NULL) {
		device_set_desc(dev, t->vr_name);
		return (BUS_PROBE_DEFAULT);
	}
	return (ENXIO);
}
/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
vr_attach(device_t dev)
{
	struct vr_softc *sc;
	if_t ifp;
	const struct vr_type *t;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int error, rid;
	int i, phy, pmc;

	sc = device_get_softc(dev);
	sc->vr_dev = dev;
	t = vr_match(dev);
	KASSERT(t != NULL, ("Lost if_vr device match"));
	sc->vr_quirks = t->vr_quirks;
	device_printf(dev, "Quirks: 0x%x\n", sc->vr_quirks);

	mtx_init(&sc->vr_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->vr_stat_callout, &sc->vr_mtx, 0);
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    sc, 0, vr_sysctl_stats, "I", "Statistics");

	error = 0;

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);
	sc->vr_revid = pci_get_revid(dev);
	device_printf(dev, "Revision: 0x%x\n", sc->vr_revid);

	sc->vr_res_id = PCIR_BAR(0);
	sc->vr_res_type = SYS_RES_IOPORT;
	sc->vr_res = bus_alloc_resource_any(dev, sc->vr_res_type,
	    &sc->vr_res_id, RF_ACTIVE);
	if (sc->vr_res == NULL) {
		device_printf(dev, "couldn't map ports\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate interrupt. */
	rid = 0;
	sc->vr_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->vr_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/* Allocate ifnet structure. */
	ifp = sc->vr_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "couldn't allocate ifnet structure\n");
		error = ENOSPC;
		goto fail;
	}
	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(ifp, vr_ioctl);
	if_setstartfn(ifp, vr_start);
	if_setinitfn(ifp, vr_init);
	if_setsendqlen(ifp, VR_TX_RING_CNT - 1);
	if_setsendqready(ifp);

	NET_TASK_INIT(&sc->vr_inttask, 0, vr_int_task, sc);

	/* Configure Tx FIFO threshold. */
	sc->vr_txthresh = VR_TXTHRESH_MIN;
	if (sc->vr_revid < REV_ID_VT6105_A0) {
		/*
		 * Use store and forward mode for Rhine I/II.
		 * Otherwise they produce a lot of Tx underruns and
		 * it would take a while to find a working FIFO
		 * threshold value.
		 */
		sc->vr_txthresh = VR_TXTHRESH_MAX;
	}
	if ((sc->vr_quirks & VR_Q_CSUM) != 0) {
		if_sethwassist(ifp, VR_CSUM_FEATURES);
		if_setcapabilitiesbit(ifp, IFCAP_HWCSUM, 0);
		/*
		 * To update the checksum field the hardware may need to
		 * store entire frames into the FIFO before transmitting.
		 */
		sc->vr_txthresh = VR_TXTHRESH_MAX;
	}

	if (sc->vr_revid >= REV_ID_VT6102_A &&
	    pci_find_cap(dev, PCIY_PMG, &pmc) == 0)
		if_setcapabilitiesbit(ifp, IFCAP_WOL_UCAST | IFCAP_WOL_MAGIC, 0);

	/* Rhine supports oversized VLAN frame. */
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
	if_setcapenable(ifp, if_getcapabilities(ifp));
#ifdef DEVICE_POLLING
	if_setcapabilitiesbit(ifp, IFCAP_POLLING, 0);
#endif

	/*
	 * Windows may put the chip in suspend mode when it
	 * shuts down.  Be sure to kick it in the head to wake it
	 * up again.
	 */
	if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0)
		VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));
	/*
	 * Get station address.  The way the Rhine chips work,
	 * you're not allowed to directly access the EEPROM once
	 * they've been programmed a special way.  Consequently,
	 * we need to read the node address from the PAR0 and PAR1
	 * registers.
	 * Reloading the EEPROM also overwrites VR_CFGA, VR_CFGB,
	 * VR_CFGC and VR_CFGD, so the memory mapped I/O configured
	 * by the driver is reset to its default state.
	 */
	VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD);
	for (i = VR_TIMEOUT; i > 0; i--) {
		DELAY(1);
		if ((CSR_READ_1(sc, VR_EECSR) & VR_EECSR_LOAD) == 0)
			break;
	}
	if (i == 0)
		device_printf(dev, "Reloading EEPROM timeout!\n");
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i);

	/* Reset the adapter. */
	vr_reset(sc);
	/* Ack intr & disable further interrupts. */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
	CSR_WRITE_2(sc, VR_IMR, 0);
	if (sc->vr_revid >= REV_ID_VT6102_A)
		CSR_WRITE_2(sc, VR_MII_IMR, 0);

	if (sc->vr_revid < REV_ID_VT6102_A) {
		pci_write_config(dev, VR_PCI_MODE2,
		    pci_read_config(dev, VR_PCI_MODE2, 1) |
		    VR_MODE2_MODE10T, 1);
	} else {
		/* Report error instead of retrying forever. */
		pci_write_config(dev, VR_PCI_MODE2,
		    pci_read_config(dev, VR_PCI_MODE2, 1) |
		    VR_MODE2_PCEROPT, 1);
		/* Detect MII coding error. */
		pci_write_config(dev, VR_PCI_MODE3,
		    pci_read_config(dev, VR_PCI_MODE3, 1) |
		    VR_MODE3_MIION, 1);
		if (sc->vr_revid >= REV_ID_VT6105_LOM &&
		    sc->vr_revid < REV_ID_VT6105M_A0)
			pci_write_config(dev, VR_PCI_MODE2,
			    pci_read_config(dev, VR_PCI_MODE2, 1) |
			    VR_MODE2_MODE10T, 1);
		/* Enable Memory-Read-Multiple. */
		if (sc->vr_revid >= REV_ID_VT6107_A1 &&
		    sc->vr_revid < REV_ID_VT6105M_A0)
			pci_write_config(dev, VR_PCI_MODE2,
			    pci_read_config(dev, VR_PCI_MODE2, 1) |
			    VR_MODE2_MRDPL, 1);
	}
	/* Disable MII AUTOPOLL. */
	VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL);

	if (vr_dma_alloc(sc) != 0) {
		error = ENXIO;
		goto fail;
	}

	/* Do MII setup. */
	if (sc->vr_revid >= REV_ID_VT6105_A0)
		phy = 1;
	else
		phy = CSR_READ_1(sc, VR_PHYADDR) & VR_PHYADDR_MASK;
	error = mii_attach(dev, &sc->vr_miibus, ifp, vr_ifmedia_upd,
	    vr_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY,
	    sc->vr_revid >= REV_ID_VT6102_A ? MIIF_DOPAUSE : 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	/* Call MI attach routine. */
	ether_ifattach(ifp, eaddr);
	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

	/* Hook interrupt last to avoid having to lock softc. */
	error = bus_setup_intr(dev, sc->vr_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    vr_intr, NULL, sc, &sc->vr_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		vr_detach(dev);

	return (error);
}
/*
 * Shutdown hardware and free up resources.  This can be called any
 * time after the mutex has been initialized.  It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
vr_detach(device_t dev)
{
	struct vr_softc *sc = device_get_softc(dev);
	if_t ifp = sc->vr_ifp;

	KASSERT(mtx_initialized(&sc->vr_mtx), ("vr mutex not initialized"));

#ifdef DEVICE_POLLING
	if (ifp != NULL && if_getcapenable(ifp) & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/* These should only be active if attach succeeded. */
	if (device_is_attached(dev)) {
		VR_LOCK(sc);
		sc->vr_flags |= VR_F_DETACHED;
		vr_stop(sc);
		VR_UNLOCK(sc);
		callout_drain(&sc->vr_stat_callout);
		taskqueue_drain(taskqueue_fast, &sc->vr_inttask);
		ether_ifdetach(ifp);
	}
	if (sc->vr_miibus)
		device_delete_child(dev, sc->vr_miibus);
	bus_generic_detach(dev);

	if (sc->vr_intrhand)
		bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
	if (sc->vr_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
	if (sc->vr_res)
		bus_release_resource(dev, sc->vr_res_type, sc->vr_res_id,
		    sc->vr_res);

	if (ifp)
		if_free(ifp);

	vr_dma_free(sc);

	mtx_destroy(&sc->vr_mtx);

	return (0);
}

struct vr_dmamap_arg {
	bus_addr_t	vr_busaddr;
};

static void
vr_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct vr_dmamap_arg *ctx;

	if (error != 0)
		return;
	ctx = arg;
	ctx->vr_busaddr = segs[0].ds_addr;
}

static int
vr_dma_alloc(struct vr_softc *sc)
{
	struct vr_dmamap_arg ctx;
	struct vr_txdesc *txd;
	struct vr_rxdesc *rxd;
	bus_size_t tx_alignment;
	int error, i;

	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->vr_dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vr_cdata.vr_parent_tag);
	if (error != 0) {
		device_printf(sc->vr_dev, "failed to create parent DMA tag\n");
		goto fail;
	}
	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(
	    sc->vr_cdata.vr_parent_tag,	/* parent */
	    VR_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    VR_TX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    VR_TX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vr_cdata.vr_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->vr_dev, "failed to create Tx ring DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(
	    sc->vr_cdata.vr_parent_tag,	/* parent */
	    VR_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    VR_RX_RING_SIZE,		/* maxsize */
	    1,				/* nsegments */
	    VR_RX_RING_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vr_cdata.vr_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->vr_dev, "failed to create Rx ring DMA tag\n");
		goto fail;
	}

	if ((sc->vr_quirks & VR_Q_NEEDALIGN) != 0)
		tx_alignment = sizeof(uint32_t);
	else
		tx_alignment = 1;
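	/*
	 * Note (informational): the ring tags above and the buffer tags
	 * below all descend from vr_parent_tag, whose lowaddr of
	 * BUS_SPACE_MAXADDR_32BIT confines every ring and buffer to the
	 * low 4GB.  That matters because the descriptors carry only the
	 * low 32 bits of a bus address (VR_ADDR_LO).
	 */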
	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(
	    sc->vr_cdata.vr_parent_tag,	/* parent */
	    tx_alignment, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES * VR_MAXFRAGS,	/* maxsize */
	    VR_MAXFRAGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vr_cdata.vr_tx_tag);
	if (error != 0) {
		device_printf(sc->vr_dev, "failed to create Tx DMA tag\n");
		goto fail;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(
	    sc->vr_cdata.vr_parent_tag,	/* parent */
	    VR_RX_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vr_cdata.vr_rx_tag);
	if (error != 0) {
		device_printf(sc->vr_dev, "failed to create Rx DMA tag\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for Tx ring. */
	error = bus_dmamem_alloc(sc->vr_cdata.vr_tx_ring_tag,
	    (void **)&sc->vr_rdata.vr_tx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->vr_cdata.vr_tx_ring_map);
	if (error != 0) {
		device_printf(sc->vr_dev,
		    "failed to allocate DMA'able memory for Tx ring\n");
		goto fail;
	}

	ctx.vr_busaddr = 0;
	error = bus_dmamap_load(sc->vr_cdata.vr_tx_ring_tag,
	    sc->vr_cdata.vr_tx_ring_map, sc->vr_rdata.vr_tx_ring,
	    VR_TX_RING_SIZE, vr_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.vr_busaddr == 0) {
		device_printf(sc->vr_dev,
		    "failed to load DMA'able memory for Tx ring\n");
		goto fail;
	}
	sc->vr_rdata.vr_tx_ring_paddr = ctx.vr_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->vr_cdata.vr_rx_ring_tag,
	    (void **)&sc->vr_rdata.vr_rx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->vr_cdata.vr_rx_ring_map);
	if (error != 0) {
		device_printf(sc->vr_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}

	ctx.vr_busaddr = 0;
	error = bus_dmamap_load(sc->vr_cdata.vr_rx_ring_tag,
	    sc->vr_cdata.vr_rx_ring_map, sc->vr_rdata.vr_rx_ring,
	    VR_RX_RING_SIZE, vr_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.vr_busaddr == 0) {
		device_printf(sc->vr_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc->vr_rdata.vr_rx_ring_paddr = ctx.vr_busaddr;

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < VR_TX_RING_CNT; i++) {
		txd = &sc->vr_cdata.vr_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->vr_cdata.vr_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->vr_dev,
			    "failed to create Tx dmamap\n");
			goto fail;
		}
	}
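	/*
	 * Spare-map sketch (informational): the Rx path keeps one extra
	 * DMA map (vr_rx_sparemap, created below).  vr_newbuf() loads a
	 * fresh mbuf into the spare map first and swaps it with the
	 * descriptor's map only after the load succeeds, so a failed
	 * allocation leaves the old buffer and its mapping untouched.
	 */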
	/* Create DMA maps for Rx buffers. */
	if ((error = bus_dmamap_create(sc->vr_cdata.vr_rx_tag, 0,
	    &sc->vr_cdata.vr_rx_sparemap)) != 0) {
		device_printf(sc->vr_dev,
		    "failed to create spare Rx dmamap\n");
		goto fail;
	}
	for (i = 0; i < VR_RX_RING_CNT; i++) {
		rxd = &sc->vr_cdata.vr_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->vr_cdata.vr_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->vr_dev,
			    "failed to create Rx dmamap\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
vr_dma_free(struct vr_softc *sc)
{
	struct vr_txdesc *txd;
	struct vr_rxdesc *rxd;
	int i;

	/* Tx ring. */
	if (sc->vr_cdata.vr_tx_ring_tag) {
		if (sc->vr_rdata.vr_tx_ring_paddr)
			bus_dmamap_unload(sc->vr_cdata.vr_tx_ring_tag,
			    sc->vr_cdata.vr_tx_ring_map);
		if (sc->vr_rdata.vr_tx_ring)
			bus_dmamem_free(sc->vr_cdata.vr_tx_ring_tag,
			    sc->vr_rdata.vr_tx_ring,
			    sc->vr_cdata.vr_tx_ring_map);
		sc->vr_rdata.vr_tx_ring = NULL;
		sc->vr_rdata.vr_tx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->vr_cdata.vr_tx_ring_tag);
		sc->vr_cdata.vr_tx_ring_tag = NULL;
	}
	/* Rx ring. */
	if (sc->vr_cdata.vr_rx_ring_tag) {
		if (sc->vr_rdata.vr_rx_ring_paddr)
			bus_dmamap_unload(sc->vr_cdata.vr_rx_ring_tag,
			    sc->vr_cdata.vr_rx_ring_map);
		if (sc->vr_rdata.vr_rx_ring)
			bus_dmamem_free(sc->vr_cdata.vr_rx_ring_tag,
			    sc->vr_rdata.vr_rx_ring,
			    sc->vr_cdata.vr_rx_ring_map);
		sc->vr_rdata.vr_rx_ring = NULL;
		sc->vr_rdata.vr_rx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->vr_cdata.vr_rx_ring_tag);
		sc->vr_cdata.vr_rx_ring_tag = NULL;
	}
	/* Tx buffers. */
	if (sc->vr_cdata.vr_tx_tag) {
		for (i = 0; i < VR_TX_RING_CNT; i++) {
			txd = &sc->vr_cdata.vr_txdesc[i];
			if (txd->tx_dmamap) {
				bus_dmamap_destroy(sc->vr_cdata.vr_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->vr_cdata.vr_tx_tag);
		sc->vr_cdata.vr_tx_tag = NULL;
	}
	/* Rx buffers. */
	if (sc->vr_cdata.vr_rx_tag) {
		for (i = 0; i < VR_RX_RING_CNT; i++) {
			rxd = &sc->vr_cdata.vr_rxdesc[i];
			if (rxd->rx_dmamap) {
				bus_dmamap_destroy(sc->vr_cdata.vr_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->vr_cdata.vr_rx_sparemap) {
			bus_dmamap_destroy(sc->vr_cdata.vr_rx_tag,
			    sc->vr_cdata.vr_rx_sparemap);
			sc->vr_cdata.vr_rx_sparemap = 0;
		}
		bus_dma_tag_destroy(sc->vr_cdata.vr_rx_tag);
		sc->vr_cdata.vr_rx_tag = NULL;
	}

	if (sc->vr_cdata.vr_parent_tag) {
		bus_dma_tag_destroy(sc->vr_cdata.vr_parent_tag);
		sc->vr_cdata.vr_parent_tag = NULL;
	}
}
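/*
 * Ring-layout sketch (informational): both rings are circular singly
 * linked lists in DMA memory.  vr_nextphys of entry i holds the bus
 * address of entry i + 1, with the last entry pointing back at entry
 * 0, so the chip can chase descriptors on its own once started.
 */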
/*
 * Initialize the transmit descriptors.
 */
static int
vr_tx_ring_init(struct vr_softc *sc)
{
	struct vr_ring_data *rd;
	struct vr_txdesc *txd;
	bus_addr_t addr;
	int i;

	sc->vr_cdata.vr_tx_prod = 0;
	sc->vr_cdata.vr_tx_cons = 0;
	sc->vr_cdata.vr_tx_cnt = 0;
	sc->vr_cdata.vr_tx_pkts = 0;

	rd = &sc->vr_rdata;
	bzero(rd->vr_tx_ring, VR_TX_RING_SIZE);
	for (i = 0; i < VR_TX_RING_CNT; i++) {
		if (i == VR_TX_RING_CNT - 1)
			addr = VR_TX_RING_ADDR(sc, 0);
		else
			addr = VR_TX_RING_ADDR(sc, i + 1);
		rd->vr_tx_ring[i].vr_nextphys = htole32(VR_ADDR_LO(addr));
		txd = &sc->vr_cdata.vr_txdesc[i];
		txd->tx_m = NULL;
	}

	bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
	    sc->vr_cdata.vr_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Initialize the RX descriptors and allocate mbufs for them.  Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int
vr_rx_ring_init(struct vr_softc *sc)
{
	struct vr_ring_data *rd;
	struct vr_rxdesc *rxd;
	bus_addr_t addr;
	int i;

	sc->vr_cdata.vr_rx_cons = 0;

	rd = &sc->vr_rdata;
	bzero(rd->vr_rx_ring, VR_RX_RING_SIZE);
	for (i = 0; i < VR_RX_RING_CNT; i++) {
		rxd = &sc->vr_cdata.vr_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->desc = &rd->vr_rx_ring[i];
		if (i == VR_RX_RING_CNT - 1)
			addr = VR_RX_RING_ADDR(sc, 0);
		else
			addr = VR_RX_RING_ADDR(sc, i + 1);
		rd->vr_rx_ring[i].vr_nextphys = htole32(VR_ADDR_LO(addr));
		if (vr_newbuf(sc, i) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag,
	    sc->vr_cdata.vr_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

static __inline void
vr_discard_rxbuf(struct vr_rxdesc *rxd)
{
	struct vr_desc *desc;

	desc = rxd->desc;
	desc->vr_ctl = htole32(VR_RXCTL | (MCLBYTES - sizeof(uint64_t)));
	desc->vr_status = htole32(VR_RXSTAT_OWN);
}
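/*
 * Recycle sketch (informational): vr_discard_rxbuf() above re-arms a
 * descriptor with its existing cluster after an Rx error or an mbuf
 * shortage.  It just rewrites the control word and returns ownership
 * to the chip; the stale data is never passed up the stack, so nothing
 * needs cleaning.
 */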
/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 * Note: the length fields are only 11 bits wide, which means the
 * largest size we can specify is 2047.  This is important because
 * MCLBYTES is 2048, so we have to subtract one otherwise we'll
 * overflow the field and make a mess.
 */
static int
vr_newbuf(struct vr_softc *sc, int idx)
{
	struct vr_desc *desc;
	struct vr_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, sizeof(uint64_t));

	if (bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_rx_tag,
	    sc->vr_cdata.vr_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc->vr_cdata.vr_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->vr_cdata.vr_rx_sparemap;
	sc->vr_cdata.vr_rx_sparemap = map;
	bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	desc = rxd->desc;
	desc->vr_data = htole32(VR_ADDR_LO(segs[0].ds_addr));
	desc->vr_ctl = htole32(VR_RXCTL | segs[0].ds_len);
	desc->vr_status = htole32(VR_RXSTAT_OWN);

	return (0);
}

#ifndef __NO_STRICT_ALIGNMENT
static __inline void
vr_fixup_rx(struct mbuf *m)
{
	uint16_t *src, *dst;
	int i;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= ETHER_ALIGN;
}
#endif

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static int
vr_rxeof(struct vr_softc *sc)
{
	struct vr_rxdesc *rxd;
	struct mbuf *m;
	if_t ifp;
	struct vr_desc *cur_rx;
	int cons, prog, total_len, rx_npkts;
	uint32_t rxstat, rxctl;

	VR_LOCK_ASSERT(sc);
	ifp = sc->vr_ifp;
	cons = sc->vr_cdata.vr_rx_cons;
	rx_npkts = 0;

	bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag,
	    sc->vr_cdata.vr_rx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (prog = 0; prog < VR_RX_RING_CNT; VR_INC(cons, VR_RX_RING_CNT)) {
#ifdef DEVICE_POLLING
		if (if_getcapenable(ifp) & IFCAP_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif
		cur_rx = &sc->vr_rdata.vr_rx_ring[cons];
		rxstat = le32toh(cur_rx->vr_status);
		rxctl = le32toh(cur_rx->vr_ctl);
		if ((rxstat & VR_RXSTAT_OWN) == VR_RXSTAT_OWN)
			break;

		prog++;
		rxd = &sc->vr_cdata.vr_rxdesc[cons];
		m = rxd->rx_m;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 * We don't support SG in the Rx path yet, so discard
		 * partial frames.
		 */
		if ((rxstat & VR_RXSTAT_RX_OK) == 0 ||
		    (rxstat & (VR_RXSTAT_FIRSTFRAG | VR_RXSTAT_LASTFRAG)) !=
		    (VR_RXSTAT_FIRSTFRAG | VR_RXSTAT_LASTFRAG)) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			sc->vr_stat.rx_errors++;
			if (rxstat & VR_RXSTAT_CRCERR)
				sc->vr_stat.rx_crc_errors++;
			if (rxstat & VR_RXSTAT_FRAMEALIGNERR)
				sc->vr_stat.rx_alignment++;
			if (rxstat & VR_RXSTAT_FIFOOFLOW)
				sc->vr_stat.rx_fifo_overflows++;
			if (rxstat & VR_RXSTAT_GIANT)
				sc->vr_stat.rx_giants++;
			if (rxstat & VR_RXSTAT_RUNT)
				sc->vr_stat.rx_runts++;
			if (rxstat & VR_RXSTAT_BUFFERR)
				sc->vr_stat.rx_no_buffers++;
#ifdef	VR_SHOW_ERRORS
			device_printf(sc->vr_dev, "%s: receive error = 0x%b\n",
			    __func__, rxstat & 0xff, VR_RXSTAT_ERR_BITS);
#endif
			vr_discard_rxbuf(rxd);
			continue;
		}

		if (vr_newbuf(sc, cons) != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			sc->vr_stat.rx_errors++;
			sc->vr_stat.rx_no_mbufs++;
			vr_discard_rxbuf(rxd);
			continue;
		}

		/*
		 * XXX The VIA Rhine chip includes the CRC with every
		 * received frame, and there's no way to turn this
		 * behavior off (at least, I can't find anything in
		 * the manual that explains how to do it) so we have
		 * to trim off the CRC manually.
		 */
		total_len = VR_RXBYTES(rxstat);
		total_len -= ETHER_CRC_LEN;
		m->m_pkthdr.len = m->m_len = total_len;
#ifndef	__NO_STRICT_ALIGNMENT
		/*
		 * RX buffers must be 32-bit aligned.
		 * Ignore the alignment problems on the non-strict alignment
		 * platform.  The performance hit incurred due to unaligned
		 * accesses is much smaller than the hit produced by forcing
		 * buffer copies all the time.
		 */
		vr_fixup_rx(m);
#endif
		m->m_pkthdr.rcvif = ifp;
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
		sc->vr_stat.rx_ok++;
		if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0 &&
		    (rxstat & VR_RXSTAT_FRAG) == 0 &&
		    (rxctl & VR_RXCTL_IP) != 0) {
			/* Checksum is valid for non-fragmented IP packets. */
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if ((rxctl & VR_RXCTL_IPOK) == VR_RXCTL_IPOK) {
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				if (rxctl & (VR_RXCTL_TCP | VR_RXCTL_UDP)) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
					if ((rxctl & VR_RXCTL_TCPUDPOK) != 0)
						m->m_pkthdr.csum_data = 0xffff;
				}
			}
		}
		VR_UNLOCK(sc);
		if_input(ifp, m);
		VR_LOCK(sc);
		rx_npkts++;
	}

	if (prog > 0) {
		/*
		 * Let the controller know how many Rx buffers have been
		 * posted, but avoid the expensive register access if the
		 * Tx pause capability was not negotiated with the link
		 * partner.
		 */
		if ((sc->vr_flags & VR_F_TXPAUSE) != 0) {
			if (prog >= VR_RX_RING_CNT)
				prog = VR_RX_RING_CNT - 1;
			CSR_WRITE_1(sc, VR_FLOWCR0, prog);
		}
		sc->vr_cdata.vr_rx_cons = cons;
		bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag,
		    sc->vr_cdata.vr_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	return (rx_npkts);
}
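/*
 * Locking note (informational): vr_rxeof() drops the softc lock around
 * if_input() so the network stack can run without holding the driver
 * lock.  The consumer index is kept in a local across that window and
 * written back only after the loop, which keeps the ring state
 * consistent even though the lock is briefly released per frame.
 */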
/*
 * A frame was downloaded to the chip.  It's safe for us to clean up
 * the list buffers.
 */
static void
vr_txeof(struct vr_softc *sc)
{
	struct vr_txdesc *txd;
	struct vr_desc *cur_tx;
	if_t ifp;
	uint32_t txctl, txstat;
	int cons, prod;

	VR_LOCK_ASSERT(sc);

	cons = sc->vr_cdata.vr_tx_cons;
	prod = sc->vr_cdata.vr_tx_prod;
	if (cons == prod)
		return;

	bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
	    sc->vr_cdata.vr_tx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	ifp = sc->vr_ifp;
	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	for (; cons != prod; VR_INC(cons, VR_TX_RING_CNT)) {
		cur_tx = &sc->vr_rdata.vr_tx_ring[cons];
		txctl = le32toh(cur_tx->vr_ctl);
		txstat = le32toh(cur_tx->vr_status);
		if ((txstat & VR_TXSTAT_OWN) == VR_TXSTAT_OWN)
			break;

		sc->vr_cdata.vr_tx_cnt--;
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
		/* Only the first descriptor in the chain is valid. */
		if ((txctl & VR_TXCTL_FIRSTFRAG) == 0)
			continue;

		txd = &sc->vr_cdata.vr_txdesc[cons];
		KASSERT(txd->tx_m != NULL, ("%s: accessing NULL mbuf!\n",
		    __func__));

		if ((txstat & VR_TXSTAT_ERRSUM) != 0) {
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			sc->vr_stat.tx_errors++;
			if ((txstat & VR_TXSTAT_ABRT) != 0) {
				/* Give up and restart Tx. */
				sc->vr_stat.tx_abort++;
				bus_dmamap_sync(sc->vr_cdata.vr_tx_tag,
				    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->vr_cdata.vr_tx_tag,
				    txd->tx_dmamap);
				m_freem(txd->tx_m);
				txd->tx_m = NULL;
				VR_INC(cons, VR_TX_RING_CNT);
				sc->vr_cdata.vr_tx_cons = cons;
				if (vr_tx_stop(sc) != 0) {
					device_printf(sc->vr_dev,
					    "%s: Tx shutdown error -- "
					    "resetting\n", __func__);
					sc->vr_flags |= VR_F_RESTART;
					return;
				}
				vr_tx_start(sc);
				break;
			}
			if ((sc->vr_revid < REV_ID_VT3071_A &&
			    (txstat & VR_TXSTAT_UNDERRUN)) ||
			    (txstat & (VR_TXSTAT_UDF | VR_TXSTAT_TBUFF))) {
				sc->vr_stat.tx_underrun++;
				/* Retry and restart Tx. */
				sc->vr_cdata.vr_tx_cnt++;
				sc->vr_cdata.vr_tx_cons = cons;
				cur_tx->vr_status = htole32(VR_TXSTAT_OWN);
				bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
				    sc->vr_cdata.vr_tx_ring_map,
				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
				vr_tx_underrun(sc);
				return;
			}
			if ((txstat & VR_TXSTAT_DEFER) != 0) {
				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
				sc->vr_stat.tx_collisions++;
			}
			if ((txstat & VR_TXSTAT_LATECOLL) != 0) {
				if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1);
				sc->vr_stat.tx_late_collisions++;
			}
		} else {
			sc->vr_stat.tx_ok++;
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		}

		bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap);
		if (sc->vr_revid < REV_ID_VT3071_A) {
			if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
			    (txstat & VR_TXSTAT_COLLCNT) >> 3);
			sc->vr_stat.tx_collisions +=
			    (txstat & VR_TXSTAT_COLLCNT) >> 3;
		} else {
			if_inc_counter(ifp, IFCOUNTER_COLLISIONS,
			    (txstat & 0x0f));
			sc->vr_stat.tx_collisions += (txstat & 0x0f);
		}
		m_freem(txd->tx_m);
		txd->tx_m = NULL;
	}

	sc->vr_cdata.vr_tx_cons = cons;
	if (sc->vr_cdata.vr_tx_cnt == 0)
		sc->vr_watchdog_timer = 0;
}

static void
vr_tick(void *xsc)
{
	struct vr_softc *sc;
	struct mii_data *mii;

	sc = (struct vr_softc *)xsc;

	VR_LOCK_ASSERT(sc);

	if ((sc->vr_flags & VR_F_RESTART) != 0) {
		device_printf(sc->vr_dev, "restarting\n");
		sc->vr_stat.num_restart++;
		if_setdrvflagbits(sc->vr_ifp, 0, IFF_DRV_RUNNING);
		vr_init_locked(sc);
		sc->vr_flags &= ~VR_F_RESTART;
	}

	mii = device_get_softc(sc->vr_miibus);
	mii_tick(mii);
	if ((sc->vr_flags & VR_F_LINK) == 0)
		vr_miibus_statchg(sc->vr_dev);
	vr_watchdog(sc);
	callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc);
}

#ifdef DEVICE_POLLING
static poll_handler_t vr_poll;
static poll_handler_t vr_poll_locked;

static int
vr_poll(if_t ifp, enum poll_cmd cmd, int count)
{
	struct vr_softc *sc;
	int rx_npkts;

	sc = if_getsoftc(ifp);
	rx_npkts = 0;

	VR_LOCK(sc);
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		rx_npkts = vr_poll_locked(ifp, cmd, count);
	VR_UNLOCK(sc);
	return (rx_npkts);
}

static int
vr_poll_locked(if_t ifp, enum poll_cmd cmd, int count)
{
	struct vr_softc *sc;
	int rx_npkts;

	sc = if_getsoftc(ifp);

	VR_LOCK_ASSERT(sc);

	sc->rxcycles = count;
	rx_npkts = vr_rxeof(sc);
	vr_txeof(sc);
	if (!if_sendq_empty(ifp))
		vr_start_locked(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) {
		uint16_t status;

		/* Also check status register. */
		status = CSR_READ_2(sc, VR_ISR);
		if (status)
			CSR_WRITE_2(sc, VR_ISR, status);

		if ((status & VR_INTRS) == 0)
			return (rx_npkts);

		if ((status & (VR_ISR_BUSERR | VR_ISR_LINKSTAT2 |
		    VR_ISR_STATSOFLOW)) != 0) {
			if (vr_error(sc, status) != 0)
				return (rx_npkts);
		}
		if ((status & (VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW)) != 0) {
#ifdef	VR_SHOW_ERRORS
			device_printf(sc->vr_dev, "%s: receive error : 0x%b\n",
			    __func__, status, VR_ISR_ERR_BITS);
#endif
			vr_rx_start(sc);
		}
	}
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */

/* Back off the transmit threshold. */
static void
vr_tx_underrun(struct vr_softc *sc)
{
	int thresh;

	device_printf(sc->vr_dev, "Tx underrun -- ");
	if (sc->vr_txthresh < VR_TXTHRESH_MAX) {
		thresh = sc->vr_txthresh;
		sc->vr_txthresh++;
		if (sc->vr_txthresh >= VR_TXTHRESH_MAX) {
			sc->vr_txthresh = VR_TXTHRESH_MAX;
			printf("using store and forward mode\n");
		} else
			printf("increasing Tx threshold(%d -> %d)\n",
			    vr_tx_threshold_tables[thresh].value,
			    vr_tx_threshold_tables[thresh + 1].value);
	} else
		printf("\n");
	sc->vr_stat.tx_underrun++;
	if (vr_tx_stop(sc) != 0) {
		device_printf(sc->vr_dev, "%s: Tx shutdown error -- "
		    "resetting\n", __func__);
		sc->vr_flags |= VR_F_RESTART;
		return;
	}
	vr_tx_start(sc);
}

static int
vr_intr(void *arg)
{
	struct vr_softc *sc;
	uint16_t status;

	sc = (struct vr_softc *)arg;

	status = CSR_READ_2(sc, VR_ISR);
	if (status == 0 || status == 0xffff || (status & VR_INTRS) == 0)
		return (FILTER_STRAY);

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, 0x0000);

	taskqueue_enqueue(taskqueue_fast, &sc->vr_inttask);

	return (FILTER_HANDLED);
}

static void
vr_int_task(void *arg, int npending)
{
	struct vr_softc *sc;
	if_t ifp;
	uint16_t status;

	sc = (struct vr_softc *)arg;

	VR_LOCK(sc);

	if ((sc->vr_flags & VR_F_SUSPENDED) != 0)
		goto done_locked;

	status = CSR_READ_2(sc, VR_ISR);
	ifp = sc->vr_ifp;
#ifdef DEVICE_POLLING
	if ((if_getcapenable(ifp) & IFCAP_POLLING) != 0)
		goto done_locked;
#endif

	/* Suppress unwanted interrupts. */
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0 ||
	    (sc->vr_flags & VR_F_RESTART) != 0) {
		CSR_WRITE_2(sc, VR_IMR, 0);
		CSR_WRITE_2(sc, VR_ISR, status);
		goto done_locked;
	}

	for (; (status & VR_INTRS) != 0;) {
		CSR_WRITE_2(sc, VR_ISR, status);
		if ((status & (VR_ISR_BUSERR | VR_ISR_LINKSTAT2 |
		    VR_ISR_STATSOFLOW)) != 0) {
			if (vr_error(sc, status) != 0) {
				VR_UNLOCK(sc);
				return;
			}
		}
		vr_rxeof(sc);
		if ((status & (VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW)) != 0) {
#ifdef	VR_SHOW_ERRORS
			device_printf(sc->vr_dev, "%s: receive error = 0x%b\n",
			    __func__, status, VR_ISR_ERR_BITS);
#endif
			/* Restart Rx if RxDMA SM was stopped. */
			vr_rx_start(sc);
		}
		vr_txeof(sc);

		if (!if_sendq_empty(ifp))
			vr_start_locked(ifp);

		status = CSR_READ_2(sc, VR_ISR);
	}

	/* Re-enable interrupts. */
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);

done_locked:
	VR_UNLOCK(sc);
}
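/*
 * Interrupt-handling sketch (informational): vr_intr() above runs as a
 * filter in primary interrupt context; it acks nothing itself, masks
 * IMR and defers all work to vr_int_task() on taskqueue_fast.  The
 * task then loops on ISR under the softc lock until no interesting
 * bits remain and re-enables IMR, keeping the filter itself lock-free
 * and cheap.
 */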
static int
vr_error(struct vr_softc *sc, uint16_t status)
{
	uint16_t pcis;

	status &= VR_ISR_BUSERR | VR_ISR_LINKSTAT2 | VR_ISR_STATSOFLOW;
	if ((status & VR_ISR_BUSERR) != 0) {
		status &= ~VR_ISR_BUSERR;
		sc->vr_stat.bus_errors++;
		/* Disable further interrupts. */
		CSR_WRITE_2(sc, VR_IMR, 0);
		pcis = pci_read_config(sc->vr_dev, PCIR_STATUS, 2);
		device_printf(sc->vr_dev, "PCI bus error(0x%04x) -- "
		    "resetting\n", pcis);
		pci_write_config(sc->vr_dev, PCIR_STATUS, pcis, 2);
		sc->vr_flags |= VR_F_RESTART;
		return (EAGAIN);
	}
	if ((status & VR_ISR_LINKSTAT2) != 0) {
		/* Link state change, duplex changes etc. */
		status &= ~VR_ISR_LINKSTAT2;
	}
	if ((status & VR_ISR_STATSOFLOW) != 0) {
		status &= ~VR_ISR_STATSOFLOW;
		if (sc->vr_revid >= REV_ID_VT6105M_A0) {
			/* Update MIB counters. */
		}
	}

	if (status != 0)
		device_printf(sc->vr_dev,
		    "unhandled interrupt, status = 0x%04x\n", status);
	return (0);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
vr_encap(struct vr_softc *sc, struct mbuf **m_head)
{
	struct vr_txdesc *txd;
	struct vr_desc *desc;
	struct mbuf *m;
	bus_dma_segment_t txsegs[VR_MAXFRAGS];
	uint32_t csum_flags, txctl;
	int error, i, nsegs, prod, si;
	int padlen;

	VR_LOCK_ASSERT(sc);

	M_ASSERTPKTHDR((*m_head));

	/*
	 * Some VIA Rhine chips want packet buffers to be longword
	 * aligned, but very often our mbufs aren't.  Rather than
	 * waste time trying to decide when to copy and when not
	 * to copy, just do it all the time.
	 */
	if ((sc->vr_quirks & VR_Q_NEEDALIGN) != 0) {
		m = m_defrag(*m_head, M_NOWAIT);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
	}

	/*
	 * The Rhine chip doesn't auto-pad, so we have to make
	 * sure to pad short frames out to the minimum frame length
	 * ourselves.
	 */
	if ((*m_head)->m_pkthdr.len < VR_MIN_FRAMELEN) {
		m = *m_head;
		padlen = VR_MIN_FRAMELEN - m->m_pkthdr.len;
		if (M_WRITABLE(m) == 0) {
			/* Get a writable copy. */
			m = m_dup(*m_head, M_NOWAIT);
			m_freem(*m_head);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			*m_head = m;
		}
		if (m->m_next != NULL || M_TRAILINGSPACE(m) < padlen) {
			m = m_defrag(m, M_NOWAIT);
			if (m == NULL) {
				m_freem(*m_head);
				*m_head = NULL;
				return (ENOBUFS);
			}
		}
		/*
		 * Manually pad short frames, and zero the pad space
		 * to avoid leaking data.
		 */
		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
		m->m_pkthdr.len += padlen;
		m->m_len = m->m_pkthdr.len;
		*m_head = m;
	}

	prod = sc->vr_cdata.vr_tx_prod;
	txd = &sc->vr_cdata.vr_txdesc[prod];
	error = bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap,
	    *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_NOWAIT, VR_MAXFRAGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_tx_tag,
		    txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check number of available descriptors. */
	if (sc->vr_cdata.vr_tx_cnt + nsegs >= (VR_TX_RING_CNT - 1)) {
		bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap);
		return (ENOBUFS);
	}

	txd->tx_m = *m_head;
	bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	/* Set checksum offload. */
	csum_flags = 0;
	if (((*m_head)->m_pkthdr.csum_flags & VR_CSUM_FEATURES) != 0) {
		if ((*m_head)->m_pkthdr.csum_flags & CSUM_IP)
			csum_flags |= VR_TXCTL_IPCSUM;
		if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP)
			csum_flags |= VR_TXCTL_TCPCSUM;
		if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP)
			csum_flags |= VR_TXCTL_UDPCSUM;
	}

	/*
	 * Quite contrary to the datasheet for the VIA Rhine, the
	 * VR_TXCTL_TLINK bit is required for all descriptors, regardless
	 * of whether a frame uses a single buffer or multiple buffers.
	 * Likewise, the VR_TXSTAT_OWN bit is valid only in the first
	 * descriptor of a multi-fragment frame.  Without this, the chip
	 * generates Tx underrun interrupts and can't send any frames.
	 */
	si = prod;
	for (i = 0; i < nsegs; i++) {
		desc = &sc->vr_rdata.vr_tx_ring[prod];
		desc->vr_status = 0;
		txctl = txsegs[i].ds_len | VR_TXCTL_TLINK | csum_flags;
		if (i == 0)
			txctl |= VR_TXCTL_FIRSTFRAG;
		desc->vr_ctl = htole32(txctl);
		desc->vr_data = htole32(VR_ADDR_LO(txsegs[i].ds_addr));
		sc->vr_cdata.vr_tx_cnt++;
		VR_INC(prod, VR_TX_RING_CNT);
	}
	/* Update producer index. */
	sc->vr_cdata.vr_tx_prod = prod;

	prod = (prod + VR_TX_RING_CNT - 1) % VR_TX_RING_CNT;
	desc = &sc->vr_rdata.vr_tx_ring[prod];

	/*
	 * Set EOP on the last descriptor and request a Tx completion
	 * interrupt for every VR_TX_INTR_THRESH-th frame.
	 */
	VR_INC(sc->vr_cdata.vr_tx_pkts, VR_TX_INTR_THRESH);
	if (sc->vr_cdata.vr_tx_pkts == 0)
		desc->vr_ctl |= htole32(VR_TXCTL_LASTFRAG | VR_TXCTL_FINT);
	else
		desc->vr_ctl |= htole32(VR_TXCTL_LASTFRAG);

	/* Lastly turn the first descriptor ownership to hardware. */
	desc = &sc->vr_rdata.vr_tx_ring[si];
	desc->vr_status |= htole32(VR_TXSTAT_OWN);

	/* Sync descriptors. */
	bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
	    sc->vr_cdata.vr_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
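/*
 * Ordering sketch (informational): vr_encap() publishes a frame by
 * setting VR_TXSTAT_OWN on the first descriptor only after every
 * fragment's control word and buffer address have been written, and
 * syncs the ring afterwards.  Since the chip starts from the first
 * descriptor, this prevents it from racing ahead into a half-built
 * chain.
 */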
	/* Sync descriptors. */
	bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag,
	    sc->vr_cdata.vr_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

static void
vr_start(if_t ifp)
{
	struct vr_softc *sc;

	sc = if_getsoftc(ifp);
	VR_LOCK(sc);
	vr_start_locked(ifp);
	VR_UNLOCK(sc);
}

static void
vr_start_locked(if_t ifp)
{
	struct vr_softc *sc;
	struct mbuf *m_head;
	int enq;

	sc = if_getsoftc(ifp);

	VR_LOCK_ASSERT(sc);

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->vr_flags & VR_F_LINK) == 0)
		return;

	for (enq = 0; !if_sendq_empty(ifp) &&
	    sc->vr_cdata.vr_tx_cnt < VR_TX_RING_CNT - 2; ) {
		m_head = if_dequeue(ifp);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring.  If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (vr_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			if_sendq_prepend(ifp, m_head);
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/* Tell the chip to start transmitting. */
		VR_SETBIT(sc, VR_CR0, VR_CR0_TX_GO);
		/* Set a timeout in case the chip goes out to lunch. */
		sc->vr_watchdog_timer = 5;
	}
}

static void
vr_init(void *xsc)
{
	struct vr_softc *sc;

	sc = (struct vr_softc *)xsc;
	VR_LOCK(sc);
	vr_init_locked(sc);
	VR_UNLOCK(sc);
}

static void
vr_init_locked(struct vr_softc *sc)
{
	if_t ifp;
	struct mii_data *mii;
	bus_addr_t addr;
	int i;

	VR_LOCK_ASSERT(sc);

	ifp = sc->vr_ifp;
	mii = device_get_softc(sc->vr_miibus);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		return;

	/* Cancel pending I/O and free all RX/TX buffers. */
	vr_stop(sc);
	vr_reset(sc);

	/* Set our station address. */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VR_PAR0 + i, if_getlladdr(sc->vr_ifp)[i]);

	/* Set DMA size. */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD);

	/*
	 * BCR0 and BCR1 can override the RXCFG and TXCFG registers,
	 * so we must set both.
	 */
	VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH);
	VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES);

	VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH);
	VR_SETBIT(sc, VR_BCR1, vr_tx_threshold_tables[sc->vr_txthresh].bcr_cfg);

	VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH);
	VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES);

	VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH);
	VR_SETBIT(sc, VR_TXCFG, vr_tx_threshold_tables[sc->vr_txthresh].tx_cfg);

	/* Init circular RX list. */
	if (vr_rx_ring_init(sc) != 0) {
		device_printf(sc->vr_dev,
		    "initialization failed: no memory for rx buffers\n");
		vr_stop(sc);
		return;
	}

	/* Init Tx descriptors. */
	vr_tx_ring_init(sc);

	if ((sc->vr_quirks & VR_Q_CAM) != 0) {
		uint8_t vcam[2] = { 0, 0 };

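		/*
		 * On CAM-capable chips the filter state below is reset
		 * to a known baseline: tag insertion/stripping and VLAN
		 * filtering off, all CAM entries disabled, except VLAN
		 * CAM entry 0 (VLAN id 0), which is presumably what
		 * lets untagged traffic through.
		 */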
		/* Disable VLAN hardware tag insertion/stripping. */
		VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TXTAGEN | VR_TXCFG_RXTAGCTL);
		/* Disable VLAN hardware filtering. */
		VR_CLRBIT(sc, VR_BCR1, VR_BCR1_VLANFILT_ENB);
		/* Disable all CAM entries. */
		vr_cam_mask(sc, VR_MCAST_CAM, 0);
		vr_cam_mask(sc, VR_VLAN_CAM, 0);
		/* Enable the first VLAN CAM. */
		vr_cam_data(sc, VR_VLAN_CAM, 0, vcam);
		vr_cam_mask(sc, VR_VLAN_CAM, 1);
	}

	/*
	 * Set up receive filter.
	 */
	vr_set_filter(sc);

	/*
	 * Load the address of the RX ring.
	 */
	addr = VR_RX_RING_ADDR(sc, 0);
	CSR_WRITE_4(sc, VR_RXADDR, VR_ADDR_LO(addr));
	/*
	 * Load the address of the TX ring.
	 */
	addr = VR_TX_RING_ADDR(sc, 0);
	CSR_WRITE_4(sc, VR_TXADDR, VR_ADDR_LO(addr));
	/* Default: full-duplex, no Tx poll. */
	CSR_WRITE_1(sc, VR_CR1, VR_CR1_FULLDUPLEX | VR_CR1_TX_NOPOLL);

	/* Set flow-control parameters for Rhine III. */
	if (sc->vr_revid >= REV_ID_VT6105_A0) {
		/*
		 * Configure the Rx buffer count available for incoming
		 * packets.
		 * Even though the datasheet says almost nothing about
		 * this register, it should be updated whenever the
		 * driver adds new RX buffers to the controller.
		 * Otherwise, no XON frame is sent to the link partner
		 * even if the controller has enough RX buffers, and the
		 * host would be isolated from the network.
		 * The controller is not smart enough to know the number
		 * of available RX buffers, so the driver has to tell it
		 * how many RX buffers are posted.  In other words, this
		 * register works like a residue counter for RX buffers
		 * and should be initialized to the total number of RX
		 * buffers minus one before the RX MAC is enabled.  Note
		 * that the register is 8 bits wide, so it effectively
		 * limits the number of RX buffers the controller can be
		 * told about to 255.
		 */
		CSR_WRITE_1(sc, VR_FLOWCR0, VR_RX_RING_CNT - 1);
		/*
		 * Tx pause low threshold: 8 free receive buffers.
		 * Tx pause XON high threshold: 24 free receive buffers.
		 */
		CSR_WRITE_1(sc, VR_FLOWCR1,
		    VR_FLOWCR1_TXLO8 | VR_FLOWCR1_TXHI24 | VR_FLOWCR1_XONXOFF);
		/* Set Tx pause timer. */
		CSR_WRITE_2(sc, VR_PAUSETIMER, 0xffff);
	}

	/* Enable receiver and transmitter. */
	CSR_WRITE_1(sc, VR_CR0,
	    VR_CR0_START | VR_CR0_TX_ON | VR_CR0_RX_ON | VR_CR0_RX_GO);

	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
#ifdef DEVICE_POLLING
	/*
	 * Disable interrupts if we are polling.
	 */
	if (if_getcapenable(ifp) & IFCAP_POLLING)
		CSR_WRITE_2(sc, VR_IMR, 0);
	else
#endif
	/*
	 * Enable interrupts and disable MII intrs.
	 */
	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
	if (sc->vr_revid > REV_ID_VT6102_A)
		CSR_WRITE_2(sc, VR_MII_IMR, 0);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

	sc->vr_flags &= ~(VR_F_LINK | VR_F_TXPAUSE);
	mii_mediachg(mii);

	callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc);
}
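
/*
 * Note that vr_init_locked() leaves VR_F_LINK cleared, and
 * vr_start_locked() refuses to queue frames while it is clear, so
 * transmission resumes only after mii_mediachg() renegotiates the link
 * and the MII status-change callback (presumably) sets VR_F_LINK again.
 */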
/*
 * Set media options.
 */
static int
vr_ifmedia_upd(if_t ifp)
{
	struct vr_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = if_getsoftc(ifp);
	VR_LOCK(sc);
	mii = device_get_softc(sc->vr_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	sc->vr_flags &= ~(VR_F_LINK | VR_F_TXPAUSE);
	error = mii_mediachg(mii);
	VR_UNLOCK(sc);

	return (error);
}

/*
 * Report current media status.
 */
static void
vr_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
{
	struct vr_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->vr_miibus);
	VR_LOCK(sc);
	if ((if_getflags(ifp) & IFF_UP) == 0) {
		VR_UNLOCK(sc);
		return;
	}
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	VR_UNLOCK(sc);
}

static int
vr_ioctl(if_t ifp, u_long command, caddr_t data)
{
	struct vr_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	sc = if_getsoftc(ifp);
	ifr = (struct ifreq *)data;
	error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		VR_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				if ((if_getflags(ifp) ^ sc->vr_if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI))
					vr_set_filter(sc);
			} else {
				if ((sc->vr_flags & VR_F_DETACHED) == 0)
					vr_init_locked(sc);
			}
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				vr_stop(sc);
		}
		sc->vr_if_flags = if_getflags(ifp);
		VR_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		VR_LOCK(sc);
		vr_set_filter(sc);
		VR_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->vr_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(vr_poll, ifp);
				if (error != 0)
					break;
				VR_LOCK(sc);
				/* Disable interrupts. */
				CSR_WRITE_2(sc, VR_IMR, 0x0000);
				if_setcapenablebit(ifp, IFCAP_POLLING, 0);
				VR_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
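				/*
				 * Interrupts stay masked for as long as
				 * IFCAP_POLLING is enabled; deregistering
				 * restores the normal interrupt-driven
				 * mode below.
				 */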
				/* Enable interrupts. */
				VR_LOCK(sc);
				CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
				if_setcapenablebit(ifp, 0, IFCAP_POLLING);
				VR_UNLOCK(sc);
			}
		}
#endif /* DEVICE_POLLING */
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (IFCAP_TXCSUM & if_getcapabilities(ifp)) != 0) {
			if_togglecapenable(ifp, IFCAP_TXCSUM);
			if ((IFCAP_TXCSUM & if_getcapenable(ifp)) != 0)
				if_sethwassistbits(ifp, VR_CSUM_FEATURES, 0);
			else
				if_sethwassistbits(ifp, 0, VR_CSUM_FEATURES);
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (IFCAP_RXCSUM & if_getcapabilities(ifp)) != 0)
			if_togglecapenable(ifp, IFCAP_RXCSUM);
		if ((mask & IFCAP_WOL_UCAST) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_WOL_UCAST) != 0)
			if_togglecapenable(ifp, IFCAP_WOL_UCAST);
		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
		    (if_getcapabilities(ifp) & IFCAP_WOL_MAGIC) != 0)
			if_togglecapenable(ifp, IFCAP_WOL_MAGIC);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

static void
vr_watchdog(struct vr_softc *sc)
{
	if_t ifp;

	VR_LOCK_ASSERT(sc);

	if (sc->vr_watchdog_timer == 0 || --sc->vr_watchdog_timer)
		return;

	ifp = sc->vr_ifp;
	/*
	 * Reclaim transmitted frames first, as we don't request an
	 * interrupt for every packet.
	 */
	vr_txeof(sc);
	if (sc->vr_cdata.vr_tx_cnt == 0)
		return;

	if ((sc->vr_flags & VR_F_LINK) == 0) {
		if (bootverbose)
			if_printf(sc->vr_ifp, "watchdog timeout "
			    "(missed link)\n");
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
		vr_init_locked(sc);
		return;
	}

	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	if_printf(ifp, "watchdog timeout\n");

	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	vr_init_locked(sc);

	if (!if_sendq_empty(ifp))
		vr_start_locked(ifp);
}

static void
vr_tx_start(struct vr_softc *sc)
{
	bus_addr_t addr;
	uint8_t cmd;

	cmd = CSR_READ_1(sc, VR_CR0);
	if ((cmd & VR_CR0_TX_ON) == 0) {
		addr = VR_TX_RING_ADDR(sc, sc->vr_cdata.vr_tx_cons);
		CSR_WRITE_4(sc, VR_TXADDR, VR_ADDR_LO(addr));
		cmd |= VR_CR0_TX_ON;
		CSR_WRITE_1(sc, VR_CR0, cmd);
	}
	if (sc->vr_cdata.vr_tx_cnt != 0) {
		sc->vr_watchdog_timer = 5;
		VR_SETBIT(sc, VR_CR0, VR_CR0_TX_GO);
	}
}

static void
vr_rx_start(struct vr_softc *sc)
{
	bus_addr_t addr;
	uint8_t cmd;

	cmd = CSR_READ_1(sc, VR_CR0);
	if ((cmd & VR_CR0_RX_ON) == 0) {
		addr = VR_RX_RING_ADDR(sc, sc->vr_cdata.vr_rx_cons);
		CSR_WRITE_4(sc, VR_RXADDR, VR_ADDR_LO(addr));
		cmd |= VR_CR0_RX_ON;
		CSR_WRITE_1(sc, VR_CR0, cmd);
	}
	CSR_WRITE_1(sc, VR_CR0, cmd | VR_CR0_RX_GO);
}

static int
vr_tx_stop(struct vr_softc *sc)
{
	int i;
	uint8_t cmd;

	cmd = CSR_READ_1(sc, VR_CR0);
	if ((cmd & VR_CR0_TX_ON) != 0) {
		cmd &= ~VR_CR0_TX_ON;
		CSR_WRITE_1(sc, VR_CR0, cmd);
		for (i = VR_TIMEOUT; i > 0; i--) {
			DELAY(5);
			cmd = CSR_READ_1(sc, VR_CR0);
			if ((cmd & VR_CR0_TX_ON) == 0)
				break;
		}
		if (i == 0)
			return (ETIMEDOUT);
	}
	return (0);
}

static int
vr_rx_stop(struct vr_softc *sc)
{
	int i;
	uint8_t cmd;

	cmd = CSR_READ_1(sc, VR_CR0);
	if ((cmd & VR_CR0_RX_ON) != 0) {
		cmd &= ~VR_CR0_RX_ON;
		CSR_WRITE_1(sc, VR_CR0, cmd);
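		/*
		 * Poll until the receiver reports that it has actually
		 * turned off; each iteration waits 5 microseconds, so
		 * the total wait is bounded by VR_TIMEOUT iterations.
		 */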
		for (i = VR_TIMEOUT; i > 0; i--) {
			DELAY(5);
			cmd = CSR_READ_1(sc, VR_CR0);
			if ((cmd & VR_CR0_RX_ON) == 0)
				break;
		}
		if (i == 0)
			return (ETIMEDOUT);
	}
	return (0);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
vr_stop(struct vr_softc *sc)
{
	struct vr_txdesc *txd;
	struct vr_rxdesc *rxd;
	if_t ifp;
	int i;

	VR_LOCK_ASSERT(sc);

	ifp = sc->vr_ifp;
	sc->vr_watchdog_timer = 0;

	callout_stop(&sc->vr_stat_callout);
	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));

	CSR_WRITE_1(sc, VR_CR0, VR_CR0_STOP);
	if (vr_rx_stop(sc) != 0)
		device_printf(sc->vr_dev, "%s: Rx shutdown error\n", __func__);
	if (vr_tx_stop(sc) != 0)
		device_printf(sc->vr_dev, "%s: Tx shutdown error\n", __func__);
	/* Clear pending interrupts. */
	CSR_WRITE_2(sc, VR_ISR, 0xFFFF);
	CSR_WRITE_2(sc, VR_IMR, 0x0000);
	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);

	/*
	 * Free RX and TX mbufs still in the queues.
	 */
	for (i = 0; i < VR_RX_RING_CNT; i++) {
		rxd = &sc->vr_cdata.vr_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->vr_cdata.vr_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->vr_cdata.vr_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < VR_TX_RING_CNT; i++) {
		txd = &sc->vr_cdata.vr_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->vr_cdata.vr_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->vr_cdata.vr_tx_tag,
			    txd->tx_dmamap);
			m_freem(txd->tx_m);
			txd->tx_m = NULL;
		}
	}
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
vr_shutdown(device_t dev)
{

	return (vr_suspend(dev));
}

static int
vr_suspend(device_t dev)
{
	struct vr_softc *sc;

	sc = device_get_softc(dev);

	VR_LOCK(sc);
	vr_stop(sc);
	vr_setwol(sc);
	sc->vr_flags |= VR_F_SUSPENDED;
	VR_UNLOCK(sc);

	return (0);
}

static int
vr_resume(device_t dev)
{
	struct vr_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);

	VR_LOCK(sc);
	ifp = sc->vr_ifp;
	vr_clrwol(sc);
	vr_reset(sc);
	if (if_getflags(ifp) & IFF_UP)
		vr_init_locked(sc);

	sc->vr_flags &= ~VR_F_SUSPENDED;
	VR_UNLOCK(sc);

	return (0);
}

static void
vr_setwol(struct vr_softc *sc)
{
	if_t ifp;
	int pmc;
	uint16_t pmstat;
	uint8_t v;

	VR_LOCK_ASSERT(sc);

	if (sc->vr_revid < REV_ID_VT6102_A ||
	    pci_find_cap(sc->vr_dev, PCIY_PMG, &pmc) != 0)
		return;

	ifp = sc->vr_ifp;

	/* Clear WOL configuration. */
	CSR_WRITE_1(sc, VR_WOLCR_CLR, 0xFF);
	CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_SAB | VR_WOLCFG_SAM);
	CSR_WRITE_1(sc, VR_PWRCSR_CLR, 0xFF);
	CSR_WRITE_1(sc, VR_PWRCFG_CLR, VR_PWRCFG_WOLEN);
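	/*
	 * All wake-up sources are disarmed before the requested ones
	 * are re-enabled below, so a stale configuration left over from
	 * a previous run can't trigger a spurious PME.
	 */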
	if (sc->vr_revid > REV_ID_VT6105_B0) {
		/* Newer Rhine III chips support two additional patterns. */
		CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_PATTERN_PAGE);
		CSR_WRITE_1(sc, VR_TESTREG_CLR, 3);
		CSR_WRITE_1(sc, VR_PWRCSR1_CLR, 3);
	}
	if ((if_getcapenable(ifp) & IFCAP_WOL_UCAST) != 0)
		CSR_WRITE_1(sc, VR_WOLCR_SET, VR_WOLCR_UCAST);
	if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0)
		CSR_WRITE_1(sc, VR_WOLCR_SET, VR_WOLCR_MAGIC);
	/*
	 * It seems that multicast wakeup frames require programming
	 * the pattern registers and a valid CRC as well as a pattern
	 * mask for each pattern.  While it's possible to set up such
	 * patterns, it would complicate the WOL configuration, so
	 * multicast wakeup frames are ignored.
	 */
	if ((if_getcapenable(ifp) & IFCAP_WOL) != 0) {
		CSR_WRITE_1(sc, VR_WOLCFG_SET, VR_WOLCFG_SAB | VR_WOLCFG_SAM);
		v = CSR_READ_1(sc, VR_STICKHW);
		CSR_WRITE_1(sc, VR_STICKHW, v | VR_STICKHW_WOL_ENB);
		CSR_WRITE_1(sc, VR_PWRCFG_SET, VR_PWRCFG_WOLEN);
	}

	/* Put the hardware into sleep mode. */
	v = CSR_READ_1(sc, VR_STICKHW);
	v |= VR_STICKHW_DS0 | VR_STICKHW_DS1;
	CSR_WRITE_1(sc, VR_STICKHW, v);

	/* Request PME if WOL is requested. */
	pmstat = pci_read_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
}

static void
vr_clrwol(struct vr_softc *sc)
{
	uint8_t v;

	VR_LOCK_ASSERT(sc);

	if (sc->vr_revid < REV_ID_VT6102_A)
		return;

	/* Take hardware out of sleep. */
	v = CSR_READ_1(sc, VR_STICKHW);
	v &= ~(VR_STICKHW_DS0 | VR_STICKHW_DS1 | VR_STICKHW_WOL_ENB);
	CSR_WRITE_1(sc, VR_STICKHW, v);

	/* Clear WOL configuration as it may interfere with normal operation. */
	CSR_WRITE_1(sc, VR_WOLCR_CLR, 0xFF);
	CSR_WRITE_1(sc, VR_WOLCFG_CLR,
	    VR_WOLCFG_SAB | VR_WOLCFG_SAM | VR_WOLCFG_PMEOVR);
	CSR_WRITE_1(sc, VR_PWRCSR_CLR, 0xFF);
	CSR_WRITE_1(sc, VR_PWRCFG_CLR, VR_PWRCFG_WOLEN);
	if (sc->vr_revid > REV_ID_VT6105_B0) {
		/* Newer Rhine III chips support two additional patterns. */
		CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_PATTERN_PAGE);
		CSR_WRITE_1(sc, VR_TESTREG_CLR, 3);
		CSR_WRITE_1(sc, VR_PWRCSR1_CLR, 3);
	}
}

static int
vr_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
	struct vr_softc *sc;
	struct vr_statistics *stat;
	int error;
	int result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error != 0 || req->newptr == NULL)
		return (error);

	if (result == 1) {
		sc = (struct vr_softc *)arg1;
		stat = &sc->vr_stat;

		printf("%s statistics:\n", device_get_nameunit(sc->vr_dev));
		printf("Outbound good frames : %ju\n",
		    (uintmax_t)stat->tx_ok);
		printf("Inbound good frames : %ju\n",
		    (uintmax_t)stat->rx_ok);
		printf("Outbound errors : %u\n", stat->tx_errors);
		printf("Inbound errors : %u\n", stat->rx_errors);
		printf("Inbound no buffers : %u\n", stat->rx_no_buffers);
		printf("Inbound no mbuf clusters: %u\n", stat->rx_no_mbufs);
		printf("Inbound FIFO overflows : %u\n",
		    stat->rx_fifo_overflows);
		printf("Inbound CRC errors : %u\n", stat->rx_crc_errors);
		printf("Inbound frame alignment errors : %u\n",
		    stat->rx_alignment);
		printf("Inbound giant frames : %u\n", stat->rx_giants);
		printf("Inbound runt frames : %u\n", stat->rx_runts);
		printf("Outbound aborted with excessive collisions : %u\n",
		    stat->tx_abort);
		printf("Outbound collisions : %u\n", stat->tx_collisions);
		printf("Outbound late collisions : %u\n",
		    stat->tx_late_collisions);
		printf("Outbound underrun : %u\n", stat->tx_underrun);
		printf("PCI bus errors : %u\n", stat->bus_errors);
		printf("driver restarted due to Rx/Tx shutdown failure : %u\n",
		    stat->num_restart);
	}

	return (error);
}