/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for DM&P Electronics, Inc, Vortex86 RDC R6040 FastEthernet. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_llc.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>

#include <dev/vte/if_vtereg.h>
#include <dev/vte/if_vtevar.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

MODULE_DEPEND(vte, pci, 1, 1, 1);
MODULE_DEPEND(vte, ether, 1, 1, 1);
MODULE_DEPEND(vte, miibus, 1, 1, 1);

/* Tunables. */
static int tx_deep_copy = 1;
TUNABLE_INT("hw.vte.tx_deep_copy", &tx_deep_copy);
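/*
 * When hw.vte.tx_deep_copy is non-zero (the default), fragmented
 * outgoing frames are copied into pre-allocated mbuf clusters before
 * DMA instead of being defragmented with m_defrag(9); single-buffer
 * frames bypass the copy.  See vte_encap() for the reasoning.
 */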
/*
 * Devices supported by this driver.
 */
static const struct vte_ident vte_ident_table[] = {
	{ VENDORID_RDC, DEVICEID_RDC_R6040, "RDC R6040 FastEthernet"},
	{ 0, 0, NULL}
};

static int	vte_attach(device_t);
static int	vte_detach(device_t);
static int	vte_dma_alloc(struct vte_softc *);
static void	vte_dma_free(struct vte_softc *);
static void	vte_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static struct vte_txdesc *
		vte_encap(struct vte_softc *, struct mbuf **);
static const struct vte_ident *
		vte_find_ident(device_t);
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf *
		vte_fixup_rx(if_t, struct mbuf *);
#endif
static void	vte_get_macaddr(struct vte_softc *);
static void	vte_init(void *);
static void	vte_init_locked(struct vte_softc *);
static int	vte_init_rx_ring(struct vte_softc *);
static int	vte_init_tx_ring(struct vte_softc *);
static void	vte_intr(void *);
static int	vte_ioctl(if_t, u_long, caddr_t);
static uint64_t	vte_get_counter(if_t, ift_counter);
static void	vte_mac_config(struct vte_softc *);
static int	vte_miibus_readreg(device_t, int, int);
static void	vte_miibus_statchg(device_t);
static int	vte_miibus_writereg(device_t, int, int, int);
static int	vte_mediachange(if_t);
static int	vte_mediachange_locked(if_t);
static void	vte_mediastatus(if_t, struct ifmediareq *);
static int	vte_newbuf(struct vte_softc *, struct vte_rxdesc *);
static int	vte_probe(device_t);
static void	vte_reset(struct vte_softc *);
static int	vte_resume(device_t);
static void	vte_rxeof(struct vte_softc *);
static void	vte_rxfilter(struct vte_softc *);
static int	vte_shutdown(device_t);
static void	vte_start(if_t);
static void	vte_start_locked(struct vte_softc *);
static void	vte_start_mac(struct vte_softc *);
static void	vte_stats_clear(struct vte_softc *);
static void	vte_stats_update(struct vte_softc *);
static void	vte_stop(struct vte_softc *);
static void	vte_stop_mac(struct vte_softc *);
static int	vte_suspend(device_t);
static void	vte_sysctl_node(struct vte_softc *);
static void	vte_tick(void *);
static void	vte_txeof(struct vte_softc *);
static void	vte_watchdog(struct vte_softc *);
static int	sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int	sysctl_hw_vte_int_mod(SYSCTL_HANDLER_ARGS);

static device_method_t vte_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		vte_probe),
	DEVMETHOD(device_attach,	vte_attach),
	DEVMETHOD(device_detach,	vte_detach),
	DEVMETHOD(device_shutdown,	vte_shutdown),
	DEVMETHOD(device_suspend,	vte_suspend),
	DEVMETHOD(device_resume,	vte_resume),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	vte_miibus_readreg),
	DEVMETHOD(miibus_writereg,	vte_miibus_writereg),
	DEVMETHOD(miibus_statchg,	vte_miibus_statchg),

	DEVMETHOD_END
};

static driver_t vte_driver = {
	"vte",
	vte_methods,
	sizeof(struct vte_softc)
};

DRIVER_MODULE(vte, pci, vte_driver, 0, 0);
DRIVER_MODULE(miibus, vte, miibus_driver, 0, 0);
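/*
 * PHY registers are reached through the MAC's MDIO interface: a command
 * is written to VTE_MMDIO and the MMDIO_READ/MMDIO_WRITE bit is polled
 * until the controller clears it to signal completion (up to
 * VTE_PHY_TIMEOUT iterations of 5us each).
 */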
static int
vte_miibus_readreg(device_t dev, int phy, int reg)
{
	struct vte_softc *sc;
	int i;

	sc = device_get_softc(dev);

	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_READ |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_READ) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->vte_dev, "phy read timeout : %d\n", reg);
		return (0);
	}

	return (CSR_READ_2(sc, VTE_MMRD));
}

static int
vte_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct vte_softc *sc;
	int i;

	sc = device_get_softc(dev);

	CSR_WRITE_2(sc, VTE_MMWD, val);
	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_WRITE |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_WRITE) == 0)
			break;
	}

	if (i == 0)
		device_printf(sc->vte_dev, "phy write timeout : %d\n", reg);

	return (0);
}

static void
vte_miibus_statchg(device_t dev)
{
	struct vte_softc *sc;
	struct mii_data *mii;
	if_t ifp;
	uint16_t val;

	sc = device_get_softc(dev);

	mii = device_get_softc(sc->vte_miibus);
	ifp = sc->vte_ifp;
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	sc->vte_flags &= ~VTE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->vte_flags |= VTE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Program MACs with resolved duplex and flow control. */
	if ((sc->vte_flags & VTE_FLAG_LINK) != 0) {
		/*
		 * Timer waiting time : (63 + TIMER * 64) MII clock.
		 * MII clock : 25MHz (100Mbps) or 2.5MHz (10Mbps).
		 */
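		/*
		 * Worked out: TIMER = 18 at 100Mbps gives (63 + 18 * 64)
		 * = 1215 MII clocks / 25MHz = 48.6us; TIMER = 1 at
		 * 10Mbps gives (63 + 1 * 64) = 127 clocks / 2.5MHz =
		 * 50.8us, matching the per-speed comments below.
		 */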
		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		val |= sc->vte_int_rx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
		CSR_WRITE_2(sc, VTE_MRICR, val);

		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		val |= sc->vte_int_tx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
		CSR_WRITE_2(sc, VTE_MTICR, val);

		vte_mac_config(sc);
		vte_start_mac(sc);
	}
}

static void
vte_mediastatus(if_t ifp, struct ifmediareq *ifmr)
{
	struct vte_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	VTE_LOCK(sc);
	if ((if_getflags(ifp) & IFF_UP) == 0) {
		VTE_UNLOCK(sc);
		return;
	}
	mii = device_get_softc(sc->vte_miibus);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
	VTE_UNLOCK(sc);
}

static int
vte_mediachange(if_t ifp)
{
	struct vte_softc *sc;
	int error;

	sc = if_getsoftc(ifp);
	VTE_LOCK(sc);
	error = vte_mediachange_locked(ifp);
	VTE_UNLOCK(sc);
	return (error);
}

static int
vte_mediachange_locked(if_t ifp)
{
	struct vte_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->vte_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	error = mii_mediachg(mii);

	return (error);
}

static const struct vte_ident *
vte_find_ident(device_t dev)
{
	const struct vte_ident *ident;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	for (ident = vte_ident_table; ident->name != NULL; ident++) {
		if (vendor == ident->vendorid && devid == ident->deviceid)
			return (ident);
	}

	return (NULL);
}

static int
vte_probe(device_t dev)
{
	const struct vte_ident *ident;

	ident = vte_find_ident(dev);
	if (ident != NULL) {
		device_set_desc(dev, ident->name);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

static void
vte_get_macaddr(struct vte_softc *sc)
{
	uint16_t mid;

	/*
	 * It seems there is no way to reload the station address; it
	 * is supposed to have been set by the BIOS.
	 */
	mid = CSR_READ_2(sc, VTE_MID0L);
	sc->vte_eaddr[0] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[1] = (mid >> 8) & 0xFF;
	mid = CSR_READ_2(sc, VTE_MID0M);
	sc->vte_eaddr[2] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[3] = (mid >> 8) & 0xFF;
	mid = CSR_READ_2(sc, VTE_MID0H);
	sc->vte_eaddr[4] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[5] = (mid >> 8) & 0xFF;
}
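/*
 * Note that vte_attach() funnels all failure paths through the fail:
 * label, which calls vte_detach(); vte_detach() therefore has to cope
 * with a partially initialized softc.
 */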
static int
vte_attach(device_t dev)
{
	struct vte_softc *sc;
	if_t ifp;
	uint16_t macid;
	int error, rid;

	error = 0;
	sc = device_get_softc(dev);
	sc->vte_dev = dev;

	mtx_init(&sc->vte_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->vte_tick_ch, &sc->vte_mtx, 0);
	sc->vte_ident = vte_find_ident(dev);

	/* Map the device. */
	pci_enable_busmaster(dev);
	sc->vte_res_id = PCIR_BAR(1);
	sc->vte_res_type = SYS_RES_MEMORY;
	sc->vte_res = bus_alloc_resource_any(dev, sc->vte_res_type,
	    &sc->vte_res_id, RF_ACTIVE);
	if (sc->vte_res == NULL) {
		sc->vte_res_id = PCIR_BAR(0);
		sc->vte_res_type = SYS_RES_IOPORT;
		sc->vte_res = bus_alloc_resource_any(dev, sc->vte_res_type,
		    &sc->vte_res_id, RF_ACTIVE);
		if (sc->vte_res == NULL) {
			device_printf(dev, "cannot map memory/ports.\n");
			mtx_destroy(&sc->vte_mtx);
			return (ENXIO);
		}
	}
	if (bootverbose) {
		device_printf(dev, "using %s space register mapping\n",
		    sc->vte_res_type == SYS_RES_MEMORY ? "memory" : "I/O");
		device_printf(dev, "MAC Identifier : 0x%04x\n",
		    CSR_READ_2(sc, VTE_MACID));
		macid = CSR_READ_2(sc, VTE_MACID_REV);
		device_printf(dev, "MAC Id. 0x%02x, Rev. 0x%02x\n",
		    (macid & VTE_MACID_MASK) >> VTE_MACID_SHIFT,
		    (macid & VTE_MACID_REV_MASK) >> VTE_MACID_REV_SHIFT);
	}

	rid = 0;
	sc->vte_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->vte_irq == NULL) {
		device_printf(dev, "cannot allocate IRQ resources.\n");
		error = ENXIO;
		goto fail;
	}

	/* Reset the ethernet controller. */
	vte_reset(sc);

	if ((error = vte_dma_alloc(sc)) != 0)
		goto fail;

	/* Create device sysctl node. */
	vte_sysctl_node(sc);

	/* Load station address. */
	vte_get_macaddr(sc);

	ifp = sc->vte_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure.\n");
		error = ENXIO;
		goto fail;
	}

	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setioctlfn(ifp, vte_ioctl);
	if_setstartfn(ifp, vte_start);
	if_setinitfn(ifp, vte_init);
	if_setgetcounterfn(ifp, vte_get_counter);
	if_setsendqlen(ifp, VTE_TX_RING_CNT - 1);
	if_setsendqready(ifp);

	/*
	 * Set up MII bus.
	 * The BIOS should have initialized VTE_MPSCCR to catch PHY
	 * status changes, so the driver may be able to extract the
	 * configured PHY address.  Since it's common to see the BIOS
	 * fail to initialize the register (including the sample board
	 * I have), let mii(4) probe it.  This is more reliable than
	 * relying on the BIOS's initialization.
	 *
	 * Advertising flow control capability to mii(4) was
	 * intentionally disabled due to severe problems in TX pause
	 * frame generation.  See vte_rxeof() for more details.
	 */
	error = mii_attach(dev, &sc->vte_miibus, ifp, vte_mediachange,
	    vte_mediastatus, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	ether_ifattach(ifp, sc->vte_eaddr);

	/* VLAN capability setup. */
	if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
	if_setcapenable(ifp, if_getcapabilities(ifp));
	/* Tell the upper layer we support VLAN over-sized frames. */
	if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));

	error = bus_setup_intr(dev, sc->vte_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, vte_intr, sc, &sc->vte_intrhand);
	if (error != 0) {
		device_printf(dev, "could not set up interrupt handler.\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error != 0)
		vte_detach(dev);

	return (error);
}
static int
vte_detach(device_t dev)
{
	struct vte_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);

	ifp = sc->vte_ifp;
	if (device_is_attached(dev)) {
		VTE_LOCK(sc);
		vte_stop(sc);
		VTE_UNLOCK(sc);
		callout_drain(&sc->vte_tick_ch);
		ether_ifdetach(ifp);
	}

	if (sc->vte_miibus != NULL) {
		device_delete_child(dev, sc->vte_miibus);
		sc->vte_miibus = NULL;
	}
	bus_generic_detach(dev);

	if (sc->vte_intrhand != NULL) {
		bus_teardown_intr(dev, sc->vte_irq, sc->vte_intrhand);
		sc->vte_intrhand = NULL;
	}
	if (sc->vte_irq != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vte_irq);
		sc->vte_irq = NULL;
	}
	if (sc->vte_res != NULL) {
		bus_release_resource(dev, sc->vte_res_type, sc->vte_res_id,
		    sc->vte_res);
		sc->vte_res = NULL;
	}
	if (ifp != NULL) {
		if_free(ifp);
		sc->vte_ifp = NULL;
	}
	vte_dma_free(sc);
	mtx_destroy(&sc->vte_mtx);

	return (0);
}
#define	VTE_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)

static void
vte_sysctl_node(struct vte_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *parent;
	struct sysctl_oid *tree;
	struct vte_hw_stats *stats;
	int error;

	stats = &sc->vte_stats;
	ctx = device_get_sysctl_ctx(sc->vte_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vte_dev));

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_rx_mod",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    &sc->vte_int_rx_mod, 0, sysctl_hw_vte_int_mod, "I",
	    "vte RX interrupt moderation");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_tx_mod",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    &sc->vte_int_tx_mod, 0, sysctl_hw_vte_int_mod, "I",
	    "vte TX interrupt moderation");
	/* Pull in device tunables. */
	sc->vte_int_rx_mod = VTE_IM_RX_BUNDLE_DEFAULT;
	error = resource_int_value(device_get_name(sc->vte_dev),
	    device_get_unit(sc->vte_dev), "int_rx_mod", &sc->vte_int_rx_mod);
	if (error == 0) {
		if (sc->vte_int_rx_mod < VTE_IM_BUNDLE_MIN ||
		    sc->vte_int_rx_mod > VTE_IM_BUNDLE_MAX) {
			device_printf(sc->vte_dev, "int_rx_mod value out of "
			    "range; using default: %d\n",
			    VTE_IM_RX_BUNDLE_DEFAULT);
			sc->vte_int_rx_mod = VTE_IM_RX_BUNDLE_DEFAULT;
		}
	}

	sc->vte_int_tx_mod = VTE_IM_TX_BUNDLE_DEFAULT;
	error = resource_int_value(device_get_name(sc->vte_dev),
	    device_get_unit(sc->vte_dev), "int_tx_mod", &sc->vte_int_tx_mod);
	if (error == 0) {
		if (sc->vte_int_tx_mod < VTE_IM_BUNDLE_MIN ||
		    sc->vte_int_tx_mod > VTE_IM_BUNDLE_MAX) {
			device_printf(sc->vte_dev, "int_tx_mod value out of "
			    "range; using default: %d\n",
			    VTE_IM_TX_BUNDLE_DEFAULT);
			sc->vte_int_tx_mod = VTE_IM_TX_BUNDLE_DEFAULT;
		}
	}

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "VTE statistics");
	parent = SYSCTL_CHILDREN(tree);

	/* RX statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	VTE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->rx_frames, "Good frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
	    &stats->rx_bcast_frames, "Good broadcast frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
	    &stats->rx_mcast_frames, "Good multicast frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "runt",
	    &stats->rx_runts, "Too short frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
	    &stats->rx_crcerrs, "CRC errors");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "long_frames",
	    &stats->rx_long_frames,
	    "Frames that have longer length than maximum packet length");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "fifo_full",
	    &stats->rx_fifo_full, "FIFO full");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "desc_unavail",
	    &stats->rx_desc_unavail, "Descriptor unavailable frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->rx_pause_frames, "Pause control frames");

	/* TX statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TX MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	VTE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->tx_frames, "Good frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "underruns",
	    &stats->tx_underruns, "FIFO underruns");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
	    &stats->tx_late_colls, "Late collisions");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->tx_pause_frames, "Pause control frames");
}

#undef VTE_SYSCTL_STAT_ADD32

struct vte_dmamap_arg {
	bus_addr_t	vte_busaddr;
};

static void
vte_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct vte_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	ctx = (struct vte_dmamap_arg *)arg;
	ctx->vte_busaddr = segs[0].ds_addr;
}
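/*
 * All rings and buffers are allocated below 4GB (BUS_SPACE_MAXADDR_32BIT
 * on the parent tags): the controller's descriptor base address
 * registers are two 16-bit halves (see VTE_MTDSA1/VTE_MTDSA0 and
 * VTE_MRDSA1/VTE_MRDSA0 in vte_init_locked()), so only 32-bit DMA
 * addresses can be programmed.
 */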
static int
vte_dma_alloc(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	struct vte_dmamap_arg ctx;
	int error, i;

	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->vte_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_parent_tag);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not create parent DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for TX descriptor ring. */
	error = bus_dma_tag_create(
	    sc->vte_cdata.vte_parent_tag, /* parent */
	    VTE_TX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    VTE_TX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    VTE_TX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not create TX ring DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for RX free descriptor ring. */
	error = bus_dma_tag_create(
	    sc->vte_cdata.vte_parent_tag, /* parent */
	    VTE_RX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    VTE_RX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    VTE_RX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not create RX ring DMA tag.\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for TX ring. */
	error = bus_dmamem_alloc(sc->vte_cdata.vte_tx_ring_tag,
	    (void **)&sc->vte_cdata.vte_tx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->vte_cdata.vte_tx_ring_map);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not allocate DMA'able memory for TX ring.\n");
		goto fail;
	}
	ctx.vte_busaddr = 0;
	error = bus_dmamap_load(sc->vte_cdata.vte_tx_ring_tag,
	    sc->vte_cdata.vte_tx_ring_map, sc->vte_cdata.vte_tx_ring,
	    VTE_TX_RING_SZ, vte_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.vte_busaddr == 0) {
		device_printf(sc->vte_dev,
		    "could not load DMA'able memory for TX ring.\n");
		goto fail;
	}
	sc->vte_cdata.vte_tx_ring_paddr = ctx.vte_busaddr;

	/* Allocate DMA'able memory and load the DMA map for RX ring. */
	error = bus_dmamem_alloc(sc->vte_cdata.vte_rx_ring_tag,
	    (void **)&sc->vte_cdata.vte_rx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->vte_cdata.vte_rx_ring_map);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not allocate DMA'able memory for RX ring.\n");
		goto fail;
	}
	ctx.vte_busaddr = 0;
	error = bus_dmamap_load(sc->vte_cdata.vte_rx_ring_tag,
	    sc->vte_cdata.vte_rx_ring_map, sc->vte_cdata.vte_rx_ring,
	    VTE_RX_RING_SZ, vte_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.vte_busaddr == 0) {
		device_printf(sc->vte_dev,
		    "could not load DMA'able memory for RX ring.\n");
		goto fail;
	}
	sc->vte_cdata.vte_rx_ring_paddr = ctx.vte_busaddr;

	/* Create TX buffer parent tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->vte_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_buffer_tag);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not create parent buffer DMA tag.\n");
		goto fail;
	}
	/* Create DMA tag for TX buffers. */
	error = bus_dma_tag_create(
	    sc->vte_cdata.vte_buffer_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_tx_tag);
	if (error != 0) {
		device_printf(sc->vte_dev, "could not create TX DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for RX buffers. */
	error = bus_dma_tag_create(
	    sc->vte_cdata.vte_buffer_tag, /* parent */
	    VTE_RX_BUF_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_rx_tag);
	if (error != 0) {
		device_printf(sc->vte_dev, "could not create RX DMA tag.\n");
		goto fail;
	}
	/* Create DMA maps for TX buffers. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->vte_cdata.vte_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->vte_dev,
			    "could not create TX dmamap.\n");
			goto fail;
		}
	}
	/* Create DMA maps for RX buffers. */
	if ((error = bus_dmamap_create(sc->vte_cdata.vte_rx_tag, 0,
	    &sc->vte_cdata.vte_rx_sparemap)) != 0) {
		device_printf(sc->vte_dev,
		    "could not create spare RX dmamap.\n");
		goto fail;
	}
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->vte_cdata.vte_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->vte_dev,
			    "could not create RX dmamap.\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
vte_dma_free(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	/* TX buffers. */
	if (sc->vte_cdata.vte_tx_tag != NULL) {
		for (i = 0; i < VTE_TX_RING_CNT; i++) {
			txd = &sc->vte_cdata.vte_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(sc->vte_cdata.vte_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->vte_cdata.vte_tx_tag);
		sc->vte_cdata.vte_tx_tag = NULL;
	}
	/* RX buffers */
	if (sc->vte_cdata.vte_rx_tag != NULL) {
		for (i = 0; i < VTE_RX_RING_CNT; i++) {
			rxd = &sc->vte_cdata.vte_rxdesc[i];
			if (rxd->rx_dmamap != NULL) {
				bus_dmamap_destroy(sc->vte_cdata.vte_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->vte_cdata.vte_rx_sparemap != NULL) {
			bus_dmamap_destroy(sc->vte_cdata.vte_rx_tag,
			    sc->vte_cdata.vte_rx_sparemap);
			sc->vte_cdata.vte_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->vte_cdata.vte_rx_tag);
		sc->vte_cdata.vte_rx_tag = NULL;
	}
	/* TX descriptor ring. */
	if (sc->vte_cdata.vte_tx_ring_tag != NULL) {
		if (sc->vte_cdata.vte_tx_ring_paddr != 0)
			bus_dmamap_unload(sc->vte_cdata.vte_tx_ring_tag,
			    sc->vte_cdata.vte_tx_ring_map);
		if (sc->vte_cdata.vte_tx_ring != NULL)
			bus_dmamem_free(sc->vte_cdata.vte_tx_ring_tag,
			    sc->vte_cdata.vte_tx_ring,
			    sc->vte_cdata.vte_tx_ring_map);
		sc->vte_cdata.vte_tx_ring = NULL;
		sc->vte_cdata.vte_tx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->vte_cdata.vte_tx_ring_tag);
		sc->vte_cdata.vte_tx_ring_tag = NULL;
	}
	/* RX ring. */
	if (sc->vte_cdata.vte_rx_ring_tag != NULL) {
		if (sc->vte_cdata.vte_rx_ring_paddr != 0)
			bus_dmamap_unload(sc->vte_cdata.vte_rx_ring_tag,
			    sc->vte_cdata.vte_rx_ring_map);
		if (sc->vte_cdata.vte_rx_ring != NULL)
			bus_dmamem_free(sc->vte_cdata.vte_rx_ring_tag,
			    sc->vte_cdata.vte_rx_ring,
			    sc->vte_cdata.vte_rx_ring_map);
		sc->vte_cdata.vte_rx_ring = NULL;
		sc->vte_cdata.vte_rx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->vte_cdata.vte_rx_ring_tag);
		sc->vte_cdata.vte_rx_ring_tag = NULL;
	}
	if (sc->vte_cdata.vte_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->vte_cdata.vte_buffer_tag);
		sc->vte_cdata.vte_buffer_tag = NULL;
	}
	if (sc->vte_cdata.vte_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->vte_cdata.vte_parent_tag);
		sc->vte_cdata.vte_parent_tag = NULL;
	}
}
static int
vte_shutdown(device_t dev)
{

	return (vte_suspend(dev));
}

static int
vte_suspend(device_t dev)
{
	struct vte_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);

	VTE_LOCK(sc);
	ifp = sc->vte_ifp;
	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		vte_stop(sc);
	VTE_UNLOCK(sc);

	return (0);
}

static int
vte_resume(device_t dev)
{
	struct vte_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);

	VTE_LOCK(sc);
	ifp = sc->vte_ifp;
	if ((if_getflags(ifp) & IFF_UP) != 0) {
		if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
		vte_init_locked(sc);
	}
	VTE_UNLOCK(sc);

	return (0);
}

static struct vte_txdesc *
vte_encap(struct vte_softc *sc, struct mbuf **m_head)
{
	struct vte_txdesc *txd;
	struct mbuf *m, *n;
	bus_dma_segment_t txsegs[1];
	int copy, error, nsegs, padlen;

	VTE_LOCK_ASSERT(sc);

	M_ASSERTPKTHDR((*m_head));

	txd = &sc->vte_cdata.vte_txdesc[sc->vte_cdata.vte_tx_prod];
	m = *m_head;
	/*
	 * The controller doesn't auto-pad, so we have to pad short
	 * frames out to the minimum frame length.
	 */
	if (m->m_pkthdr.len < VTE_MIN_FRAMELEN)
		padlen = VTE_MIN_FRAMELEN - m->m_pkthdr.len;
	else
		padlen = 0;

	/*
	 * The controller does not support multi-fragmented TX buffers
	 * and spends most of its TX processing time de-fragmenting
	 * them.  Either a faster CPU or a more advanced controller DMA
	 * engine would be required to speed up TX path processing.
	 * To mitigate the de-fragmenting issue, perform a deep copy
	 * from the fragmented mbuf chain to a pre-allocated mbuf
	 * cluster at the extra cost of kernel memory.  For frames that
	 * are composed of a single TX buffer, the deep copy is
	 * bypassed.
	 */
	if (tx_deep_copy != 0) {
		copy = 0;
		if (m->m_next != NULL)
			copy++;
		if (padlen > 0 && (M_WRITABLE(m) == 0 ||
		    padlen > M_TRAILINGSPACE(m)))
			copy++;
		if (copy != 0) {
			/* Avoid expensive m_defrag(9) and do deep copy. */
			n = sc->vte_cdata.vte_txmbufs[sc->vte_cdata.vte_tx_prod];
			m_copydata(m, 0, m->m_pkthdr.len, mtod(n, char *));
			n->m_pkthdr.len = m->m_pkthdr.len;
			n->m_len = m->m_pkthdr.len;
			m = n;
			txd->tx_flags |= VTE_TXMBUF;
		}

		if (padlen > 0) {
			/* Zero out the bytes in the pad area. */
			bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
			m->m_pkthdr.len += padlen;
			m->m_len = m->m_pkthdr.len;
		}
	} else {
		if (M_WRITABLE(m) == 0) {
			if (m->m_next != NULL || padlen > 0) {
				/* Get a writable copy. */
				m = m_dup(*m_head, M_NOWAIT);
				/* Release original mbuf chains. */
				m_freem(*m_head);
				if (m == NULL) {
					*m_head = NULL;
					return (NULL);
				}
				*m_head = m;
			}
		}

		if (m->m_next != NULL) {
			m = m_defrag(*m_head, M_NOWAIT);
			if (m == NULL) {
				m_freem(*m_head);
				*m_head = NULL;
				return (NULL);
			}
			*m_head = m;
		}

		if (padlen > 0) {
			if (M_TRAILINGSPACE(m) < padlen) {
				m = m_defrag(*m_head, M_NOWAIT);
				if (m == NULL) {
					m_freem(*m_head);
					*m_head = NULL;
					return (NULL);
				}
				*m_head = m;
			}
			/* Zero out the bytes in the pad area. */
			bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
			m->m_pkthdr.len += padlen;
			m->m_len = m->m_pkthdr.len;
		}
	}

	error = bus_dmamap_load_mbuf_sg(sc->vte_cdata.vte_tx_tag,
	    txd->tx_dmamap, m, txsegs, &nsegs, 0);
	if (error != 0) {
		txd->tx_flags &= ~VTE_TXMBUF;
		return (NULL);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
	bus_dmamap_sync(sc->vte_cdata.vte_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	txd->tx_desc->dtlen = htole16(VTE_TX_LEN(txsegs[0].ds_len));
	txd->tx_desc->dtbp = htole32(txsegs[0].ds_addr);
	sc->vte_cdata.vte_tx_cnt++;
	/* Update producer index. */
	VTE_DESC_INC(sc->vte_cdata.vte_tx_prod, VTE_TX_RING_CNT);

	/* Finally hand over ownership to the controller. */
	txd->tx_desc->dtst = htole16(VTE_DTST_TX_OWN);
	txd->tx_m = m;

	return (txd);
}
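/*
 * A txd marked VTE_TXMBUF points at one of the driver-owned clusters in
 * vte_txmbufs[]: the caller's mbuf chain is freed right away in
 * vte_start_locked() and the cluster itself is kept for reuse, which is
 * why vte_txeof() only frees tx_m when the flag is clear.
 */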
static void
vte_start(if_t ifp)
{
	struct vte_softc *sc;

	sc = if_getsoftc(ifp);
	VTE_LOCK(sc);
	vte_start_locked(sc);
	VTE_UNLOCK(sc);
}

static void
vte_start_locked(struct vte_softc *sc)
{
	if_t ifp;
	struct vte_txdesc *txd;
	struct mbuf *m_head;
	int enq;

	ifp = sc->vte_ifp;

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->vte_flags & VTE_FLAG_LINK) == 0)
		return;

	for (enq = 0; !if_sendq_empty(ifp); ) {
		/* Reserve one free TX descriptor. */
		if (sc->vte_cdata.vte_tx_cnt >= VTE_TX_RING_CNT - 1) {
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}
		m_head = if_dequeue(ifp);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring.  If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if ((txd = vte_encap(sc, &m_head)) == NULL) {
			if (m_head != NULL)
				if_sendq_prepend(ifp, m_head);
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this
		 * frame to it.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
		/* Free consumed TX frame. */
		if ((txd->tx_flags & VTE_TXMBUF) != 0)
			m_freem(m_head);
	}

	if (enq > 0) {
		bus_dmamap_sync(sc->vte_cdata.vte_tx_ring_tag,
		    sc->vte_cdata.vte_tx_ring_map, BUS_DMASYNC_PREREAD |
		    BUS_DMASYNC_PREWRITE);
		CSR_WRITE_2(sc, VTE_TX_POLL, TX_POLL_START);
		sc->vte_watchdog_timer = VTE_TX_TIMEOUT;
	}
}

static void
vte_watchdog(struct vte_softc *sc)
{
	if_t ifp;

	VTE_LOCK_ASSERT(sc);

	if (sc->vte_watchdog_timer == 0 || --sc->vte_watchdog_timer)
		return;

	ifp = sc->vte_ifp;
	if_printf(sc->vte_ifp, "watchdog timeout -- resetting\n");
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
	vte_init_locked(sc);
	if (!if_sendq_empty(ifp))
		vte_start_locked(sc);
}

static int
vte_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct vte_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error;

	sc = if_getsoftc(ifp);
	ifr = (struct ifreq *)data;
	error = 0;
	switch (cmd) {
	case SIOCSIFFLAGS:
		VTE_LOCK(sc);
		if ((if_getflags(ifp) & IFF_UP) != 0) {
			if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0 &&
			    ((if_getflags(ifp) ^ sc->vte_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
				vte_rxfilter(sc);
			else
				vte_init_locked(sc);
		} else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
			vte_stop(sc);
		sc->vte_if_flags = if_getflags(ifp);
		VTE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		VTE_LOCK(sc);
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
			vte_rxfilter(sc);
		VTE_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->vte_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

static void
vte_mac_config(struct vte_softc *sc)
{
	struct mii_data *mii;
	uint16_t mcr;

	VTE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->vte_miibus);
	mcr = CSR_READ_2(sc, VTE_MCR0);
	mcr &= ~(MCR0_FC_ENB | MCR0_FULL_DUPLEX);
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		mcr |= MCR0_FULL_DUPLEX;
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
		/*
		 * The data sheet is not clear whether the controller
		 * honors received pause frames or not.  There is no
		 * separate control bit for RX pause frames, so just
		 * enable the MCR0_FC_ENB bit.
		 */
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
#endif
	}
	CSR_WRITE_2(sc, VTE_MCR0, mcr);
}

static void
vte_stats_clear(struct vte_softc *sc)
{

	/* Reading the counter registers clears their contents. */
	CSR_READ_2(sc, VTE_CNT_RX_DONE);
	CSR_READ_2(sc, VTE_CNT_MECNT0);
	CSR_READ_2(sc, VTE_CNT_MECNT1);
	CSR_READ_2(sc, VTE_CNT_MECNT2);
	CSR_READ_2(sc, VTE_CNT_MECNT3);
	CSR_READ_2(sc, VTE_CNT_TX_DONE);
	CSR_READ_2(sc, VTE_CNT_MECNT4);
	CSR_READ_2(sc, VTE_CNT_PAUSE);
}
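/*
 * Most event counters are 8-bit values packed in pairs into one 16-bit
 * register (the high and low bytes count different events), which is
 * why vte_stats_update() splits each read with ">> 8" and "& 0xFF".
 * Since the registers clear on read, the totals are accumulated in
 * software.
 */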
static void
vte_stats_update(struct vte_softc *sc)
{
	struct vte_hw_stats *stat;
	uint16_t value;

	VTE_LOCK_ASSERT(sc);

	stat = &sc->vte_stats;

	CSR_READ_2(sc, VTE_MECISR);
	/* RX stats. */
	stat->rx_frames += CSR_READ_2(sc, VTE_CNT_RX_DONE);
	value = CSR_READ_2(sc, VTE_CNT_MECNT0);
	stat->rx_bcast_frames += (value >> 8);
	stat->rx_mcast_frames += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT1);
	stat->rx_runts += (value >> 8);
	stat->rx_crcerrs += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT2);
	stat->rx_long_frames += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT3);
	stat->rx_fifo_full += (value >> 8);
	stat->rx_desc_unavail += (value & 0xFF);

	/* TX stats. */
	stat->tx_frames += CSR_READ_2(sc, VTE_CNT_TX_DONE);
	value = CSR_READ_2(sc, VTE_CNT_MECNT4);
	stat->tx_underruns += (value >> 8);
	stat->tx_late_colls += (value & 0xFF);

	value = CSR_READ_2(sc, VTE_CNT_PAUSE);
	stat->tx_pause_frames += (value >> 8);
	stat->rx_pause_frames += (value & 0xFF);
}

static uint64_t
vte_get_counter(if_t ifp, ift_counter cnt)
{
	struct vte_softc *sc;
	struct vte_hw_stats *stat;

	sc = if_getsoftc(ifp);
	stat = &sc->vte_stats;

	switch (cnt) {
	case IFCOUNTER_OPACKETS:
		return (stat->tx_frames);
	case IFCOUNTER_COLLISIONS:
		return (stat->tx_late_colls);
	case IFCOUNTER_OERRORS:
		return (stat->tx_late_colls + stat->tx_underruns);
	case IFCOUNTER_IPACKETS:
		return (stat->rx_frames);
	case IFCOUNTER_IERRORS:
		return (stat->rx_crcerrs + stat->rx_runts +
		    stat->rx_long_frames + stat->rx_fifo_full);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}
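/*
 * The interrupt handler makes at most 8 service passes to bound the
 * time spent with the driver lock held: reading VTE_MISR both returns
 * and acknowledges the pending status bits, so the handler re-reads it
 * after each pass and stops once nothing is pending or the pass budget
 * is exhausted.
 */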
static void
vte_intr(void *arg)
{
	struct vte_softc *sc;
	if_t ifp;
	uint16_t status;
	int n;

	sc = (struct vte_softc *)arg;
	VTE_LOCK(sc);

	ifp = sc->vte_ifp;
	/* Reading VTE_MISR acknowledges interrupts. */
	status = CSR_READ_2(sc, VTE_MISR);
	if ((status & VTE_INTRS) == 0) {
		/* Not ours. */
		VTE_UNLOCK(sc);
		return;
	}

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
	for (n = 8; (status & VTE_INTRS) != 0;) {
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
			break;
		if ((status & (MISR_RX_DONE | MISR_RX_DESC_UNAVAIL |
		    MISR_RX_FIFO_FULL)) != 0)
			vte_rxeof(sc);
		if ((status & MISR_TX_DONE) != 0)
			vte_txeof(sc);
		if ((status & MISR_EVENT_CNT_OFLOW) != 0)
			vte_stats_update(sc);
		if (!if_sendq_empty(ifp))
			vte_start_locked(sc);
		if (--n > 0)
			status = CSR_READ_2(sc, VTE_MISR);
		else
			break;
	}

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
		/* Re-enable interrupts. */
		CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);
	}
	VTE_UNLOCK(sc);
}

static void
vte_txeof(struct vte_softc *sc)
{
	if_t ifp;
	struct vte_txdesc *txd;
	uint16_t status;
	int cons, prog;

	VTE_LOCK_ASSERT(sc);

	ifp = sc->vte_ifp;

	if (sc->vte_cdata.vte_tx_cnt == 0)
		return;
	bus_dmamap_sync(sc->vte_cdata.vte_tx_ring_tag,
	    sc->vte_cdata.vte_tx_ring_map, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	cons = sc->vte_cdata.vte_tx_cons;
	/*
	 * Go through our TX list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (prog = 0; sc->vte_cdata.vte_tx_cnt > 0; prog++) {
		txd = &sc->vte_cdata.vte_txdesc[cons];
		status = le16toh(txd->tx_desc->dtst);
		if ((status & VTE_DTST_TX_OWN) != 0)
			break;
		sc->vte_cdata.vte_tx_cnt--;
		/* Reclaim transmitted mbufs. */
		bus_dmamap_sync(sc->vte_cdata.vte_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vte_cdata.vte_tx_tag, txd->tx_dmamap);
		if ((txd->tx_flags & VTE_TXMBUF) == 0)
			m_freem(txd->tx_m);
		txd->tx_flags &= ~VTE_TXMBUF;
		txd->tx_m = NULL;
		prog++;
		VTE_DESC_INC(cons, VTE_TX_RING_CNT);
	}

	if (prog > 0) {
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
		sc->vte_cdata.vte_tx_cons = cons;
		/*
		 * Unarm the watchdog timer only when there are no
		 * pending frames in the TX queue.
		 */
		if (sc->vte_cdata.vte_tx_cnt == 0)
			sc->vte_watchdog_timer = 0;
	}
}
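/*
 * vte_newbuf() loads the replacement cluster into the spare DMA map
 * first; only after the load succeeds is the old mbuf unloaded and are
 * the two maps swapped.  A failed allocation therefore leaves the ring
 * entry with its old, still-valid buffer, which the caller recycles.
 */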
static int
vte_newbuf(struct vte_softc *sc, struct vte_rxdesc *rxd)
{
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, sizeof(uint32_t));

	if (bus_dmamap_load_mbuf_sg(sc->vte_cdata.vte_rx_tag,
	    sc->vte_cdata.vte_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->vte_cdata.vte_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->vte_cdata.vte_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->vte_cdata.vte_rx_sparemap;
	sc->vte_cdata.vte_rx_sparemap = map;
	bus_dmamap_sync(sc->vte_cdata.vte_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	rxd->rx_desc->drbp = htole32(segs[0].ds_addr);
	rxd->rx_desc->drlen = htole16(VTE_RX_LEN(segs[0].ds_len));
	rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);

	return (0);
}

/*
 * This controller is not expected to be seen on strict-alignment
 * architectures, but make it work for completeness: the received frame
 * is copied backwards by ETHER_ALIGN (2) bytes so that the IP header
 * following the 14-byte Ethernet header ends up 32-bit aligned.
 */
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf *
vte_fixup_rx(if_t ifp, struct mbuf *m)
{
	uint16_t *src, *dst;
	int i;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;
	m->m_data -= ETHER_ALIGN;
	return (m);
}
#endif

static void
vte_rxeof(struct vte_softc *sc)
{
	if_t ifp;
	struct vte_rxdesc *rxd;
	struct mbuf *m;
	uint16_t status, total_len;
	int cons, prog;

	bus_dmamap_sync(sc->vte_cdata.vte_rx_ring_tag,
	    sc->vte_cdata.vte_rx_ring_map, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	cons = sc->vte_cdata.vte_rx_cons;
	ifp = sc->vte_ifp;
	for (prog = 0; (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0; prog++,
	    VTE_DESC_INC(cons, VTE_RX_RING_CNT)) {
		rxd = &sc->vte_cdata.vte_rxdesc[cons];
		status = le16toh(rxd->rx_desc->drst);
		if ((status & VTE_DRST_RX_OWN) != 0)
			break;
		total_len = VTE_RX_LEN(le16toh(rxd->rx_desc->drlen));
		m = rxd->rx_m;
		if ((status & VTE_DRST_RX_OK) == 0) {
			/* Discard errored frame. */
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}
		if (vte_newbuf(sc, rxd) != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}

		/*
		 * It seems there is no way to strip FCS bytes.
		 */
		m->m_pkthdr.len = m->m_len = total_len - ETHER_CRC_LEN;
		m->m_pkthdr.rcvif = ifp;
#ifndef __NO_STRICT_ALIGNMENT
		vte_fixup_rx(ifp, m);
#endif
		VTE_UNLOCK(sc);
		if_input(ifp, m);
		VTE_LOCK(sc);
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->vte_cdata.vte_rx_cons = cons;
		/*
		 * Sync updated RX descriptors so that the controller
		 * sees the modified RX buffer addresses.
		 */
		bus_dmamap_sync(sc->vte_cdata.vte_rx_ring_tag,
		    sc->vte_cdata.vte_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
#ifdef notyet
		/*
		 * Update the residue counter.  The controller does not
		 * keep track of the number of available RX descriptors,
		 * so the driver has to update VTE_MRDCR to tell the
		 * controller how many free RX descriptors were added.
		 * This is a mechanism similar to the one used in VIA
		 * velocity controllers, and it indicates the controller
		 * just polls the OWN bit of the current RX descriptor
		 * pointer.  A couple of severe issues were seen on a
		 * sample board where the controller continuously emits
		 * TX pause frames once the RX pause threshold is
		 * crossed.  Once triggered, it never recovered from
		 * that state; I couldn't find a way to make it work
		 * again, at least.  This issue effectively disconnected
		 * the system from the network.  Also, the controller
		 * used 00:00:00:00:00:00 as the source station address
		 * of the TX pause frames.  Probably this is one of the
		 * reasons why the vendor recommends not enabling flow
		 * control on the R6040 controller.
		 */
		CSR_WRITE_2(sc, VTE_MRDCR, prog |
		    (((VTE_RX_RING_CNT * 2) / 10) <<
		    VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));
#endif
	}
}
static void
vte_tick(void *arg)
{
	struct vte_softc *sc;
	struct mii_data *mii;

	sc = (struct vte_softc *)arg;

	VTE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->vte_miibus);
	mii_tick(mii);
	vte_stats_update(sc);
	vte_txeof(sc);
	vte_watchdog(sc);
	callout_reset(&sc->vte_tick_ch, hz, vte_tick, sc);
}

static void
vte_reset(struct vte_softc *sc)
{
	uint16_t mcr, mdcsc;
	int i;

	mdcsc = CSR_READ_2(sc, VTE_MDCSC);
	mcr = CSR_READ_2(sc, VTE_MCR1);
	CSR_WRITE_2(sc, VTE_MCR1, mcr | MCR1_MAC_RESET);
	for (i = VTE_RESET_TIMEOUT; i > 0; i--) {
		DELAY(10);
		if ((CSR_READ_2(sc, VTE_MCR1) & MCR1_MAC_RESET) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->vte_dev, "reset timeout(0x%04x)!\n", mcr);
	/*
	 * Follow the vendor's recommended way to reset the MAC.  The
	 * vendor confirms that relying on MCR1_MAC_RESET of VTE_MCR1
	 * is not reliable, so manually reset the internal state
	 * machine.
	 */
	CSR_WRITE_2(sc, VTE_MACSM, 0x0002);
	CSR_WRITE_2(sc, VTE_MACSM, 0);
	DELAY(5000);

	/*
	 * On some SoCs (like the Vortex86DX3) the MDC speed control
	 * register value needs to be restored to its original value
	 * instead of the default one, otherwise some PHY registers may
	 * fail to be read.
	 */
	if (mdcsc != MDCSC_DEFAULT)
		CSR_WRITE_2(sc, VTE_MDCSC, mdcsc);
}

static void
vte_init(void *xsc)
{
	struct vte_softc *sc;

	sc = (struct vte_softc *)xsc;
	VTE_LOCK(sc);
	vte_init_locked(sc);
	VTE_UNLOCK(sc);
}

static void
vte_init_locked(struct vte_softc *sc)
{
	if_t ifp;
	bus_addr_t paddr;
	uint8_t *eaddr;

	VTE_LOCK_ASSERT(sc);

	ifp = sc->vte_ifp;

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		return;
	/*
	 * Cancel any pending I/O.
	 */
	vte_stop(sc);
	/*
	 * Reset the chip to a known state.
	 */
	vte_reset(sc);

	/* Initialize RX descriptors. */
	if (vte_init_rx_ring(sc) != 0) {
		device_printf(sc->vte_dev, "no memory for RX buffers.\n");
		vte_stop(sc);
		return;
	}
	if (vte_init_tx_ring(sc) != 0) {
		device_printf(sc->vte_dev, "no memory for TX buffers.\n");
		vte_stop(sc);
		return;
	}

	/*
	 * Reprogram the station address.  The controller supports up
	 * to 4 different station addresses, so the driver programs the
	 * first station address as its own Ethernet address and
	 * configures the remaining three addresses as perfect
	 * multicast addresses.
	 */
	eaddr = if_getlladdr(sc->vte_ifp);
	CSR_WRITE_2(sc, VTE_MID0L, eaddr[1] << 8 | eaddr[0]);
	CSR_WRITE_2(sc, VTE_MID0M, eaddr[3] << 8 | eaddr[2]);
	CSR_WRITE_2(sc, VTE_MID0H, eaddr[5] << 8 | eaddr[4]);

	/* Set TX descriptor base addresses. */
	paddr = sc->vte_cdata.vte_tx_ring_paddr;
	CSR_WRITE_2(sc, VTE_MTDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MTDSA0, paddr & 0xFFFF);
	/* Set RX descriptor base addresses. */
	paddr = sc->vte_cdata.vte_rx_ring_paddr;
	CSR_WRITE_2(sc, VTE_MRDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MRDSA0, paddr & 0xFFFF);
	/*
	 * Initialize the RX descriptor residue counter and set the RX
	 * pause threshold to 20% of the available RX descriptors.
	 * See the comments in vte_rxeof() for details on the flow
	 * control issues.
	 */
	CSR_WRITE_2(sc, VTE_MRDCR, (VTE_RX_RING_CNT & VTE_MRDCR_RESIDUE_MASK) |
	    (((VTE_RX_RING_CNT * 2) / 10) << VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));

	/*
	 * Always use the maximum frame size the controller can
	 * support.  Otherwise received frames that have a longer
	 * frame length than the vte(4) MTU would be silently dropped
	 * in the controller.  That would break path-MTU discovery, as
	 * the sender wouldn't get any responses from the receiver.
	 * The RX buffer size should be a multiple of 4.
	 * Note, jumbo frames are silently ignored by the controller
	 * and even the MAC counters do not detect them.
	 */
	CSR_WRITE_2(sc, VTE_MRBSR, VTE_RX_BUF_SIZE_MAX);

	/* Configure FIFO. */
	CSR_WRITE_2(sc, VTE_MBCR, MBCR_FIFO_XFER_LENGTH_16 |
	    MBCR_TX_FIFO_THRESH_64 | MBCR_RX_FIFO_THRESH_16 |
	    MBCR_SDRAM_BUS_REQ_TIMER_DEFAULT);

	/*
	 * Configure TX/RX MACs.  The actual resolved duplex and flow
	 * control configuration is done after detecting a valid link.
	 * Note, we don't generate early interrupts here either, since
	 * FreeBSD does not have the interrupt latency problems Windows
	 * has.
	 */
	CSR_WRITE_2(sc, VTE_MCR0, MCR0_ACCPT_LONG_PKT);
	/*
	 * We manually keep track of PHY status changes to configure
	 * the resolved duplex and flow control, since only the duplex
	 * configuration can be automatically reflected to MCR0.
	 */
	CSR_WRITE_2(sc, VTE_MCR1, MCR1_PKT_LENGTH_1537 |
	    MCR1_EXCESS_COL_RETRY_16);

	/* Initialize the RX filter. */
	vte_rxfilter(sc);

	/* Disable TX/RX interrupt moderation control. */
	CSR_WRITE_2(sc, VTE_MRICR, 0);
	CSR_WRITE_2(sc, VTE_MTICR, 0);

	/* Enable MAC event counter interrupts. */
	CSR_WRITE_2(sc, VTE_MECIER, VTE_MECIER_INTRS);
	/* Clear MAC statistics. */
	vte_stats_clear(sc);

	/* Acknowledge all pending interrupts and clear them. */
	CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);
	CSR_WRITE_2(sc, VTE_MISR, 0);

	sc->vte_flags &= ~VTE_FLAG_LINK;
	/* Switch to the current media. */
	vte_mediachange_locked(ifp);

	callout_reset(&sc->vte_tick_ch, hz, vte_tick, sc);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
	if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
}
static void
vte_stop(struct vte_softc *sc)
{
	if_t ifp;
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	VTE_LOCK_ASSERT(sc);
	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp = sc->vte_ifp;
	if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
	sc->vte_flags &= ~VTE_FLAG_LINK;
	callout_stop(&sc->vte_tick_ch);
	sc->vte_watchdog_timer = 0;
	vte_stats_update(sc);
	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
	CSR_WRITE_2(sc, VTE_MECIER, 0);
	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Clear interrupts. */
	CSR_READ_2(sc, VTE_MISR);
	/*
	 * Free TX/RX mbufs still in the queues.
	 */
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->vte_cdata.vte_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->vte_cdata.vte_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->vte_cdata.vte_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->vte_cdata.vte_tx_tag,
			    txd->tx_dmamap);
			if ((txd->tx_flags & VTE_TXMBUF) == 0)
				m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_flags &= ~VTE_TXMBUF;
		}
	}
	/* Free TX mbuf pools used for deep copy. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		if (sc->vte_cdata.vte_txmbufs[i] != NULL) {
			m_freem(sc->vte_cdata.vte_txmbufs[i]);
			sc->vte_cdata.vte_txmbufs[i] = NULL;
		}
	}
}

static void
vte_start_mac(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	VTE_LOCK_ASSERT(sc);

	/* Enable RX/TX MACs. */
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) !=
	    (MCR0_RX_ENB | MCR0_TX_ENB)) {
		mcr |= MCR0_RX_ENB | MCR0_TX_ENB;
		CSR_WRITE_2(sc, VTE_MCR0, mcr);
		for (i = VTE_TIMEOUT; i > 0; i--) {
			mcr = CSR_READ_2(sc, VTE_MCR0);
			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) ==
			    (MCR0_RX_ENB | MCR0_TX_ENB))
				break;
			DELAY(10);
		}
		if (i == 0)
			device_printf(sc->vte_dev,
			    "could not enable RX/TX MAC(0x%04x)!\n", mcr);
	}
}

static void
vte_stop_mac(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	VTE_LOCK_ASSERT(sc);

	/* Disable RX/TX MACs. */
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) != 0) {
		mcr &= ~(MCR0_RX_ENB | MCR0_TX_ENB);
		CSR_WRITE_2(sc, VTE_MCR0, mcr);
		for (i = VTE_TIMEOUT; i > 0; i--) {
			mcr = CSR_READ_2(sc, VTE_MCR0);
			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) == 0)
				break;
			DELAY(10);
		}
		if (i == 0)
			device_printf(sc->vte_dev,
			    "could not disable RX/TX MAC(0x%04x)!\n", mcr);
	}
}
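/*
 * The TX and RX rings are singly linked lists in hardware: each
 * descriptor's next pointer (dtnp/drnp) holds the bus address of the
 * following descriptor, and the last descriptor points back at the
 * first, so the controller simply follows the chain around the ring.
 */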
static int
vte_init_tx_ring(struct vte_softc *sc)
{
	struct vte_tx_desc *desc;
	struct vte_txdesc *txd;
	bus_addr_t addr;
	int i;

	VTE_LOCK_ASSERT(sc);

	sc->vte_cdata.vte_tx_prod = 0;
	sc->vte_cdata.vte_tx_cons = 0;
	sc->vte_cdata.vte_tx_cnt = 0;

	/* Pre-allocate TX mbufs for deep copy. */
	if (tx_deep_copy != 0) {
		for (i = 0; i < VTE_TX_RING_CNT; i++) {
			sc->vte_cdata.vte_txmbufs[i] = m_getcl(M_NOWAIT,
			    MT_DATA, M_PKTHDR);
			if (sc->vte_cdata.vte_txmbufs[i] == NULL)
				return (ENOBUFS);
			sc->vte_cdata.vte_txmbufs[i]->m_pkthdr.len = MCLBYTES;
			sc->vte_cdata.vte_txmbufs[i]->m_len = MCLBYTES;
		}
	}
	desc = sc->vte_cdata.vte_tx_ring;
	bzero(desc, VTE_TX_RING_SZ);
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
		if (i != VTE_TX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_tx_ring_paddr +
			    sizeof(struct vte_tx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_tx_ring_paddr +
			    sizeof(struct vte_tx_desc) * 0;
		desc = &sc->vte_cdata.vte_tx_ring[i];
		desc->dtnp = htole32(addr);
		txd->tx_desc = desc;
	}

	bus_dmamap_sync(sc->vte_cdata.vte_tx_ring_tag,
	    sc->vte_cdata.vte_tx_ring_map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	return (0);
}

static int
vte_init_rx_ring(struct vte_softc *sc)
{
	struct vte_rx_desc *desc;
	struct vte_rxdesc *rxd;
	bus_addr_t addr;
	int i;

	VTE_LOCK_ASSERT(sc);

	sc->vte_cdata.vte_rx_cons = 0;
	desc = sc->vte_cdata.vte_rx_ring;
	bzero(desc, VTE_RX_RING_SZ);
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		if (i != VTE_RX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_rx_ring_paddr +
			    sizeof(struct vte_rx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_rx_ring_paddr +
			    sizeof(struct vte_rx_desc) * 0;
		desc = &sc->vte_cdata.vte_rx_ring[i];
		desc->drnp = htole32(addr);
		rxd->rx_desc = desc;
		if (vte_newbuf(sc, rxd) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->vte_cdata.vte_rx_ring_tag,
	    sc->vte_cdata.vte_rx_ring_map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	return (0);
}

struct vte_maddr_ctx {
	uint16_t rxfilt_perf[VTE_RXFILT_PERFECT_CNT][3];
	uint16_t mchash[4];
	u_int nperf;
};
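/*
 * Multicast hash mapping: the top two bits of the big-endian CRC of
 * the address select one of the four 16-bit MAR registers and the next
 * four bits select a bit within it, giving a 64-bit hash filter.
 */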
static u_int
vte_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct vte_maddr_ctx *ctx = arg;
	uint8_t *eaddr;
	uint32_t crc;

	/*
	 * Program the first 3 multicast groups into the perfect filter.
	 * For all others, use the hash table.
	 */
	if (ctx->nperf < VTE_RXFILT_PERFECT_CNT) {
		eaddr = LLADDR(sdl);
		ctx->rxfilt_perf[ctx->nperf][0] = eaddr[1] << 8 | eaddr[0];
		ctx->rxfilt_perf[ctx->nperf][1] = eaddr[3] << 8 | eaddr[2];
		ctx->rxfilt_perf[ctx->nperf][2] = eaddr[5] << 8 | eaddr[4];
		ctx->nperf++;

		return (1);
	}
	crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN);
	ctx->mchash[crc >> 30] |= 1 << ((crc >> 26) & 0x0F);

	return (1);
}

static void
vte_rxfilter(struct vte_softc *sc)
{
	if_t ifp;
	struct vte_maddr_ctx ctx;
	uint16_t mcr;
	int i;

	VTE_LOCK_ASSERT(sc);

	ifp = sc->vte_ifp;

	bzero(ctx.mchash, sizeof(ctx.mchash));
	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
		ctx.rxfilt_perf[i][0] = 0xFFFF;
		ctx.rxfilt_perf[i][1] = 0xFFFF;
		ctx.rxfilt_perf[i][2] = 0xFFFF;
	}
	ctx.nperf = 0;

	mcr = CSR_READ_2(sc, VTE_MCR0);
	mcr &= ~(MCR0_PROMISC | MCR0_MULTICAST);
	mcr |= MCR0_BROADCAST_DIS;
	if ((if_getflags(ifp) & IFF_BROADCAST) != 0)
		mcr &= ~MCR0_BROADCAST_DIS;
	if ((if_getflags(ifp) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((if_getflags(ifp) & IFF_PROMISC) != 0)
			mcr |= MCR0_PROMISC;
		if ((if_getflags(ifp) & IFF_ALLMULTI) != 0)
			mcr |= MCR0_MULTICAST;
		ctx.mchash[0] = 0xFFFF;
		ctx.mchash[1] = 0xFFFF;
		ctx.mchash[2] = 0xFFFF;
		ctx.mchash[3] = 0xFFFF;
		goto chipit;
	}

	if_foreach_llmaddr(ifp, vte_hash_maddr, &ctx);
	if (ctx.mchash[0] != 0 || ctx.mchash[1] != 0 ||
	    ctx.mchash[2] != 0 || ctx.mchash[3] != 0)
		mcr |= MCR0_MULTICAST;

chipit:
	/* Program multicast hash table. */
	CSR_WRITE_2(sc, VTE_MAR0, ctx.mchash[0]);
	CSR_WRITE_2(sc, VTE_MAR1, ctx.mchash[1]);
	CSR_WRITE_2(sc, VTE_MAR2, ctx.mchash[2]);
	CSR_WRITE_2(sc, VTE_MAR3, ctx.mchash[3]);
	/* Program perfect filter table. */
	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 0,
		    ctx.rxfilt_perf[i][0]);
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 2,
		    ctx.rxfilt_perf[i][1]);
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 4,
		    ctx.rxfilt_perf[i][2]);
	}
	CSR_WRITE_2(sc, VTE_MCR0, mcr);
	CSR_READ_2(sc, VTE_MCR0);
}

static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (arg1 == NULL)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;

	return (0);
}

static int
sysctl_hw_vte_int_mod(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req,
	    VTE_IM_BUNDLE_MIN, VTE_IM_BUNDLE_MAX));
}