1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2010, Pyun YongHyeon <yongari@FreeBSD.org> 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice unmodified, this list of conditions, and the following 12 * disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 */ 29 30 /* Driver for DM&P Electronics, Inc, Vortex86 RDC R6040 FastEthernet. */ 31 32 #include <sys/param.h> 33 #include <sys/systm.h> 34 #include <sys/bus.h> 35 #include <sys/endian.h> 36 #include <sys/kernel.h> 37 #include <sys/lock.h> 38 #include <sys/malloc.h> 39 #include <sys/mbuf.h> 40 #include <sys/module.h> 41 #include <sys/mutex.h> 42 #include <sys/rman.h> 43 #include <sys/socket.h> 44 #include <sys/sockio.h> 45 #include <sys/sysctl.h> 46 47 #include <net/bpf.h> 48 #include <net/if.h> 49 #include <net/if_var.h> 50 #include <net/if_arp.h> 51 #include <net/ethernet.h> 52 #include <net/if_dl.h> 53 #include <net/if_llc.h> 54 #include <net/if_media.h> 55 #include <net/if_types.h> 56 #include <net/if_vlan_var.h> 57 58 #include <netinet/in.h> 59 #include <netinet/in_systm.h> 60 61 #include <dev/mii/mii.h> 62 #include <dev/mii/miivar.h> 63 64 #include <dev/pci/pcireg.h> 65 #include <dev/pci/pcivar.h> 66 67 #include <machine/bus.h> 68 69 #include <dev/vte/if_vtereg.h> 70 #include <dev/vte/if_vtevar.h> 71 72 /* "device miibus" required. See GENERIC if you get errors here. */ 73 #include "miibus_if.h" 74 75 MODULE_DEPEND(vte, pci, 1, 1, 1); 76 MODULE_DEPEND(vte, ether, 1, 1, 1); 77 MODULE_DEPEND(vte, miibus, 1, 1, 1); 78 79 /* Tunables. */ 80 static int tx_deep_copy = 1; 81 TUNABLE_INT("hw.vte.tx_deep_copy", &tx_deep_copy); 82 83 /* 84 * Devices supported by this driver. 85 */ 86 static const struct vte_ident vte_ident_table[] = { 87 { VENDORID_RDC, DEVICEID_RDC_R6040, "RDC R6040 FastEthernet"}, 88 { 0, 0, NULL} 89 }; 90 91 static int vte_attach(device_t); 92 static int vte_detach(device_t); 93 static int vte_dma_alloc(struct vte_softc *); 94 static void vte_dma_free(struct vte_softc *); 95 static void vte_dmamap_cb(void *, bus_dma_segment_t *, int, int); 96 static struct vte_txdesc * 97 vte_encap(struct vte_softc *, struct mbuf **); 98 static const struct vte_ident * 99 vte_find_ident(device_t); 100 #ifndef __NO_STRICT_ALIGNMENT 101 static struct mbuf * 102 vte_fixup_rx(if_t, struct mbuf *); 103 #endif 104 static void vte_get_macaddr(struct vte_softc *); 105 static void vte_init(void *); 106 static void vte_init_locked(struct vte_softc *); 107 static int vte_init_rx_ring(struct vte_softc *); 108 static int vte_init_tx_ring(struct vte_softc *); 109 static void vte_intr(void *); 110 static int vte_ioctl(if_t, u_long, caddr_t); 111 static uint64_t vte_get_counter(if_t, ift_counter); 112 static void vte_mac_config(struct vte_softc *); 113 static int vte_miibus_readreg(device_t, int, int); 114 static void vte_miibus_statchg(device_t); 115 static int vte_miibus_writereg(device_t, int, int, int); 116 static int vte_mediachange(if_t); 117 static int vte_mediachange_locked(if_t); 118 static void vte_mediastatus(if_t, struct ifmediareq *); 119 static int vte_newbuf(struct vte_softc *, struct vte_rxdesc *); 120 static int vte_probe(device_t); 121 static void vte_reset(struct vte_softc *); 122 static int vte_resume(device_t); 123 static void vte_rxeof(struct vte_softc *); 124 static void vte_rxfilter(struct vte_softc *); 125 static int vte_shutdown(device_t); 126 static void vte_start(if_t); 127 static void vte_start_locked(struct vte_softc *); 128 static void vte_start_mac(struct vte_softc *); 129 static void vte_stats_clear(struct vte_softc *); 130 static void vte_stats_update(struct vte_softc *); 131 static void vte_stop(struct vte_softc *); 132 static void vte_stop_mac(struct vte_softc *); 133 static int vte_suspend(device_t); 134 static void vte_sysctl_node(struct vte_softc *); 135 static void vte_tick(void *); 136 static void vte_txeof(struct vte_softc *); 137 static void vte_watchdog(struct vte_softc *); 138 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int); 139 static int sysctl_hw_vte_int_mod(SYSCTL_HANDLER_ARGS); 140 141 static device_method_t vte_methods[] = { 142 /* Device interface. */ 143 DEVMETHOD(device_probe, vte_probe), 144 DEVMETHOD(device_attach, vte_attach), 145 DEVMETHOD(device_detach, vte_detach), 146 DEVMETHOD(device_shutdown, vte_shutdown), 147 DEVMETHOD(device_suspend, vte_suspend), 148 DEVMETHOD(device_resume, vte_resume), 149 150 /* MII interface. */ 151 DEVMETHOD(miibus_readreg, vte_miibus_readreg), 152 DEVMETHOD(miibus_writereg, vte_miibus_writereg), 153 DEVMETHOD(miibus_statchg, vte_miibus_statchg), 154 155 DEVMETHOD_END 156 }; 157 158 static driver_t vte_driver = { 159 "vte", 160 vte_methods, 161 sizeof(struct vte_softc) 162 }; 163 164 DRIVER_MODULE(vte, pci, vte_driver, 0, 0); 165 DRIVER_MODULE(miibus, vte, miibus_driver, 0, 0); 166 167 static int 168 vte_miibus_readreg(device_t dev, int phy, int reg) 169 { 170 struct vte_softc *sc; 171 int i; 172 173 sc = device_get_softc(dev); 174 175 CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_READ | 176 (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT)); 177 for (i = VTE_PHY_TIMEOUT; i > 0; i--) { 178 DELAY(5); 179 if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_READ) == 0) 180 break; 181 } 182 183 if (i == 0) { 184 device_printf(sc->vte_dev, "phy read timeout : %d\n", reg); 185 return (0); 186 } 187 188 return (CSR_READ_2(sc, VTE_MMRD)); 189 } 190 191 static int 192 vte_miibus_writereg(device_t dev, int phy, int reg, int val) 193 { 194 struct vte_softc *sc; 195 int i; 196 197 sc = device_get_softc(dev); 198 199 CSR_WRITE_2(sc, VTE_MMWD, val); 200 CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_WRITE | 201 (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT)); 202 for (i = VTE_PHY_TIMEOUT; i > 0; i--) { 203 DELAY(5); 204 if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_WRITE) == 0) 205 break; 206 } 207 208 if (i == 0) 209 device_printf(sc->vte_dev, "phy write timeout : %d\n", reg); 210 211 return (0); 212 } 213 214 static void 215 vte_miibus_statchg(device_t dev) 216 { 217 struct vte_softc *sc; 218 struct mii_data *mii; 219 if_t ifp; 220 uint16_t val; 221 222 sc = device_get_softc(dev); 223 224 mii = device_get_softc(sc->vte_miibus); 225 ifp = sc->vte_ifp; 226 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) 227 return; 228 229 sc->vte_flags &= ~VTE_FLAG_LINK; 230 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 231 (IFM_ACTIVE | IFM_AVALID)) { 232 switch (IFM_SUBTYPE(mii->mii_media_active)) { 233 case IFM_10_T: 234 case IFM_100_TX: 235 sc->vte_flags |= VTE_FLAG_LINK; 236 break; 237 default: 238 break; 239 } 240 } 241 242 /* Stop RX/TX MACs. */ 243 vte_stop_mac(sc); 244 /* Program MACs with resolved duplex and flow control. */ 245 if ((sc->vte_flags & VTE_FLAG_LINK) != 0) { 246 /* 247 * Timer waiting time : (63 + TIMER * 64) MII clock. 248 * MII clock : 25MHz(100Mbps) or 2.5MHz(10Mbps). 249 */ 250 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) 251 val = 18 << VTE_IM_TIMER_SHIFT; 252 else 253 val = 1 << VTE_IM_TIMER_SHIFT; 254 val |= sc->vte_int_rx_mod << VTE_IM_BUNDLE_SHIFT; 255 /* 48.6us for 100Mbps, 50.8us for 10Mbps */ 256 CSR_WRITE_2(sc, VTE_MRICR, val); 257 258 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) 259 val = 18 << VTE_IM_TIMER_SHIFT; 260 else 261 val = 1 << VTE_IM_TIMER_SHIFT; 262 val |= sc->vte_int_tx_mod << VTE_IM_BUNDLE_SHIFT; 263 /* 48.6us for 100Mbps, 50.8us for 10Mbps */ 264 CSR_WRITE_2(sc, VTE_MTICR, val); 265 266 vte_mac_config(sc); 267 vte_start_mac(sc); 268 } 269 } 270 271 static void 272 vte_mediastatus(if_t ifp, struct ifmediareq *ifmr) 273 { 274 struct vte_softc *sc; 275 struct mii_data *mii; 276 277 sc = if_getsoftc(ifp); 278 VTE_LOCK(sc); 279 if ((if_getflags(ifp) & IFF_UP) == 0) { 280 VTE_UNLOCK(sc); 281 return; 282 } 283 mii = device_get_softc(sc->vte_miibus); 284 285 mii_pollstat(mii); 286 ifmr->ifm_status = mii->mii_media_status; 287 ifmr->ifm_active = mii->mii_media_active; 288 VTE_UNLOCK(sc); 289 } 290 291 static int 292 vte_mediachange(if_t ifp) 293 { 294 struct vte_softc *sc; 295 int error; 296 297 sc = if_getsoftc(ifp); 298 VTE_LOCK(sc); 299 error = vte_mediachange_locked(ifp); 300 VTE_UNLOCK(sc); 301 return (error); 302 } 303 304 static int 305 vte_mediachange_locked(if_t ifp) 306 { 307 struct vte_softc *sc; 308 struct mii_data *mii; 309 struct mii_softc *miisc; 310 int error; 311 312 sc = if_getsoftc(ifp); 313 mii = device_get_softc(sc->vte_miibus); 314 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 315 PHY_RESET(miisc); 316 error = mii_mediachg(mii); 317 318 return (error); 319 } 320 321 static const struct vte_ident * 322 vte_find_ident(device_t dev) 323 { 324 const struct vte_ident *ident; 325 uint16_t vendor, devid; 326 327 vendor = pci_get_vendor(dev); 328 devid = pci_get_device(dev); 329 for (ident = vte_ident_table; ident->name != NULL; ident++) { 330 if (vendor == ident->vendorid && devid == ident->deviceid) 331 return (ident); 332 } 333 334 return (NULL); 335 } 336 337 static int 338 vte_probe(device_t dev) 339 { 340 const struct vte_ident *ident; 341 342 ident = vte_find_ident(dev); 343 if (ident != NULL) { 344 device_set_desc(dev, ident->name); 345 return (BUS_PROBE_DEFAULT); 346 } 347 348 return (ENXIO); 349 } 350 351 static void 352 vte_get_macaddr(struct vte_softc *sc) 353 { 354 uint16_t mid; 355 356 /* 357 * It seems there is no way to reload station address and 358 * it is supposed to be set by BIOS. 359 */ 360 mid = CSR_READ_2(sc, VTE_MID0L); 361 sc->vte_eaddr[0] = (mid >> 0) & 0xFF; 362 sc->vte_eaddr[1] = (mid >> 8) & 0xFF; 363 mid = CSR_READ_2(sc, VTE_MID0M); 364 sc->vte_eaddr[2] = (mid >> 0) & 0xFF; 365 sc->vte_eaddr[3] = (mid >> 8) & 0xFF; 366 mid = CSR_READ_2(sc, VTE_MID0H); 367 sc->vte_eaddr[4] = (mid >> 0) & 0xFF; 368 sc->vte_eaddr[5] = (mid >> 8) & 0xFF; 369 } 370 371 static int 372 vte_attach(device_t dev) 373 { 374 struct vte_softc *sc; 375 if_t ifp; 376 uint16_t macid; 377 int error, rid; 378 379 error = 0; 380 sc = device_get_softc(dev); 381 sc->vte_dev = dev; 382 383 mtx_init(&sc->vte_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 384 MTX_DEF); 385 callout_init_mtx(&sc->vte_tick_ch, &sc->vte_mtx, 0); 386 sc->vte_ident = vte_find_ident(dev); 387 388 /* Map the device. */ 389 pci_enable_busmaster(dev); 390 sc->vte_res_id = PCIR_BAR(1); 391 sc->vte_res_type = SYS_RES_MEMORY; 392 sc->vte_res = bus_alloc_resource_any(dev, sc->vte_res_type, 393 &sc->vte_res_id, RF_ACTIVE); 394 if (sc->vte_res == NULL) { 395 sc->vte_res_id = PCIR_BAR(0); 396 sc->vte_res_type = SYS_RES_IOPORT; 397 sc->vte_res = bus_alloc_resource_any(dev, sc->vte_res_type, 398 &sc->vte_res_id, RF_ACTIVE); 399 if (sc->vte_res == NULL) { 400 device_printf(dev, "cannot map memory/ports.\n"); 401 mtx_destroy(&sc->vte_mtx); 402 return (ENXIO); 403 } 404 } 405 if (bootverbose) { 406 device_printf(dev, "using %s space register mapping\n", 407 sc->vte_res_type == SYS_RES_MEMORY ? "memory" : "I/O"); 408 device_printf(dev, "MAC Identifier : 0x%04x\n", 409 CSR_READ_2(sc, VTE_MACID)); 410 macid = CSR_READ_2(sc, VTE_MACID_REV); 411 device_printf(dev, "MAC Id. 0x%02x, Rev. 0x%02x\n", 412 (macid & VTE_MACID_MASK) >> VTE_MACID_SHIFT, 413 (macid & VTE_MACID_REV_MASK) >> VTE_MACID_REV_SHIFT); 414 } 415 416 rid = 0; 417 sc->vte_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 418 RF_SHAREABLE | RF_ACTIVE); 419 if (sc->vte_irq == NULL) { 420 device_printf(dev, "cannot allocate IRQ resources.\n"); 421 error = ENXIO; 422 goto fail; 423 } 424 425 /* Reset the ethernet controller. */ 426 vte_reset(sc); 427 428 if ((error = vte_dma_alloc(sc)) != 0) 429 goto fail; 430 431 /* Create device sysctl node. */ 432 vte_sysctl_node(sc); 433 434 /* Load station address. */ 435 vte_get_macaddr(sc); 436 437 ifp = sc->vte_ifp = if_alloc(IFT_ETHER); 438 if_setsoftc(ifp, sc); 439 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 440 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); 441 if_setioctlfn(ifp, vte_ioctl); 442 if_setstartfn(ifp, vte_start); 443 if_setinitfn(ifp, vte_init); 444 if_setgetcounterfn(ifp, vte_get_counter); 445 if_setsendqlen(ifp, VTE_TX_RING_CNT - 1); 446 if_setsendqready(ifp); 447 448 /* 449 * Set up MII bus. 450 * BIOS would have initialized VTE_MPSCCR to catch PHY 451 * status changes so driver may be able to extract 452 * configured PHY address. Since it's common to see BIOS 453 * fails to initialize the register(including the sample 454 * board I have), let mii(4) probe it. This is more 455 * reliable than relying on BIOS's initialization. 456 * 457 * Advertising flow control capability to mii(4) was 458 * intentionally disabled due to severe problems in TX 459 * pause frame generation. See vte_rxeof() for more 460 * details. 461 */ 462 error = mii_attach(dev, &sc->vte_miibus, ifp, vte_mediachange, 463 vte_mediastatus, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0); 464 if (error != 0) { 465 device_printf(dev, "attaching PHYs failed\n"); 466 goto fail; 467 } 468 469 ether_ifattach(ifp, sc->vte_eaddr); 470 471 /* VLAN capability setup. */ 472 if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0); 473 if_setcapenable(ifp, if_getcapabilities(ifp)); 474 /* Tell the upper layer we support VLAN over-sized frames. */ 475 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); 476 477 error = bus_setup_intr(dev, sc->vte_irq, INTR_TYPE_NET | INTR_MPSAFE, 478 NULL, vte_intr, sc, &sc->vte_intrhand); 479 if (error != 0) { 480 device_printf(dev, "could not set up interrupt handler.\n"); 481 ether_ifdetach(ifp); 482 goto fail; 483 } 484 485 fail: 486 if (error != 0) 487 vte_detach(dev); 488 489 return (error); 490 } 491 492 static int 493 vte_detach(device_t dev) 494 { 495 struct vte_softc *sc; 496 if_t ifp; 497 498 sc = device_get_softc(dev); 499 500 ifp = sc->vte_ifp; 501 if (device_is_attached(dev)) { 502 VTE_LOCK(sc); 503 vte_stop(sc); 504 VTE_UNLOCK(sc); 505 callout_drain(&sc->vte_tick_ch); 506 ether_ifdetach(ifp); 507 } 508 509 if (sc->vte_miibus != NULL) { 510 device_delete_child(dev, sc->vte_miibus); 511 sc->vte_miibus = NULL; 512 } 513 bus_generic_detach(dev); 514 515 if (sc->vte_intrhand != NULL) { 516 bus_teardown_intr(dev, sc->vte_irq, sc->vte_intrhand); 517 sc->vte_intrhand = NULL; 518 } 519 if (sc->vte_irq != NULL) { 520 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vte_irq); 521 sc->vte_irq = NULL; 522 } 523 if (sc->vte_res != NULL) { 524 bus_release_resource(dev, sc->vte_res_type, sc->vte_res_id, 525 sc->vte_res); 526 sc->vte_res = NULL; 527 } 528 if (ifp != NULL) { 529 if_free(ifp); 530 sc->vte_ifp = NULL; 531 } 532 vte_dma_free(sc); 533 mtx_destroy(&sc->vte_mtx); 534 535 return (0); 536 } 537 538 #define VTE_SYSCTL_STAT_ADD32(c, h, n, p, d) \ 539 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) 540 541 static void 542 vte_sysctl_node(struct vte_softc *sc) 543 { 544 struct sysctl_ctx_list *ctx; 545 struct sysctl_oid_list *child, *parent; 546 struct sysctl_oid *tree; 547 struct vte_hw_stats *stats; 548 int error; 549 550 stats = &sc->vte_stats; 551 ctx = device_get_sysctl_ctx(sc->vte_dev); 552 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vte_dev)); 553 554 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_rx_mod", 555 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 556 &sc->vte_int_rx_mod, 0, sysctl_hw_vte_int_mod, "I", 557 "vte RX interrupt moderation"); 558 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_tx_mod", 559 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 560 &sc->vte_int_tx_mod, 0, sysctl_hw_vte_int_mod, "I", 561 "vte TX interrupt moderation"); 562 /* Pull in device tunables. */ 563 sc->vte_int_rx_mod = VTE_IM_RX_BUNDLE_DEFAULT; 564 error = resource_int_value(device_get_name(sc->vte_dev), 565 device_get_unit(sc->vte_dev), "int_rx_mod", &sc->vte_int_rx_mod); 566 if (error == 0) { 567 if (sc->vte_int_rx_mod < VTE_IM_BUNDLE_MIN || 568 sc->vte_int_rx_mod > VTE_IM_BUNDLE_MAX) { 569 device_printf(sc->vte_dev, "int_rx_mod value out of " 570 "range; using default: %d\n", 571 VTE_IM_RX_BUNDLE_DEFAULT); 572 sc->vte_int_rx_mod = VTE_IM_RX_BUNDLE_DEFAULT; 573 } 574 } 575 576 sc->vte_int_tx_mod = VTE_IM_TX_BUNDLE_DEFAULT; 577 error = resource_int_value(device_get_name(sc->vte_dev), 578 device_get_unit(sc->vte_dev), "int_tx_mod", &sc->vte_int_tx_mod); 579 if (error == 0) { 580 if (sc->vte_int_tx_mod < VTE_IM_BUNDLE_MIN || 581 sc->vte_int_tx_mod > VTE_IM_BUNDLE_MAX) { 582 device_printf(sc->vte_dev, "int_tx_mod value out of " 583 "range; using default: %d\n", 584 VTE_IM_TX_BUNDLE_DEFAULT); 585 sc->vte_int_tx_mod = VTE_IM_TX_BUNDLE_DEFAULT; 586 } 587 } 588 589 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", 590 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "VTE statistics"); 591 parent = SYSCTL_CHILDREN(tree); 592 593 /* RX statistics. */ 594 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", 595 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX MAC statistics"); 596 child = SYSCTL_CHILDREN(tree); 597 VTE_SYSCTL_STAT_ADD32(ctx, child, "good_frames", 598 &stats->rx_frames, "Good frames"); 599 VTE_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames", 600 &stats->rx_bcast_frames, "Good broadcast frames"); 601 VTE_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames", 602 &stats->rx_mcast_frames, "Good multicast frames"); 603 VTE_SYSCTL_STAT_ADD32(ctx, child, "runt", 604 &stats->rx_runts, "Too short frames"); 605 VTE_SYSCTL_STAT_ADD32(ctx, child, "crc_errs", 606 &stats->rx_crcerrs, "CRC errors"); 607 VTE_SYSCTL_STAT_ADD32(ctx, child, "long_frames", 608 &stats->rx_long_frames, 609 "Frames that have longer length than maximum packet length"); 610 VTE_SYSCTL_STAT_ADD32(ctx, child, "fifo_full", 611 &stats->rx_fifo_full, "FIFO full"); 612 VTE_SYSCTL_STAT_ADD32(ctx, child, "desc_unavail", 613 &stats->rx_desc_unavail, "Descriptor unavailable frames"); 614 VTE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames", 615 &stats->rx_pause_frames, "Pause control frames"); 616 617 /* TX statistics. */ 618 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", 619 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TX MAC statistics"); 620 child = SYSCTL_CHILDREN(tree); 621 VTE_SYSCTL_STAT_ADD32(ctx, child, "good_frames", 622 &stats->tx_frames, "Good frames"); 623 VTE_SYSCTL_STAT_ADD32(ctx, child, "underruns", 624 &stats->tx_underruns, "FIFO underruns"); 625 VTE_SYSCTL_STAT_ADD32(ctx, child, "late_colls", 626 &stats->tx_late_colls, "Late collisions"); 627 VTE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames", 628 &stats->tx_pause_frames, "Pause control frames"); 629 } 630 631 #undef VTE_SYSCTL_STAT_ADD32 632 633 struct vte_dmamap_arg { 634 bus_addr_t vte_busaddr; 635 }; 636 637 static void 638 vte_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 639 { 640 struct vte_dmamap_arg *ctx; 641 642 if (error != 0) 643 return; 644 645 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 646 647 ctx = (struct vte_dmamap_arg *)arg; 648 ctx->vte_busaddr = segs[0].ds_addr; 649 } 650 651 static int 652 vte_dma_alloc(struct vte_softc *sc) 653 { 654 struct vte_txdesc *txd; 655 struct vte_rxdesc *rxd; 656 struct vte_dmamap_arg ctx; 657 int error, i; 658 659 /* Create parent DMA tag. */ 660 error = bus_dma_tag_create( 661 bus_get_dma_tag(sc->vte_dev), /* parent */ 662 1, 0, /* alignment, boundary */ 663 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 664 BUS_SPACE_MAXADDR, /* highaddr */ 665 NULL, NULL, /* filter, filterarg */ 666 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 667 0, /* nsegments */ 668 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 669 0, /* flags */ 670 NULL, NULL, /* lockfunc, lockarg */ 671 &sc->vte_cdata.vte_parent_tag); 672 if (error != 0) { 673 device_printf(sc->vte_dev, 674 "could not create parent DMA tag.\n"); 675 goto fail; 676 } 677 678 /* Create DMA tag for TX descriptor ring. */ 679 error = bus_dma_tag_create( 680 sc->vte_cdata.vte_parent_tag, /* parent */ 681 VTE_TX_RING_ALIGN, 0, /* alignment, boundary */ 682 BUS_SPACE_MAXADDR, /* lowaddr */ 683 BUS_SPACE_MAXADDR, /* highaddr */ 684 NULL, NULL, /* filter, filterarg */ 685 VTE_TX_RING_SZ, /* maxsize */ 686 1, /* nsegments */ 687 VTE_TX_RING_SZ, /* maxsegsize */ 688 0, /* flags */ 689 NULL, NULL, /* lockfunc, lockarg */ 690 &sc->vte_cdata.vte_tx_ring_tag); 691 if (error != 0) { 692 device_printf(sc->vte_dev, 693 "could not create TX ring DMA tag.\n"); 694 goto fail; 695 } 696 697 /* Create DMA tag for RX free descriptor ring. */ 698 error = bus_dma_tag_create( 699 sc->vte_cdata.vte_parent_tag, /* parent */ 700 VTE_RX_RING_ALIGN, 0, /* alignment, boundary */ 701 BUS_SPACE_MAXADDR, /* lowaddr */ 702 BUS_SPACE_MAXADDR, /* highaddr */ 703 NULL, NULL, /* filter, filterarg */ 704 VTE_RX_RING_SZ, /* maxsize */ 705 1, /* nsegments */ 706 VTE_RX_RING_SZ, /* maxsegsize */ 707 0, /* flags */ 708 NULL, NULL, /* lockfunc, lockarg */ 709 &sc->vte_cdata.vte_rx_ring_tag); 710 if (error != 0) { 711 device_printf(sc->vte_dev, 712 "could not create RX ring DMA tag.\n"); 713 goto fail; 714 } 715 716 /* Allocate DMA'able memory and load the DMA map for TX ring. */ 717 error = bus_dmamem_alloc(sc->vte_cdata.vte_tx_ring_tag, 718 (void **)&sc->vte_cdata.vte_tx_ring, 719 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 720 &sc->vte_cdata.vte_tx_ring_map); 721 if (error != 0) { 722 device_printf(sc->vte_dev, 723 "could not allocate DMA'able memory for TX ring.\n"); 724 goto fail; 725 } 726 ctx.vte_busaddr = 0; 727 error = bus_dmamap_load(sc->vte_cdata.vte_tx_ring_tag, 728 sc->vte_cdata.vte_tx_ring_map, sc->vte_cdata.vte_tx_ring, 729 VTE_TX_RING_SZ, vte_dmamap_cb, &ctx, 0); 730 if (error != 0 || ctx.vte_busaddr == 0) { 731 device_printf(sc->vte_dev, 732 "could not load DMA'able memory for TX ring.\n"); 733 goto fail; 734 } 735 sc->vte_cdata.vte_tx_ring_paddr = ctx.vte_busaddr; 736 737 /* Allocate DMA'able memory and load the DMA map for RX ring. */ 738 error = bus_dmamem_alloc(sc->vte_cdata.vte_rx_ring_tag, 739 (void **)&sc->vte_cdata.vte_rx_ring, 740 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 741 &sc->vte_cdata.vte_rx_ring_map); 742 if (error != 0) { 743 device_printf(sc->vte_dev, 744 "could not allocate DMA'able memory for RX ring.\n"); 745 goto fail; 746 } 747 ctx.vte_busaddr = 0; 748 error = bus_dmamap_load(sc->vte_cdata.vte_rx_ring_tag, 749 sc->vte_cdata.vte_rx_ring_map, sc->vte_cdata.vte_rx_ring, 750 VTE_RX_RING_SZ, vte_dmamap_cb, &ctx, 0); 751 if (error != 0 || ctx.vte_busaddr == 0) { 752 device_printf(sc->vte_dev, 753 "could not load DMA'able memory for RX ring.\n"); 754 goto fail; 755 } 756 sc->vte_cdata.vte_rx_ring_paddr = ctx.vte_busaddr; 757 758 /* Create TX buffer parent tag. */ 759 error = bus_dma_tag_create( 760 bus_get_dma_tag(sc->vte_dev), /* parent */ 761 1, 0, /* alignment, boundary */ 762 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 763 BUS_SPACE_MAXADDR, /* highaddr */ 764 NULL, NULL, /* filter, filterarg */ 765 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 766 0, /* nsegments */ 767 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 768 0, /* flags */ 769 NULL, NULL, /* lockfunc, lockarg */ 770 &sc->vte_cdata.vte_buffer_tag); 771 if (error != 0) { 772 device_printf(sc->vte_dev, 773 "could not create parent buffer DMA tag.\n"); 774 goto fail; 775 } 776 777 /* Create DMA tag for TX buffers. */ 778 error = bus_dma_tag_create( 779 sc->vte_cdata.vte_buffer_tag, /* parent */ 780 1, 0, /* alignment, boundary */ 781 BUS_SPACE_MAXADDR, /* lowaddr */ 782 BUS_SPACE_MAXADDR, /* highaddr */ 783 NULL, NULL, /* filter, filterarg */ 784 MCLBYTES, /* maxsize */ 785 1, /* nsegments */ 786 MCLBYTES, /* maxsegsize */ 787 0, /* flags */ 788 NULL, NULL, /* lockfunc, lockarg */ 789 &sc->vte_cdata.vte_tx_tag); 790 if (error != 0) { 791 device_printf(sc->vte_dev, "could not create TX DMA tag.\n"); 792 goto fail; 793 } 794 795 /* Create DMA tag for RX buffers. */ 796 error = bus_dma_tag_create( 797 sc->vte_cdata.vte_buffer_tag, /* parent */ 798 VTE_RX_BUF_ALIGN, 0, /* alignment, boundary */ 799 BUS_SPACE_MAXADDR, /* lowaddr */ 800 BUS_SPACE_MAXADDR, /* highaddr */ 801 NULL, NULL, /* filter, filterarg */ 802 MCLBYTES, /* maxsize */ 803 1, /* nsegments */ 804 MCLBYTES, /* maxsegsize */ 805 0, /* flags */ 806 NULL, NULL, /* lockfunc, lockarg */ 807 &sc->vte_cdata.vte_rx_tag); 808 if (error != 0) { 809 device_printf(sc->vte_dev, "could not create RX DMA tag.\n"); 810 goto fail; 811 } 812 /* Create DMA maps for TX buffers. */ 813 for (i = 0; i < VTE_TX_RING_CNT; i++) { 814 txd = &sc->vte_cdata.vte_txdesc[i]; 815 txd->tx_m = NULL; 816 txd->tx_dmamap = NULL; 817 error = bus_dmamap_create(sc->vte_cdata.vte_tx_tag, 0, 818 &txd->tx_dmamap); 819 if (error != 0) { 820 device_printf(sc->vte_dev, 821 "could not create TX dmamap.\n"); 822 goto fail; 823 } 824 } 825 /* Create DMA maps for RX buffers. */ 826 if ((error = bus_dmamap_create(sc->vte_cdata.vte_rx_tag, 0, 827 &sc->vte_cdata.vte_rx_sparemap)) != 0) { 828 device_printf(sc->vte_dev, 829 "could not create spare RX dmamap.\n"); 830 goto fail; 831 } 832 for (i = 0; i < VTE_RX_RING_CNT; i++) { 833 rxd = &sc->vte_cdata.vte_rxdesc[i]; 834 rxd->rx_m = NULL; 835 rxd->rx_dmamap = NULL; 836 error = bus_dmamap_create(sc->vte_cdata.vte_rx_tag, 0, 837 &rxd->rx_dmamap); 838 if (error != 0) { 839 device_printf(sc->vte_dev, 840 "could not create RX dmamap.\n"); 841 goto fail; 842 } 843 } 844 845 fail: 846 return (error); 847 } 848 849 static void 850 vte_dma_free(struct vte_softc *sc) 851 { 852 struct vte_txdesc *txd; 853 struct vte_rxdesc *rxd; 854 int i; 855 856 /* TX buffers. */ 857 if (sc->vte_cdata.vte_tx_tag != NULL) { 858 for (i = 0; i < VTE_TX_RING_CNT; i++) { 859 txd = &sc->vte_cdata.vte_txdesc[i]; 860 if (txd->tx_dmamap != NULL) { 861 bus_dmamap_destroy(sc->vte_cdata.vte_tx_tag, 862 txd->tx_dmamap); 863 txd->tx_dmamap = NULL; 864 } 865 } 866 bus_dma_tag_destroy(sc->vte_cdata.vte_tx_tag); 867 sc->vte_cdata.vte_tx_tag = NULL; 868 } 869 /* RX buffers */ 870 if (sc->vte_cdata.vte_rx_tag != NULL) { 871 for (i = 0; i < VTE_RX_RING_CNT; i++) { 872 rxd = &sc->vte_cdata.vte_rxdesc[i]; 873 if (rxd->rx_dmamap != NULL) { 874 bus_dmamap_destroy(sc->vte_cdata.vte_rx_tag, 875 rxd->rx_dmamap); 876 rxd->rx_dmamap = NULL; 877 } 878 } 879 if (sc->vte_cdata.vte_rx_sparemap != NULL) { 880 bus_dmamap_destroy(sc->vte_cdata.vte_rx_tag, 881 sc->vte_cdata.vte_rx_sparemap); 882 sc->vte_cdata.vte_rx_sparemap = NULL; 883 } 884 bus_dma_tag_destroy(sc->vte_cdata.vte_rx_tag); 885 sc->vte_cdata.vte_rx_tag = NULL; 886 } 887 /* TX descriptor ring. */ 888 if (sc->vte_cdata.vte_tx_ring_tag != NULL) { 889 if (sc->vte_cdata.vte_tx_ring_paddr != 0) 890 bus_dmamap_unload(sc->vte_cdata.vte_tx_ring_tag, 891 sc->vte_cdata.vte_tx_ring_map); 892 if (sc->vte_cdata.vte_tx_ring != NULL) 893 bus_dmamem_free(sc->vte_cdata.vte_tx_ring_tag, 894 sc->vte_cdata.vte_tx_ring, 895 sc->vte_cdata.vte_tx_ring_map); 896 sc->vte_cdata.vte_tx_ring = NULL; 897 sc->vte_cdata.vte_tx_ring_paddr = 0; 898 bus_dma_tag_destroy(sc->vte_cdata.vte_tx_ring_tag); 899 sc->vte_cdata.vte_tx_ring_tag = NULL; 900 } 901 /* RX ring. */ 902 if (sc->vte_cdata.vte_rx_ring_tag != NULL) { 903 if (sc->vte_cdata.vte_rx_ring_paddr != 0) 904 bus_dmamap_unload(sc->vte_cdata.vte_rx_ring_tag, 905 sc->vte_cdata.vte_rx_ring_map); 906 if (sc->vte_cdata.vte_rx_ring != NULL) 907 bus_dmamem_free(sc->vte_cdata.vte_rx_ring_tag, 908 sc->vte_cdata.vte_rx_ring, 909 sc->vte_cdata.vte_rx_ring_map); 910 sc->vte_cdata.vte_rx_ring = NULL; 911 sc->vte_cdata.vte_rx_ring_paddr = 0; 912 bus_dma_tag_destroy(sc->vte_cdata.vte_rx_ring_tag); 913 sc->vte_cdata.vte_rx_ring_tag = NULL; 914 } 915 if (sc->vte_cdata.vte_buffer_tag != NULL) { 916 bus_dma_tag_destroy(sc->vte_cdata.vte_buffer_tag); 917 sc->vte_cdata.vte_buffer_tag = NULL; 918 } 919 if (sc->vte_cdata.vte_parent_tag != NULL) { 920 bus_dma_tag_destroy(sc->vte_cdata.vte_parent_tag); 921 sc->vte_cdata.vte_parent_tag = NULL; 922 } 923 } 924 925 static int 926 vte_shutdown(device_t dev) 927 { 928 929 return (vte_suspend(dev)); 930 } 931 932 static int 933 vte_suspend(device_t dev) 934 { 935 struct vte_softc *sc; 936 if_t ifp; 937 938 sc = device_get_softc(dev); 939 940 VTE_LOCK(sc); 941 ifp = sc->vte_ifp; 942 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) 943 vte_stop(sc); 944 VTE_UNLOCK(sc); 945 946 return (0); 947 } 948 949 static int 950 vte_resume(device_t dev) 951 { 952 struct vte_softc *sc; 953 if_t ifp; 954 955 sc = device_get_softc(dev); 956 957 VTE_LOCK(sc); 958 ifp = sc->vte_ifp; 959 if ((if_getflags(ifp) & IFF_UP) != 0) { 960 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); 961 vte_init_locked(sc); 962 } 963 VTE_UNLOCK(sc); 964 965 return (0); 966 } 967 968 static struct vte_txdesc * 969 vte_encap(struct vte_softc *sc, struct mbuf **m_head) 970 { 971 struct vte_txdesc *txd; 972 struct mbuf *m, *n; 973 bus_dma_segment_t txsegs[1]; 974 int copy, error, nsegs, padlen; 975 976 VTE_LOCK_ASSERT(sc); 977 978 M_ASSERTPKTHDR((*m_head)); 979 980 txd = &sc->vte_cdata.vte_txdesc[sc->vte_cdata.vte_tx_prod]; 981 m = *m_head; 982 /* 983 * Controller doesn't auto-pad, so we have to make sure pad 984 * short frames out to the minimum frame length. 985 */ 986 if (m->m_pkthdr.len < VTE_MIN_FRAMELEN) 987 padlen = VTE_MIN_FRAMELEN - m->m_pkthdr.len; 988 else 989 padlen = 0; 990 991 /* 992 * Controller does not support multi-fragmented TX buffers. 993 * Controller spends most of its TX processing time in 994 * de-fragmenting TX buffers. Either faster CPU or more 995 * advanced controller DMA engine is required to speed up 996 * TX path processing. 997 * To mitigate the de-fragmenting issue, perform deep copy 998 * from fragmented mbuf chains to a pre-allocated mbuf 999 * cluster with extra cost of kernel memory. For frames 1000 * that is composed of single TX buffer, the deep copy is 1001 * bypassed. 1002 */ 1003 if (tx_deep_copy != 0) { 1004 copy = 0; 1005 if (m->m_next != NULL) 1006 copy++; 1007 if (padlen > 0 && (M_WRITABLE(m) == 0 || 1008 padlen > M_TRAILINGSPACE(m))) 1009 copy++; 1010 if (copy != 0) { 1011 /* Avoid expensive m_defrag(9) and do deep copy. */ 1012 n = sc->vte_cdata.vte_txmbufs[sc->vte_cdata.vte_tx_prod]; 1013 m_copydata(m, 0, m->m_pkthdr.len, mtod(n, char *)); 1014 n->m_pkthdr.len = m->m_pkthdr.len; 1015 n->m_len = m->m_pkthdr.len; 1016 m = n; 1017 txd->tx_flags |= VTE_TXMBUF; 1018 } 1019 1020 if (padlen > 0) { 1021 /* Zero out the bytes in the pad area. */ 1022 bzero(mtod(m, char *) + m->m_pkthdr.len, padlen); 1023 m->m_pkthdr.len += padlen; 1024 m->m_len = m->m_pkthdr.len; 1025 } 1026 } else { 1027 if (M_WRITABLE(m) == 0) { 1028 if (m->m_next != NULL || padlen > 0) { 1029 /* Get a writable copy. */ 1030 m = m_dup(*m_head, M_NOWAIT); 1031 /* Release original mbuf chains. */ 1032 m_freem(*m_head); 1033 if (m == NULL) { 1034 *m_head = NULL; 1035 return (NULL); 1036 } 1037 *m_head = m; 1038 } 1039 } 1040 1041 if (m->m_next != NULL) { 1042 m = m_defrag(*m_head, M_NOWAIT); 1043 if (m == NULL) { 1044 m_freem(*m_head); 1045 *m_head = NULL; 1046 return (NULL); 1047 } 1048 *m_head = m; 1049 } 1050 1051 if (padlen > 0) { 1052 if (M_TRAILINGSPACE(m) < padlen) { 1053 m = m_defrag(*m_head, M_NOWAIT); 1054 if (m == NULL) { 1055 m_freem(*m_head); 1056 *m_head = NULL; 1057 return (NULL); 1058 } 1059 *m_head = m; 1060 } 1061 /* Zero out the bytes in the pad area. */ 1062 bzero(mtod(m, char *) + m->m_pkthdr.len, padlen); 1063 m->m_pkthdr.len += padlen; 1064 m->m_len = m->m_pkthdr.len; 1065 } 1066 } 1067 1068 error = bus_dmamap_load_mbuf_sg(sc->vte_cdata.vte_tx_tag, 1069 txd->tx_dmamap, m, txsegs, &nsegs, 0); 1070 if (error != 0) { 1071 txd->tx_flags &= ~VTE_TXMBUF; 1072 return (NULL); 1073 } 1074 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1075 bus_dmamap_sync(sc->vte_cdata.vte_tx_tag, txd->tx_dmamap, 1076 BUS_DMASYNC_PREWRITE); 1077 1078 txd->tx_desc->dtlen = htole16(VTE_TX_LEN(txsegs[0].ds_len)); 1079 txd->tx_desc->dtbp = htole32(txsegs[0].ds_addr); 1080 sc->vte_cdata.vte_tx_cnt++; 1081 /* Update producer index. */ 1082 VTE_DESC_INC(sc->vte_cdata.vte_tx_prod, VTE_TX_RING_CNT); 1083 1084 /* Finally hand over ownership to controller. */ 1085 txd->tx_desc->dtst = htole16(VTE_DTST_TX_OWN); 1086 txd->tx_m = m; 1087 1088 return (txd); 1089 } 1090 1091 static void 1092 vte_start(if_t ifp) 1093 { 1094 struct vte_softc *sc; 1095 1096 sc = if_getsoftc(ifp); 1097 VTE_LOCK(sc); 1098 vte_start_locked(sc); 1099 VTE_UNLOCK(sc); 1100 } 1101 1102 static void 1103 vte_start_locked(struct vte_softc *sc) 1104 { 1105 if_t ifp; 1106 struct vte_txdesc *txd; 1107 struct mbuf *m_head; 1108 int enq; 1109 1110 ifp = sc->vte_ifp; 1111 1112 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1113 IFF_DRV_RUNNING || (sc->vte_flags & VTE_FLAG_LINK) == 0) 1114 return; 1115 1116 for (enq = 0; !if_sendq_empty(ifp); ) { 1117 /* Reserve one free TX descriptor. */ 1118 if (sc->vte_cdata.vte_tx_cnt >= VTE_TX_RING_CNT - 1) { 1119 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); 1120 break; 1121 } 1122 m_head = if_dequeue(ifp); 1123 if (m_head == NULL) 1124 break; 1125 /* 1126 * Pack the data into the transmit ring. If we 1127 * don't have room, set the OACTIVE flag and wait 1128 * for the NIC to drain the ring. 1129 */ 1130 if ((txd = vte_encap(sc, &m_head)) == NULL) { 1131 if (m_head != NULL) 1132 if_sendq_prepend(ifp, m_head); 1133 break; 1134 } 1135 1136 enq++; 1137 /* 1138 * If there's a BPF listener, bounce a copy of this frame 1139 * to him. 1140 */ 1141 ETHER_BPF_MTAP(ifp, m_head); 1142 /* Free consumed TX frame. */ 1143 if ((txd->tx_flags & VTE_TXMBUF) != 0) 1144 m_freem(m_head); 1145 } 1146 1147 if (enq > 0) { 1148 bus_dmamap_sync(sc->vte_cdata.vte_tx_ring_tag, 1149 sc->vte_cdata.vte_tx_ring_map, BUS_DMASYNC_PREREAD | 1150 BUS_DMASYNC_PREWRITE); 1151 CSR_WRITE_2(sc, VTE_TX_POLL, TX_POLL_START); 1152 sc->vte_watchdog_timer = VTE_TX_TIMEOUT; 1153 } 1154 } 1155 1156 static void 1157 vte_watchdog(struct vte_softc *sc) 1158 { 1159 if_t ifp; 1160 1161 VTE_LOCK_ASSERT(sc); 1162 1163 if (sc->vte_watchdog_timer == 0 || --sc->vte_watchdog_timer) 1164 return; 1165 1166 ifp = sc->vte_ifp; 1167 if_printf(sc->vte_ifp, "watchdog timeout -- resetting\n"); 1168 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1169 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); 1170 vte_init_locked(sc); 1171 if (!if_sendq_empty(ifp)) 1172 vte_start_locked(sc); 1173 } 1174 1175 static int 1176 vte_ioctl(if_t ifp, u_long cmd, caddr_t data) 1177 { 1178 struct vte_softc *sc; 1179 struct ifreq *ifr; 1180 struct mii_data *mii; 1181 int error; 1182 1183 sc = if_getsoftc(ifp); 1184 ifr = (struct ifreq *)data; 1185 error = 0; 1186 switch (cmd) { 1187 case SIOCSIFFLAGS: 1188 VTE_LOCK(sc); 1189 if ((if_getflags(ifp) & IFF_UP) != 0) { 1190 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0 && 1191 ((if_getflags(ifp) ^ sc->vte_if_flags) & 1192 (IFF_PROMISC | IFF_ALLMULTI)) != 0) 1193 vte_rxfilter(sc); 1194 else 1195 vte_init_locked(sc); 1196 } else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) 1197 vte_stop(sc); 1198 sc->vte_if_flags = if_getflags(ifp); 1199 VTE_UNLOCK(sc); 1200 break; 1201 case SIOCADDMULTI: 1202 case SIOCDELMULTI: 1203 VTE_LOCK(sc); 1204 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) 1205 vte_rxfilter(sc); 1206 VTE_UNLOCK(sc); 1207 break; 1208 case SIOCSIFMEDIA: 1209 case SIOCGIFMEDIA: 1210 mii = device_get_softc(sc->vte_miibus); 1211 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 1212 break; 1213 default: 1214 error = ether_ioctl(ifp, cmd, data); 1215 break; 1216 } 1217 1218 return (error); 1219 } 1220 1221 static void 1222 vte_mac_config(struct vte_softc *sc) 1223 { 1224 struct mii_data *mii; 1225 uint16_t mcr; 1226 1227 VTE_LOCK_ASSERT(sc); 1228 1229 mii = device_get_softc(sc->vte_miibus); 1230 mcr = CSR_READ_2(sc, VTE_MCR0); 1231 mcr &= ~(MCR0_FC_ENB | MCR0_FULL_DUPLEX); 1232 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 1233 mcr |= MCR0_FULL_DUPLEX; 1234 #ifdef notyet 1235 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) 1236 mcr |= MCR0_FC_ENB; 1237 /* 1238 * The data sheet is not clear whether the controller 1239 * honors received pause frames or not. The is no 1240 * separate control bit for RX pause frame so just 1241 * enable MCR0_FC_ENB bit. 1242 */ 1243 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) 1244 mcr |= MCR0_FC_ENB; 1245 #endif 1246 } 1247 CSR_WRITE_2(sc, VTE_MCR0, mcr); 1248 } 1249 1250 static void 1251 vte_stats_clear(struct vte_softc *sc) 1252 { 1253 1254 /* Reading counter registers clears its contents. */ 1255 CSR_READ_2(sc, VTE_CNT_RX_DONE); 1256 CSR_READ_2(sc, VTE_CNT_MECNT0); 1257 CSR_READ_2(sc, VTE_CNT_MECNT1); 1258 CSR_READ_2(sc, VTE_CNT_MECNT2); 1259 CSR_READ_2(sc, VTE_CNT_MECNT3); 1260 CSR_READ_2(sc, VTE_CNT_TX_DONE); 1261 CSR_READ_2(sc, VTE_CNT_MECNT4); 1262 CSR_READ_2(sc, VTE_CNT_PAUSE); 1263 } 1264 1265 static void 1266 vte_stats_update(struct vte_softc *sc) 1267 { 1268 struct vte_hw_stats *stat; 1269 uint16_t value; 1270 1271 VTE_LOCK_ASSERT(sc); 1272 1273 stat = &sc->vte_stats; 1274 1275 CSR_READ_2(sc, VTE_MECISR); 1276 /* RX stats. */ 1277 stat->rx_frames += CSR_READ_2(sc, VTE_CNT_RX_DONE); 1278 value = CSR_READ_2(sc, VTE_CNT_MECNT0); 1279 stat->rx_bcast_frames += (value >> 8); 1280 stat->rx_mcast_frames += (value & 0xFF); 1281 value = CSR_READ_2(sc, VTE_CNT_MECNT1); 1282 stat->rx_runts += (value >> 8); 1283 stat->rx_crcerrs += (value & 0xFF); 1284 value = CSR_READ_2(sc, VTE_CNT_MECNT2); 1285 stat->rx_long_frames += (value & 0xFF); 1286 value = CSR_READ_2(sc, VTE_CNT_MECNT3); 1287 stat->rx_fifo_full += (value >> 8); 1288 stat->rx_desc_unavail += (value & 0xFF); 1289 1290 /* TX stats. */ 1291 stat->tx_frames += CSR_READ_2(sc, VTE_CNT_TX_DONE); 1292 value = CSR_READ_2(sc, VTE_CNT_MECNT4); 1293 stat->tx_underruns += (value >> 8); 1294 stat->tx_late_colls += (value & 0xFF); 1295 1296 value = CSR_READ_2(sc, VTE_CNT_PAUSE); 1297 stat->tx_pause_frames += (value >> 8); 1298 stat->rx_pause_frames += (value & 0xFF); 1299 } 1300 1301 static uint64_t 1302 vte_get_counter(if_t ifp, ift_counter cnt) 1303 { 1304 struct vte_softc *sc; 1305 struct vte_hw_stats *stat; 1306 1307 sc = if_getsoftc(ifp); 1308 stat = &sc->vte_stats; 1309 1310 switch (cnt) { 1311 case IFCOUNTER_OPACKETS: 1312 return (stat->tx_frames); 1313 case IFCOUNTER_COLLISIONS: 1314 return (stat->tx_late_colls); 1315 case IFCOUNTER_OERRORS: 1316 return (stat->tx_late_colls + stat->tx_underruns); 1317 case IFCOUNTER_IPACKETS: 1318 return (stat->rx_frames); 1319 case IFCOUNTER_IERRORS: 1320 return (stat->rx_crcerrs + stat->rx_runts + 1321 stat->rx_long_frames + stat->rx_fifo_full); 1322 default: 1323 return (if_get_counter_default(ifp, cnt)); 1324 } 1325 } 1326 1327 static void 1328 vte_intr(void *arg) 1329 { 1330 struct vte_softc *sc; 1331 if_t ifp; 1332 uint16_t status; 1333 int n; 1334 1335 sc = (struct vte_softc *)arg; 1336 VTE_LOCK(sc); 1337 1338 ifp = sc->vte_ifp; 1339 /* Reading VTE_MISR acknowledges interrupts. */ 1340 status = CSR_READ_2(sc, VTE_MISR); 1341 if ((status & VTE_INTRS) == 0) { 1342 /* Not ours. */ 1343 VTE_UNLOCK(sc); 1344 return; 1345 } 1346 1347 /* Disable interrupts. */ 1348 CSR_WRITE_2(sc, VTE_MIER, 0); 1349 for (n = 8; (status & VTE_INTRS) != 0;) { 1350 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) 1351 break; 1352 if ((status & (MISR_RX_DONE | MISR_RX_DESC_UNAVAIL | 1353 MISR_RX_FIFO_FULL)) != 0) 1354 vte_rxeof(sc); 1355 if ((status & MISR_TX_DONE) != 0) 1356 vte_txeof(sc); 1357 if ((status & MISR_EVENT_CNT_OFLOW) != 0) 1358 vte_stats_update(sc); 1359 if (!if_sendq_empty(ifp)) 1360 vte_start_locked(sc); 1361 if (--n > 0) 1362 status = CSR_READ_2(sc, VTE_MISR); 1363 else 1364 break; 1365 } 1366 1367 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { 1368 /* Re-enable interrupts. */ 1369 CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS); 1370 } 1371 VTE_UNLOCK(sc); 1372 } 1373 1374 static void 1375 vte_txeof(struct vte_softc *sc) 1376 { 1377 if_t ifp; 1378 struct vte_txdesc *txd; 1379 uint16_t status; 1380 int cons, prog; 1381 1382 VTE_LOCK_ASSERT(sc); 1383 1384 ifp = sc->vte_ifp; 1385 1386 if (sc->vte_cdata.vte_tx_cnt == 0) 1387 return; 1388 bus_dmamap_sync(sc->vte_cdata.vte_tx_ring_tag, 1389 sc->vte_cdata.vte_tx_ring_map, BUS_DMASYNC_POSTREAD | 1390 BUS_DMASYNC_POSTWRITE); 1391 cons = sc->vte_cdata.vte_tx_cons; 1392 /* 1393 * Go through our TX list and free mbufs for those 1394 * frames which have been transmitted. 1395 */ 1396 for (prog = 0; sc->vte_cdata.vte_tx_cnt > 0; prog++) { 1397 txd = &sc->vte_cdata.vte_txdesc[cons]; 1398 status = le16toh(txd->tx_desc->dtst); 1399 if ((status & VTE_DTST_TX_OWN) != 0) 1400 break; 1401 sc->vte_cdata.vte_tx_cnt--; 1402 /* Reclaim transmitted mbufs. */ 1403 bus_dmamap_sync(sc->vte_cdata.vte_tx_tag, txd->tx_dmamap, 1404 BUS_DMASYNC_POSTWRITE); 1405 bus_dmamap_unload(sc->vte_cdata.vte_tx_tag, txd->tx_dmamap); 1406 if ((txd->tx_flags & VTE_TXMBUF) == 0) 1407 m_freem(txd->tx_m); 1408 txd->tx_flags &= ~VTE_TXMBUF; 1409 txd->tx_m = NULL; 1410 prog++; 1411 VTE_DESC_INC(cons, VTE_TX_RING_CNT); 1412 } 1413 1414 if (prog > 0) { 1415 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); 1416 sc->vte_cdata.vte_tx_cons = cons; 1417 /* 1418 * Unarm watchdog timer only when there is no pending 1419 * frames in TX queue. 1420 */ 1421 if (sc->vte_cdata.vte_tx_cnt == 0) 1422 sc->vte_watchdog_timer = 0; 1423 } 1424 } 1425 1426 static int 1427 vte_newbuf(struct vte_softc *sc, struct vte_rxdesc *rxd) 1428 { 1429 struct mbuf *m; 1430 bus_dma_segment_t segs[1]; 1431 bus_dmamap_t map; 1432 int nsegs; 1433 1434 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 1435 if (m == NULL) 1436 return (ENOBUFS); 1437 m->m_len = m->m_pkthdr.len = MCLBYTES; 1438 m_adj(m, sizeof(uint32_t)); 1439 1440 if (bus_dmamap_load_mbuf_sg(sc->vte_cdata.vte_rx_tag, 1441 sc->vte_cdata.vte_rx_sparemap, m, segs, &nsegs, 0) != 0) { 1442 m_freem(m); 1443 return (ENOBUFS); 1444 } 1445 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1446 1447 if (rxd->rx_m != NULL) { 1448 bus_dmamap_sync(sc->vte_cdata.vte_rx_tag, rxd->rx_dmamap, 1449 BUS_DMASYNC_POSTREAD); 1450 bus_dmamap_unload(sc->vte_cdata.vte_rx_tag, rxd->rx_dmamap); 1451 } 1452 map = rxd->rx_dmamap; 1453 rxd->rx_dmamap = sc->vte_cdata.vte_rx_sparemap; 1454 sc->vte_cdata.vte_rx_sparemap = map; 1455 bus_dmamap_sync(sc->vte_cdata.vte_rx_tag, rxd->rx_dmamap, 1456 BUS_DMASYNC_PREREAD); 1457 rxd->rx_m = m; 1458 rxd->rx_desc->drbp = htole32(segs[0].ds_addr); 1459 rxd->rx_desc->drlen = htole16(VTE_RX_LEN(segs[0].ds_len)); 1460 rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN); 1461 1462 return (0); 1463 } 1464 1465 /* 1466 * It's not supposed to see this controller on strict-alignment 1467 * architectures but make it work for completeness. 1468 */ 1469 #ifndef __NO_STRICT_ALIGNMENT 1470 static struct mbuf * 1471 vte_fixup_rx(if_t ifp, struct mbuf *m) 1472 { 1473 uint16_t *src, *dst; 1474 int i; 1475 1476 src = mtod(m, uint16_t *); 1477 dst = src - 1; 1478 1479 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) 1480 *dst++ = *src++; 1481 m->m_data -= ETHER_ALIGN; 1482 return (m); 1483 } 1484 #endif 1485 1486 static void 1487 vte_rxeof(struct vte_softc *sc) 1488 { 1489 if_t ifp; 1490 struct vte_rxdesc *rxd; 1491 struct mbuf *m; 1492 uint16_t status, total_len; 1493 int cons, prog; 1494 1495 bus_dmamap_sync(sc->vte_cdata.vte_rx_ring_tag, 1496 sc->vte_cdata.vte_rx_ring_map, BUS_DMASYNC_POSTREAD | 1497 BUS_DMASYNC_POSTWRITE); 1498 cons = sc->vte_cdata.vte_rx_cons; 1499 ifp = sc->vte_ifp; 1500 for (prog = 0; (if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0; prog++, 1501 VTE_DESC_INC(cons, VTE_RX_RING_CNT)) { 1502 rxd = &sc->vte_cdata.vte_rxdesc[cons]; 1503 status = le16toh(rxd->rx_desc->drst); 1504 if ((status & VTE_DRST_RX_OWN) != 0) 1505 break; 1506 total_len = VTE_RX_LEN(le16toh(rxd->rx_desc->drlen)); 1507 m = rxd->rx_m; 1508 if ((status & VTE_DRST_RX_OK) == 0) { 1509 /* Discard errored frame. */ 1510 rxd->rx_desc->drlen = 1511 htole16(MCLBYTES - sizeof(uint32_t)); 1512 rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN); 1513 continue; 1514 } 1515 if (vte_newbuf(sc, rxd) != 0) { 1516 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 1517 rxd->rx_desc->drlen = 1518 htole16(MCLBYTES - sizeof(uint32_t)); 1519 rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN); 1520 continue; 1521 } 1522 1523 /* 1524 * It seems there is no way to strip FCS bytes. 1525 */ 1526 m->m_pkthdr.len = m->m_len = total_len - ETHER_CRC_LEN; 1527 m->m_pkthdr.rcvif = ifp; 1528 #ifndef __NO_STRICT_ALIGNMENT 1529 vte_fixup_rx(ifp, m); 1530 #endif 1531 VTE_UNLOCK(sc); 1532 if_input(ifp, m); 1533 VTE_LOCK(sc); 1534 } 1535 1536 if (prog > 0) { 1537 /* Update the consumer index. */ 1538 sc->vte_cdata.vte_rx_cons = cons; 1539 /* 1540 * Sync updated RX descriptors such that controller see 1541 * modified RX buffer addresses. 1542 */ 1543 bus_dmamap_sync(sc->vte_cdata.vte_rx_ring_tag, 1544 sc->vte_cdata.vte_rx_ring_map, 1545 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1546 #ifdef notyet 1547 /* 1548 * Update residue counter. Controller does not 1549 * keep track of number of available RX descriptors 1550 * such that driver should have to update VTE_MRDCR 1551 * to make controller know how many free RX 1552 * descriptors were added to controller. This is 1553 * a similar mechanism used in VIA velocity 1554 * controllers and it indicates controller just 1555 * polls OWN bit of current RX descriptor pointer. 1556 * A couple of severe issues were seen on sample 1557 * board where the controller continuously emits TX 1558 * pause frames once RX pause threshold crossed. 1559 * Once triggered it never recovered form that 1560 * state, I couldn't find a way to make it back to 1561 * work at least. This issue effectively 1562 * disconnected the system from network. Also, the 1563 * controller used 00:00:00:00:00:00 as source 1564 * station address of TX pause frame. Probably this 1565 * is one of reason why vendor recommends not to 1566 * enable flow control on R6040 controller. 1567 */ 1568 CSR_WRITE_2(sc, VTE_MRDCR, prog | 1569 (((VTE_RX_RING_CNT * 2) / 10) << 1570 VTE_MRDCR_RX_PAUSE_THRESH_SHIFT)); 1571 #endif 1572 } 1573 } 1574 1575 static void 1576 vte_tick(void *arg) 1577 { 1578 struct vte_softc *sc; 1579 struct mii_data *mii; 1580 1581 sc = (struct vte_softc *)arg; 1582 1583 VTE_LOCK_ASSERT(sc); 1584 1585 mii = device_get_softc(sc->vte_miibus); 1586 mii_tick(mii); 1587 vte_stats_update(sc); 1588 vte_txeof(sc); 1589 vte_watchdog(sc); 1590 callout_reset(&sc->vte_tick_ch, hz, vte_tick, sc); 1591 } 1592 1593 static void 1594 vte_reset(struct vte_softc *sc) 1595 { 1596 uint16_t mcr, mdcsc; 1597 int i; 1598 1599 mdcsc = CSR_READ_2(sc, VTE_MDCSC); 1600 mcr = CSR_READ_2(sc, VTE_MCR1); 1601 CSR_WRITE_2(sc, VTE_MCR1, mcr | MCR1_MAC_RESET); 1602 for (i = VTE_RESET_TIMEOUT; i > 0; i--) { 1603 DELAY(10); 1604 if ((CSR_READ_2(sc, VTE_MCR1) & MCR1_MAC_RESET) == 0) 1605 break; 1606 } 1607 if (i == 0) 1608 device_printf(sc->vte_dev, "reset timeout(0x%04x)!\n", mcr); 1609 /* 1610 * Follow the guide of vendor recommended way to reset MAC. 1611 * Vendor confirms relying on MCR1_MAC_RESET of VTE_MCR1 is 1612 * not reliable so manually reset internal state machine. 1613 */ 1614 CSR_WRITE_2(sc, VTE_MACSM, 0x0002); 1615 CSR_WRITE_2(sc, VTE_MACSM, 0); 1616 DELAY(5000); 1617 1618 /* 1619 * On some SoCs (like Vortex86DX3) MDC speed control register value 1620 * needs to be restored to original value instead of default one, 1621 * otherwise some PHY registers may fail to be read. 1622 */ 1623 if (mdcsc != MDCSC_DEFAULT) 1624 CSR_WRITE_2(sc, VTE_MDCSC, mdcsc); 1625 } 1626 1627 static void 1628 vte_init(void *xsc) 1629 { 1630 struct vte_softc *sc; 1631 1632 sc = (struct vte_softc *)xsc; 1633 VTE_LOCK(sc); 1634 vte_init_locked(sc); 1635 VTE_UNLOCK(sc); 1636 } 1637 1638 static void 1639 vte_init_locked(struct vte_softc *sc) 1640 { 1641 if_t ifp; 1642 bus_addr_t paddr; 1643 uint8_t *eaddr; 1644 1645 VTE_LOCK_ASSERT(sc); 1646 1647 ifp = sc->vte_ifp; 1648 1649 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) 1650 return; 1651 /* 1652 * Cancel any pending I/O. 1653 */ 1654 vte_stop(sc); 1655 /* 1656 * Reset the chip to a known state. 1657 */ 1658 vte_reset(sc); 1659 1660 /* Initialize RX descriptors. */ 1661 if (vte_init_rx_ring(sc) != 0) { 1662 device_printf(sc->vte_dev, "no memory for RX buffers.\n"); 1663 vte_stop(sc); 1664 return; 1665 } 1666 if (vte_init_tx_ring(sc) != 0) { 1667 device_printf(sc->vte_dev, "no memory for TX buffers.\n"); 1668 vte_stop(sc); 1669 return; 1670 } 1671 1672 /* 1673 * Reprogram the station address. Controller supports up 1674 * to 4 different station addresses so driver programs the 1675 * first station address as its own ethernet address and 1676 * configure the remaining three addresses as perfect 1677 * multicast addresses. 1678 */ 1679 eaddr = if_getlladdr(sc->vte_ifp); 1680 CSR_WRITE_2(sc, VTE_MID0L, eaddr[1] << 8 | eaddr[0]); 1681 CSR_WRITE_2(sc, VTE_MID0M, eaddr[3] << 8 | eaddr[2]); 1682 CSR_WRITE_2(sc, VTE_MID0H, eaddr[5] << 8 | eaddr[4]); 1683 1684 /* Set TX descriptor base addresses. */ 1685 paddr = sc->vte_cdata.vte_tx_ring_paddr; 1686 CSR_WRITE_2(sc, VTE_MTDSA1, paddr >> 16); 1687 CSR_WRITE_2(sc, VTE_MTDSA0, paddr & 0xFFFF); 1688 /* Set RX descriptor base addresses. */ 1689 paddr = sc->vte_cdata.vte_rx_ring_paddr; 1690 CSR_WRITE_2(sc, VTE_MRDSA1, paddr >> 16); 1691 CSR_WRITE_2(sc, VTE_MRDSA0, paddr & 0xFFFF); 1692 /* 1693 * Initialize RX descriptor residue counter and set RX 1694 * pause threshold to 20% of available RX descriptors. 1695 * See comments on vte_rxeof() for details on flow control 1696 * issues. 1697 */ 1698 CSR_WRITE_2(sc, VTE_MRDCR, (VTE_RX_RING_CNT & VTE_MRDCR_RESIDUE_MASK) | 1699 (((VTE_RX_RING_CNT * 2) / 10) << VTE_MRDCR_RX_PAUSE_THRESH_SHIFT)); 1700 1701 /* 1702 * Always use maximum frame size that controller can 1703 * support. Otherwise received frames that has longer 1704 * frame length than vte(4) MTU would be silently dropped 1705 * in controller. This would break path-MTU discovery as 1706 * sender wouldn't get any responses from receiver. The 1707 * RX buffer size should be multiple of 4. 1708 * Note, jumbo frames are silently ignored by controller 1709 * and even MAC counters do not detect them. 1710 */ 1711 CSR_WRITE_2(sc, VTE_MRBSR, VTE_RX_BUF_SIZE_MAX); 1712 1713 /* Configure FIFO. */ 1714 CSR_WRITE_2(sc, VTE_MBCR, MBCR_FIFO_XFER_LENGTH_16 | 1715 MBCR_TX_FIFO_THRESH_64 | MBCR_RX_FIFO_THRESH_16 | 1716 MBCR_SDRAM_BUS_REQ_TIMER_DEFAULT); 1717 1718 /* 1719 * Configure TX/RX MACs. Actual resolved duplex and flow 1720 * control configuration is done after detecting a valid 1721 * link. Note, we don't generate early interrupt here 1722 * as well since FreeBSD does not have interrupt latency 1723 * problems like Windows. 1724 */ 1725 CSR_WRITE_2(sc, VTE_MCR0, MCR0_ACCPT_LONG_PKT); 1726 /* 1727 * We manually keep track of PHY status changes to 1728 * configure resolved duplex and flow control since only 1729 * duplex configuration can be automatically reflected to 1730 * MCR0. 1731 */ 1732 CSR_WRITE_2(sc, VTE_MCR1, MCR1_PKT_LENGTH_1537 | 1733 MCR1_EXCESS_COL_RETRY_16); 1734 1735 /* Initialize RX filter. */ 1736 vte_rxfilter(sc); 1737 1738 /* Disable TX/RX interrupt moderation control. */ 1739 CSR_WRITE_2(sc, VTE_MRICR, 0); 1740 CSR_WRITE_2(sc, VTE_MTICR, 0); 1741 1742 /* Enable MAC event counter interrupts. */ 1743 CSR_WRITE_2(sc, VTE_MECIER, VTE_MECIER_INTRS); 1744 /* Clear MAC statistics. */ 1745 vte_stats_clear(sc); 1746 1747 /* Acknowledge all pending interrupts and clear it. */ 1748 CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS); 1749 CSR_WRITE_2(sc, VTE_MISR, 0); 1750 1751 sc->vte_flags &= ~VTE_FLAG_LINK; 1752 /* Switch to the current media. */ 1753 vte_mediachange_locked(ifp); 1754 1755 callout_reset(&sc->vte_tick_ch, hz, vte_tick, sc); 1756 1757 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); 1758 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); 1759 } 1760 1761 static void 1762 vte_stop(struct vte_softc *sc) 1763 { 1764 if_t ifp; 1765 struct vte_txdesc *txd; 1766 struct vte_rxdesc *rxd; 1767 int i; 1768 1769 VTE_LOCK_ASSERT(sc); 1770 /* 1771 * Mark the interface down and cancel the watchdog timer. 1772 */ 1773 ifp = sc->vte_ifp; 1774 if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)); 1775 sc->vte_flags &= ~VTE_FLAG_LINK; 1776 callout_stop(&sc->vte_tick_ch); 1777 sc->vte_watchdog_timer = 0; 1778 vte_stats_update(sc); 1779 /* Disable interrupts. */ 1780 CSR_WRITE_2(sc, VTE_MIER, 0); 1781 CSR_WRITE_2(sc, VTE_MECIER, 0); 1782 /* Stop RX/TX MACs. */ 1783 vte_stop_mac(sc); 1784 /* Clear interrupts. */ 1785 CSR_READ_2(sc, VTE_MISR); 1786 /* 1787 * Free TX/RX mbufs still in the queues. 1788 */ 1789 for (i = 0; i < VTE_RX_RING_CNT; i++) { 1790 rxd = &sc->vte_cdata.vte_rxdesc[i]; 1791 if (rxd->rx_m != NULL) { 1792 bus_dmamap_sync(sc->vte_cdata.vte_rx_tag, 1793 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 1794 bus_dmamap_unload(sc->vte_cdata.vte_rx_tag, 1795 rxd->rx_dmamap); 1796 m_freem(rxd->rx_m); 1797 rxd->rx_m = NULL; 1798 } 1799 } 1800 for (i = 0; i < VTE_TX_RING_CNT; i++) { 1801 txd = &sc->vte_cdata.vte_txdesc[i]; 1802 if (txd->tx_m != NULL) { 1803 bus_dmamap_sync(sc->vte_cdata.vte_tx_tag, 1804 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 1805 bus_dmamap_unload(sc->vte_cdata.vte_tx_tag, 1806 txd->tx_dmamap); 1807 if ((txd->tx_flags & VTE_TXMBUF) == 0) 1808 m_freem(txd->tx_m); 1809 txd->tx_m = NULL; 1810 txd->tx_flags &= ~VTE_TXMBUF; 1811 } 1812 } 1813 /* Free TX mbuf pools used for deep copy. */ 1814 for (i = 0; i < VTE_TX_RING_CNT; i++) { 1815 if (sc->vte_cdata.vte_txmbufs[i] != NULL) { 1816 m_freem(sc->vte_cdata.vte_txmbufs[i]); 1817 sc->vte_cdata.vte_txmbufs[i] = NULL; 1818 } 1819 } 1820 } 1821 1822 static void 1823 vte_start_mac(struct vte_softc *sc) 1824 { 1825 uint16_t mcr; 1826 int i; 1827 1828 VTE_LOCK_ASSERT(sc); 1829 1830 /* Enable RX/TX MACs. */ 1831 mcr = CSR_READ_2(sc, VTE_MCR0); 1832 if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) != 1833 (MCR0_RX_ENB | MCR0_TX_ENB)) { 1834 mcr |= MCR0_RX_ENB | MCR0_TX_ENB; 1835 CSR_WRITE_2(sc, VTE_MCR0, mcr); 1836 for (i = VTE_TIMEOUT; i > 0; i--) { 1837 mcr = CSR_READ_2(sc, VTE_MCR0); 1838 if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) == 1839 (MCR0_RX_ENB | MCR0_TX_ENB)) 1840 break; 1841 DELAY(10); 1842 } 1843 if (i == 0) 1844 device_printf(sc->vte_dev, 1845 "could not enable RX/TX MAC(0x%04x)!\n", mcr); 1846 } 1847 } 1848 1849 static void 1850 vte_stop_mac(struct vte_softc *sc) 1851 { 1852 uint16_t mcr; 1853 int i; 1854 1855 VTE_LOCK_ASSERT(sc); 1856 1857 /* Disable RX/TX MACs. */ 1858 mcr = CSR_READ_2(sc, VTE_MCR0); 1859 if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) != 0) { 1860 mcr &= ~(MCR0_RX_ENB | MCR0_TX_ENB); 1861 CSR_WRITE_2(sc, VTE_MCR0, mcr); 1862 for (i = VTE_TIMEOUT; i > 0; i--) { 1863 mcr = CSR_READ_2(sc, VTE_MCR0); 1864 if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) == 0) 1865 break; 1866 DELAY(10); 1867 } 1868 if (i == 0) 1869 device_printf(sc->vte_dev, 1870 "could not disable RX/TX MAC(0x%04x)!\n", mcr); 1871 } 1872 } 1873 1874 static int 1875 vte_init_tx_ring(struct vte_softc *sc) 1876 { 1877 struct vte_tx_desc *desc; 1878 struct vte_txdesc *txd; 1879 bus_addr_t addr; 1880 int i; 1881 1882 VTE_LOCK_ASSERT(sc); 1883 1884 sc->vte_cdata.vte_tx_prod = 0; 1885 sc->vte_cdata.vte_tx_cons = 0; 1886 sc->vte_cdata.vte_tx_cnt = 0; 1887 1888 /* Pre-allocate TX mbufs for deep copy. */ 1889 if (tx_deep_copy != 0) { 1890 for (i = 0; i < VTE_TX_RING_CNT; i++) { 1891 sc->vte_cdata.vte_txmbufs[i] = m_getcl(M_NOWAIT, 1892 MT_DATA, M_PKTHDR); 1893 if (sc->vte_cdata.vte_txmbufs[i] == NULL) 1894 return (ENOBUFS); 1895 sc->vte_cdata.vte_txmbufs[i]->m_pkthdr.len = MCLBYTES; 1896 sc->vte_cdata.vte_txmbufs[i]->m_len = MCLBYTES; 1897 } 1898 } 1899 desc = sc->vte_cdata.vte_tx_ring; 1900 bzero(desc, VTE_TX_RING_SZ); 1901 for (i = 0; i < VTE_TX_RING_CNT; i++) { 1902 txd = &sc->vte_cdata.vte_txdesc[i]; 1903 txd->tx_m = NULL; 1904 if (i != VTE_TX_RING_CNT - 1) 1905 addr = sc->vte_cdata.vte_tx_ring_paddr + 1906 sizeof(struct vte_tx_desc) * (i + 1); 1907 else 1908 addr = sc->vte_cdata.vte_tx_ring_paddr + 1909 sizeof(struct vte_tx_desc) * 0; 1910 desc = &sc->vte_cdata.vte_tx_ring[i]; 1911 desc->dtnp = htole32(addr); 1912 txd->tx_desc = desc; 1913 } 1914 1915 bus_dmamap_sync(sc->vte_cdata.vte_tx_ring_tag, 1916 sc->vte_cdata.vte_tx_ring_map, BUS_DMASYNC_PREREAD | 1917 BUS_DMASYNC_PREWRITE); 1918 return (0); 1919 } 1920 1921 static int 1922 vte_init_rx_ring(struct vte_softc *sc) 1923 { 1924 struct vte_rx_desc *desc; 1925 struct vte_rxdesc *rxd; 1926 bus_addr_t addr; 1927 int i; 1928 1929 VTE_LOCK_ASSERT(sc); 1930 1931 sc->vte_cdata.vte_rx_cons = 0; 1932 desc = sc->vte_cdata.vte_rx_ring; 1933 bzero(desc, VTE_RX_RING_SZ); 1934 for (i = 0; i < VTE_RX_RING_CNT; i++) { 1935 rxd = &sc->vte_cdata.vte_rxdesc[i]; 1936 rxd->rx_m = NULL; 1937 if (i != VTE_RX_RING_CNT - 1) 1938 addr = sc->vte_cdata.vte_rx_ring_paddr + 1939 sizeof(struct vte_rx_desc) * (i + 1); 1940 else 1941 addr = sc->vte_cdata.vte_rx_ring_paddr + 1942 sizeof(struct vte_rx_desc) * 0; 1943 desc = &sc->vte_cdata.vte_rx_ring[i]; 1944 desc->drnp = htole32(addr); 1945 rxd->rx_desc = desc; 1946 if (vte_newbuf(sc, rxd) != 0) 1947 return (ENOBUFS); 1948 } 1949 1950 bus_dmamap_sync(sc->vte_cdata.vte_rx_ring_tag, 1951 sc->vte_cdata.vte_rx_ring_map, BUS_DMASYNC_PREREAD | 1952 BUS_DMASYNC_PREWRITE); 1953 1954 return (0); 1955 } 1956 1957 struct vte_maddr_ctx { 1958 uint16_t rxfilt_perf[VTE_RXFILT_PERFECT_CNT][3]; 1959 uint16_t mchash[4]; 1960 u_int nperf; 1961 }; 1962 1963 static u_int 1964 vte_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) 1965 { 1966 struct vte_maddr_ctx *ctx = arg; 1967 uint8_t *eaddr; 1968 uint32_t crc; 1969 1970 /* 1971 * Program the first 3 multicast groups into the perfect filter. 1972 * For all others, use the hash table. 1973 */ 1974 if (ctx->nperf < VTE_RXFILT_PERFECT_CNT) { 1975 eaddr = LLADDR(sdl); 1976 ctx->rxfilt_perf[ctx->nperf][0] = eaddr[1] << 8 | eaddr[0]; 1977 ctx->rxfilt_perf[ctx->nperf][1] = eaddr[3] << 8 | eaddr[2]; 1978 ctx->rxfilt_perf[ctx->nperf][2] = eaddr[5] << 8 | eaddr[4]; 1979 ctx->nperf++; 1980 1981 return (1); 1982 } 1983 crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN); 1984 ctx->mchash[crc >> 30] |= 1 << ((crc >> 26) & 0x0F); 1985 1986 return (1); 1987 } 1988 1989 static void 1990 vte_rxfilter(struct vte_softc *sc) 1991 { 1992 if_t ifp; 1993 struct vte_maddr_ctx ctx; 1994 uint16_t mcr; 1995 int i; 1996 1997 VTE_LOCK_ASSERT(sc); 1998 1999 ifp = sc->vte_ifp; 2000 2001 bzero(ctx.mchash, sizeof(ctx.mchash)); 2002 for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) { 2003 ctx.rxfilt_perf[i][0] = 0xFFFF; 2004 ctx.rxfilt_perf[i][1] = 0xFFFF; 2005 ctx.rxfilt_perf[i][2] = 0xFFFF; 2006 } 2007 ctx.nperf = 0; 2008 2009 mcr = CSR_READ_2(sc, VTE_MCR0); 2010 mcr &= ~(MCR0_PROMISC | MCR0_MULTICAST); 2011 mcr |= MCR0_BROADCAST_DIS; 2012 if ((if_getflags(ifp) & IFF_BROADCAST) != 0) 2013 mcr &= ~MCR0_BROADCAST_DIS; 2014 if ((if_getflags(ifp) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { 2015 if ((if_getflags(ifp) & IFF_PROMISC) != 0) 2016 mcr |= MCR0_PROMISC; 2017 if ((if_getflags(ifp) & IFF_ALLMULTI) != 0) 2018 mcr |= MCR0_MULTICAST; 2019 ctx.mchash[0] = 0xFFFF; 2020 ctx.mchash[1] = 0xFFFF; 2021 ctx.mchash[2] = 0xFFFF; 2022 ctx.mchash[3] = 0xFFFF; 2023 goto chipit; 2024 } 2025 2026 if_foreach_llmaddr(ifp, vte_hash_maddr, &ctx); 2027 if (ctx.mchash[0] != 0 || ctx.mchash[1] != 0 || 2028 ctx.mchash[2] != 0 || ctx.mchash[3] != 0) 2029 mcr |= MCR0_MULTICAST; 2030 2031 chipit: 2032 /* Program multicast hash table. */ 2033 CSR_WRITE_2(sc, VTE_MAR0, ctx.mchash[0]); 2034 CSR_WRITE_2(sc, VTE_MAR1, ctx.mchash[1]); 2035 CSR_WRITE_2(sc, VTE_MAR2, ctx.mchash[2]); 2036 CSR_WRITE_2(sc, VTE_MAR3, ctx.mchash[3]); 2037 /* Program perfect filter table. */ 2038 for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) { 2039 CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 0, 2040 ctx.rxfilt_perf[i][0]); 2041 CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 2, 2042 ctx.rxfilt_perf[i][1]); 2043 CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 4, 2044 ctx.rxfilt_perf[i][2]); 2045 } 2046 CSR_WRITE_2(sc, VTE_MCR0, mcr); 2047 CSR_READ_2(sc, VTE_MCR0); 2048 } 2049 2050 static int 2051 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) 2052 { 2053 int error, value; 2054 2055 if (arg1 == NULL) 2056 return (EINVAL); 2057 value = *(int *)arg1; 2058 error = sysctl_handle_int(oidp, &value, 0, req); 2059 if (error || req->newptr == NULL) 2060 return (error); 2061 if (value < low || value > high) 2062 return (EINVAL); 2063 *(int *)arg1 = value; 2064 2065 return (0); 2066 } 2067 2068 static int 2069 sysctl_hw_vte_int_mod(SYSCTL_HANDLER_ARGS) 2070 { 2071 2072 return (sysctl_int_range(oidp, arg1, arg2, req, 2073 VTE_IM_BUNDLE_MIN, VTE_IM_BUNDLE_MAX)); 2074 } 2075