/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2010, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for DM&P Electronics, Inc, Vortex86 RDC R6040 FastEthernet. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_llc.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>

#include <dev/vte/if_vtereg.h>
#include <dev/vte/if_vtevar.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

MODULE_DEPEND(vte, pci, 1, 1, 1);
MODULE_DEPEND(vte, ether, 1, 1, 1);
MODULE_DEPEND(vte, miibus, 1, 1, 1);

/* Tunables. */
static int tx_deep_copy = 1;
TUNABLE_INT("hw.vte.tx_deep_copy", &tx_deep_copy);
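/*
 * Note: as a loader tunable this is read once at module load time;
 * e.g. setting hw.vte.tx_deep_copy="0" in loader.conf(5) disables the
 * deep-copy TX path described in vte_encap().
 */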
/*
 * Devices supported by this driver.
 */
static const struct vte_ident vte_ident_table[] = {
	{ VENDORID_RDC, DEVICEID_RDC_R6040, "RDC R6040 FastEthernet"},
	{ 0, 0, NULL}
};

static int	vte_attach(device_t);
static int	vte_detach(device_t);
static int	vte_dma_alloc(struct vte_softc *);
static void	vte_dma_free(struct vte_softc *);
static void	vte_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static struct vte_txdesc *
		vte_encap(struct vte_softc *, struct mbuf **);
static const struct vte_ident *
		vte_find_ident(device_t);
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf *
		vte_fixup_rx(struct ifnet *, struct mbuf *);
#endif
static void	vte_get_macaddr(struct vte_softc *);
static void	vte_init(void *);
static void	vte_init_locked(struct vte_softc *);
static int	vte_init_rx_ring(struct vte_softc *);
static int	vte_init_tx_ring(struct vte_softc *);
static void	vte_intr(void *);
static int	vte_ioctl(struct ifnet *, u_long, caddr_t);
static uint64_t	vte_get_counter(struct ifnet *, ift_counter);
static void	vte_mac_config(struct vte_softc *);
static int	vte_miibus_readreg(device_t, int, int);
static void	vte_miibus_statchg(device_t);
static int	vte_miibus_writereg(device_t, int, int, int);
static int	vte_mediachange(struct ifnet *);
static int	vte_mediachange_locked(struct ifnet *);
static void	vte_mediastatus(struct ifnet *, struct ifmediareq *);
static int	vte_newbuf(struct vte_softc *, struct vte_rxdesc *);
static int	vte_probe(device_t);
static void	vte_reset(struct vte_softc *);
static int	vte_resume(device_t);
static void	vte_rxeof(struct vte_softc *);
static void	vte_rxfilter(struct vte_softc *);
static int	vte_shutdown(device_t);
static void	vte_start(struct ifnet *);
static void	vte_start_locked(struct vte_softc *);
static void	vte_start_mac(struct vte_softc *);
static void	vte_stats_clear(struct vte_softc *);
static void	vte_stats_update(struct vte_softc *);
static void	vte_stop(struct vte_softc *);
static void	vte_stop_mac(struct vte_softc *);
static int	vte_suspend(device_t);
static void	vte_sysctl_node(struct vte_softc *);
static void	vte_tick(void *);
static void	vte_txeof(struct vte_softc *);
static void	vte_watchdog(struct vte_softc *);
static int	sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int	sysctl_hw_vte_int_mod(SYSCTL_HANDLER_ARGS);

static device_method_t vte_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		vte_probe),
	DEVMETHOD(device_attach,	vte_attach),
	DEVMETHOD(device_detach,	vte_detach),
	DEVMETHOD(device_shutdown,	vte_shutdown),
	DEVMETHOD(device_suspend,	vte_suspend),
	DEVMETHOD(device_resume,	vte_resume),

	/* MII interface. */
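	/*
	 * These methods are what mii(4) uses to drive the PHY: bus
	 * register access goes through vte_miibus_readreg()/
	 * vte_miibus_writereg(), and vte_miibus_statchg() is called
	 * back when the resolved media status changes.
	 */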
	DEVMETHOD(miibus_readreg,	vte_miibus_readreg),
	DEVMETHOD(miibus_writereg,	vte_miibus_writereg),
	DEVMETHOD(miibus_statchg,	vte_miibus_statchg),

	DEVMETHOD_END
};

static driver_t vte_driver = {
	"vte",
	vte_methods,
	sizeof(struct vte_softc)
};

DRIVER_MODULE(vte, pci, vte_driver, 0, 0);
DRIVER_MODULE(miibus, vte, miibus_driver, 0, 0);

static int
vte_miibus_readreg(device_t dev, int phy, int reg)
{
	struct vte_softc *sc;
	int i;

	sc = device_get_softc(dev);

	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_READ |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_READ) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->vte_dev, "phy read timeout : %d\n", reg);
		return (0);
	}

	return (CSR_READ_2(sc, VTE_MMRD));
}

static int
vte_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct vte_softc *sc;
	int i;

	sc = device_get_softc(dev);

	CSR_WRITE_2(sc, VTE_MMWD, val);
	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_WRITE |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_WRITE) == 0)
			break;
	}

	if (i == 0)
		device_printf(sc->vte_dev, "phy write timeout : %d\n", reg);

	return (0);
}

static void
vte_miibus_statchg(device_t dev)
{
	struct vte_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint16_t val;

	sc = device_get_softc(dev);

	mii = device_get_softc(sc->vte_miibus);
	ifp = sc->vte_ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	sc->vte_flags &= ~VTE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->vte_flags |= VTE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Program MACs with resolved duplex and flow control. */
	if ((sc->vte_flags & VTE_FLAG_LINK) != 0) {
		/*
		 * Timer waiting time : (63 + TIMER * 64) MII clock.
		 * MII clock : 25MHz(100Mbps) or 2.5MHz(10Mbps).
		 */
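		/*
		 * Worked example (illustrative arithmetic only): with
		 * TIMER = 18 at 100Mbps the wait is (63 + 18 * 64) /
		 * 25MHz = 1215 / 25MHz = 48.6us; with TIMER = 1 at
		 * 10Mbps it is (63 + 1 * 64) / 2.5MHz = 50.8us, which
		 * matches the per-speed comments below.
		 */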
		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		val |= sc->vte_int_rx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
		CSR_WRITE_2(sc, VTE_MRICR, val);

		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		val |= sc->vte_int_tx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
		CSR_WRITE_2(sc, VTE_MTICR, val);

		vte_mac_config(sc);
		vte_start_mac(sc);
	}
}

static void
vte_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vte_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	VTE_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) == 0) {
		VTE_UNLOCK(sc);
		return;
	}
	mii = device_get_softc(sc->vte_miibus);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
	VTE_UNLOCK(sc);
}

static int
vte_mediachange(struct ifnet *ifp)
{
	struct vte_softc *sc;
	int error;

	sc = ifp->if_softc;
	VTE_LOCK(sc);
	error = vte_mediachange_locked(ifp);
	VTE_UNLOCK(sc);
	return (error);
}

static int
vte_mediachange_locked(struct ifnet *ifp)
{
	struct vte_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->vte_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	error = mii_mediachg(mii);

	return (error);
}

static const struct vte_ident *
vte_find_ident(device_t dev)
{
	const struct vte_ident *ident;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	for (ident = vte_ident_table; ident->name != NULL; ident++) {
		if (vendor == ident->vendorid && devid == ident->deviceid)
			return (ident);
	}

	return (NULL);
}

static int
vte_probe(device_t dev)
{
	const struct vte_ident *ident;

	ident = vte_find_ident(dev);
	if (ident != NULL) {
		device_set_desc(dev, ident->name);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

static void
vte_get_macaddr(struct vte_softc *sc)
{
	uint16_t mid;

	/*
	 * It seems there is no way to reload the station address; it
	 * is supposed to be set by the BIOS.
	 */
	mid = CSR_READ_2(sc, VTE_MID0L);
	sc->vte_eaddr[0] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[1] = (mid >> 8) & 0xFF;
	mid = CSR_READ_2(sc, VTE_MID0M);
	sc->vte_eaddr[2] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[3] = (mid >> 8) & 0xFF;
	mid = CSR_READ_2(sc, VTE_MID0H);
	sc->vte_eaddr[4] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[5] = (mid >> 8) & 0xFF;
}

static int
vte_attach(device_t dev)
{
	struct vte_softc *sc;
	struct ifnet *ifp;
	uint16_t macid;
	int error, rid;

	error = 0;
	sc = device_get_softc(dev);
	sc->vte_dev = dev;

	mtx_init(&sc->vte_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->vte_tick_ch, &sc->vte_mtx, 0);
	sc->vte_ident = vte_find_ident(dev);

	/* Map the device. */
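	/*
	 * Prefer the memory BAR (PCIR_BAR(1)); fall back to I/O port
	 * space (PCIR_BAR(0)) if the memory resource cannot be
	 * allocated.
	 */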
	pci_enable_busmaster(dev);
	sc->vte_res_id = PCIR_BAR(1);
	sc->vte_res_type = SYS_RES_MEMORY;
	sc->vte_res = bus_alloc_resource_any(dev, sc->vte_res_type,
	    &sc->vte_res_id, RF_ACTIVE);
	if (sc->vte_res == NULL) {
		sc->vte_res_id = PCIR_BAR(0);
		sc->vte_res_type = SYS_RES_IOPORT;
		sc->vte_res = bus_alloc_resource_any(dev, sc->vte_res_type,
		    &sc->vte_res_id, RF_ACTIVE);
		if (sc->vte_res == NULL) {
			device_printf(dev, "cannot map memory/ports.\n");
			mtx_destroy(&sc->vte_mtx);
			return (ENXIO);
		}
	}
	if (bootverbose) {
		device_printf(dev, "using %s space register mapping\n",
		    sc->vte_res_type == SYS_RES_MEMORY ? "memory" : "I/O");
		device_printf(dev, "MAC Identifier : 0x%04x\n",
		    CSR_READ_2(sc, VTE_MACID));
		macid = CSR_READ_2(sc, VTE_MACID_REV);
		device_printf(dev, "MAC Id. 0x%02x, Rev. 0x%02x\n",
		    (macid & VTE_MACID_MASK) >> VTE_MACID_SHIFT,
		    (macid & VTE_MACID_REV_MASK) >> VTE_MACID_REV_SHIFT);
	}

	rid = 0;
	sc->vte_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->vte_irq == NULL) {
		device_printf(dev, "cannot allocate IRQ resources.\n");
		error = ENXIO;
		goto fail;
	}

	/* Reset the ethernet controller. */
	vte_reset(sc);

	if ((error = vte_dma_alloc(sc)) != 0)
		goto fail;

	/* Create device sysctl node. */
	vte_sysctl_node(sc);

	/* Load station address. */
	vte_get_macaddr(sc);

	ifp = sc->vte_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure.\n");
		error = ENXIO;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vte_ioctl;
	ifp->if_start = vte_start;
	ifp->if_init = vte_init;
	ifp->if_get_counter = vte_get_counter;
	ifp->if_snd.ifq_drv_maxlen = VTE_TX_RING_CNT - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Set up MII bus.
	 * The BIOS would have initialized VTE_MPSCCR to catch PHY
	 * status changes, so the driver may be able to extract the
	 * configured PHY address.  Since it's common to see the BIOS
	 * fail to initialize the register (including on the sample
	 * board I have), let mii(4) probe it.  This is more reliable
	 * than relying on the BIOS's initialization.
	 *
	 * Advertising flow control capability to mii(4) was
	 * intentionally disabled due to severe problems in TX
	 * pause frame generation.  See vte_rxeof() for more
	 * details.
	 */
	error = mii_attach(dev, &sc->vte_miibus, ifp, vte_mediachange,
	    vte_mediastatus, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	ether_ifattach(ifp, sc->vte_eaddr);

	/* VLAN capability setup. */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
	/* Tell the upper layer we support VLAN over-sized frames. */
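	/*
	 * sizeof(struct ether_vlan_header) is 18 bytes: the standard
	 * 14-byte Ethernet header plus the 4-byte 802.1Q tag.
	 */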
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	error = bus_setup_intr(dev, sc->vte_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, vte_intr, sc, &sc->vte_intrhand);
	if (error != 0) {
		device_printf(dev, "could not set up interrupt handler.\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error != 0)
		vte_detach(dev);

	return (error);
}

static int
vte_detach(device_t dev)
{
	struct vte_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	ifp = sc->vte_ifp;
	if (device_is_attached(dev)) {
		VTE_LOCK(sc);
		vte_stop(sc);
		VTE_UNLOCK(sc);
		callout_drain(&sc->vte_tick_ch);
		ether_ifdetach(ifp);
	}

	if (sc->vte_miibus != NULL) {
		device_delete_child(dev, sc->vte_miibus);
		sc->vte_miibus = NULL;
	}
	bus_generic_detach(dev);

	if (sc->vte_intrhand != NULL) {
		bus_teardown_intr(dev, sc->vte_irq, sc->vte_intrhand);
		sc->vte_intrhand = NULL;
	}
	if (sc->vte_irq != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vte_irq);
		sc->vte_irq = NULL;
	}
	if (sc->vte_res != NULL) {
		bus_release_resource(dev, sc->vte_res_type, sc->vte_res_id,
		    sc->vte_res);
		sc->vte_res = NULL;
	}
	if (ifp != NULL) {
		if_free(ifp);
		sc->vte_ifp = NULL;
	}
	vte_dma_free(sc);
	mtx_destroy(&sc->vte_mtx);

	return (0);
}

#define	VTE_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)

static void
vte_sysctl_node(struct vte_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *parent;
	struct sysctl_oid *tree;
	struct vte_hw_stats *stats;
	int error;

	stats = &sc->vte_stats;
	ctx = device_get_sysctl_ctx(sc->vte_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vte_dev));

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_rx_mod",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    &sc->vte_int_rx_mod, 0, sysctl_hw_vte_int_mod, "I",
	    "vte RX interrupt moderation");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_tx_mod",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
	    &sc->vte_int_tx_mod, 0, sysctl_hw_vte_int_mod, "I",
	    "vte TX interrupt moderation");
	/* Pull in device tunables. */
	sc->vte_int_rx_mod = VTE_IM_RX_BUNDLE_DEFAULT;
	error = resource_int_value(device_get_name(sc->vte_dev),
	    device_get_unit(sc->vte_dev), "int_rx_mod", &sc->vte_int_rx_mod);
	if (error == 0) {
		if (sc->vte_int_rx_mod < VTE_IM_BUNDLE_MIN ||
		    sc->vte_int_rx_mod > VTE_IM_BUNDLE_MAX) {
			device_printf(sc->vte_dev, "int_rx_mod value out of "
			    "range; using default: %d\n",
			    VTE_IM_RX_BUNDLE_DEFAULT);
			sc->vte_int_rx_mod = VTE_IM_RX_BUNDLE_DEFAULT;
		}
	}

	sc->vte_int_tx_mod = VTE_IM_TX_BUNDLE_DEFAULT;
	error = resource_int_value(device_get_name(sc->vte_dev),
	    device_get_unit(sc->vte_dev), "int_tx_mod", &sc->vte_int_tx_mod);
	if (error == 0) {
		if (sc->vte_int_tx_mod < VTE_IM_BUNDLE_MIN ||
		    sc->vte_int_tx_mod > VTE_IM_BUNDLE_MAX) {
			device_printf(sc->vte_dev, "int_tx_mod value out of "
			    "range; using default: %d\n",
			    VTE_IM_TX_BUNDLE_DEFAULT);
			sc->vte_int_tx_mod = VTE_IM_TX_BUNDLE_DEFAULT;
		}
	}

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "VTE statistics");
	parent = SYSCTL_CHILDREN(tree);
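	/*
	 * The nodes created below appear under dev.vte.<unit>.stats.rx
	 * and dev.vte.<unit>.stats.tx; the values are accumulated from
	 * the hardware event counters in vte_stats_update().
	 */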
	/* RX statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	VTE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->rx_frames, "Good frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
	    &stats->rx_bcast_frames, "Good broadcast frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
	    &stats->rx_mcast_frames, "Good multicast frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "runt",
	    &stats->rx_runts, "Too short frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
	    &stats->rx_crcerrs, "CRC errors");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "long_frames",
	    &stats->rx_long_frames,
	    "Frames longer than the maximum packet length");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "fifo_full",
	    &stats->rx_fifo_full, "FIFO full");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "desc_unavail",
	    &stats->rx_desc_unavail, "Descriptor unavailable frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->rx_pause_frames, "Pause control frames");

	/* TX statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TX MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	VTE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->tx_frames, "Good frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "underruns",
	    &stats->tx_underruns, "FIFO underruns");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
	    &stats->tx_late_colls, "Late collisions");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->tx_pause_frames, "Pause control frames");
}

#undef VTE_SYSCTL_STAT_ADD32

struct vte_dmamap_arg {
	bus_addr_t	vte_busaddr;
};

static void
vte_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct vte_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	ctx = (struct vte_dmamap_arg *)arg;
	ctx->vte_busaddr = segs[0].ds_addr;
}

static int
vte_dma_alloc(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	struct vte_dmamap_arg ctx;
	int error, i;

	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->vte_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_parent_tag);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not create parent DMA tag.\n");
		goto fail;
	}
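	/*
	 * Everything below inherits the 32-bit lowaddr restriction of
	 * this parent tag: the controller takes ring base addresses as
	 * two 16-bit registers (e.g. VTE_MTDSA1/VTE_MTDSA0 in
	 * vte_init_locked()), i.e. 32-bit DMA addresses, so descriptors
	 * and buffers must reside in the low 4GB.
	 */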
	/* Create DMA tag for TX descriptor ring. */
	error = bus_dma_tag_create(
	    sc->vte_cdata.vte_parent_tag, /* parent */
	    VTE_TX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    VTE_TX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    VTE_TX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not create TX ring DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for RX free descriptor ring. */
	error = bus_dma_tag_create(
	    sc->vte_cdata.vte_parent_tag, /* parent */
	    VTE_RX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    VTE_RX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    VTE_RX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not create RX ring DMA tag.\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for TX ring. */
	error = bus_dmamem_alloc(sc->vte_cdata.vte_tx_ring_tag,
	    (void **)&sc->vte_cdata.vte_tx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->vte_cdata.vte_tx_ring_map);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not allocate DMA'able memory for TX ring.\n");
		goto fail;
	}
	ctx.vte_busaddr = 0;
	error = bus_dmamap_load(sc->vte_cdata.vte_tx_ring_tag,
	    sc->vte_cdata.vte_tx_ring_map, sc->vte_cdata.vte_tx_ring,
	    VTE_TX_RING_SZ, vte_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.vte_busaddr == 0) {
		device_printf(sc->vte_dev,
		    "could not load DMA'able memory for TX ring.\n");
		goto fail;
	}
	sc->vte_cdata.vte_tx_ring_paddr = ctx.vte_busaddr;

	/* Allocate DMA'able memory and load the DMA map for RX ring. */
	error = bus_dmamem_alloc(sc->vte_cdata.vte_rx_ring_tag,
	    (void **)&sc->vte_cdata.vte_rx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->vte_cdata.vte_rx_ring_map);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not allocate DMA'able memory for RX ring.\n");
		goto fail;
	}
	ctx.vte_busaddr = 0;
	error = bus_dmamap_load(sc->vte_cdata.vte_rx_ring_tag,
	    sc->vte_cdata.vte_rx_ring_map, sc->vte_cdata.vte_rx_ring,
	    VTE_RX_RING_SZ, vte_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.vte_busaddr == 0) {
		device_printf(sc->vte_dev,
		    "could not load DMA'able memory for RX ring.\n");
		goto fail;
	}
	sc->vte_cdata.vte_rx_ring_paddr = ctx.vte_busaddr;

	/* Create TX buffer parent tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->vte_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_buffer_tag);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not create parent buffer DMA tag.\n");
		goto fail;
	}
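	/*
	 * Both buffer tags below use nsegments = 1: the controller has
	 * no scatter/gather support, so each descriptor must point at
	 * a single contiguous buffer (see the deep-copy logic in
	 * vte_encap()).
	 */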
	/* Create DMA tag for TX buffers. */
	error = bus_dma_tag_create(
	    sc->vte_cdata.vte_buffer_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_tx_tag);
	if (error != 0) {
		device_printf(sc->vte_dev, "could not create TX DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for RX buffers. */
	error = bus_dma_tag_create(
	    sc->vte_cdata.vte_buffer_tag, /* parent */
	    VTE_RX_BUF_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_rx_tag);
	if (error != 0) {
		device_printf(sc->vte_dev, "could not create RX DMA tag.\n");
		goto fail;
	}
	/* Create DMA maps for TX buffers. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->vte_cdata.vte_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->vte_dev,
			    "could not create TX dmamap.\n");
			goto fail;
		}
	}
	/* Create DMA maps for RX buffers. */
	if ((error = bus_dmamap_create(sc->vte_cdata.vte_rx_tag, 0,
	    &sc->vte_cdata.vte_rx_sparemap)) != 0) {
		device_printf(sc->vte_dev,
		    "could not create spare RX dmamap.\n");
		goto fail;
	}
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->vte_cdata.vte_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->vte_dev,
			    "could not create RX dmamap.\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
vte_dma_free(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	/* TX buffers. */
	if (sc->vte_cdata.vte_tx_tag != NULL) {
		for (i = 0; i < VTE_TX_RING_CNT; i++) {
			txd = &sc->vte_cdata.vte_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(sc->vte_cdata.vte_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->vte_cdata.vte_tx_tag);
		sc->vte_cdata.vte_tx_tag = NULL;
	}
	/* RX buffers */
	if (sc->vte_cdata.vte_rx_tag != NULL) {
		for (i = 0; i < VTE_RX_RING_CNT; i++) {
			rxd = &sc->vte_cdata.vte_rxdesc[i];
			if (rxd->rx_dmamap != NULL) {
				bus_dmamap_destroy(sc->vte_cdata.vte_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->vte_cdata.vte_rx_sparemap != NULL) {
			bus_dmamap_destroy(sc->vte_cdata.vte_rx_tag,
			    sc->vte_cdata.vte_rx_sparemap);
			sc->vte_cdata.vte_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->vte_cdata.vte_rx_tag);
		sc->vte_cdata.vte_rx_tag = NULL;
	}
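	/*
	 * Teardown order mirrors allocation: maps are destroyed before
	 * their tag, ring memory is unloaded and freed before its tag,
	 * and the parent tags go last.
	 */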
	/* TX descriptor ring. */
	if (sc->vte_cdata.vte_tx_ring_tag != NULL) {
		if (sc->vte_cdata.vte_tx_ring_paddr != 0)
			bus_dmamap_unload(sc->vte_cdata.vte_tx_ring_tag,
			    sc->vte_cdata.vte_tx_ring_map);
		if (sc->vte_cdata.vte_tx_ring != NULL)
			bus_dmamem_free(sc->vte_cdata.vte_tx_ring_tag,
			    sc->vte_cdata.vte_tx_ring,
			    sc->vte_cdata.vte_tx_ring_map);
		sc->vte_cdata.vte_tx_ring = NULL;
		sc->vte_cdata.vte_tx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->vte_cdata.vte_tx_ring_tag);
		sc->vte_cdata.vte_tx_ring_tag = NULL;
	}
	/* RX ring. */
	if (sc->vte_cdata.vte_rx_ring_tag != NULL) {
		if (sc->vte_cdata.vte_rx_ring_paddr != 0)
			bus_dmamap_unload(sc->vte_cdata.vte_rx_ring_tag,
			    sc->vte_cdata.vte_rx_ring_map);
		if (sc->vte_cdata.vte_rx_ring != NULL)
			bus_dmamem_free(sc->vte_cdata.vte_rx_ring_tag,
			    sc->vte_cdata.vte_rx_ring,
			    sc->vte_cdata.vte_rx_ring_map);
		sc->vte_cdata.vte_rx_ring = NULL;
		sc->vte_cdata.vte_rx_ring_paddr = 0;
		bus_dma_tag_destroy(sc->vte_cdata.vte_rx_ring_tag);
		sc->vte_cdata.vte_rx_ring_tag = NULL;
	}
	if (sc->vte_cdata.vte_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->vte_cdata.vte_buffer_tag);
		sc->vte_cdata.vte_buffer_tag = NULL;
	}
	if (sc->vte_cdata.vte_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->vte_cdata.vte_parent_tag);
		sc->vte_cdata.vte_parent_tag = NULL;
	}
}

static int
vte_shutdown(device_t dev)
{

	return (vte_suspend(dev));
}

static int
vte_suspend(device_t dev)
{
	struct vte_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	VTE_LOCK(sc);
	ifp = sc->vte_ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		vte_stop(sc);
	VTE_UNLOCK(sc);

	return (0);
}

static int
vte_resume(device_t dev)
{
	struct vte_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	VTE_LOCK(sc);
	ifp = sc->vte_ifp;
	if ((ifp->if_flags & IFF_UP) != 0) {
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		vte_init_locked(sc);
	}
	VTE_UNLOCK(sc);

	return (0);
}

static struct vte_txdesc *
vte_encap(struct vte_softc *sc, struct mbuf **m_head)
{
	struct vte_txdesc *txd;
	struct mbuf *m, *n;
	bus_dma_segment_t txsegs[1];
	int copy, error, nsegs, padlen;

	VTE_LOCK_ASSERT(sc);

	M_ASSERTPKTHDR((*m_head));

	txd = &sc->vte_cdata.vte_txdesc[sc->vte_cdata.vte_tx_prod];
	m = *m_head;
	/*
	 * The controller doesn't auto-pad, so we have to pad short
	 * frames out to the minimum frame length ourselves.
	 */
	if (m->m_pkthdr.len < VTE_MIN_FRAMELEN)
		padlen = VTE_MIN_FRAMELEN - m->m_pkthdr.len;
	else
		padlen = 0;

	/*
	 * The controller does not support multi-fragmented TX buffers
	 * and spends most of its TX processing time de-fragmenting
	 * them.  Either a faster CPU or a more advanced controller DMA
	 * engine would be required to speed up TX path processing.
	 * To mitigate the de-fragmenting issue, perform a deep copy
	 * from the fragmented mbuf chain into a pre-allocated mbuf
	 * cluster at the extra cost of kernel memory.  For frames
	 * composed of a single TX buffer, the deep copy is bypassed.
	 */
	if (tx_deep_copy != 0) {
		copy = 0;
		if (m->m_next != NULL)
			copy++;
		if (padlen > 0 && (M_WRITABLE(m) == 0 ||
		    padlen > M_TRAILINGSPACE(m)))
			copy++;
		if (copy != 0) {
			/* Avoid expensive m_defrag(9) and do deep copy. */
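			/*
			 * The destination cluster was preallocated in
			 * vte_init_tx_ring(), so the copy itself cannot
			 * fail at this point.
			 */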
			n = sc->vte_cdata.vte_txmbufs[sc->vte_cdata.vte_tx_prod];
			m_copydata(m, 0, m->m_pkthdr.len, mtod(n, char *));
			n->m_pkthdr.len = m->m_pkthdr.len;
			n->m_len = m->m_pkthdr.len;
			m = n;
			txd->tx_flags |= VTE_TXMBUF;
		}

		if (padlen > 0) {
			/* Zero out the bytes in the pad area. */
			bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
			m->m_pkthdr.len += padlen;
			m->m_len = m->m_pkthdr.len;
		}
	} else {
		if (M_WRITABLE(m) == 0) {
			if (m->m_next != NULL || padlen > 0) {
				/* Get a writable copy. */
				m = m_dup(*m_head, M_NOWAIT);
				/* Release original mbuf chains. */
				m_freem(*m_head);
				if (m == NULL) {
					*m_head = NULL;
					return (NULL);
				}
				*m_head = m;
			}
		}

		if (m->m_next != NULL) {
			m = m_defrag(*m_head, M_NOWAIT);
			if (m == NULL) {
				m_freem(*m_head);
				*m_head = NULL;
				return (NULL);
			}
			*m_head = m;
		}

		if (padlen > 0) {
			if (M_TRAILINGSPACE(m) < padlen) {
				m = m_defrag(*m_head, M_NOWAIT);
				if (m == NULL) {
					m_freem(*m_head);
					*m_head = NULL;
					return (NULL);
				}
				*m_head = m;
			}
			/* Zero out the bytes in the pad area. */
			bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
			m->m_pkthdr.len += padlen;
			m->m_len = m->m_pkthdr.len;
		}
	}

	error = bus_dmamap_load_mbuf_sg(sc->vte_cdata.vte_tx_tag,
	    txd->tx_dmamap, m, txsegs, &nsegs, 0);
	if (error != 0) {
		txd->tx_flags &= ~VTE_TXMBUF;
		return (NULL);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
	bus_dmamap_sync(sc->vte_cdata.vte_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	txd->tx_desc->dtlen = htole16(VTE_TX_LEN(txsegs[0].ds_len));
	txd->tx_desc->dtbp = htole32(txsegs[0].ds_addr);
	sc->vte_cdata.vte_tx_cnt++;
	/* Update producer index. */
	VTE_DESC_INC(sc->vte_cdata.vte_tx_prod, VTE_TX_RING_CNT);

	/* Finally hand over ownership to controller. */
	txd->tx_desc->dtst = htole16(VTE_DTST_TX_OWN);
	txd->tx_m = m;

	return (txd);
}

static void
vte_start(struct ifnet *ifp)
{
	struct vte_softc *sc;

	sc = ifp->if_softc;
	VTE_LOCK(sc);
	vte_start_locked(sc);
	VTE_UNLOCK(sc);
}

static void
vte_start_locked(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct vte_txdesc *txd;
	struct mbuf *m_head;
	int enq;

	ifp = sc->vte_ifp;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->vte_flags & VTE_FLAG_LINK) == 0)
		return;

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
		/* Reserve one free TX descriptor. */
		if (sc->vte_cdata.vte_tx_cnt >= VTE_TX_RING_CNT - 1) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring.  If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if ((txd = vte_encap(sc, &m_head)) == NULL) {
			if (m_head != NULL)
				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
		/* Free consumed TX frame. */
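		/*
		 * When the deep-copy path was taken (VTE_TXMBUF set),
		 * the data now lives in the preallocated cluster, so
		 * the original chain can be freed right away; otherwise
		 * the mbuf is freed later in vte_txeof().
		 */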
		if ((txd->tx_flags & VTE_TXMBUF) != 0)
			m_freem(m_head);
	}

	if (enq > 0) {
		bus_dmamap_sync(sc->vte_cdata.vte_tx_ring_tag,
		    sc->vte_cdata.vte_tx_ring_map, BUS_DMASYNC_PREREAD |
		    BUS_DMASYNC_PREWRITE);
		CSR_WRITE_2(sc, VTE_TX_POLL, TX_POLL_START);
		sc->vte_watchdog_timer = VTE_TX_TIMEOUT;
	}
}

static void
vte_watchdog(struct vte_softc *sc)
{
	struct ifnet *ifp;

	VTE_LOCK_ASSERT(sc);

	if (sc->vte_watchdog_timer == 0 || --sc->vte_watchdog_timer)
		return;

	ifp = sc->vte_ifp;
	if_printf(sc->vte_ifp, "watchdog timeout -- resetting\n");
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	vte_init_locked(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		vte_start_locked(sc);
}

static int
vte_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct vte_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;
	switch (cmd) {
	case SIOCSIFFLAGS:
		VTE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
			    ((ifp->if_flags ^ sc->vte_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
				vte_rxfilter(sc);
			else
				vte_init_locked(sc);
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			vte_stop(sc);
		sc->vte_if_flags = ifp->if_flags;
		VTE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		VTE_LOCK(sc);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			vte_rxfilter(sc);
		VTE_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->vte_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

static void
vte_mac_config(struct vte_softc *sc)
{
	struct mii_data *mii;
	uint16_t mcr;

	VTE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->vte_miibus);
	mcr = CSR_READ_2(sc, VTE_MCR0);
	mcr &= ~(MCR0_FC_ENB | MCR0_FULL_DUPLEX);
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		mcr |= MCR0_FULL_DUPLEX;
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
		/*
		 * The data sheet is not clear whether the controller
		 * honors received pause frames or not.  There is no
		 * separate control bit for RX pause frames, so just
		 * enable the MCR0_FC_ENB bit.
		 */
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
#endif
	}
	CSR_WRITE_2(sc, VTE_MCR0, mcr);
}

static void
vte_stats_clear(struct vte_softc *sc)
{

	/* Reading the counter registers clears their contents. */
	CSR_READ_2(sc, VTE_CNT_RX_DONE);
	CSR_READ_2(sc, VTE_CNT_MECNT0);
	CSR_READ_2(sc, VTE_CNT_MECNT1);
	CSR_READ_2(sc, VTE_CNT_MECNT2);
	CSR_READ_2(sc, VTE_CNT_MECNT3);
	CSR_READ_2(sc, VTE_CNT_TX_DONE);
	CSR_READ_2(sc, VTE_CNT_MECNT4);
	CSR_READ_2(sc, VTE_CNT_PAUSE);
}

static void
vte_stats_update(struct vte_softc *sc)
{
	struct vte_hw_stats *stat;
	uint16_t value;

	VTE_LOCK_ASSERT(sc);

	stat = &sc->vte_stats;

	CSR_READ_2(sc, VTE_MECISR);
	/* RX stats. */
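	/*
	 * Most event counters are packed as two 8-bit counts in a
	 * single 16-bit register, hence the >> 8 / & 0xFF splits
	 * below; the registers clear on read (see vte_stats_clear()).
	 */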
	stat->rx_frames += CSR_READ_2(sc, VTE_CNT_RX_DONE);
	value = CSR_READ_2(sc, VTE_CNT_MECNT0);
	stat->rx_bcast_frames += (value >> 8);
	stat->rx_mcast_frames += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT1);
	stat->rx_runts += (value >> 8);
	stat->rx_crcerrs += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT2);
	stat->rx_long_frames += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT3);
	stat->rx_fifo_full += (value >> 8);
	stat->rx_desc_unavail += (value & 0xFF);

	/* TX stats. */
	stat->tx_frames += CSR_READ_2(sc, VTE_CNT_TX_DONE);
	value = CSR_READ_2(sc, VTE_CNT_MECNT4);
	stat->tx_underruns += (value >> 8);
	stat->tx_late_colls += (value & 0xFF);

	value = CSR_READ_2(sc, VTE_CNT_PAUSE);
	stat->tx_pause_frames += (value >> 8);
	stat->rx_pause_frames += (value & 0xFF);
}

static uint64_t
vte_get_counter(struct ifnet *ifp, ift_counter cnt)
{
	struct vte_softc *sc;
	struct vte_hw_stats *stat;

	sc = if_getsoftc(ifp);
	stat = &sc->vte_stats;

	switch (cnt) {
	case IFCOUNTER_OPACKETS:
		return (stat->tx_frames);
	case IFCOUNTER_COLLISIONS:
		return (stat->tx_late_colls);
	case IFCOUNTER_OERRORS:
		return (stat->tx_late_colls + stat->tx_underruns);
	case IFCOUNTER_IPACKETS:
		return (stat->rx_frames);
	case IFCOUNTER_IERRORS:
		return (stat->rx_crcerrs + stat->rx_runts +
		    stat->rx_long_frames + stat->rx_fifo_full);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}

static void
vte_intr(void *arg)
{
	struct vte_softc *sc;
	struct ifnet *ifp;
	uint16_t status;
	int n;

	sc = (struct vte_softc *)arg;
	VTE_LOCK(sc);

	ifp = sc->vte_ifp;
	/* Reading VTE_MISR acknowledges interrupts. */
	status = CSR_READ_2(sc, VTE_MISR);
	if ((status & VTE_INTRS) == 0) {
		/* Not ours. */
		VTE_UNLOCK(sc);
		return;
	}

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
	for (n = 8; (status & VTE_INTRS) != 0;) {
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
		if ((status & (MISR_RX_DONE | MISR_RX_DESC_UNAVAIL |
		    MISR_RX_FIFO_FULL)) != 0)
			vte_rxeof(sc);
		if ((status & MISR_TX_DONE) != 0)
			vte_txeof(sc);
		if ((status & MISR_EVENT_CNT_OFLOW) != 0)
			vte_stats_update(sc);
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			vte_start_locked(sc);
		if (--n > 0)
			status = CSR_READ_2(sc, VTE_MISR);
		else
			break;
	}

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		/* Re-enable interrupts. */
		CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);
	}
	VTE_UNLOCK(sc);
}

static void
vte_txeof(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct vte_txdesc *txd;
	uint16_t status;
	int cons, prog;

	VTE_LOCK_ASSERT(sc);

	ifp = sc->vte_ifp;

	if (sc->vte_cdata.vte_tx_cnt == 0)
		return;
	bus_dmamap_sync(sc->vte_cdata.vte_tx_ring_tag,
	    sc->vte_cdata.vte_tx_ring_map, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	cons = sc->vte_cdata.vte_tx_cons;
	/*
	 * Go through our TX list and free mbufs for those
	 * frames which have been transmitted.
	 */
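	/*
	 * Descriptors whose VTE_DTST_TX_OWN bit is still set are owned
	 * by the hardware and must be left alone; the loop stops at
	 * the first such descriptor.
	 */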
	for (prog = 0; sc->vte_cdata.vte_tx_cnt > 0; prog++) {
		txd = &sc->vte_cdata.vte_txdesc[cons];
		status = le16toh(txd->tx_desc->dtst);
		if ((status & VTE_DTST_TX_OWN) != 0)
			break;
		sc->vte_cdata.vte_tx_cnt--;
		/* Reclaim transmitted mbufs. */
		bus_dmamap_sync(sc->vte_cdata.vte_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vte_cdata.vte_tx_tag, txd->tx_dmamap);
		if ((txd->tx_flags & VTE_TXMBUF) == 0)
			m_freem(txd->tx_m);
		txd->tx_flags &= ~VTE_TXMBUF;
		txd->tx_m = NULL;
		prog++;
		VTE_DESC_INC(cons, VTE_TX_RING_CNT);
	}

	if (prog > 0) {
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		sc->vte_cdata.vte_tx_cons = cons;
		/*
		 * Unarm the watchdog timer only when there are no
		 * pending frames in the TX queue.
		 */
		if (sc->vte_cdata.vte_tx_cnt == 0)
			sc->vte_watchdog_timer = 0;
	}
}

static int
vte_newbuf(struct vte_softc *sc, struct vte_rxdesc *rxd)
{
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, sizeof(uint32_t));

	if (bus_dmamap_load_mbuf_sg(sc->vte_cdata.vte_rx_tag,
	    sc->vte_cdata.vte_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->vte_cdata.vte_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->vte_cdata.vte_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->vte_cdata.vte_rx_sparemap;
	sc->vte_cdata.vte_rx_sparemap = map;
	bus_dmamap_sync(sc->vte_cdata.vte_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	rxd->rx_desc->drbp = htole32(segs[0].ds_addr);
	rxd->rx_desc->drlen = htole16(VTE_RX_LEN(segs[0].ds_len));
	rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);

	return (0);
}

/*
 * This controller is not supposed to show up on strict-alignment
 * architectures, but make it work there for completeness.
 */
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf *
vte_fixup_rx(struct ifnet *ifp, struct mbuf *m)
{
	uint16_t *src, *dst;
	int i;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	/*
	 * Copy the payload one 16-bit word at a time to an address
	 * ETHER_ALIGN bytes lower, realigning the IP header on a
	 * 32-bit boundary.
	 */
	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;
	m->m_data -= ETHER_ALIGN;
	return (m);
}
#endif

static void
vte_rxeof(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct vte_rxdesc *rxd;
	struct mbuf *m;
	uint16_t status, total_len;
	int cons, prog;

	bus_dmamap_sync(sc->vte_cdata.vte_rx_ring_tag,
	    sc->vte_cdata.vte_rx_ring_map, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	cons = sc->vte_cdata.vte_rx_cons;
	ifp = sc->vte_ifp;
	for (prog = 0; (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0; prog++,
	    VTE_DESC_INC(cons, VTE_RX_RING_CNT)) {
		rxd = &sc->vte_cdata.vte_rxdesc[cons];
		status = le16toh(rxd->rx_desc->drst);
		if ((status & VTE_DRST_RX_OWN) != 0)
			break;
		total_len = VTE_RX_LEN(le16toh(rxd->rx_desc->drlen));
		m = rxd->rx_m;
		if ((status & VTE_DRST_RX_OK) == 0) {
			/* Discard errored frame. */
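			/*
			 * Recycle the existing cluster: hand the
			 * descriptor straight back to the hardware by
			 * resetting its buffer length and ownership bit
			 * instead of allocating a fresh buffer.
			 */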
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}
		if (vte_newbuf(sc, rxd) != 0) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}

		/*
		 * It seems there is no way to strip FCS bytes.
		 */
		m->m_pkthdr.len = m->m_len = total_len - ETHER_CRC_LEN;
		m->m_pkthdr.rcvif = ifp;
#ifndef __NO_STRICT_ALIGNMENT
		vte_fixup_rx(ifp, m);
#endif
		VTE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		VTE_LOCK(sc);
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->vte_cdata.vte_rx_cons = cons;
		/*
		 * Sync the updated RX descriptors so the controller
		 * sees the modified RX buffer addresses.
		 */
		bus_dmamap_sync(sc->vte_cdata.vte_rx_ring_tag,
		    sc->vte_cdata.vte_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
#ifdef notyet
		/*
		 * Update the residue counter.  The controller does not
		 * keep track of the number of available RX descriptors,
		 * so the driver has to update VTE_MRDCR to let the
		 * controller know how many free RX descriptors were
		 * added.  This mechanism is similar to the one used in
		 * VIA velocity controllers and indicates the controller
		 * just polls the OWN bit of the current RX descriptor
		 * pointer.  A couple of severe issues were seen on a
		 * sample board where the controller continuously emits
		 * TX pause frames once the RX pause threshold is
		 * crossed.  Once triggered it never recovered from that
		 * state; I couldn't find a way to make it work again.
		 * This issue effectively disconnected the system from
		 * the network.  Also, the controller used
		 * 00:00:00:00:00:00 as the source station address of TX
		 * pause frames, which is probably one of the reasons
		 * why the vendor recommends not enabling flow control
		 * on the R6040 controller.
		 */
		CSR_WRITE_2(sc, VTE_MRDCR, prog |
		    (((VTE_RX_RING_CNT * 2) / 10) <<
		    VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));
#endif
	}
}

static void
vte_tick(void *arg)
{
	struct vte_softc *sc;
	struct mii_data *mii;

	sc = (struct vte_softc *)arg;

	VTE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->vte_miibus);
	mii_tick(mii);
	vte_stats_update(sc);
	vte_txeof(sc);
	vte_watchdog(sc);
	callout_reset(&sc->vte_tick_ch, hz, vte_tick, sc);
}

static void
vte_reset(struct vte_softc *sc)
{
	uint16_t mcr, mdcsc;
	int i;

	mdcsc = CSR_READ_2(sc, VTE_MDCSC);
	mcr = CSR_READ_2(sc, VTE_MCR1);
	CSR_WRITE_2(sc, VTE_MCR1, mcr | MCR1_MAC_RESET);
	for (i = VTE_RESET_TIMEOUT; i > 0; i--) {
		DELAY(10);
		if ((CSR_READ_2(sc, VTE_MCR1) & MCR1_MAC_RESET) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->vte_dev, "reset timeout(0x%04x)!\n", mcr);
	/*
	 * Follow the vendor's recommended way to reset the MAC.  The
	 * vendor confirms that relying on MCR1_MAC_RESET of VTE_MCR1
	 * is not reliable, so manually reset the internal state
	 * machine.
	 */
	CSR_WRITE_2(sc, VTE_MACSM, 0x0002);
	CSR_WRITE_2(sc, VTE_MACSM, 0);
	DELAY(5000);

	/*
	 * On some SoCs (like the Vortex86DX3) the MDC speed control
	 * register needs to be restored to its original value instead
	 * of the default one, otherwise some PHY registers may fail to
	 * be read.
	 */
	if (mdcsc != MDCSC_DEFAULT)
		CSR_WRITE_2(sc, VTE_MDCSC, mdcsc);
}

static void
vte_init(void *xsc)
{
	struct vte_softc *sc;

	sc = (struct vte_softc *)xsc;
	VTE_LOCK(sc);
	vte_init_locked(sc);
	VTE_UNLOCK(sc);
}

static void
vte_init_locked(struct vte_softc *sc)
{
	struct ifnet *ifp;
	bus_addr_t paddr;
	uint8_t *eaddr;

	VTE_LOCK_ASSERT(sc);

	ifp = sc->vte_ifp;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;
	/*
	 * Cancel any pending I/O.
	 */
	vte_stop(sc);
	/*
	 * Reset the chip to a known state.
	 */
	vte_reset(sc);

	/* Initialize RX descriptors. */
	if (vte_init_rx_ring(sc) != 0) {
		device_printf(sc->vte_dev, "no memory for RX buffers.\n");
		vte_stop(sc);
		return;
	}
	if (vte_init_tx_ring(sc) != 0) {
		device_printf(sc->vte_dev, "no memory for TX buffers.\n");
		vte_stop(sc);
		return;
	}

	/*
	 * Reprogram the station address.  The controller supports up
	 * to 4 different station addresses, so the driver programs the
	 * first station address as its own ethernet address and
	 * configures the remaining three addresses as perfect
	 * multicast addresses.
	 */
	eaddr = IF_LLADDR(sc->vte_ifp);
	CSR_WRITE_2(sc, VTE_MID0L, eaddr[1] << 8 | eaddr[0]);
	CSR_WRITE_2(sc, VTE_MID0M, eaddr[3] << 8 | eaddr[2]);
	CSR_WRITE_2(sc, VTE_MID0H, eaddr[5] << 8 | eaddr[4]);

	/* Set TX descriptor base addresses. */
	paddr = sc->vte_cdata.vte_tx_ring_paddr;
	CSR_WRITE_2(sc, VTE_MTDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MTDSA0, paddr & 0xFFFF);
	/* Set RX descriptor base addresses. */
	paddr = sc->vte_cdata.vte_rx_ring_paddr;
	CSR_WRITE_2(sc, VTE_MRDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MRDSA0, paddr & 0xFFFF);
	/*
	 * Initialize the RX descriptor residue counter and set the RX
	 * pause threshold to 20% of the available RX descriptors.
	 * See the comments in vte_rxeof() for details on the flow
	 * control issues.
	 */
	CSR_WRITE_2(sc, VTE_MRDCR, (VTE_RX_RING_CNT & VTE_MRDCR_RESIDUE_MASK) |
	    (((VTE_RX_RING_CNT * 2) / 10) << VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));

	/*
	 * Always use the maximum frame size the controller can
	 * support.  Otherwise received frames that are longer than the
	 * vte(4) MTU would be silently dropped in the controller,
	 * which would break path-MTU discovery as the sender wouldn't
	 * get any responses from the receiver.  The RX buffer size
	 * should be a multiple of 4.
	 * Note, jumbo frames are silently ignored by the controller
	 * and even the MAC counters do not detect them.
	 */
	CSR_WRITE_2(sc, VTE_MRBSR, VTE_RX_BUF_SIZE_MAX);

	/* Configure FIFO. */
	CSR_WRITE_2(sc, VTE_MBCR, MBCR_FIFO_XFER_LENGTH_16 |
	    MBCR_TX_FIFO_THRESH_64 | MBCR_RX_FIFO_THRESH_16 |
	    MBCR_SDRAM_BUS_REQ_TIMER_DEFAULT);

	/*
	 * Configure TX/RX MACs.  Actual resolved duplex and flow
	 * control configuration is done after detecting a valid link.
	 * Note that we don't generate early interrupts here either,
	 * since FreeBSD does not have the interrupt latency problems
	 * Windows has.
	 */
	CSR_WRITE_2(sc, VTE_MCR0, MCR0_ACCPT_LONG_PKT);
	/*
	 * We manually keep track of PHY status changes to
	 * configure resolved duplex and flow control since only
	 * duplex configuration can be automatically reflected to
	 * MCR0.
	 */
	CSR_WRITE_2(sc, VTE_MCR1, MCR1_PKT_LENGTH_1537 |
	    MCR1_EXCESS_COL_RETRY_16);

	/* Initialize RX filter. */
	vte_rxfilter(sc);

	/* Disable TX/RX interrupt moderation control. */
	CSR_WRITE_2(sc, VTE_MRICR, 0);
	CSR_WRITE_2(sc, VTE_MTICR, 0);

	/* Enable MAC event counter interrupts. */
	CSR_WRITE_2(sc, VTE_MECIER, VTE_MECIER_INTRS);
	/* Clear MAC statistics. */
	vte_stats_clear(sc);

	/* Acknowledge all pending interrupts and clear them. */
	CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);
	CSR_WRITE_2(sc, VTE_MISR, 0);

	sc->vte_flags &= ~VTE_FLAG_LINK;
	/* Switch to the current media. */
	vte_mediachange_locked(ifp);

	callout_reset(&sc->vte_tick_ch, hz, vte_tick, sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}

static void
vte_stop(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	VTE_LOCK_ASSERT(sc);
	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp = sc->vte_ifp;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->vte_flags &= ~VTE_FLAG_LINK;
	callout_stop(&sc->vte_tick_ch);
	sc->vte_watchdog_timer = 0;
	vte_stats_update(sc);
	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
	CSR_WRITE_2(sc, VTE_MECIER, 0);
	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Clear interrupts. */
	CSR_READ_2(sc, VTE_MISR);
	/*
	 * Free TX/RX mbufs still in the queues.
	 */
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->vte_cdata.vte_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->vte_cdata.vte_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->vte_cdata.vte_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->vte_cdata.vte_tx_tag,
			    txd->tx_dmamap);
			if ((txd->tx_flags & VTE_TXMBUF) == 0)
				m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_flags &= ~VTE_TXMBUF;
		}
	}
	/* Free TX mbuf pools used for deep copy. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		if (sc->vte_cdata.vte_txmbufs[i] != NULL) {
			m_freem(sc->vte_cdata.vte_txmbufs[i]);
			sc->vte_cdata.vte_txmbufs[i] = NULL;
		}
	}
}

static void
vte_start_mac(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	VTE_LOCK_ASSERT(sc);

	/* Enable RX/TX MACs. */
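	/*
	 * The enable bits may not take effect immediately, so poll
	 * MCR0 until both RX/TX enables read back as set;
	 * vte_stop_mac() performs the inverse handshake.
	 */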
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) !=
	    (MCR0_RX_ENB | MCR0_TX_ENB)) {
		mcr |= MCR0_RX_ENB | MCR0_TX_ENB;
		CSR_WRITE_2(sc, VTE_MCR0, mcr);
		for (i = VTE_TIMEOUT; i > 0; i--) {
			mcr = CSR_READ_2(sc, VTE_MCR0);
			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) ==
			    (MCR0_RX_ENB | MCR0_TX_ENB))
				break;
			DELAY(10);
		}
		if (i == 0)
			device_printf(sc->vte_dev,
			    "could not enable RX/TX MAC(0x%04x)!\n", mcr);
	}
}

static void
vte_stop_mac(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	VTE_LOCK_ASSERT(sc);

	/* Disable RX/TX MACs. */
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) != 0) {
		mcr &= ~(MCR0_RX_ENB | MCR0_TX_ENB);
		CSR_WRITE_2(sc, VTE_MCR0, mcr);
		for (i = VTE_TIMEOUT; i > 0; i--) {
			mcr = CSR_READ_2(sc, VTE_MCR0);
			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) == 0)
				break;
			DELAY(10);
		}
		if (i == 0)
			device_printf(sc->vte_dev,
			    "could not disable RX/TX MAC(0x%04x)!\n", mcr);
	}
}

static int
vte_init_tx_ring(struct vte_softc *sc)
{
	struct vte_tx_desc *desc;
	struct vte_txdesc *txd;
	bus_addr_t addr;
	int i;

	VTE_LOCK_ASSERT(sc);

	sc->vte_cdata.vte_tx_prod = 0;
	sc->vte_cdata.vte_tx_cons = 0;
	sc->vte_cdata.vte_tx_cnt = 0;

	/* Pre-allocate TX mbufs for deep copy. */
	if (tx_deep_copy != 0) {
		for (i = 0; i < VTE_TX_RING_CNT; i++) {
			sc->vte_cdata.vte_txmbufs[i] = m_getcl(M_NOWAIT,
			    MT_DATA, M_PKTHDR);
			if (sc->vte_cdata.vte_txmbufs[i] == NULL)
				return (ENOBUFS);
			sc->vte_cdata.vte_txmbufs[i]->m_pkthdr.len = MCLBYTES;
			sc->vte_cdata.vte_txmbufs[i]->m_len = MCLBYTES;
		}
	}
	desc = sc->vte_cdata.vte_tx_ring;
	bzero(desc, VTE_TX_RING_SZ);
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
		if (i != VTE_TX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_tx_ring_paddr +
			    sizeof(struct vte_tx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_tx_ring_paddr +
			    sizeof(struct vte_tx_desc) * 0;
		desc = &sc->vte_cdata.vte_tx_ring[i];
		desc->dtnp = htole32(addr);
		txd->tx_desc = desc;
	}

	bus_dmamap_sync(sc->vte_cdata.vte_tx_ring_tag,
	    sc->vte_cdata.vte_tx_ring_map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	return (0);
}

static int
vte_init_rx_ring(struct vte_softc *sc)
{
	struct vte_rx_desc *desc;
	struct vte_rxdesc *rxd;
	bus_addr_t addr;
	int i;

	VTE_LOCK_ASSERT(sc);

	sc->vte_cdata.vte_rx_cons = 0;
	desc = sc->vte_cdata.vte_rx_ring;
	bzero(desc, VTE_RX_RING_SZ);
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		if (i != VTE_RX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_rx_ring_paddr +
			    sizeof(struct vte_rx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_rx_ring_paddr +
			    sizeof(struct vte_rx_desc) * 0;
		desc = &sc->vte_cdata.vte_rx_ring[i];
		desc->drnp = htole32(addr);
		rxd->rx_desc = desc;
		if (vte_newbuf(sc, rxd) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->vte_cdata.vte_rx_ring_tag,
	    sc->vte_cdata.vte_rx_ring_map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	return (0);
}
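/*
 * Context handed to if_foreach_llmaddr() while rebuilding the RX
 * filter: up to VTE_RXFILT_PERFECT_CNT perfect-match entries, with the
 * remaining groups folded into the 4 x 16-bit (64-bit) hash table.
 */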
struct vte_maddr_ctx {
	uint16_t rxfilt_perf[VTE_RXFILT_PERFECT_CNT][3];
	uint16_t mchash[4];
	u_int nperf;
};

static u_int
vte_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	struct vte_maddr_ctx *ctx = arg;
	uint8_t *eaddr;
	uint32_t crc;

	/*
	 * Program the first 3 multicast groups into the perfect filter.
	 * For all others, use the hash table.
	 */
	if (ctx->nperf < VTE_RXFILT_PERFECT_CNT) {
		eaddr = LLADDR(sdl);
		ctx->rxfilt_perf[ctx->nperf][0] = eaddr[1] << 8 | eaddr[0];
		ctx->rxfilt_perf[ctx->nperf][1] = eaddr[3] << 8 | eaddr[2];
		ctx->rxfilt_perf[ctx->nperf][2] = eaddr[5] << 8 | eaddr[4];
		ctx->nperf++;

		return (1);
	}
	crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN);
	ctx->mchash[crc >> 30] |= 1 << ((crc >> 26) & 0x0F);

	return (1);
}

static void
vte_rxfilter(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct vte_maddr_ctx ctx;
	uint16_t mcr;
	int i;

	VTE_LOCK_ASSERT(sc);

	ifp = sc->vte_ifp;

	bzero(ctx.mchash, sizeof(ctx.mchash));
	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
		ctx.rxfilt_perf[i][0] = 0xFFFF;
		ctx.rxfilt_perf[i][1] = 0xFFFF;
		ctx.rxfilt_perf[i][2] = 0xFFFF;
	}
	ctx.nperf = 0;

	mcr = CSR_READ_2(sc, VTE_MCR0);
	mcr &= ~(MCR0_PROMISC | MCR0_MULTICAST);
	mcr |= MCR0_BROADCAST_DIS;
	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		mcr &= ~MCR0_BROADCAST_DIS;
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			mcr |= MCR0_PROMISC;
		if ((ifp->if_flags & IFF_ALLMULTI) != 0)
			mcr |= MCR0_MULTICAST;
		ctx.mchash[0] = 0xFFFF;
		ctx.mchash[1] = 0xFFFF;
		ctx.mchash[2] = 0xFFFF;
		ctx.mchash[3] = 0xFFFF;
		goto chipit;
	}

	if_foreach_llmaddr(ifp, vte_hash_maddr, &ctx);
	if (ctx.mchash[0] != 0 || ctx.mchash[1] != 0 ||
	    ctx.mchash[2] != 0 || ctx.mchash[3] != 0)
		mcr |= MCR0_MULTICAST;

chipit:
	/* Program multicast hash table. */
	CSR_WRITE_2(sc, VTE_MAR0, ctx.mchash[0]);
	CSR_WRITE_2(sc, VTE_MAR1, ctx.mchash[1]);
	CSR_WRITE_2(sc, VTE_MAR2, ctx.mchash[2]);
	CSR_WRITE_2(sc, VTE_MAR3, ctx.mchash[3]);
	/* Program perfect filter table. */
	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 0,
		    ctx.rxfilt_perf[i][0]);
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 2,
		    ctx.rxfilt_perf[i][1]);
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 4,
		    ctx.rxfilt_perf[i][2]);
	}
	CSR_WRITE_2(sc, VTE_MCR0, mcr);
	CSR_READ_2(sc, VTE_MCR0);
}

static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (arg1 == NULL)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;

	return (0);
}

static int
sysctl_hw_vte_int_mod(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req,
	    VTE_IM_BUNDLE_MIN, VTE_IM_BUNDLE_MAX));
}