/*-
 * Copyright (c) 2010, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for DM&P Electronics, Inc, Vortex86 RDC R6040 FastEthernet. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_llc.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>

#include <dev/vte/if_vtereg.h>
#include <dev/vte/if_vtevar.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

MODULE_DEPEND(vte, pci, 1, 1, 1);
MODULE_DEPEND(vte, ether, 1, 1, 1);
MODULE_DEPEND(vte, miibus, 1, 1, 1);

/* Tunables. */
static int tx_deep_copy = 1;
TUNABLE_INT("hw.vte.tx_deep_copy", &tx_deep_copy);
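/*
 * Being a boot-time tunable, this can be set from loader.conf(5),
 * e.g. hw.vte.tx_deep_copy="0", to fall back to m_defrag(9) based TX
 * handling; see vte_encap() for the trade-off.
 */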
/*
 * Devices supported by this driver.
 */
static const struct vte_ident vte_ident_table[] = {
	{ VENDORID_RDC, DEVICEID_RDC_R6040, "RDC R6040 FastEthernet"},
	{ 0, 0, NULL}
};

static int	vte_attach(device_t);
static int	vte_detach(device_t);
static int	vte_dma_alloc(struct vte_softc *);
static void	vte_dma_free(struct vte_softc *);
static void	vte_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static struct vte_txdesc *
		vte_encap(struct vte_softc *, struct mbuf **);
static const struct vte_ident *
		vte_find_ident(device_t);
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf *
		vte_fixup_rx(struct ifnet *, struct mbuf *);
#endif
static void	vte_get_macaddr(struct vte_softc *);
static void	vte_init(void *);
static void	vte_init_locked(struct vte_softc *);
static int	vte_init_rx_ring(struct vte_softc *);
static int	vte_init_tx_ring(struct vte_softc *);
static void	vte_intr(void *);
static int	vte_ioctl(struct ifnet *, u_long, caddr_t);
static void	vte_mac_config(struct vte_softc *);
static int	vte_miibus_readreg(device_t, int, int);
static void	vte_miibus_statchg(device_t);
static int	vte_miibus_writereg(device_t, int, int, int);
static int	vte_mediachange(struct ifnet *);
static int	vte_mediachange_locked(struct ifnet *);
static void	vte_mediastatus(struct ifnet *, struct ifmediareq *);
static int	vte_newbuf(struct vte_softc *, struct vte_rxdesc *);
static int	vte_probe(device_t);
static void	vte_reset(struct vte_softc *);
static int	vte_resume(device_t);
static void	vte_rxeof(struct vte_softc *);
static void	vte_rxfilter(struct vte_softc *);
static int	vte_shutdown(device_t);
static void	vte_start(struct ifnet *);
static void	vte_start_locked(struct vte_softc *);
static void	vte_start_mac(struct vte_softc *);
static void	vte_stats_clear(struct vte_softc *);
static void	vte_stats_update(struct vte_softc *);
static void	vte_stop(struct vte_softc *);
static void	vte_stop_mac(struct vte_softc *);
static int	vte_suspend(device_t);
static void	vte_sysctl_node(struct vte_softc *);
static void	vte_tick(void *);
static void	vte_txeof(struct vte_softc *);
static void	vte_watchdog(struct vte_softc *);
static int	sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int	sysctl_hw_vte_int_mod(SYSCTL_HANDLER_ARGS);

static device_method_t vte_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		vte_probe),
	DEVMETHOD(device_attach,	vte_attach),
	DEVMETHOD(device_detach,	vte_detach),
	DEVMETHOD(device_shutdown,	vte_shutdown),
	DEVMETHOD(device_suspend,	vte_suspend),
	DEVMETHOD(device_resume,	vte_resume),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	vte_miibus_readreg),
	DEVMETHOD(miibus_writereg,	vte_miibus_writereg),
	DEVMETHOD(miibus_statchg,	vte_miibus_statchg),

	DEVMETHOD_END
};

static driver_t vte_driver = {
	"vte",
	vte_methods,
	sizeof(struct vte_softc)
};

static devclass_t vte_devclass;

DRIVER_MODULE(vte, pci, vte_driver, vte_devclass, 0, 0);
DRIVER_MODULE(miibus, vte, miibus_driver, miibus_devclass, 0, 0);

static int
vte_miibus_readreg(device_t dev, int phy, int reg)
{
	struct vte_softc *sc;
	int i;

	sc = device_get_softc(dev);

	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_READ |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_READ) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->vte_dev, "phy read timeout : %d\n", reg);
		return (0);
	}

	return (CSR_READ_2(sc, VTE_MMRD));
}

static int
vte_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct vte_softc *sc;
	int i;

	sc = device_get_softc(dev);

	CSR_WRITE_2(sc, VTE_MMWD, val);
	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_WRITE |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_WRITE) == 0)
			break;
	}

	if (i == 0)
		device_printf(sc->vte_dev, "phy write timeout : %d\n", reg);

	return (0);
}

static void
vte_miibus_statchg(device_t dev)
{
	struct vte_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint16_t val;

	sc = device_get_softc(dev);

	mii = device_get_softc(sc->vte_miibus);
	ifp = sc->vte_ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	sc->vte_flags &= ~VTE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->vte_flags |= VTE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Program MACs with resolved duplex and flow control. */
	if ((sc->vte_flags & VTE_FLAG_LINK) != 0) {
		/*
		 * Timer waiting time : (63 + TIMER * 64) MII clock.
		 * MII clock : 25MHz(100Mbps) or 2.5MHz(10Mbps).
		 */
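		/*
		 * For example, TIMER = 18 at 100Mbps yields
		 * (63 + 18 * 64) / 25MHz = 48.6us, and TIMER = 1 at
		 * 10Mbps yields (63 + 1 * 64) / 2.5MHz = 50.8us,
		 * matching the values noted below.
		 */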
		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		val |= sc->vte_int_rx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
		CSR_WRITE_2(sc, VTE_MRICR, val);

		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		val |= sc->vte_int_tx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
		CSR_WRITE_2(sc, VTE_MTICR, val);

		vte_mac_config(sc);
		vte_start_mac(sc);
	}
}

static void
vte_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vte_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	VTE_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) == 0) {
		VTE_UNLOCK(sc);
		return;
	}
	mii = device_get_softc(sc->vte_miibus);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
	VTE_UNLOCK(sc);
}

static int
vte_mediachange(struct ifnet *ifp)
{
	struct vte_softc *sc;
	int error;

	sc = ifp->if_softc;
	VTE_LOCK(sc);
	error = vte_mediachange_locked(ifp);
	VTE_UNLOCK(sc);
	return (error);
}

static int
vte_mediachange_locked(struct ifnet *ifp)
{
	struct vte_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->vte_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	error = mii_mediachg(mii);

	return (error);
}

static const struct vte_ident *
vte_find_ident(device_t dev)
{
	const struct vte_ident *ident;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	for (ident = vte_ident_table; ident->name != NULL; ident++) {
		if (vendor == ident->vendorid && devid == ident->deviceid)
			return (ident);
	}

	return (NULL);
}

static int
vte_probe(device_t dev)
{
	const struct vte_ident *ident;

	ident = vte_find_ident(dev);
	if (ident != NULL) {
		device_set_desc(dev, ident->name);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

static void
vte_get_macaddr(struct vte_softc *sc)
{
	uint16_t mid;

	/*
	 * It seems there is no way to reload the station address; it
	 * is supposed to be set by the BIOS.
	 */
	mid = CSR_READ_2(sc, VTE_MID0L);
	sc->vte_eaddr[0] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[1] = (mid >> 8) & 0xFF;
	mid = CSR_READ_2(sc, VTE_MID0M);
	sc->vte_eaddr[2] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[3] = (mid >> 8) & 0xFF;
	mid = CSR_READ_2(sc, VTE_MID0H);
	sc->vte_eaddr[4] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[5] = (mid >> 8) & 0xFF;
}

static int
vte_attach(device_t dev)
{
	struct vte_softc *sc;
	struct ifnet *ifp;
	uint16_t macid;
	int error, rid;

	error = 0;
	sc = device_get_softc(dev);
	sc->vte_dev = dev;

	mtx_init(&sc->vte_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->vte_tick_ch, &sc->vte_mtx, 0);
	sc->vte_ident = vte_find_ident(dev);

	/* Map the device. */
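	/*
	 * Prefer the memory mapped register window (BAR 1); fall back
	 * to I/O port space (BAR 0) when no memory BAR was assigned.
	 */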
	pci_enable_busmaster(dev);
	sc->vte_res_id = PCIR_BAR(1);
	sc->vte_res_type = SYS_RES_MEMORY;
	sc->vte_res = bus_alloc_resource_any(dev, sc->vte_res_type,
	    &sc->vte_res_id, RF_ACTIVE);
	if (sc->vte_res == NULL) {
		sc->vte_res_id = PCIR_BAR(0);
		sc->vte_res_type = SYS_RES_IOPORT;
		sc->vte_res = bus_alloc_resource_any(dev, sc->vte_res_type,
		    &sc->vte_res_id, RF_ACTIVE);
		if (sc->vte_res == NULL) {
			device_printf(dev, "cannot map memory/ports.\n");
			mtx_destroy(&sc->vte_mtx);
			return (ENXIO);
		}
	}
	if (bootverbose) {
		device_printf(dev, "using %s space register mapping\n",
		    sc->vte_res_type == SYS_RES_MEMORY ? "memory" : "I/O");
		device_printf(dev, "MAC Identifier : 0x%04x\n",
		    CSR_READ_2(sc, VTE_MACID));
		macid = CSR_READ_2(sc, VTE_MACID_REV);
		device_printf(dev, "MAC Id. 0x%02x, Rev. 0x%02x\n",
		    (macid & VTE_MACID_MASK) >> VTE_MACID_SHIFT,
		    (macid & VTE_MACID_REV_MASK) >> VTE_MACID_REV_SHIFT);
	}

	rid = 0;
	sc->vte_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->vte_irq == NULL) {
		device_printf(dev, "cannot allocate IRQ resources.\n");
		error = ENXIO;
		goto fail;
	}

	/* Reset the ethernet controller. */
	vte_reset(sc);

	if ((error = vte_dma_alloc(sc)) != 0)
		goto fail;

	/* Create device sysctl node. */
	vte_sysctl_node(sc);

	/* Load station address. */
	vte_get_macaddr(sc);

	ifp = sc->vte_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure.\n");
		error = ENXIO;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vte_ioctl;
	ifp->if_start = vte_start;
	ifp->if_init = vte_init;
	ifp->if_snd.ifq_drv_maxlen = VTE_TX_RING_CNT - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Set up MII bus.
	 * The BIOS would have initialized VTE_MPSCCR to catch PHY
	 * status changes, so the driver may be able to extract the
	 * configured PHY address.  Since it is common to see the BIOS
	 * fail to initialize the register (including on the sample
	 * board I have), let mii(4) probe it.  This is more reliable
	 * than relying on the BIOS's initialization.
	 *
	 * Advertising flow control capability to mii(4) was
	 * intentionally disabled due to severe problems in TX
	 * pause frame generation.  See vte_rxeof() for more
	 * details.
	 */
	error = mii_attach(dev, &sc->vte_miibus, ifp, vte_mediachange,
	    vte_mediastatus, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	ether_ifattach(ifp, sc->vte_eaddr);

	/* VLAN capability setup. */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
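	/*
	 * With IFCAP_VLAN_MTU the controller's 1537 byte maximum
	 * packet length (MCR1_PKT_LENGTH_1537, programmed in
	 * vte_init_locked()) leaves room for the 4 byte 802.1Q tag.
	 */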
	/* Tell the upper layer we support VLAN over-sized frames. */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	error = bus_setup_intr(dev, sc->vte_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, vte_intr, sc, &sc->vte_intrhand);
	if (error != 0) {
		device_printf(dev, "could not set up interrupt handler.\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error != 0)
		vte_detach(dev);

	return (error);
}

static int
vte_detach(device_t dev)
{
	struct vte_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	ifp = sc->vte_ifp;
	if (device_is_attached(dev)) {
		VTE_LOCK(sc);
		vte_stop(sc);
		VTE_UNLOCK(sc);
		callout_drain(&sc->vte_tick_ch);
		ether_ifdetach(ifp);
	}

	if (sc->vte_miibus != NULL) {
		device_delete_child(dev, sc->vte_miibus);
		sc->vte_miibus = NULL;
	}
	bus_generic_detach(dev);

	if (sc->vte_intrhand != NULL) {
		bus_teardown_intr(dev, sc->vte_irq, sc->vte_intrhand);
		sc->vte_intrhand = NULL;
	}
	if (sc->vte_irq != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vte_irq);
		sc->vte_irq = NULL;
	}
	if (sc->vte_res != NULL) {
		bus_release_resource(dev, sc->vte_res_type, sc->vte_res_id,
		    sc->vte_res);
		sc->vte_res = NULL;
	}
	if (ifp != NULL) {
		if_free(ifp);
		sc->vte_ifp = NULL;
	}
	vte_dma_free(sc);
	mtx_destroy(&sc->vte_mtx);

	return (0);
}

#define	VTE_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)

static void
vte_sysctl_node(struct vte_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *parent;
	struct sysctl_oid *tree;
	struct vte_hw_stats *stats;
	int error;

	stats = &sc->vte_stats;
	ctx = device_get_sysctl_ctx(sc->vte_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vte_dev));

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_rx_mod",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->vte_int_rx_mod, 0,
	    sysctl_hw_vte_int_mod, "I", "vte RX interrupt moderation");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_tx_mod",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->vte_int_tx_mod, 0,
	    sysctl_hw_vte_int_mod, "I", "vte TX interrupt moderation");
	/* Pull in device tunables. */
	sc->vte_int_rx_mod = VTE_IM_RX_BUNDLE_DEFAULT;
	error = resource_int_value(device_get_name(sc->vte_dev),
	    device_get_unit(sc->vte_dev), "int_rx_mod", &sc->vte_int_rx_mod);
	if (error == 0) {
		if (sc->vte_int_rx_mod < VTE_IM_BUNDLE_MIN ||
		    sc->vte_int_rx_mod > VTE_IM_BUNDLE_MAX) {
			device_printf(sc->vte_dev, "int_rx_mod value out of "
			    "range; using default: %d\n",
			    VTE_IM_RX_BUNDLE_DEFAULT);
			sc->vte_int_rx_mod = VTE_IM_RX_BUNDLE_DEFAULT;
		}
	}

	sc->vte_int_tx_mod = VTE_IM_TX_BUNDLE_DEFAULT;
	error = resource_int_value(device_get_name(sc->vte_dev),
	    device_get_unit(sc->vte_dev), "int_tx_mod", &sc->vte_int_tx_mod);
	if (error == 0) {
		if (sc->vte_int_tx_mod < VTE_IM_BUNDLE_MIN ||
		    sc->vte_int_tx_mod > VTE_IM_BUNDLE_MAX) {
			device_printf(sc->vte_dev, "int_tx_mod value out of "
			    "range; using default: %d\n",
			    VTE_IM_TX_BUNDLE_DEFAULT);
			sc->vte_int_tx_mod = VTE_IM_TX_BUNDLE_DEFAULT;
		}
	}

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "VTE statistics");
	parent = SYSCTL_CHILDREN(tree);

	/* RX statistics. */
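	/*
	 * The hardware event counters behind these nodes are only 8 or
	 * 16 bits wide; vte_stats_update() accumulates them into the
	 * 32-bit software counters exported here (see also the
	 * MISR_EVENT_CNT_OFLOW handling in vte_intr()).
	 */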
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
	    NULL, "RX MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	VTE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->rx_frames, "Good frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
	    &stats->rx_bcast_frames, "Good broadcast frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
	    &stats->rx_mcast_frames, "Good multicast frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "runt",
	    &stats->rx_runts, "Too short frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
	    &stats->rx_crcerrs, "CRC errors");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "long_frames",
	    &stats->rx_long_frames,
	    "Frames that have longer length than maximum packet length");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "fifo_full",
	    &stats->rx_fifo_full, "FIFO full");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "desc_unavail",
	    &stats->rx_desc_unavail, "Descriptor unavailable frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->rx_pause_frames, "Pause control frames");

	/* TX statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
	    NULL, "TX MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	VTE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->tx_frames, "Good frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "underruns",
	    &stats->tx_underruns, "FIFO underruns");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
	    &stats->tx_late_colls, "Late collisions");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->tx_pause_frames, "Pause control frames");
}

#undef VTE_SYSCTL_STAT_ADD32

struct vte_dmamap_arg {
	bus_addr_t	vte_busaddr;
};

static void
vte_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct vte_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	ctx = (struct vte_dmamap_arg *)arg;
	ctx->vte_busaddr = segs[0].ds_addr;
}

static int
vte_dma_alloc(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	struct vte_dmamap_arg ctx;
	int error, i;

	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->vte_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_parent_tag);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not create parent DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for TX descriptor ring. */
	error = bus_dma_tag_create(
	    sc->vte_cdata.vte_parent_tag, /* parent */
	    VTE_TX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    VTE_TX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    VTE_TX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not create TX ring DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for RX free descriptor ring. */
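	/*
	 * As with the TX ring above, the ring is requested as a single
	 * physically contiguous segment so its base address can be
	 * programmed into one 16-bit register pair
	 * (VTE_MRDSA0/VTE_MRDSA1) in vte_init_locked().
	 */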
	error = bus_dma_tag_create(
	    sc->vte_cdata.vte_parent_tag, /* parent */
	    VTE_RX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    VTE_RX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    VTE_RX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not create RX ring DMA tag.\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for TX ring. */
	error = bus_dmamem_alloc(sc->vte_cdata.vte_tx_ring_tag,
	    (void **)&sc->vte_cdata.vte_tx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->vte_cdata.vte_tx_ring_map);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not allocate DMA'able memory for TX ring.\n");
		goto fail;
	}
	ctx.vte_busaddr = 0;
	error = bus_dmamap_load(sc->vte_cdata.vte_tx_ring_tag,
	    sc->vte_cdata.vte_tx_ring_map, sc->vte_cdata.vte_tx_ring,
	    VTE_TX_RING_SZ, vte_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.vte_busaddr == 0) {
		device_printf(sc->vte_dev,
		    "could not load DMA'able memory for TX ring.\n");
		goto fail;
	}
	sc->vte_cdata.vte_tx_ring_paddr = ctx.vte_busaddr;

	/* Allocate DMA'able memory and load the DMA map for RX ring. */
	error = bus_dmamem_alloc(sc->vte_cdata.vte_rx_ring_tag,
	    (void **)&sc->vte_cdata.vte_rx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->vte_cdata.vte_rx_ring_map);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not allocate DMA'able memory for RX ring.\n");
		goto fail;
	}
	ctx.vte_busaddr = 0;
	error = bus_dmamap_load(sc->vte_cdata.vte_rx_ring_tag,
	    sc->vte_cdata.vte_rx_ring_map, sc->vte_cdata.vte_rx_ring,
	    VTE_RX_RING_SZ, vte_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.vte_busaddr == 0) {
		device_printf(sc->vte_dev,
		    "could not load DMA'able memory for RX ring.\n");
		goto fail;
	}
	sc->vte_cdata.vte_rx_ring_paddr = ctx.vte_busaddr;

	/* Create TX buffer parent tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->vte_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_buffer_tag);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not create parent buffer DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for TX buffers. */
	error = bus_dma_tag_create(
	    sc->vte_cdata.vte_buffer_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_tx_tag);
	if (error != 0) {
		device_printf(sc->vte_dev, "could not create TX DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for RX buffers. */
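	/*
	 * RX buffers have a hardware alignment requirement
	 * (VTE_RX_BUF_ALIGN); vte_newbuf() also reserves 4 bytes of
	 * headroom in each cluster, which vte_fixup_rx() relies on
	 * when shifting frames on strict-alignment machines.
	 */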
	error = bus_dma_tag_create(
	    sc->vte_cdata.vte_buffer_tag, /* parent */
	    VTE_RX_BUF_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_rx_tag);
	if (error != 0) {
		device_printf(sc->vte_dev, "could not create RX DMA tag.\n");
		goto fail;
	}
	/* Create DMA maps for TX buffers. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->vte_cdata.vte_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->vte_dev,
			    "could not create TX dmamap.\n");
			goto fail;
		}
	}
	/* Create DMA maps for RX buffers. */
	if ((error = bus_dmamap_create(sc->vte_cdata.vte_rx_tag, 0,
	    &sc->vte_cdata.vte_rx_sparemap)) != 0) {
		device_printf(sc->vte_dev,
		    "could not create spare RX dmamap.\n");
		goto fail;
	}
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->vte_cdata.vte_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->vte_dev,
			    "could not create RX dmamap.\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
vte_dma_free(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	/* TX buffers. */
	if (sc->vte_cdata.vte_tx_tag != NULL) {
		for (i = 0; i < VTE_TX_RING_CNT; i++) {
			txd = &sc->vte_cdata.vte_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(sc->vte_cdata.vte_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->vte_cdata.vte_tx_tag);
		sc->vte_cdata.vte_tx_tag = NULL;
	}
	/* RX buffers */
	if (sc->vte_cdata.vte_rx_tag != NULL) {
		for (i = 0; i < VTE_RX_RING_CNT; i++) {
			rxd = &sc->vte_cdata.vte_rxdesc[i];
			if (rxd->rx_dmamap != NULL) {
				bus_dmamap_destroy(sc->vte_cdata.vte_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->vte_cdata.vte_rx_sparemap != NULL) {
			bus_dmamap_destroy(sc->vte_cdata.vte_rx_tag,
			    sc->vte_cdata.vte_rx_sparemap);
			sc->vte_cdata.vte_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->vte_cdata.vte_rx_tag);
		sc->vte_cdata.vte_rx_tag = NULL;
	}
	/* TX descriptor ring. */
	if (sc->vte_cdata.vte_tx_ring_tag != NULL) {
		if (sc->vte_cdata.vte_tx_ring_map != NULL)
			bus_dmamap_unload(sc->vte_cdata.vte_tx_ring_tag,
			    sc->vte_cdata.vte_tx_ring_map);
		if (sc->vte_cdata.vte_tx_ring_map != NULL &&
		    sc->vte_cdata.vte_tx_ring != NULL)
			bus_dmamem_free(sc->vte_cdata.vte_tx_ring_tag,
			    sc->vte_cdata.vte_tx_ring,
			    sc->vte_cdata.vte_tx_ring_map);
		sc->vte_cdata.vte_tx_ring = NULL;
		sc->vte_cdata.vte_tx_ring_map = NULL;
		bus_dma_tag_destroy(sc->vte_cdata.vte_tx_ring_tag);
		sc->vte_cdata.vte_tx_ring_tag = NULL;
	}
	/* RX ring. */
	if (sc->vte_cdata.vte_rx_ring_tag != NULL) {
		if (sc->vte_cdata.vte_rx_ring_map != NULL)
			bus_dmamap_unload(sc->vte_cdata.vte_rx_ring_tag,
			    sc->vte_cdata.vte_rx_ring_map);
		if (sc->vte_cdata.vte_rx_ring_map != NULL &&
		    sc->vte_cdata.vte_rx_ring != NULL)
			bus_dmamem_free(sc->vte_cdata.vte_rx_ring_tag,
			    sc->vte_cdata.vte_rx_ring,
			    sc->vte_cdata.vte_rx_ring_map);
		sc->vte_cdata.vte_rx_ring = NULL;
		sc->vte_cdata.vte_rx_ring_map = NULL;
		bus_dma_tag_destroy(sc->vte_cdata.vte_rx_ring_tag);
		sc->vte_cdata.vte_rx_ring_tag = NULL;
	}
	if (sc->vte_cdata.vte_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->vte_cdata.vte_buffer_tag);
		sc->vte_cdata.vte_buffer_tag = NULL;
	}
	if (sc->vte_cdata.vte_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->vte_cdata.vte_parent_tag);
		sc->vte_cdata.vte_parent_tag = NULL;
	}
}

static int
vte_shutdown(device_t dev)
{

	return (vte_suspend(dev));
}

static int
vte_suspend(device_t dev)
{
	struct vte_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	VTE_LOCK(sc);
	ifp = sc->vte_ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		vte_stop(sc);
	VTE_UNLOCK(sc);

	return (0);
}

static int
vte_resume(device_t dev)
{
	struct vte_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	VTE_LOCK(sc);
	ifp = sc->vte_ifp;
	if ((ifp->if_flags & IFF_UP) != 0) {
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		vte_init_locked(sc);
	}
	VTE_UNLOCK(sc);

	return (0);
}

static struct vte_txdesc *
vte_encap(struct vte_softc *sc, struct mbuf **m_head)
{
	struct vte_txdesc *txd;
	struct mbuf *m, *n;
	bus_dma_segment_t txsegs[1];
	int copy, error, nsegs, padlen;

	VTE_LOCK_ASSERT(sc);

	M_ASSERTPKTHDR((*m_head));

	txd = &sc->vte_cdata.vte_txdesc[sc->vte_cdata.vte_tx_prod];
	m = *m_head;
	/*
	 * The controller doesn't auto-pad, so we have to pad short
	 * frames out to the minimum frame length.
	 */
	if (m->m_pkthdr.len < VTE_MIN_FRAMELEN)
		padlen = VTE_MIN_FRAMELEN - m->m_pkthdr.len;
	else
		padlen = 0;

	/*
	 * The controller does not support multi-fragmented TX buffers
	 * and spends most of its TX processing time in de-fragmenting
	 * TX buffers.  Either a faster CPU or a more advanced
	 * controller DMA engine would be required to speed up the TX
	 * path processing.
	 * To mitigate the de-fragmenting issue, perform a deep copy
	 * from fragmented mbuf chains to a pre-allocated mbuf cluster
	 * at the extra cost of kernel memory.  For frames that are
	 * composed of a single TX buffer, the deep copy is bypassed.
	 */
	if (tx_deep_copy != 0) {
		copy = 0;
		if (m->m_next != NULL)
			copy++;
		if (padlen > 0 && (M_WRITABLE(m) == 0 ||
		    padlen > M_TRAILINGSPACE(m)))
			copy++;
		if (copy != 0) {
			/* Avoid expensive m_defrag(9) and do deep copy. */
			n = sc->vte_cdata.vte_txmbufs[sc->vte_cdata.vte_tx_prod];
			m_copydata(m, 0, m->m_pkthdr.len, mtod(n, char *));
			n->m_pkthdr.len = m->m_pkthdr.len;
			n->m_len = m->m_pkthdr.len;
			m = n;
			txd->tx_flags |= VTE_TXMBUF;
		}

		if (padlen > 0) {
			/* Zero out the bytes in the pad area. */
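			/*
			 * The pad bytes live past m_pkthdr.len in the
			 * writable cluster; extending both length
			 * fields below makes the subsequent DMA
			 * mapping cover the padding as well.
			 */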
			bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
			m->m_pkthdr.len += padlen;
			m->m_len = m->m_pkthdr.len;
		}
	} else {
		if (M_WRITABLE(m) == 0) {
			if (m->m_next != NULL || padlen > 0) {
				/* Get a writable copy. */
				m = m_dup(*m_head, M_NOWAIT);
				/* Release original mbuf chains. */
				m_freem(*m_head);
				if (m == NULL) {
					*m_head = NULL;
					return (NULL);
				}
				*m_head = m;
			}
		}

		if (m->m_next != NULL) {
			m = m_defrag(*m_head, M_NOWAIT);
			if (m == NULL) {
				m_freem(*m_head);
				*m_head = NULL;
				return (NULL);
			}
			*m_head = m;
		}

		if (padlen > 0) {
			if (M_TRAILINGSPACE(m) < padlen) {
				m = m_defrag(*m_head, M_NOWAIT);
				if (m == NULL) {
					m_freem(*m_head);
					*m_head = NULL;
					return (NULL);
				}
				*m_head = m;
			}
			/* Zero out the bytes in the pad area. */
			bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
			m->m_pkthdr.len += padlen;
			m->m_len = m->m_pkthdr.len;
		}
	}

	error = bus_dmamap_load_mbuf_sg(sc->vte_cdata.vte_tx_tag,
	    txd->tx_dmamap, m, txsegs, &nsegs, 0);
	if (error != 0) {
		txd->tx_flags &= ~VTE_TXMBUF;
		return (NULL);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
	bus_dmamap_sync(sc->vte_cdata.vte_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	txd->tx_desc->dtlen = htole16(VTE_TX_LEN(txsegs[0].ds_len));
	txd->tx_desc->dtbp = htole32(txsegs[0].ds_addr);
	sc->vte_cdata.vte_tx_cnt++;
	/* Update producer index. */
	VTE_DESC_INC(sc->vte_cdata.vte_tx_prod, VTE_TX_RING_CNT);

	/* Finally hand over ownership to controller. */
	txd->tx_desc->dtst = htole16(VTE_DTST_TX_OWN);
	txd->tx_m = m;

	return (txd);
}

static void
vte_start(struct ifnet *ifp)
{
	struct vte_softc *sc;

	sc = ifp->if_softc;
	VTE_LOCK(sc);
	vte_start_locked(sc);
	VTE_UNLOCK(sc);
}

static void
vte_start_locked(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct vte_txdesc *txd;
	struct mbuf *m_head;
	int enq;

	ifp = sc->vte_ifp;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->vte_flags & VTE_FLAG_LINK) == 0)
		return;

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
		/* Reserve one free TX descriptor. */
		if (sc->vte_cdata.vte_tx_cnt >= VTE_TX_RING_CNT - 1) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring.  If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if ((txd = vte_encap(sc, &m_head)) == NULL) {
			if (m_head != NULL)
				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
		/* Free consumed TX frame. */
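		/*
		 * In deep copy mode the frame was already copied into
		 * the driver-owned cluster by vte_encap(), so the
		 * original chain can be freed now; otherwise it is
		 * freed in vte_txeof() after transmission.
		 */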
		if ((txd->tx_flags & VTE_TXMBUF) != 0)
			m_freem(m_head);
	}

	if (enq > 0) {
		bus_dmamap_sync(sc->vte_cdata.vte_tx_ring_tag,
		    sc->vte_cdata.vte_tx_ring_map, BUS_DMASYNC_PREREAD |
		    BUS_DMASYNC_PREWRITE);
		CSR_WRITE_2(sc, VTE_TX_POLL, TX_POLL_START);
		sc->vte_watchdog_timer = VTE_TX_TIMEOUT;
	}
}

static void
vte_watchdog(struct vte_softc *sc)
{
	struct ifnet *ifp;

	VTE_LOCK_ASSERT(sc);

	if (sc->vte_watchdog_timer == 0 || --sc->vte_watchdog_timer)
		return;

	ifp = sc->vte_ifp;
	if_printf(sc->vte_ifp, "watchdog timeout -- resetting\n");
	ifp->if_oerrors++;
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	vte_init_locked(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		vte_start_locked(sc);
}

static int
vte_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct vte_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;
	switch (cmd) {
	case SIOCSIFFLAGS:
		VTE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
			    ((ifp->if_flags ^ sc->vte_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
				vte_rxfilter(sc);
			else
				vte_init_locked(sc);
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			vte_stop(sc);
		sc->vte_if_flags = ifp->if_flags;
		VTE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		VTE_LOCK(sc);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			vte_rxfilter(sc);
		VTE_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->vte_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

static void
vte_mac_config(struct vte_softc *sc)
{
	struct mii_data *mii;
	uint16_t mcr;

	VTE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->vte_miibus);
	mcr = CSR_READ_2(sc, VTE_MCR0);
	mcr &= ~(MCR0_FC_ENB | MCR0_FULL_DUPLEX);
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		mcr |= MCR0_FULL_DUPLEX;
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
		/*
		 * The data sheet is not clear whether the controller
		 * honors received pause frames or not.  There is no
		 * separate control bit for RX pause frames, so just
		 * enable the MCR0_FC_ENB bit.
		 */
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
#endif
	}
	CSR_WRITE_2(sc, VTE_MCR0, mcr);
}

static void
vte_stats_clear(struct vte_softc *sc)
{

	/* Reading the counter registers clears their contents. */
	CSR_READ_2(sc, VTE_CNT_RX_DONE);
	CSR_READ_2(sc, VTE_CNT_MECNT0);
	CSR_READ_2(sc, VTE_CNT_MECNT1);
	CSR_READ_2(sc, VTE_CNT_MECNT2);
	CSR_READ_2(sc, VTE_CNT_MECNT3);
	CSR_READ_2(sc, VTE_CNT_TX_DONE);
	CSR_READ_2(sc, VTE_CNT_MECNT4);
	CSR_READ_2(sc, VTE_CNT_PAUSE);
}

static void
vte_stats_update(struct vte_softc *sc)
{
	struct vte_hw_stats *stat;
	struct ifnet *ifp;
	uint16_t value;

	VTE_LOCK_ASSERT(sc);

	ifp = sc->vte_ifp;
	stat = &sc->vte_stats;

	CSR_READ_2(sc, VTE_MECISR);
	/* RX stats. */
	stat->rx_frames += CSR_READ_2(sc, VTE_CNT_RX_DONE);
	value = CSR_READ_2(sc, VTE_CNT_MECNT0);
	stat->rx_bcast_frames += (value >> 8);
	stat->rx_mcast_frames += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT1);
	stat->rx_runts += (value >> 8);
	stat->rx_crcerrs += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT2);
	stat->rx_long_frames += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT3);
	stat->rx_fifo_full += (value >> 8);
	stat->rx_desc_unavail += (value & 0xFF);

	/* TX stats. */
	stat->tx_frames += CSR_READ_2(sc, VTE_CNT_TX_DONE);
	value = CSR_READ_2(sc, VTE_CNT_MECNT4);
	stat->tx_underruns += (value >> 8);
	stat->tx_late_colls += (value & 0xFF);

	value = CSR_READ_2(sc, VTE_CNT_PAUSE);
	stat->tx_pause_frames += (value >> 8);
	stat->rx_pause_frames += (value & 0xFF);

	/* Update ifp counters. */
	ifp->if_opackets = stat->tx_frames;
	ifp->if_collisions = stat->tx_late_colls;
	ifp->if_oerrors = stat->tx_late_colls + stat->tx_underruns;
	ifp->if_ipackets = stat->rx_frames;
	ifp->if_ierrors = stat->rx_crcerrs + stat->rx_runts +
	    stat->rx_long_frames + stat->rx_fifo_full;
}

static void
vte_intr(void *arg)
{
	struct vte_softc *sc;
	struct ifnet *ifp;
	uint16_t status;
	int n;

	sc = (struct vte_softc *)arg;
	VTE_LOCK(sc);

	ifp = sc->vte_ifp;
	/* Reading VTE_MISR acknowledges interrupts. */
	status = CSR_READ_2(sc, VTE_MISR);
	if ((status & VTE_INTRS) == 0) {
		/* Not ours. */
		VTE_UNLOCK(sc);
		return;
	}

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
	for (n = 8; (status & VTE_INTRS) != 0;) {
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
		if ((status & (MISR_RX_DONE | MISR_RX_DESC_UNAVAIL |
		    MISR_RX_FIFO_FULL)) != 0)
			vte_rxeof(sc);
		if ((status & MISR_TX_DONE) != 0)
			vte_txeof(sc);
		if ((status & MISR_EVENT_CNT_OFLOW) != 0)
			vte_stats_update(sc);
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			vte_start_locked(sc);
		if (--n > 0)
			status = CSR_READ_2(sc, VTE_MISR);
		else
			break;
	}

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		/* Re-enable interrupts. */
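		/*
		 * VTE_MIER was cleared at the top of the handler;
		 * restore the interrupt mask only while the interface
		 * is still running so a concurrent vte_stop() leaves
		 * interrupts disabled.
		 */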
		CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);
	}
	VTE_UNLOCK(sc);
}

static void
vte_txeof(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct vte_txdesc *txd;
	uint16_t status;
	int cons, prog;

	VTE_LOCK_ASSERT(sc);

	ifp = sc->vte_ifp;

	if (sc->vte_cdata.vte_tx_cnt == 0)
		return;
	bus_dmamap_sync(sc->vte_cdata.vte_tx_ring_tag,
	    sc->vte_cdata.vte_tx_ring_map, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	cons = sc->vte_cdata.vte_tx_cons;
	/*
	 * Go through our TX list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (prog = 0; sc->vte_cdata.vte_tx_cnt > 0; prog++) {
		txd = &sc->vte_cdata.vte_txdesc[cons];
		status = le16toh(txd->tx_desc->dtst);
		if ((status & VTE_DTST_TX_OWN) != 0)
			break;
		sc->vte_cdata.vte_tx_cnt--;
		/* Reclaim transmitted mbufs. */
		bus_dmamap_sync(sc->vte_cdata.vte_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vte_cdata.vte_tx_tag, txd->tx_dmamap);
		if ((txd->tx_flags & VTE_TXMBUF) == 0)
			m_freem(txd->tx_m);
		txd->tx_flags &= ~VTE_TXMBUF;
		txd->tx_m = NULL;
		VTE_DESC_INC(cons, VTE_TX_RING_CNT);
	}

	if (prog > 0) {
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		sc->vte_cdata.vte_tx_cons = cons;
		/*
		 * Unarm the watchdog timer only when there are no
		 * pending frames in the TX queue.
		 */
		if (sc->vte_cdata.vte_tx_cnt == 0)
			sc->vte_watchdog_timer = 0;
	}
}

static int
vte_newbuf(struct vte_softc *sc, struct vte_rxdesc *rxd)
{
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, sizeof(uint32_t));

	if (bus_dmamap_load_mbuf_sg(sc->vte_cdata.vte_rx_tag,
	    sc->vte_cdata.vte_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->vte_cdata.vte_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->vte_cdata.vte_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->vte_cdata.vte_rx_sparemap;
	sc->vte_cdata.vte_rx_sparemap = map;
	bus_dmamap_sync(sc->vte_cdata.vte_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	rxd->rx_desc->drbp = htole32(segs[0].ds_addr);
	rxd->rx_desc->drlen = htole16(VTE_RX_LEN(segs[0].ds_len));
	rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);

	return (0);
}

/*
 * This controller is not expected to be seen on strict-alignment
 * architectures, but make it work there for completeness.
 */
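/*
 * RX buffers are 4-byte aligned for the controller, so the payload
 * that follows the 14 byte Ethernet header starts on a 2 byte
 * boundary.  vte_fixup_rx() moves the whole frame back by ETHER_ALIGN
 * (2) bytes into the headroom reserved by vte_newbuf(), which leaves
 * the IP header 4-byte aligned.
 */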
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf *
vte_fixup_rx(struct ifnet *ifp, struct mbuf *m)
{
	uint16_t *src, *dst;
	int i;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;
	m->m_data -= ETHER_ALIGN;
	return (m);
}
#endif

static void
vte_rxeof(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct vte_rxdesc *rxd;
	struct mbuf *m;
	uint16_t status, total_len;
	int cons, prog;

	bus_dmamap_sync(sc->vte_cdata.vte_rx_ring_tag,
	    sc->vte_cdata.vte_rx_ring_map, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	cons = sc->vte_cdata.vte_rx_cons;
	ifp = sc->vte_ifp;
	for (prog = 0; (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0; prog++,
	    VTE_DESC_INC(cons, VTE_RX_RING_CNT)) {
		rxd = &sc->vte_cdata.vte_rxdesc[cons];
		status = le16toh(rxd->rx_desc->drst);
		if ((status & VTE_DRST_RX_OWN) != 0)
			break;
		total_len = VTE_RX_LEN(le16toh(rxd->rx_desc->drlen));
		m = rxd->rx_m;
		if ((status & VTE_DRST_RX_OK) == 0) {
			/* Discard errored frame. */
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}
		if (vte_newbuf(sc, rxd) != 0) {
			ifp->if_iqdrops++;
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}

		/*
		 * It seems there is no way to strip FCS bytes.
		 */
		m->m_pkthdr.len = m->m_len = total_len - ETHER_CRC_LEN;
		m->m_pkthdr.rcvif = ifp;
#ifndef __NO_STRICT_ALIGNMENT
		vte_fixup_rx(ifp, m);
#endif
		VTE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		VTE_LOCK(sc);
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->vte_cdata.vte_rx_cons = cons;
		/*
		 * Sync updated RX descriptors so that the controller
		 * sees the modified RX buffer addresses.
		 */
		bus_dmamap_sync(sc->vte_cdata.vte_rx_ring_tag,
		    sc->vte_cdata.vte_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
#ifdef notyet
		/*
		 * Update the residue counter.  The controller does not
		 * keep track of the number of available RX descriptors,
		 * so the driver has to update VTE_MRDCR to let the
		 * controller know how many free RX descriptors were
		 * added.  This is a mechanism similar to the one used
		 * in VIA velocity controllers, and it indicates that
		 * the controller just polls the OWN bit of the current
		 * RX descriptor pointer.  A couple of severe issues
		 * were seen on a sample board where the controller
		 * continuously emitted TX pause frames once the RX
		 * pause threshold was crossed.  Once triggered it
		 * never recovered from that state; at least I could
		 * not find a way to bring it back.  This issue
		 * effectively disconnected the system from the
		 * network.  Also, the controller used
		 * 00:00:00:00:00:00 as the source station address of
		 * the TX pause frames.  This is probably one of the
		 * reasons why the vendor recommends not to enable flow
		 * control on the R6040 controller.
		 */
		CSR_WRITE_2(sc, VTE_MRDCR, prog |
		    (((VTE_RX_RING_CNT * 2) / 10) <<
		    VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));
#endif
	}
}

static void
vte_tick(void *arg)
{
	struct vte_softc *sc;
	struct mii_data *mii;

	sc = (struct vte_softc *)arg;

	VTE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->vte_miibus);
	mii_tick(mii);
	vte_stats_update(sc);
	vte_txeof(sc);
	vte_watchdog(sc);
	callout_reset(&sc->vte_tick_ch, hz, vte_tick, sc);
}

static void
vte_reset(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	mcr = CSR_READ_2(sc, VTE_MCR1);
	CSR_WRITE_2(sc, VTE_MCR1, mcr | MCR1_MAC_RESET);
	for (i = VTE_RESET_TIMEOUT; i > 0; i--) {
		DELAY(10);
		if ((CSR_READ_2(sc, VTE_MCR1) & MCR1_MAC_RESET) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->vte_dev, "reset timeout(0x%04x)!\n", mcr);
	/*
	 * Follow the vendor recommended way to reset the MAC.  The
	 * vendor confirms that relying on MCR1_MAC_RESET of VTE_MCR1
	 * is not reliable, so manually reset the internal state
	 * machine.
	 */
	CSR_WRITE_2(sc, VTE_MACSM, 0x0002);
	CSR_WRITE_2(sc, VTE_MACSM, 0);
	DELAY(5000);
}

static void
vte_init(void *xsc)
{
	struct vte_softc *sc;

	sc = (struct vte_softc *)xsc;
	VTE_LOCK(sc);
	vte_init_locked(sc);
	VTE_UNLOCK(sc);
}

static void
vte_init_locked(struct vte_softc *sc)
{
	struct ifnet *ifp;
	bus_addr_t paddr;
	uint8_t *eaddr;

	VTE_LOCK_ASSERT(sc);

	ifp = sc->vte_ifp;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;
	/*
	 * Cancel any pending I/O.
	 */
	vte_stop(sc);
	/*
	 * Reset the chip to a known state.
	 */
	vte_reset(sc);

	/* Initialize RX descriptors. */
	if (vte_init_rx_ring(sc) != 0) {
		device_printf(sc->vte_dev, "no memory for RX buffers.\n");
		vte_stop(sc);
		return;
	}
	if (vte_init_tx_ring(sc) != 0) {
		device_printf(sc->vte_dev, "no memory for TX buffers.\n");
		vte_stop(sc);
		return;
	}

	/*
	 * Reprogram the station address.  The controller supports up
	 * to 4 different station addresses, so the driver programs the
	 * first station address as its own Ethernet address and
	 * configures the remaining three addresses as perfect
	 * multicast addresses.
	 */
	eaddr = IF_LLADDR(sc->vte_ifp);
	CSR_WRITE_2(sc, VTE_MID0L, eaddr[1] << 8 | eaddr[0]);
	CSR_WRITE_2(sc, VTE_MID0M, eaddr[3] << 8 | eaddr[2]);
	CSR_WRITE_2(sc, VTE_MID0H, eaddr[5] << 8 | eaddr[4]);

	/* Set TX descriptor base addresses. */
	paddr = sc->vte_cdata.vte_tx_ring_paddr;
	CSR_WRITE_2(sc, VTE_MTDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MTDSA0, paddr & 0xFFFF);
	/* Set RX descriptor base addresses. */
	paddr = sc->vte_cdata.vte_rx_ring_paddr;
	CSR_WRITE_2(sc, VTE_MRDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MRDSA0, paddr & 0xFFFF);
	/*
	 * Initialize the RX descriptor residue counter and set the RX
	 * pause threshold to 20% of the available RX descriptors.
	 * See the comments in vte_rxeof() for details on the flow
	 * control issues.
	 */
	CSR_WRITE_2(sc, VTE_MRDCR, (VTE_RX_RING_CNT & VTE_MRDCR_RESIDUE_MASK) |
	    (((VTE_RX_RING_CNT * 2) / 10) << VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));

	/*
	 * Always use the maximum frame size the controller can
	 * support.  Otherwise received frames that have a longer
	 * frame length than the vte(4) MTU would be silently dropped
	 * in the controller.  This would break path-MTU discovery as
	 * the sender would not get any responses from the receiver.
	 * The RX buffer size should be a multiple of 4.
	 * Note, jumbo frames are silently ignored by the controller
	 * and even the MAC counters do not detect them.
	 */
	CSR_WRITE_2(sc, VTE_MRBSR, VTE_RX_BUF_SIZE_MAX);

	/* Configure FIFO. */
	CSR_WRITE_2(sc, VTE_MBCR, MBCR_FIFO_XFER_LENGTH_16 |
	    MBCR_TX_FIFO_THRESH_64 | MBCR_RX_FIFO_THRESH_16 |
	    MBCR_SDRAM_BUS_REQ_TIMER_DEFAULT);

	/*
	 * Configure TX/RX MACs.  The actual resolved duplex and flow
	 * control configuration is done after detecting a valid link.
	 * Note, we don't generate early interrupts here either, since
	 * FreeBSD does not have the interrupt latency problems that
	 * Windows has.
	 */
	CSR_WRITE_2(sc, VTE_MCR0, MCR0_ACCPT_LONG_PKT);
	/*
	 * We manually keep track of PHY status changes to configure
	 * the resolved duplex and flow control since only the duplex
	 * configuration can be automatically reflected to MCR0.
	 */
	CSR_WRITE_2(sc, VTE_MCR1, MCR1_PKT_LENGTH_1537 |
	    MCR1_EXCESS_COL_RETRY_16);

	/* Initialize RX filter. */
	vte_rxfilter(sc);

	/* Disable TX/RX interrupt moderation control. */
	CSR_WRITE_2(sc, VTE_MRICR, 0);
	CSR_WRITE_2(sc, VTE_MTICR, 0);

	/* Enable MAC event counter interrupts. */
	CSR_WRITE_2(sc, VTE_MECIER, VTE_MECIER_INTRS);
	/* Clear MAC statistics. */
	vte_stats_clear(sc);

	/* Acknowledge all pending interrupts and clear them. */
	CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);
	CSR_WRITE_2(sc, VTE_MISR, 0);

	sc->vte_flags &= ~VTE_FLAG_LINK;
	/* Switch to the current media. */
	vte_mediachange_locked(ifp);

	callout_reset(&sc->vte_tick_ch, hz, vte_tick, sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}

static void
vte_stop(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	VTE_LOCK_ASSERT(sc);
	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp = sc->vte_ifp;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->vte_flags &= ~VTE_FLAG_LINK;
	callout_stop(&sc->vte_tick_ch);
	sc->vte_watchdog_timer = 0;
	vte_stats_update(sc);
	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
	CSR_WRITE_2(sc, VTE_MECIER, 0);
	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Clear interrupts. */
	CSR_READ_2(sc, VTE_MISR);
	/*
	 * Free TX/RX mbufs still in the queues.
	 */
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->vte_cdata.vte_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->vte_cdata.vte_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->vte_cdata.vte_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->vte_cdata.vte_tx_tag,
			    txd->tx_dmamap);
			if ((txd->tx_flags & VTE_TXMBUF) == 0)
				m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_flags &= ~VTE_TXMBUF;
		}
	}
	/* Free TX mbuf pools used for deep copy. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		if (sc->vte_cdata.vte_txmbufs[i] != NULL) {
			m_freem(sc->vte_cdata.vte_txmbufs[i]);
			sc->vte_cdata.vte_txmbufs[i] = NULL;
		}
	}
}

static void
vte_start_mac(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	VTE_LOCK_ASSERT(sc);

	/* Enable RX/TX MACs. */
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) !=
	    (MCR0_RX_ENB | MCR0_TX_ENB)) {
		mcr |= MCR0_RX_ENB | MCR0_TX_ENB;
		CSR_WRITE_2(sc, VTE_MCR0, mcr);
		for (i = VTE_TIMEOUT; i > 0; i--) {
			mcr = CSR_READ_2(sc, VTE_MCR0);
			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) ==
			    (MCR0_RX_ENB | MCR0_TX_ENB))
				break;
			DELAY(10);
		}
		if (i == 0)
			device_printf(sc->vte_dev,
			    "could not enable RX/TX MAC(0x%04x)!\n", mcr);
	}
}

static void
vte_stop_mac(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	VTE_LOCK_ASSERT(sc);

	/* Disable RX/TX MACs. */
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) != 0) {
		mcr &= ~(MCR0_RX_ENB | MCR0_TX_ENB);
		CSR_WRITE_2(sc, VTE_MCR0, mcr);
		for (i = VTE_TIMEOUT; i > 0; i--) {
			mcr = CSR_READ_2(sc, VTE_MCR0);
			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) == 0)
				break;
			DELAY(10);
		}
		if (i == 0)
			device_printf(sc->vte_dev,
			    "could not disable RX/TX MAC(0x%04x)!\n", mcr);
	}
}

static int
vte_init_tx_ring(struct vte_softc *sc)
{
	struct vte_tx_desc *desc;
	struct vte_txdesc *txd;
	bus_addr_t addr;
	int i;

	VTE_LOCK_ASSERT(sc);

	sc->vte_cdata.vte_tx_prod = 0;
	sc->vte_cdata.vte_tx_cons = 0;
	sc->vte_cdata.vte_tx_cnt = 0;

	/* Pre-allocate TX mbufs for deep copy. */
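	/*
	 * One cluster is kept per TX descriptor for the lifetime of
	 * the run; vte_encap() copies fragmented chains into the
	 * cluster indexed by the producer instead of calling
	 * m_defrag(9) on every packet.
	 */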
	if (tx_deep_copy != 0) {
		for (i = 0; i < VTE_TX_RING_CNT; i++) {
			sc->vte_cdata.vte_txmbufs[i] = m_getcl(M_NOWAIT,
			    MT_DATA, M_PKTHDR);
			if (sc->vte_cdata.vte_txmbufs[i] == NULL)
				return (ENOBUFS);
			sc->vte_cdata.vte_txmbufs[i]->m_pkthdr.len = MCLBYTES;
			sc->vte_cdata.vte_txmbufs[i]->m_len = MCLBYTES;
		}
	}
	desc = sc->vte_cdata.vte_tx_ring;
	bzero(desc, VTE_TX_RING_SZ);
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
		if (i != VTE_TX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_tx_ring_paddr +
			    sizeof(struct vte_tx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_tx_ring_paddr +
			    sizeof(struct vte_tx_desc) * 0;
		desc = &sc->vte_cdata.vte_tx_ring[i];
		desc->dtnp = htole32(addr);
		txd->tx_desc = desc;
	}

	bus_dmamap_sync(sc->vte_cdata.vte_tx_ring_tag,
	    sc->vte_cdata.vte_tx_ring_map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	return (0);
}

static int
vte_init_rx_ring(struct vte_softc *sc)
{
	struct vte_rx_desc *desc;
	struct vte_rxdesc *rxd;
	bus_addr_t addr;
	int i;

	VTE_LOCK_ASSERT(sc);

	sc->vte_cdata.vte_rx_cons = 0;
	desc = sc->vte_cdata.vte_rx_ring;
	bzero(desc, VTE_RX_RING_SZ);
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		if (i != VTE_RX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_rx_ring_paddr +
			    sizeof(struct vte_rx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_rx_ring_paddr +
			    sizeof(struct vte_rx_desc) * 0;
		desc = &sc->vte_cdata.vte_rx_ring[i];
		desc->drnp = htole32(addr);
		rxd->rx_desc = desc;
		if (vte_newbuf(sc, rxd) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->vte_cdata.vte_rx_ring_tag,
	    sc->vte_cdata.vte_rx_ring_map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	return (0);
}

static void
vte_rxfilter(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint8_t *eaddr;
	uint32_t crc;
	uint16_t rxfilt_perf[VTE_RXFILT_PERFECT_CNT][3];
	uint16_t mchash[4], mcr;
	int i, nperf;

	VTE_LOCK_ASSERT(sc);

	ifp = sc->vte_ifp;

	bzero(mchash, sizeof(mchash));
	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
		rxfilt_perf[i][0] = 0xFFFF;
		rxfilt_perf[i][1] = 0xFFFF;
		rxfilt_perf[i][2] = 0xFFFF;
	}

	mcr = CSR_READ_2(sc, VTE_MCR0);
	mcr &= ~(MCR0_PROMISC | MCR0_MULTICAST);
	mcr |= MCR0_BROADCAST_DIS;
	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		mcr &= ~MCR0_BROADCAST_DIS;
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			mcr |= MCR0_PROMISC;
		if ((ifp->if_flags & IFF_ALLMULTI) != 0)
			mcr |= MCR0_MULTICAST;
		mchash[0] = 0xFFFF;
		mchash[1] = 0xFFFF;
		mchash[2] = 0xFFFF;
		mchash[3] = 0xFFFF;
		goto chipit;
	}

	nperf = 0;
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &sc->vte_ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		/*
		 * Program the first 3 multicast groups into
		 * the perfect filter.  For all others, use the
		 * hash table.
		 */
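		/*
		 * The hash is taken from the top bits of the
		 * big-endian CRC of the address: bits 31-30 select one
		 * of the four 16-bit MAR registers and bits 29-26
		 * select the bit within it.
		 */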
1987 */ 1988 if (nperf < VTE_RXFILT_PERFECT_CNT) { 1989 eaddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr); 1990 rxfilt_perf[nperf][0] = eaddr[1] << 8 | eaddr[0]; 1991 rxfilt_perf[nperf][1] = eaddr[3] << 8 | eaddr[2]; 1992 rxfilt_perf[nperf][2] = eaddr[5] << 8 | eaddr[4]; 1993 nperf++; 1994 continue; 1995 } 1996 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *) 1997 ifma->ifma_addr), ETHER_ADDR_LEN); 1998 mchash[crc >> 30] |= 1 << ((crc >> 26) & 0x0F); 1999 } 2000 if_maddr_runlock(ifp); 2001 if (mchash[0] != 0 || mchash[1] != 0 || mchash[2] != 0 || 2002 mchash[3] != 0) 2003 mcr |= MCR0_MULTICAST; 2004 2005 chipit: 2006 /* Program multicast hash table. */ 2007 CSR_WRITE_2(sc, VTE_MAR0, mchash[0]); 2008 CSR_WRITE_2(sc, VTE_MAR1, mchash[1]); 2009 CSR_WRITE_2(sc, VTE_MAR2, mchash[2]); 2010 CSR_WRITE_2(sc, VTE_MAR3, mchash[3]); 2011 /* Program perfect filter table. */ 2012 for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) { 2013 CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 0, 2014 rxfilt_perf[i][0]); 2015 CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 2, 2016 rxfilt_perf[i][1]); 2017 CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 4, 2018 rxfilt_perf[i][2]); 2019 } 2020 CSR_WRITE_2(sc, VTE_MCR0, mcr); 2021 CSR_READ_2(sc, VTE_MCR0); 2022 } 2023 2024 static int 2025 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) 2026 { 2027 int error, value; 2028 2029 if (arg1 == NULL) 2030 return (EINVAL); 2031 value = *(int *)arg1; 2032 error = sysctl_handle_int(oidp, &value, 0, req); 2033 if (error || req->newptr == NULL) 2034 return (error); 2035 if (value < low || value > high) 2036 return (EINVAL); 2037 *(int *)arg1 = value; 2038 2039 return (0); 2040 } 2041 2042 static int 2043 sysctl_hw_vte_int_mod(SYSCTL_HANDLER_ARGS) 2044 { 2045 2046 return (sysctl_int_range(oidp, arg1, arg2, req, 2047 VTE_IM_BUNDLE_MIN, VTE_IM_BUNDLE_MAX)); 2048 } 2049