/*-
 * Copyright (c) 2010, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for DM&P Electronics, Inc, Vortex86 RDC R6040 FastEthernet. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_llc.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>

#include <dev/vte/if_vtereg.h>
#include <dev/vte/if_vtevar.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

MODULE_DEPEND(vte, pci, 1, 1, 1);
MODULE_DEPEND(vte, ether, 1, 1, 1);
MODULE_DEPEND(vte, miibus, 1, 1, 1);

/* Tunables. */
static int tx_deep_copy = 1;
TUNABLE_INT("hw.vte.tx_deep_copy", &tx_deep_copy);
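/*
 * Deep copying of TX mbuf chains is enabled by default.  Being a
 * standard TUNABLE_INT, it can be turned off from loader.conf(5),
 * e.g. (the value shown is just an example):
 *   hw.vte.tx_deep_copy="0"
 */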

/*
 * Devices supported by this driver.
 */
static const struct vte_ident vte_ident_table[] = {
	{ VENDORID_RDC, DEVICEID_RDC_R6040, "RDC R6040 FastEthernet"},
	{ 0, 0, NULL}
};

static int	vte_attach(device_t);
static int	vte_detach(device_t);
static int	vte_dma_alloc(struct vte_softc *);
static void	vte_dma_free(struct vte_softc *);
static void	vte_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static struct vte_txdesc *
		vte_encap(struct vte_softc *, struct mbuf **);
static const struct vte_ident *
		vte_find_ident(device_t);
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf *
		vte_fixup_rx(struct ifnet *, struct mbuf *);
#endif
static void	vte_get_macaddr(struct vte_softc *);
static void	vte_init(void *);
static void	vte_init_locked(struct vte_softc *);
static int	vte_init_rx_ring(struct vte_softc *);
static int	vte_init_tx_ring(struct vte_softc *);
static void	vte_intr(void *);
static int	vte_ioctl(struct ifnet *, u_long, caddr_t);
static void	vte_mac_config(struct vte_softc *);
static int	vte_miibus_readreg(device_t, int, int);
static void	vte_miibus_statchg(device_t);
static int	vte_miibus_writereg(device_t, int, int, int);
static int	vte_mediachange(struct ifnet *);
static int	vte_mediachange_locked(struct ifnet *);
static void	vte_mediastatus(struct ifnet *, struct ifmediareq *);
static int	vte_newbuf(struct vte_softc *, struct vte_rxdesc *);
static int	vte_probe(device_t);
static void	vte_reset(struct vte_softc *);
static int	vte_resume(device_t);
static void	vte_rxeof(struct vte_softc *);
static void	vte_rxfilter(struct vte_softc *);
static int	vte_shutdown(device_t);
static void	vte_start(struct ifnet *);
static void	vte_start_locked(struct vte_softc *);
static void	vte_start_mac(struct vte_softc *);
static void	vte_stats_clear(struct vte_softc *);
static void	vte_stats_update(struct vte_softc *);
static void	vte_stop(struct vte_softc *);
static void	vte_stop_mac(struct vte_softc *);
static int	vte_suspend(device_t);
static void	vte_sysctl_node(struct vte_softc *);
static void	vte_tick(void *);
static void	vte_txeof(struct vte_softc *);
static void	vte_watchdog(struct vte_softc *);
static int	sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int	sysctl_hw_vte_int_mod(SYSCTL_HANDLER_ARGS);

static device_method_t vte_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		vte_probe),
	DEVMETHOD(device_attach,	vte_attach),
	DEVMETHOD(device_detach,	vte_detach),
	DEVMETHOD(device_shutdown,	vte_shutdown),
	DEVMETHOD(device_suspend,	vte_suspend),
	DEVMETHOD(device_resume,	vte_resume),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	vte_miibus_readreg),
	DEVMETHOD(miibus_writereg,	vte_miibus_writereg),
	DEVMETHOD(miibus_statchg,	vte_miibus_statchg),

	KOBJMETHOD_END
};

static driver_t vte_driver = {
	"vte",
	vte_methods,
	sizeof(struct vte_softc)
};

static devclass_t vte_devclass;

DRIVER_MODULE(vte, pci, vte_driver, vte_devclass, 0, 0);
DRIVER_MODULE(miibus, vte, miibus_driver, miibus_devclass, 0, 0);

static int
vte_miibus_readreg(device_t dev, int phy, int reg)
{
	struct vte_softc *sc;
	int i;

	sc = device_get_softc(dev);

	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_READ |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_READ) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->vte_dev, "phy read timeout : %d\n", reg);
		return (0);
	}

	return (CSR_READ_2(sc, VTE_MMRD));
}

static int
vte_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct vte_softc *sc;
	int i;

	sc = device_get_softc(dev);

	CSR_WRITE_2(sc, VTE_MMWD, val);
	CSR_WRITE_2(sc, VTE_MMDIO, MMDIO_WRITE |
	    (phy << MMDIO_PHY_ADDR_SHIFT) | (reg << MMDIO_REG_ADDR_SHIFT));
	for (i = VTE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		if ((CSR_READ_2(sc, VTE_MMDIO) & MMDIO_WRITE) == 0)
			break;
	}

	if (i == 0)
		device_printf(sc->vte_dev, "phy write timeout : %d\n", reg);

	return (0);
}

static void
vte_miibus_statchg(device_t dev)
{
	struct vte_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint16_t val;

	sc = device_get_softc(dev);

	mii = device_get_softc(sc->vte_miibus);
	ifp = sc->vte_ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	sc->vte_flags &= ~VTE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->vte_flags |= VTE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Program MACs with resolved duplex and flow control. */
	if ((sc->vte_flags & VTE_FLAG_LINK) != 0) {
		/*
		 * Timer waiting time : (63 + TIMER * 64) MII clock.
		 * MII clock : 25MHz(100Mbps) or 2.5MHz(10Mbps).
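		 *
		 * For example, TIMER = 18 at 100Mbps waits
		 * (63 + 18 * 64) / 25MHz = 1215 / 25MHz = 48.6us, and
		 * TIMER = 1 at 10Mbps waits
		 * (63 + 1 * 64) / 2.5MHz = 127 / 2.5MHz = 50.8us,
		 * which matches the values programmed below.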
		 */
		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		val |= sc->vte_int_rx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
		CSR_WRITE_2(sc, VTE_MRICR, val);

		if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
			val = 18 << VTE_IM_TIMER_SHIFT;
		else
			val = 1 << VTE_IM_TIMER_SHIFT;
		val |= sc->vte_int_tx_mod << VTE_IM_BUNDLE_SHIFT;
		/* 48.6us for 100Mbps, 50.8us for 10Mbps */
		CSR_WRITE_2(sc, VTE_MTICR, val);

		vte_mac_config(sc);
		vte_start_mac(sc);
	}
}

static void
vte_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vte_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	VTE_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) == 0) {
		VTE_UNLOCK(sc);
		return;
	}
	mii = device_get_softc(sc->vte_miibus);

	mii_pollstat(mii);
	VTE_UNLOCK(sc);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

static int
vte_mediachange(struct ifnet *ifp)
{
	struct vte_softc *sc;
	int error;

	sc = ifp->if_softc;
	VTE_LOCK(sc);
	error = vte_mediachange_locked(ifp);
	VTE_UNLOCK(sc);
	return (error);
}

static int
vte_mediachange_locked(struct ifnet *ifp)
{
	struct vte_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->vte_miibus);
	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);

	return (error);
}

static const struct vte_ident *
vte_find_ident(device_t dev)
{
	const struct vte_ident *ident;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	for (ident = vte_ident_table; ident->name != NULL; ident++) {
		if (vendor == ident->vendorid && devid == ident->deviceid)
			return (ident);
	}

	return (NULL);
}

static int
vte_probe(device_t dev)
{
	const struct vte_ident *ident;

	ident = vte_find_ident(dev);
	if (ident != NULL) {
		device_set_desc(dev, ident->name);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

static void
vte_get_macaddr(struct vte_softc *sc)
{
	uint16_t mid;

	/*
	 * It seems there is no way to reload the station address; it
	 * is supposed to be set by the BIOS.
	 */
	mid = CSR_READ_2(sc, VTE_MID0L);
	sc->vte_eaddr[0] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[1] = (mid >> 8) & 0xFF;
	mid = CSR_READ_2(sc, VTE_MID0M);
	sc->vte_eaddr[2] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[3] = (mid >> 8) & 0xFF;
	mid = CSR_READ_2(sc, VTE_MID0H);
	sc->vte_eaddr[4] = (mid >> 0) & 0xFF;
	sc->vte_eaddr[5] = (mid >> 8) & 0xFF;
}

static int
vte_attach(device_t dev)
{
	struct vte_softc *sc;
	struct ifnet *ifp;
	uint16_t macid;
	int error, rid;

	error = 0;
	sc = device_get_softc(dev);
	sc->vte_dev = dev;

	mtx_init(&sc->vte_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->vte_tick_ch, &sc->vte_mtx, 0);
	sc->vte_ident = vte_find_ident(dev);

	/*
	 * Map the device.
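	 * Prefer the memory BAR (PCIR_BAR(1)); if that cannot be
	 * mapped, fall back to the I/O port BAR (PCIR_BAR(0)).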
	 */
	pci_enable_busmaster(dev);
	sc->vte_res_id = PCIR_BAR(1);
	sc->vte_res_type = SYS_RES_MEMORY;
	sc->vte_res = bus_alloc_resource_any(dev, sc->vte_res_type,
	    &sc->vte_res_id, RF_ACTIVE);
	if (sc->vte_res == NULL) {
		sc->vte_res_id = PCIR_BAR(0);
		sc->vte_res_type = SYS_RES_IOPORT;
		sc->vte_res = bus_alloc_resource_any(dev, sc->vte_res_type,
		    &sc->vte_res_id, RF_ACTIVE);
		if (sc->vte_res == NULL) {
			device_printf(dev, "cannot map memory/ports.\n");
			mtx_destroy(&sc->vte_mtx);
			return (ENXIO);
		}
	}
	if (bootverbose) {
		device_printf(dev, "using %s space register mapping\n",
		    sc->vte_res_type == SYS_RES_MEMORY ? "memory" : "I/O");
		device_printf(dev, "MAC Identifier : 0x%04x\n",
		    CSR_READ_2(sc, VTE_MACID));
		macid = CSR_READ_2(sc, VTE_MACID_REV);
		device_printf(dev, "MAC Id. 0x%02x, Rev. 0x%02x\n",
		    (macid & VTE_MACID_MASK) >> VTE_MACID_SHIFT,
		    (macid & VTE_MACID_REV_MASK) >> VTE_MACID_REV_SHIFT);
	}

	rid = 0;
	sc->vte_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->vte_irq == NULL) {
		device_printf(dev, "cannot allocate IRQ resources.\n");
		error = ENXIO;
		goto fail;
	}

	/* Reset the ethernet controller. */
	vte_reset(sc);

	if ((error = vte_dma_alloc(sc)) != 0)
		goto fail;

	/* Create device sysctl node. */
	vte_sysctl_node(sc);

	/* Load station address. */
	vte_get_macaddr(sc);

	ifp = sc->vte_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure.\n");
		error = ENXIO;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = vte_ioctl;
	ifp->if_start = vte_start;
	ifp->if_init = vte_init;
	ifp->if_snd.ifq_drv_maxlen = VTE_TX_RING_CNT - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Set up MII bus.
	 * The BIOS would have initialized VTE_MPSCCR to catch PHY
	 * status changes, so the driver may be able to extract the
	 * configured PHY address.  Since it's common to see the BIOS
	 * fail to initialize the register (including on the sample
	 * board I have), let mii(4) probe it.  This is more reliable
	 * than relying on the BIOS's initialization.
	 *
	 * Advertising flow control capability to mii(4) was
	 * intentionally disabled due to severe problems in TX
	 * pause frame generation.  See vte_rxeof() for more
	 * details.
	 */
	error = mii_attach(dev, &sc->vte_miibus, ifp, vte_mediachange,
	    vte_mediastatus, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	ether_ifattach(ifp, sc->vte_eaddr);

	/* VLAN capability setup. */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable = ifp->if_capabilities;
	/* Tell the upper layer we support VLAN over-sized frames. */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	error = bus_setup_intr(dev, sc->vte_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, vte_intr, sc, &sc->vte_intrhand);
	if (error != 0) {
		device_printf(dev, "could not set up interrupt handler.\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error != 0)
		vte_detach(dev);

	return (error);
}

static int
vte_detach(device_t dev)
{
	struct vte_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	ifp = sc->vte_ifp;
	if (device_is_attached(dev)) {
		VTE_LOCK(sc);
		vte_stop(sc);
		VTE_UNLOCK(sc);
		callout_drain(&sc->vte_tick_ch);
		ether_ifdetach(ifp);
	}

	if (sc->vte_miibus != NULL) {
		device_delete_child(dev, sc->vte_miibus);
		sc->vte_miibus = NULL;
	}
	bus_generic_detach(dev);

	if (sc->vte_intrhand != NULL) {
		bus_teardown_intr(dev, sc->vte_irq, sc->vte_intrhand);
		sc->vte_intrhand = NULL;
	}
	if (sc->vte_irq != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vte_irq);
		sc->vte_irq = NULL;
	}
	if (sc->vte_res != NULL) {
		bus_release_resource(dev, sc->vte_res_type, sc->vte_res_id,
		    sc->vte_res);
		sc->vte_res = NULL;
	}
	if (ifp != NULL) {
		if_free(ifp);
		sc->vte_ifp = NULL;
	}
	vte_dma_free(sc);
	mtx_destroy(&sc->vte_mtx);

	return (0);
}

#define	VTE_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)

static void
vte_sysctl_node(struct vte_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *parent;
	struct sysctl_oid *tree;
	struct vte_hw_stats *stats;
	int error;

	stats = &sc->vte_stats;
	ctx = device_get_sysctl_ctx(sc->vte_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->vte_dev));

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_rx_mod",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->vte_int_rx_mod, 0,
	    sysctl_hw_vte_int_mod, "I", "vte RX interrupt moderation");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_tx_mod",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->vte_int_tx_mod, 0,
	    sysctl_hw_vte_int_mod, "I", "vte TX interrupt moderation");
	/* Pull in device tunables. */
	sc->vte_int_rx_mod = VTE_IM_RX_BUNDLE_DEFAULT;
	error = resource_int_value(device_get_name(sc->vte_dev),
	    device_get_unit(sc->vte_dev), "int_rx_mod", &sc->vte_int_rx_mod);
	if (error == 0) {
		if (sc->vte_int_rx_mod < VTE_IM_BUNDLE_MIN ||
		    sc->vte_int_rx_mod > VTE_IM_BUNDLE_MAX) {
			device_printf(sc->vte_dev, "int_rx_mod value out of "
			    "range; using default: %d\n",
			    VTE_IM_RX_BUNDLE_DEFAULT);
			sc->vte_int_rx_mod = VTE_IM_RX_BUNDLE_DEFAULT;
		}
	}

	sc->vte_int_tx_mod = VTE_IM_TX_BUNDLE_DEFAULT;
	error = resource_int_value(device_get_name(sc->vte_dev),
	    device_get_unit(sc->vte_dev), "int_tx_mod", &sc->vte_int_tx_mod);
	if (error == 0) {
		if (sc->vte_int_tx_mod < VTE_IM_BUNDLE_MIN ||
		    sc->vte_int_tx_mod > VTE_IM_BUNDLE_MAX) {
			device_printf(sc->vte_dev, "int_tx_mod value out of "
			    "range; using default: %d\n",
			    VTE_IM_TX_BUNDLE_DEFAULT);
			sc->vte_int_tx_mod = VTE_IM_TX_BUNDLE_DEFAULT;
		}
	}

	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "VTE statistics");
	parent = SYSCTL_CHILDREN(tree);

	/*
	 * RX statistics.
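	 * The nodes created below hang off the per-device sysctl tree,
	 * so the counters show up as e.g. dev.vte.0.stats.rx.good_frames.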
	 */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
	    NULL, "RX MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	VTE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->rx_frames, "Good frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames",
	    &stats->rx_bcast_frames, "Good broadcast frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames",
	    &stats->rx_mcast_frames, "Good multicast frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "runt",
	    &stats->rx_runts, "Too short frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "crc_errs",
	    &stats->rx_crcerrs, "CRC errors");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "long_frames",
	    &stats->rx_long_frames,
	    "Frames longer than the maximum packet length");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "fifo_full",
	    &stats->rx_fifo_full, "FIFO full");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "desc_unavail",
	    &stats->rx_desc_unavail, "Descriptor unavailable frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->rx_pause_frames, "Pause control frames");

	/* TX statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
	    NULL, "TX MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	VTE_SYSCTL_STAT_ADD32(ctx, child, "good_frames",
	    &stats->tx_frames, "Good frames");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "underruns",
	    &stats->tx_underruns, "FIFO underruns");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "late_colls",
	    &stats->tx_late_colls, "Late collisions");
	VTE_SYSCTL_STAT_ADD32(ctx, child, "pause_frames",
	    &stats->tx_pause_frames, "Pause control frames");
}

#undef VTE_SYSCTL_STAT_ADD32

struct vte_dmamap_arg {
	bus_addr_t	vte_busaddr;
};

static void
vte_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct vte_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	ctx = (struct vte_dmamap_arg *)arg;
	ctx->vte_busaddr = segs[0].ds_addr;
}

static int
vte_dma_alloc(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	struct vte_dmamap_arg ctx;
	int error, i;

	/* Create parent DMA tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->vte_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_parent_tag);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not create parent DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for TX descriptor ring. */
	error = bus_dma_tag_create(
	    sc->vte_cdata.vte_parent_tag, /* parent */
	    VTE_TX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    VTE_TX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    VTE_TX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not create TX ring DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for RX free descriptor ring. */
	error = bus_dma_tag_create(
	    sc->vte_cdata.vte_parent_tag, /* parent */
	    VTE_RX_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    VTE_RX_RING_SZ,		/* maxsize */
	    1,				/* nsegments */
	    VTE_RX_RING_SZ,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not create RX ring DMA tag.\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map for TX ring. */
	error = bus_dmamem_alloc(sc->vte_cdata.vte_tx_ring_tag,
	    (void **)&sc->vte_cdata.vte_tx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->vte_cdata.vte_tx_ring_map);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not allocate DMA'able memory for TX ring.\n");
		goto fail;
	}
	ctx.vte_busaddr = 0;
	error = bus_dmamap_load(sc->vte_cdata.vte_tx_ring_tag,
	    sc->vte_cdata.vte_tx_ring_map, sc->vte_cdata.vte_tx_ring,
	    VTE_TX_RING_SZ, vte_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.vte_busaddr == 0) {
		device_printf(sc->vte_dev,
		    "could not load DMA'able memory for TX ring.\n");
		goto fail;
	}
	sc->vte_cdata.vte_tx_ring_paddr = ctx.vte_busaddr;

	/* Allocate DMA'able memory and load the DMA map for RX ring. */
	error = bus_dmamem_alloc(sc->vte_cdata.vte_rx_ring_tag,
	    (void **)&sc->vte_cdata.vte_rx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->vte_cdata.vte_rx_ring_map);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not allocate DMA'able memory for RX ring.\n");
		goto fail;
	}
	ctx.vte_busaddr = 0;
	error = bus_dmamap_load(sc->vte_cdata.vte_rx_ring_tag,
	    sc->vte_cdata.vte_rx_ring_map, sc->vte_cdata.vte_rx_ring,
	    VTE_RX_RING_SZ, vte_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.vte_busaddr == 0) {
		device_printf(sc->vte_dev,
		    "could not load DMA'able memory for RX ring.\n");
		goto fail;
	}
	sc->vte_cdata.vte_rx_ring_paddr = ctx.vte_busaddr;

	/* Create TX buffer parent tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->vte_dev), /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_buffer_tag);
	if (error != 0) {
		device_printf(sc->vte_dev,
		    "could not create parent buffer DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for TX buffers. */
	error = bus_dma_tag_create(
	    sc->vte_cdata.vte_buffer_tag, /* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_tx_tag);
	if (error != 0) {
		device_printf(sc->vte_dev, "could not create TX DMA tag.\n");
		goto fail;
	}

	/* Create DMA tag for RX buffers. */
	error = bus_dma_tag_create(
	    sc->vte_cdata.vte_buffer_tag, /* parent */
	    VTE_RX_BUF_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES,			/* maxsize */
	    1,				/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->vte_cdata.vte_rx_tag);
	if (error != 0) {
		device_printf(sc->vte_dev, "could not create RX DMA tag.\n");
		goto fail;
	}
	/* Create DMA maps for TX buffers. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->vte_cdata.vte_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->vte_dev,
			    "could not create TX dmamap.\n");
			goto fail;
		}
	}
	/* Create DMA maps for RX buffers. */
	if ((error = bus_dmamap_create(sc->vte_cdata.vte_rx_tag, 0,
	    &sc->vte_cdata.vte_rx_sparemap)) != 0) {
		device_printf(sc->vte_dev,
		    "could not create spare RX dmamap.\n");
		goto fail;
	}
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->vte_cdata.vte_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->vte_dev,
			    "could not create RX dmamap.\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
vte_dma_free(struct vte_softc *sc)
{
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	/* TX buffers. */
	if (sc->vte_cdata.vte_tx_tag != NULL) {
		for (i = 0; i < VTE_TX_RING_CNT; i++) {
			txd = &sc->vte_cdata.vte_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(sc->vte_cdata.vte_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->vte_cdata.vte_tx_tag);
		sc->vte_cdata.vte_tx_tag = NULL;
	}
	/* RX buffers */
	if (sc->vte_cdata.vte_rx_tag != NULL) {
		for (i = 0; i < VTE_RX_RING_CNT; i++) {
			rxd = &sc->vte_cdata.vte_rxdesc[i];
			if (rxd->rx_dmamap != NULL) {
				bus_dmamap_destroy(sc->vte_cdata.vte_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->vte_cdata.vte_rx_sparemap != NULL) {
			bus_dmamap_destroy(sc->vte_cdata.vte_rx_tag,
			    sc->vte_cdata.vte_rx_sparemap);
			sc->vte_cdata.vte_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->vte_cdata.vte_rx_tag);
		sc->vte_cdata.vte_rx_tag = NULL;
	}
	/* TX descriptor ring. */
	if (sc->vte_cdata.vte_tx_ring_tag != NULL) {
		if (sc->vte_cdata.vte_tx_ring_map != NULL)
			bus_dmamap_unload(sc->vte_cdata.vte_tx_ring_tag,
			    sc->vte_cdata.vte_tx_ring_map);
		if (sc->vte_cdata.vte_tx_ring_map != NULL &&
		    sc->vte_cdata.vte_tx_ring != NULL)
			bus_dmamem_free(sc->vte_cdata.vte_tx_ring_tag,
			    sc->vte_cdata.vte_tx_ring,
			    sc->vte_cdata.vte_tx_ring_map);
		sc->vte_cdata.vte_tx_ring = NULL;
		sc->vte_cdata.vte_tx_ring_map = NULL;
		bus_dma_tag_destroy(sc->vte_cdata.vte_tx_ring_tag);
		sc->vte_cdata.vte_tx_ring_tag = NULL;
	}
	/* RX ring. */
	if (sc->vte_cdata.vte_rx_ring_tag != NULL) {
		if (sc->vte_cdata.vte_rx_ring_map != NULL)
			bus_dmamap_unload(sc->vte_cdata.vte_rx_ring_tag,
			    sc->vte_cdata.vte_rx_ring_map);
		if (sc->vte_cdata.vte_rx_ring_map != NULL &&
		    sc->vte_cdata.vte_rx_ring != NULL)
			bus_dmamem_free(sc->vte_cdata.vte_rx_ring_tag,
			    sc->vte_cdata.vte_rx_ring,
			    sc->vte_cdata.vte_rx_ring_map);
		sc->vte_cdata.vte_rx_ring = NULL;
		sc->vte_cdata.vte_rx_ring_map = NULL;
		bus_dma_tag_destroy(sc->vte_cdata.vte_rx_ring_tag);
		sc->vte_cdata.vte_rx_ring_tag = NULL;
	}
	if (sc->vte_cdata.vte_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->vte_cdata.vte_buffer_tag);
		sc->vte_cdata.vte_buffer_tag = NULL;
	}
	if (sc->vte_cdata.vte_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->vte_cdata.vte_parent_tag);
		sc->vte_cdata.vte_parent_tag = NULL;
	}
}

static int
vte_shutdown(device_t dev)
{

	return (vte_suspend(dev));
}

static int
vte_suspend(device_t dev)
{
	struct vte_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	VTE_LOCK(sc);
	ifp = sc->vte_ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		vte_stop(sc);
	VTE_UNLOCK(sc);

	return (0);
}

static int
vte_resume(device_t dev)
{
	struct vte_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);

	VTE_LOCK(sc);
	ifp = sc->vte_ifp;
	if ((ifp->if_flags & IFF_UP) != 0) {
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		vte_init_locked(sc);
	}
	VTE_UNLOCK(sc);

	return (0);
}

static struct vte_txdesc *
vte_encap(struct vte_softc *sc, struct mbuf **m_head)
{
	struct vte_txdesc *txd;
	struct mbuf *m, *n;
	bus_dma_segment_t txsegs[1];
	int copy, error, nsegs, padlen;

	VTE_LOCK_ASSERT(sc);

	M_ASSERTPKTHDR((*m_head));

	txd = &sc->vte_cdata.vte_txdesc[sc->vte_cdata.vte_tx_prod];
	m = *m_head;
	/*
	 * The controller doesn't auto-pad, so we have to pad short
	 * frames out to the minimum frame length.
	 */
	if (m->m_pkthdr.len < VTE_MIN_FRAMELEN)
		padlen = VTE_MIN_FRAMELEN - m->m_pkthdr.len;
	else
		padlen = 0;

	/*
	 * The controller does not support multi-fragmented TX buffers
	 * and spends most of its TX processing time in de-fragmenting
	 * TX buffers.  Either a faster CPU or a more advanced
	 * controller DMA engine would be required to speed up TX path
	 * processing.
	 * To mitigate the de-fragmenting issue, perform a deep copy
	 * from fragmented mbuf chains to a pre-allocated mbuf cluster
	 * at the extra cost of kernel memory.  For frames composed of
	 * a single TX buffer, the deep copy is bypassed.
	 */
	if (tx_deep_copy != 0) {
		copy = 0;
		if (m->m_next != NULL)
			copy++;
		if (padlen > 0 && (M_WRITABLE(m) == 0 ||
		    padlen > M_TRAILINGSPACE(m)))
			copy++;
		if (copy != 0) {
			/* Avoid expensive m_defrag(9) and do deep copy. */
			n = sc->vte_cdata.vte_txmbufs[sc->vte_cdata.vte_tx_prod];
			m_copydata(m, 0, m->m_pkthdr.len, mtod(n, char *));
			n->m_pkthdr.len = m->m_pkthdr.len;
			n->m_len = m->m_pkthdr.len;
			m = n;
			txd->tx_flags |= VTE_TXMBUF;
		}

		if (padlen > 0) {
			/* Zero out the bytes in the pad area. */
			bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
			m->m_pkthdr.len += padlen;
			m->m_len = m->m_pkthdr.len;
		}
	} else {
		if (M_WRITABLE(m) == 0) {
			if (m->m_next != NULL || padlen > 0) {
				/* Get a writable copy. */
				m = m_dup(*m_head, M_DONTWAIT);
				/* Release original mbuf chains. */
				m_freem(*m_head);
				if (m == NULL) {
					*m_head = NULL;
					return (NULL);
				}
				*m_head = m;
			}
		}

		if (m->m_next != NULL) {
			m = m_defrag(*m_head, M_DONTWAIT);
			if (m == NULL) {
				m_freem(*m_head);
				*m_head = NULL;
				return (NULL);
			}
			*m_head = m;
		}

		if (padlen > 0) {
			if (M_TRAILINGSPACE(m) < padlen) {
				m = m_defrag(*m_head, M_DONTWAIT);
				if (m == NULL) {
					m_freem(*m_head);
					*m_head = NULL;
					return (NULL);
				}
				*m_head = m;
			}
			/* Zero out the bytes in the pad area. */
			bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
			m->m_pkthdr.len += padlen;
			m->m_len = m->m_pkthdr.len;
		}
	}

	error = bus_dmamap_load_mbuf_sg(sc->vte_cdata.vte_tx_tag,
	    txd->tx_dmamap, m, txsegs, &nsegs, 0);
	if (error != 0) {
		txd->tx_flags &= ~VTE_TXMBUF;
		return (NULL);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
	bus_dmamap_sync(sc->vte_cdata.vte_tx_tag, txd->tx_dmamap,
	    BUS_DMASYNC_PREWRITE);

	txd->tx_desc->dtlen = htole16(VTE_TX_LEN(txsegs[0].ds_len));
	txd->tx_desc->dtbp = htole32(txsegs[0].ds_addr);
	sc->vte_cdata.vte_tx_cnt++;
	/* Update producer index. */
	VTE_DESC_INC(sc->vte_cdata.vte_tx_prod, VTE_TX_RING_CNT);

	/* Finally hand over ownership to controller. */
	txd->tx_desc->dtst = htole16(VTE_DTST_TX_OWN);
	txd->tx_m = m;

	return (txd);
}

static void
vte_start(struct ifnet *ifp)
{
	struct vte_softc *sc;

	sc = ifp->if_softc;
	VTE_LOCK(sc);
	vte_start_locked(sc);
	VTE_UNLOCK(sc);
}

static void
vte_start_locked(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct vte_txdesc *txd;
	struct mbuf *m_head;
	int enq;

	ifp = sc->vte_ifp;

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->vte_flags & VTE_FLAG_LINK) == 0)
		return;

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
		/* Reserve one free TX descriptor. */
		if (sc->vte_cdata.vte_tx_cnt >= VTE_TX_RING_CNT - 1) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring.  If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if ((txd = vte_encap(sc, &m_head)) == NULL) {
			if (m_head != NULL)
				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
		/*
		 * Free consumed TX frame.
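		 * When the deep copy was taken (VTE_TXMBUF is set),
		 * the descriptor references the driver-owned cluster,
		 * so the original chain can be freed right away;
		 * otherwise the mbuf is freed in vte_txeof() once the
		 * controller hands the descriptor back.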
		 */
		if ((txd->tx_flags & VTE_TXMBUF) != 0)
			m_freem(m_head);
	}

	if (enq > 0) {
		bus_dmamap_sync(sc->vte_cdata.vte_tx_ring_tag,
		    sc->vte_cdata.vte_tx_ring_map, BUS_DMASYNC_PREREAD |
		    BUS_DMASYNC_PREWRITE);
		CSR_WRITE_2(sc, VTE_TX_POLL, TX_POLL_START);
		sc->vte_watchdog_timer = VTE_TX_TIMEOUT;
	}
}

static void
vte_watchdog(struct vte_softc *sc)
{
	struct ifnet *ifp;

	VTE_LOCK_ASSERT(sc);

	if (sc->vte_watchdog_timer == 0 || --sc->vte_watchdog_timer)
		return;

	ifp = sc->vte_ifp;
	if_printf(sc->vte_ifp, "watchdog timeout -- resetting\n");
	ifp->if_oerrors++;
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	vte_init_locked(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		vte_start_locked(sc);
}

static int
vte_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct vte_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;
	switch (cmd) {
	case SIOCSIFFLAGS:
		VTE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 &&
			    ((ifp->if_flags ^ sc->vte_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) != 0)
				vte_rxfilter(sc);
			else
				vte_init_locked(sc);
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			vte_stop(sc);
		sc->vte_if_flags = ifp->if_flags;
		VTE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		VTE_LOCK(sc);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			vte_rxfilter(sc);
		VTE_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->vte_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

static void
vte_mac_config(struct vte_softc *sc)
{
	struct mii_data *mii;
	uint16_t mcr;

	VTE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->vte_miibus);
	mcr = CSR_READ_2(sc, VTE_MCR0);
	mcr &= ~(MCR0_FC_ENB | MCR0_FULL_DUPLEX);
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
		mcr |= MCR0_FULL_DUPLEX;
#ifdef notyet
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
		/*
		 * The data sheet is not clear whether the controller
		 * honors received pause frames or not.  There is no
		 * separate control bit for RX pause frames, so just
		 * enable the MCR0_FC_ENB bit.
		 */
		if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
			mcr |= MCR0_FC_ENB;
#endif
	}
	CSR_WRITE_2(sc, VTE_MCR0, mcr);
}

static void
vte_stats_clear(struct vte_softc *sc)
{

	/*
	 * Reading the counter registers clears their contents.
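	 * Because the counters are clear-on-read, vte_stats_update()
	 * accumulates the values it reads into the software copies in
	 * sc->vte_stats rather than overwriting them.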
	 */
	CSR_READ_2(sc, VTE_CNT_RX_DONE);
	CSR_READ_2(sc, VTE_CNT_MECNT0);
	CSR_READ_2(sc, VTE_CNT_MECNT1);
	CSR_READ_2(sc, VTE_CNT_MECNT2);
	CSR_READ_2(sc, VTE_CNT_MECNT3);
	CSR_READ_2(sc, VTE_CNT_TX_DONE);
	CSR_READ_2(sc, VTE_CNT_MECNT4);
	CSR_READ_2(sc, VTE_CNT_PAUSE);
}

static void
vte_stats_update(struct vte_softc *sc)
{
	struct vte_hw_stats *stat;
	struct ifnet *ifp;
	uint16_t value;

	VTE_LOCK_ASSERT(sc);

	ifp = sc->vte_ifp;
	stat = &sc->vte_stats;

	CSR_READ_2(sc, VTE_MECISR);
	/* RX stats. */
	stat->rx_frames += CSR_READ_2(sc, VTE_CNT_RX_DONE);
	value = CSR_READ_2(sc, VTE_CNT_MECNT0);
	stat->rx_bcast_frames += (value >> 8);
	stat->rx_mcast_frames += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT1);
	stat->rx_runts += (value >> 8);
	stat->rx_crcerrs += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT2);
	stat->rx_long_frames += (value & 0xFF);
	value = CSR_READ_2(sc, VTE_CNT_MECNT3);
	stat->rx_fifo_full += (value >> 8);
	stat->rx_desc_unavail += (value & 0xFF);

	/* TX stats. */
	stat->tx_frames += CSR_READ_2(sc, VTE_CNT_TX_DONE);
	value = CSR_READ_2(sc, VTE_CNT_MECNT4);
	stat->tx_underruns += (value >> 8);
	stat->tx_late_colls += (value & 0xFF);

	value = CSR_READ_2(sc, VTE_CNT_PAUSE);
	stat->tx_pause_frames += (value >> 8);
	stat->rx_pause_frames += (value & 0xFF);

	/* Update ifp counters. */
	ifp->if_opackets = stat->tx_frames;
	ifp->if_collisions = stat->tx_late_colls;
	ifp->if_oerrors = stat->tx_late_colls + stat->tx_underruns;
	ifp->if_ipackets = stat->rx_frames;
	ifp->if_ierrors = stat->rx_crcerrs + stat->rx_runts +
	    stat->rx_long_frames + stat->rx_fifo_full;
}

static void
vte_intr(void *arg)
{
	struct vte_softc *sc;
	struct ifnet *ifp;
	uint16_t status;
	int n;

	sc = (struct vte_softc *)arg;
	VTE_LOCK(sc);

	ifp = sc->vte_ifp;
	/* Reading VTE_MISR acknowledges interrupts. */
	status = CSR_READ_2(sc, VTE_MISR);
	if ((status & VTE_INTRS) == 0) {
		/* Not ours. */
		VTE_UNLOCK(sc);
		return;
	}

	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
	for (n = 8; (status & VTE_INTRS) != 0;) {
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
		if ((status & (MISR_RX_DONE | MISR_RX_DESC_UNAVAIL |
		    MISR_RX_FIFO_FULL)) != 0)
			vte_rxeof(sc);
		if ((status & MISR_TX_DONE) != 0)
			vte_txeof(sc);
		if ((status & MISR_EVENT_CNT_OFLOW) != 0)
			vte_stats_update(sc);
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			vte_start_locked(sc);
		if (--n > 0)
			status = CSR_READ_2(sc, VTE_MISR);
		else
			break;
	}

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		/* Re-enable interrupts. */
		CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);
	}
	VTE_UNLOCK(sc);
}

static void
vte_txeof(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct vte_txdesc *txd;
	uint16_t status;
	int cons, prog;

	VTE_LOCK_ASSERT(sc);

	ifp = sc->vte_ifp;

	if (sc->vte_cdata.vte_tx_cnt == 0)
		return;
	bus_dmamap_sync(sc->vte_cdata.vte_tx_ring_tag,
	    sc->vte_cdata.vte_tx_ring_map, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	cons = sc->vte_cdata.vte_tx_cons;
	/*
	 * Go through our TX list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (prog = 0; sc->vte_cdata.vte_tx_cnt > 0;) {
		txd = &sc->vte_cdata.vte_txdesc[cons];
		status = le16toh(txd->tx_desc->dtst);
		if ((status & VTE_DTST_TX_OWN) != 0)
			break;
		sc->vte_cdata.vte_tx_cnt--;
		/* Reclaim transmitted mbufs. */
		bus_dmamap_sync(sc->vte_cdata.vte_tx_tag, txd->tx_dmamap,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->vte_cdata.vte_tx_tag, txd->tx_dmamap);
		if ((txd->tx_flags & VTE_TXMBUF) == 0)
			m_freem(txd->tx_m);
		txd->tx_flags &= ~VTE_TXMBUF;
		txd->tx_m = NULL;
		prog++;
		VTE_DESC_INC(cons, VTE_TX_RING_CNT);
	}

	if (prog > 0) {
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		sc->vte_cdata.vte_tx_cons = cons;
		/*
		 * Unarm the watchdog timer only when there are no
		 * pending frames in the TX queue.
		 */
		if (sc->vte_cdata.vte_tx_cnt == 0)
			sc->vte_watchdog_timer = 0;
	}
}

static int
vte_newbuf(struct vte_softc *sc, struct vte_rxdesc *rxd)
{
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, sizeof(uint32_t));

	if (bus_dmamap_load_mbuf_sg(sc->vte_cdata.vte_rx_tag,
	    sc->vte_cdata.vte_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->vte_cdata.vte_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->vte_cdata.vte_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->vte_cdata.vte_rx_sparemap;
	sc->vte_cdata.vte_rx_sparemap = map;
	bus_dmamap_sync(sc->vte_cdata.vte_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	rxd->rx_desc->drbp = htole32(segs[0].ds_addr);
	rxd->rx_desc->drlen = htole16(VTE_RX_LEN(segs[0].ds_len));
	rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);

	return (0);
}

/*
 * We are not supposed to see this controller on strict-alignment
 * architectures, but make it work for completeness.
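 * vte_fixup_rx() moves the received frame down by ETHER_ALIGN (2)
 * bytes, one 16-bit word at a time, so that the IP header that
 * follows the 14-byte Ethernet header ends up 32-bit aligned.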
 */
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf *
vte_fixup_rx(struct ifnet *ifp, struct mbuf *m)
{
	uint16_t *src, *dst;
	int i;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;
	m->m_data -= ETHER_ALIGN;
	return (m);
}
#endif

static void
vte_rxeof(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct vte_rxdesc *rxd;
	struct mbuf *m;
	uint16_t status, total_len;
	int cons, prog;

	bus_dmamap_sync(sc->vte_cdata.vte_rx_ring_tag,
	    sc->vte_cdata.vte_rx_ring_map, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	cons = sc->vte_cdata.vte_rx_cons;
	ifp = sc->vte_ifp;
	for (prog = 0; (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0; prog++,
	    VTE_DESC_INC(cons, VTE_RX_RING_CNT)) {
		rxd = &sc->vte_cdata.vte_rxdesc[cons];
		status = le16toh(rxd->rx_desc->drst);
		if ((status & VTE_DRST_RX_OWN) != 0)
			break;
		total_len = VTE_RX_LEN(le16toh(rxd->rx_desc->drlen));
		m = rxd->rx_m;
		if ((status & VTE_DRST_RX_OK) == 0) {
			/* Discard errored frame. */
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}
		if (vte_newbuf(sc, rxd) != 0) {
			ifp->if_iqdrops++;
			rxd->rx_desc->drlen =
			    htole16(MCLBYTES - sizeof(uint32_t));
			rxd->rx_desc->drst = htole16(VTE_DRST_RX_OWN);
			continue;
		}

		/*
		 * It seems there is no way to strip FCS bytes.
		 */
		m->m_pkthdr.len = m->m_len = total_len - ETHER_CRC_LEN;
		m->m_pkthdr.rcvif = ifp;
#ifndef __NO_STRICT_ALIGNMENT
		vte_fixup_rx(ifp, m);
#endif
		VTE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		VTE_LOCK(sc);
	}

	if (prog > 0) {
		/* Update the consumer index. */
		sc->vte_cdata.vte_rx_cons = cons;
		/*
		 * Sync the updated RX descriptors so the controller
		 * sees the modified RX buffer addresses.
		 */
		bus_dmamap_sync(sc->vte_cdata.vte_rx_ring_tag,
		    sc->vte_cdata.vte_rx_ring_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
#ifdef notyet
		/*
		 * Update the residue counter.  The controller does not
		 * keep track of the number of available RX descriptors,
		 * so the driver would have to update VTE_MRDCR to let
		 * the controller know how many free RX descriptors were
		 * added.  This is a mechanism similar to the one used
		 * in VIA velocity controllers, and it indicates that
		 * the controller just polls the OWN bit of the current
		 * RX descriptor pointer.  A couple of severe issues
		 * were seen on a sample board where the controller
		 * continuously emits TX pause frames once the RX pause
		 * threshold is crossed.  Once triggered it never
		 * recovered from that state; at least I couldn't find
		 * a way to make it work again.  This issue effectively
		 * disconnected the system from the network.  Also, the
		 * controller used 00:00:00:00:00:00 as the source
		 * station address of the TX pause frames, which is
		 * probably one of the reasons the vendor recommends
		 * not enabling flow control on the R6040 controller.
		 */
		CSR_WRITE_2(sc, VTE_MRDCR, prog |
		    (((VTE_RX_RING_CNT * 2) / 10) <<
		    VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));
#endif
	}
}

static void
vte_tick(void *arg)
{
	struct vte_softc *sc;
	struct mii_data *mii;

	sc = (struct vte_softc *)arg;

	VTE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->vte_miibus);
	mii_tick(mii);
	vte_stats_update(sc);
	vte_txeof(sc);
	vte_watchdog(sc);
	callout_reset(&sc->vte_tick_ch, hz, vte_tick, sc);
}

static void
vte_reset(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	mcr = CSR_READ_2(sc, VTE_MCR1);
	CSR_WRITE_2(sc, VTE_MCR1, mcr | MCR1_MAC_RESET);
	for (i = VTE_RESET_TIMEOUT; i > 0; i--) {
		DELAY(10);
		if ((CSR_READ_2(sc, VTE_MCR1) & MCR1_MAC_RESET) == 0)
			break;
	}
	if (i == 0)
		device_printf(sc->vte_dev, "reset timeout(0x%04x)!\n", mcr);
	/*
	 * Follow the vendor's recommended way to reset the MAC.  The
	 * vendor confirms that relying on MCR1_MAC_RESET of VTE_MCR1
	 * is not reliable, so manually reset the internal state
	 * machine.
	 */
	CSR_WRITE_2(sc, VTE_MACSM, 0x0002);
	CSR_WRITE_2(sc, VTE_MACSM, 0);
	DELAY(5000);
}

static void
vte_init(void *xsc)
{
	struct vte_softc *sc;

	sc = (struct vte_softc *)xsc;
	VTE_LOCK(sc);
	vte_init_locked(sc);
	VTE_UNLOCK(sc);
}

static void
vte_init_locked(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct mii_data *mii;
	bus_addr_t paddr;
	uint8_t *eaddr;

	VTE_LOCK_ASSERT(sc);

	ifp = sc->vte_ifp;
	mii = device_get_softc(sc->vte_miibus);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;
	/*
	 * Cancel any pending I/O.
	 */
	vte_stop(sc);
	/*
	 * Reset the chip to a known state.
	 */
	vte_reset(sc);

	/* Initialize RX descriptors. */
	if (vte_init_rx_ring(sc) != 0) {
		device_printf(sc->vte_dev, "no memory for RX buffers.\n");
		vte_stop(sc);
		return;
	}
	if (vte_init_tx_ring(sc) != 0) {
		device_printf(sc->vte_dev, "no memory for TX buffers.\n");
		vte_stop(sc);
		return;
	}

	/*
	 * Reprogram the station address.  The controller supports up
	 * to 4 different station addresses, so the driver programs the
	 * first station address as its own ethernet address and
	 * configures the remaining three addresses as perfect
	 * multicast addresses.
	 */
	eaddr = IF_LLADDR(sc->vte_ifp);
	CSR_WRITE_2(sc, VTE_MID0L, eaddr[1] << 8 | eaddr[0]);
	CSR_WRITE_2(sc, VTE_MID0M, eaddr[3] << 8 | eaddr[2]);
	CSR_WRITE_2(sc, VTE_MID0H, eaddr[5] << 8 | eaddr[4]);

	/* Set TX descriptor base addresses. */
	paddr = sc->vte_cdata.vte_tx_ring_paddr;
	CSR_WRITE_2(sc, VTE_MTDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MTDSA0, paddr & 0xFFFF);
	/* Set RX descriptor base addresses. */
	paddr = sc->vte_cdata.vte_rx_ring_paddr;
	CSR_WRITE_2(sc, VTE_MRDSA1, paddr >> 16);
	CSR_WRITE_2(sc, VTE_MRDSA0, paddr & 0xFFFF);
	/*
	 * Initialize the RX descriptor residue counter and set the RX
	 * pause threshold to 20% of the available RX descriptors.
	 * See comments on vte_rxeof() for details on flow control
	 * issues.
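	 * The threshold field written below is (VTE_RX_RING_CNT * 2) /
	 * 10; with a 64-entry ring, for instance, that works out to 12
	 * descriptors.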
	 */
	CSR_WRITE_2(sc, VTE_MRDCR, (VTE_RX_RING_CNT & VTE_MRDCR_RESIDUE_MASK) |
	    (((VTE_RX_RING_CNT * 2) / 10) << VTE_MRDCR_RX_PAUSE_THRESH_SHIFT));

	/*
	 * Always use the maximum frame size the controller can
	 * support.  Otherwise received frames with a frame length
	 * longer than the vte(4) MTU would be silently dropped in the
	 * controller.  That would break path-MTU discovery, as the
	 * sender wouldn't get any responses from the receiver.  The
	 * RX buffer size should be a multiple of 4.
	 * Note, jumbo frames are silently ignored by the controller,
	 * and even the MAC counters do not detect them.
	 */
	CSR_WRITE_2(sc, VTE_MRBSR, VTE_RX_BUF_SIZE_MAX);

	/* Configure FIFO. */
	CSR_WRITE_2(sc, VTE_MBCR, MBCR_FIFO_XFER_LENGTH_16 |
	    MBCR_TX_FIFO_THRESH_64 | MBCR_RX_FIFO_THRESH_16 |
	    MBCR_SDRAM_BUS_REQ_TIMER_DEFAULT);

	/*
	 * Configure TX/RX MACs.  The actual resolved duplex and flow
	 * control configuration is done after detecting a valid link.
	 * Note, we don't generate early interrupts here either, since
	 * FreeBSD does not have the interrupt latency problems that
	 * Windows has.
	 */
	CSR_WRITE_2(sc, VTE_MCR0, MCR0_ACCPT_LONG_PKT);
	/*
	 * We manually keep track of PHY status changes to
	 * configure resolved duplex and flow control since only
	 * duplex configuration can be automatically reflected to
	 * MCR0.
	 */
	CSR_WRITE_2(sc, VTE_MCR1, MCR1_PKT_LENGTH_1537 |
	    MCR1_EXCESS_COL_RETRY_16);

	/* Initialize RX filter. */
	vte_rxfilter(sc);

	/* Disable TX/RX interrupt moderation control. */
	CSR_WRITE_2(sc, VTE_MRICR, 0);
	CSR_WRITE_2(sc, VTE_MTICR, 0);

	/* Enable MAC event counter interrupts. */
	CSR_WRITE_2(sc, VTE_MECIER, VTE_MECIER_INTRS);
	/* Clear MAC statistics. */
	vte_stats_clear(sc);

	/* Acknowledge all pending interrupts and clear them. */
	CSR_WRITE_2(sc, VTE_MIER, VTE_INTRS);
	CSR_WRITE_2(sc, VTE_MISR, 0);

	sc->vte_flags &= ~VTE_FLAG_LINK;
	/* Switch to the current media. */
	vte_mediachange_locked(ifp);

	callout_reset(&sc->vte_tick_ch, hz, vte_tick, sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}

static void
vte_stop(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct vte_txdesc *txd;
	struct vte_rxdesc *rxd;
	int i;

	VTE_LOCK_ASSERT(sc);
	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp = sc->vte_ifp;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->vte_flags &= ~VTE_FLAG_LINK;
	callout_stop(&sc->vte_tick_ch);
	sc->vte_watchdog_timer = 0;
	vte_stats_update(sc);
	/* Disable interrupts. */
	CSR_WRITE_2(sc, VTE_MIER, 0);
	CSR_WRITE_2(sc, VTE_MECIER, 0);
	/* Stop RX/TX MACs. */
	vte_stop_mac(sc);
	/* Clear interrupts. */
	CSR_READ_2(sc, VTE_MISR);
	/*
	 * Free TX/RX mbufs still in the queues.
	 */
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		if (rxd->rx_m != NULL) {
			bus_dmamap_sync(sc->vte_cdata.vte_rx_tag,
			    rxd->rx_dmamap, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->vte_cdata.vte_rx_tag,
			    rxd->rx_dmamap);
			m_freem(rxd->rx_m);
			rxd->rx_m = NULL;
		}
	}
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		if (txd->tx_m != NULL) {
			bus_dmamap_sync(sc->vte_cdata.vte_tx_tag,
			    txd->tx_dmamap, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->vte_cdata.vte_tx_tag,
			    txd->tx_dmamap);
			if ((txd->tx_flags & VTE_TXMBUF) == 0)
				m_freem(txd->tx_m);
			txd->tx_m = NULL;
			txd->tx_flags &= ~VTE_TXMBUF;
		}
	}
	/* Free TX mbuf pools used for deep copy. */
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		if (sc->vte_cdata.vte_txmbufs[i] != NULL) {
			m_freem(sc->vte_cdata.vte_txmbufs[i]);
			sc->vte_cdata.vte_txmbufs[i] = NULL;
		}
	}
}

static void
vte_start_mac(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	VTE_LOCK_ASSERT(sc);

	/* Enable RX/TX MACs. */
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) !=
	    (MCR0_RX_ENB | MCR0_TX_ENB)) {
		mcr |= MCR0_RX_ENB | MCR0_TX_ENB;
		CSR_WRITE_2(sc, VTE_MCR0, mcr);
		for (i = VTE_TIMEOUT; i > 0; i--) {
			mcr = CSR_READ_2(sc, VTE_MCR0);
			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) ==
			    (MCR0_RX_ENB | MCR0_TX_ENB))
				break;
			DELAY(10);
		}
		if (i == 0)
			device_printf(sc->vte_dev,
			    "could not enable RX/TX MAC(0x%04x)!\n", mcr);
	}
}

static void
vte_stop_mac(struct vte_softc *sc)
{
	uint16_t mcr;
	int i;

	VTE_LOCK_ASSERT(sc);

	/* Disable RX/TX MACs. */
	mcr = CSR_READ_2(sc, VTE_MCR0);
	if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) != 0) {
		mcr &= ~(MCR0_RX_ENB | MCR0_TX_ENB);
		CSR_WRITE_2(sc, VTE_MCR0, mcr);
		for (i = VTE_TIMEOUT; i > 0; i--) {
			mcr = CSR_READ_2(sc, VTE_MCR0);
			if ((mcr & (MCR0_RX_ENB | MCR0_TX_ENB)) == 0)
				break;
			DELAY(10);
		}
		if (i == 0)
			device_printf(sc->vte_dev,
			    "could not disable RX/TX MAC(0x%04x)!\n", mcr);
	}
}

static int
vte_init_tx_ring(struct vte_softc *sc)
{
	struct vte_tx_desc *desc;
	struct vte_txdesc *txd;
	bus_addr_t addr;
	int i;

	VTE_LOCK_ASSERT(sc);

	sc->vte_cdata.vte_tx_prod = 0;
	sc->vte_cdata.vte_tx_cons = 0;
	sc->vte_cdata.vte_tx_cnt = 0;

	/*
	 * Pre-allocate TX mbufs for deep copy.
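	 * One cluster per TX descriptor is kept around while the
	 * interface is running; vte_encap() copies fragmented chains
	 * into these clusters instead of calling m_defrag(9).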
	 */
	if (tx_deep_copy != 0) {
		for (i = 0; i < VTE_TX_RING_CNT; i++) {
			sc->vte_cdata.vte_txmbufs[i] = m_getcl(M_DONTWAIT,
			    MT_DATA, M_PKTHDR);
			if (sc->vte_cdata.vte_txmbufs[i] == NULL)
				return (ENOBUFS);
			sc->vte_cdata.vte_txmbufs[i]->m_pkthdr.len = MCLBYTES;
			sc->vte_cdata.vte_txmbufs[i]->m_len = MCLBYTES;
		}
	}
	desc = sc->vte_cdata.vte_tx_ring;
	bzero(desc, VTE_TX_RING_SZ);
	for (i = 0; i < VTE_TX_RING_CNT; i++) {
		txd = &sc->vte_cdata.vte_txdesc[i];
		txd->tx_m = NULL;
		if (i != VTE_TX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_tx_ring_paddr +
			    sizeof(struct vte_tx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_tx_ring_paddr +
			    sizeof(struct vte_tx_desc) * 0;
		desc = &sc->vte_cdata.vte_tx_ring[i];
		desc->dtnp = htole32(addr);
		txd->tx_desc = desc;
	}

	bus_dmamap_sync(sc->vte_cdata.vte_tx_ring_tag,
	    sc->vte_cdata.vte_tx_ring_map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	return (0);
}

static int
vte_init_rx_ring(struct vte_softc *sc)
{
	struct vte_rx_desc *desc;
	struct vte_rxdesc *rxd;
	bus_addr_t addr;
	int i;

	VTE_LOCK_ASSERT(sc);

	sc->vte_cdata.vte_rx_cons = 0;
	desc = sc->vte_cdata.vte_rx_ring;
	bzero(desc, VTE_RX_RING_SZ);
	for (i = 0; i < VTE_RX_RING_CNT; i++) {
		rxd = &sc->vte_cdata.vte_rxdesc[i];
		rxd->rx_m = NULL;
		if (i != VTE_RX_RING_CNT - 1)
			addr = sc->vte_cdata.vte_rx_ring_paddr +
			    sizeof(struct vte_rx_desc) * (i + 1);
		else
			addr = sc->vte_cdata.vte_rx_ring_paddr +
			    sizeof(struct vte_rx_desc) * 0;
		desc = &sc->vte_cdata.vte_rx_ring[i];
		desc->drnp = htole32(addr);
		rxd->rx_desc = desc;
		if (vte_newbuf(sc, rxd) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->vte_cdata.vte_rx_ring_tag,
	    sc->vte_cdata.vte_rx_ring_map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);

	return (0);
}

static void
vte_rxfilter(struct vte_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint8_t *eaddr;
	uint32_t crc;
	uint16_t rxfilt_perf[VTE_RXFILT_PERFECT_CNT][3];
	uint16_t mchash[4], mcr;
	int i, nperf;

	VTE_LOCK_ASSERT(sc);

	ifp = sc->vte_ifp;

	bzero(mchash, sizeof(mchash));
	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
		rxfilt_perf[i][0] = 0xFFFF;
		rxfilt_perf[i][1] = 0xFFFF;
		rxfilt_perf[i][2] = 0xFFFF;
	}

	mcr = CSR_READ_2(sc, VTE_MCR0);
	mcr &= ~(MCR0_PROMISC | MCR0_BROADCAST | MCR0_MULTICAST);
	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		mcr |= MCR0_BROADCAST;
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((ifp->if_flags & IFF_PROMISC) != 0)
			mcr |= MCR0_PROMISC;
		if ((ifp->if_flags & IFF_ALLMULTI) != 0)
			mcr |= MCR0_MULTICAST;
		mchash[0] = 0xFFFF;
		mchash[1] = 0xFFFF;
		mchash[2] = 0xFFFF;
		mchash[3] = 0xFFFF;
		goto chipit;
	}

	nperf = 0;
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &sc->vte_ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		/*
		 * Program the first 3 multicast groups into
		 * the perfect filter.  For all others, use the
		 * hash table.
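		 * For the hash, the top two bits of the big-endian CRC
		 * of the address select one of the four 16-bit MAR
		 * registers and the next four bits select a bit within
		 * that register.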
		 */
		if (nperf < VTE_RXFILT_PERFECT_CNT) {
			eaddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
			rxfilt_perf[nperf][0] = eaddr[1] << 8 | eaddr[0];
			rxfilt_perf[nperf][1] = eaddr[3] << 8 | eaddr[2];
			rxfilt_perf[nperf][2] = eaddr[5] << 8 | eaddr[4];
			nperf++;
			continue;
		}
		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN);
		mchash[crc >> 30] |= 1 << ((crc >> 26) & 0x0F);
	}
	if_maddr_runlock(ifp);
	if (mchash[0] != 0 || mchash[1] != 0 || mchash[2] != 0 ||
	    mchash[3] != 0)
		mcr |= MCR0_MULTICAST;

chipit:
	/* Program multicast hash table. */
	CSR_WRITE_2(sc, VTE_MAR0, mchash[0]);
	CSR_WRITE_2(sc, VTE_MAR1, mchash[1]);
	CSR_WRITE_2(sc, VTE_MAR2, mchash[2]);
	CSR_WRITE_2(sc, VTE_MAR3, mchash[3]);
	/* Program perfect filter table. */
	for (i = 0; i < VTE_RXFILT_PERFECT_CNT; i++) {
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 0,
		    rxfilt_perf[i][0]);
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 2,
		    rxfilt_perf[i][1]);
		CSR_WRITE_2(sc, VTE_RXFILTER_PEEFECT_BASE + 8 * i + 4,
		    rxfilt_perf[i][2]);
	}
	CSR_WRITE_2(sc, VTE_MCR0, mcr);
	CSR_READ_2(sc, VTE_MCR0);
}

static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (arg1 == NULL)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;

	return (0);
}

static int
sysctl_hw_vte_int_mod(SYSCTL_HANDLER_ARGS)
{

	return (sysctl_int_range(oidp, arg1, arg2, req,
	    VTE_IM_BUNDLE_MIN, VTE_IM_BUNDLE_MAX));
}
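
/*
 * The interrupt moderation knobs registered in vte_sysctl_node() go
 * through the handler above, which rejects values outside
 * [VTE_IM_BUNDLE_MIN, VTE_IM_BUNDLE_MAX].  They can be changed at
 * runtime, e.g. (the value shown is just an example):
 *   sysctl dev.vte.0.int_rx_mod=8
 */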