1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 2007 Sepherosa Ziehau. All rights reserved. 5 * 6 * This code is derived from software contributed to The DragonFly Project 7 * by Sepherosa Ziehau <sepherosa@gmail.com> 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in 17 * the documentation and/or other materials provided with the 18 * distribution. 19 * 3. Neither the name of The DragonFly Project nor the names of its 20 * contributors may be used to endorse or promote products derived 21 * from this software without specific, prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 28 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 31 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 32 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 33 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 35 * 36 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.10 2008/05/18 07:47:14 sephe Exp $ 37 */ 38 39 #include <sys/param.h> 40 #include <sys/systm.h> 41 #include <sys/endian.h> 42 #include <sys/kernel.h> 43 #include <sys/bus.h> 44 #include <sys/malloc.h> 45 #include <sys/mbuf.h> 46 #include <sys/proc.h> 47 #include <sys/rman.h> 48 #include <sys/module.h> 49 #include <sys/socket.h> 50 #include <sys/sockio.h> 51 #include <sys/sysctl.h> 52 53 #include <net/ethernet.h> 54 #include <net/if.h> 55 #include <net/if_var.h> 56 #include <net/if_dl.h> 57 #include <net/if_types.h> 58 #include <net/bpf.h> 59 #include <net/if_arp.h> 60 #include <net/if_media.h> 61 #include <net/if_vlan_var.h> 62 63 #include <machine/bus.h> 64 65 #include <dev/mii/mii.h> 66 #include <dev/mii/miivar.h> 67 68 #include <dev/pci/pcireg.h> 69 #include <dev/pci/pcivar.h> 70 71 #include <dev/et/if_etreg.h> 72 #include <dev/et/if_etvar.h> 73 74 #include "miibus_if.h" 75 76 MODULE_DEPEND(et, pci, 1, 1, 1); 77 MODULE_DEPEND(et, ether, 1, 1, 1); 78 MODULE_DEPEND(et, miibus, 1, 1, 1); 79 80 /* Tunables. 
*/ 81 static int msi_disable = 0; 82 TUNABLE_INT("hw.et.msi_disable", &msi_disable); 83 84 #define ET_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 85 86 static int et_probe(device_t); 87 static int et_attach(device_t); 88 static int et_detach(device_t); 89 static int et_shutdown(device_t); 90 static int et_suspend(device_t); 91 static int et_resume(device_t); 92 93 static int et_miibus_readreg(device_t, int, int); 94 static int et_miibus_writereg(device_t, int, int, int); 95 static void et_miibus_statchg(device_t); 96 97 static void et_init_locked(struct et_softc *); 98 static void et_init(void *); 99 static int et_ioctl(if_t, u_long, caddr_t); 100 static void et_start_locked(if_t); 101 static void et_start(if_t); 102 static int et_watchdog(struct et_softc *); 103 static int et_ifmedia_upd_locked(if_t); 104 static int et_ifmedia_upd(if_t); 105 static void et_ifmedia_sts(if_t, struct ifmediareq *); 106 static uint64_t et_get_counter(if_t, ift_counter); 107 108 static void et_add_sysctls(struct et_softc *); 109 static int et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS); 110 static int et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS); 111 112 static void et_intr(void *); 113 static void et_rxeof(struct et_softc *); 114 static void et_txeof(struct et_softc *); 115 116 static int et_dma_alloc(struct et_softc *); 117 static void et_dma_free(struct et_softc *); 118 static void et_dma_map_addr(void *, bus_dma_segment_t *, int, int); 119 static int et_dma_ring_alloc(struct et_softc *, bus_size_t, bus_size_t, 120 bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, 121 const char *); 122 static void et_dma_ring_free(struct et_softc *, bus_dma_tag_t *, uint8_t **, 123 bus_dmamap_t, bus_addr_t *); 124 static void et_init_tx_ring(struct et_softc *); 125 static int et_init_rx_ring(struct et_softc *); 126 static void et_free_tx_ring(struct et_softc *); 127 static void et_free_rx_ring(struct et_softc *); 128 static int et_encap(struct et_softc *, struct mbuf **); 129 static int et_newbuf_cluster(struct et_rxbuf_data *, int); 130 static int et_newbuf_hdr(struct et_rxbuf_data *, int); 131 static void et_rxbuf_discard(struct et_rxbuf_data *, int); 132 133 static void et_stop(struct et_softc *); 134 static int et_chip_init(struct et_softc *); 135 static void et_chip_attach(struct et_softc *); 136 static void et_init_mac(struct et_softc *); 137 static void et_init_rxmac(struct et_softc *); 138 static void et_init_txmac(struct et_softc *); 139 static int et_init_rxdma(struct et_softc *); 140 static int et_init_txdma(struct et_softc *); 141 static int et_start_rxdma(struct et_softc *); 142 static int et_start_txdma(struct et_softc *); 143 static int et_stop_rxdma(struct et_softc *); 144 static int et_stop_txdma(struct et_softc *); 145 static void et_reset(struct et_softc *); 146 static int et_bus_config(struct et_softc *); 147 static void et_get_eaddr(device_t, uint8_t[]); 148 static void et_setmulti(struct et_softc *); 149 static void et_tick(void *); 150 static void et_stats_update(struct et_softc *); 151 152 static const struct et_dev { 153 uint16_t vid; 154 uint16_t did; 155 const char *desc; 156 } et_devices[] = { 157 { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310, 158 "Agere ET1310 Gigabit Ethernet" }, 159 { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FAST, 160 "Agere ET1310 Fast Ethernet" }, 161 { 0, 0, NULL } 162 }; 163 164 static device_method_t et_methods[] = { 165 DEVMETHOD(device_probe, et_probe), 166 DEVMETHOD(device_attach, et_attach), 167 DEVMETHOD(device_detach, et_detach), 168 
DEVMETHOD(device_shutdown, et_shutdown), 169 DEVMETHOD(device_suspend, et_suspend), 170 DEVMETHOD(device_resume, et_resume), 171 172 DEVMETHOD(miibus_readreg, et_miibus_readreg), 173 DEVMETHOD(miibus_writereg, et_miibus_writereg), 174 DEVMETHOD(miibus_statchg, et_miibus_statchg), 175 176 DEVMETHOD_END 177 }; 178 179 static driver_t et_driver = { 180 "et", 181 et_methods, 182 sizeof(struct et_softc) 183 }; 184 185 DRIVER_MODULE(et, pci, et_driver, 0, 0); 186 MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, et, et_devices, 187 nitems(et_devices) - 1); 188 DRIVER_MODULE(miibus, et, miibus_driver, 0, 0); 189 190 static int et_rx_intr_npkts = 32; 191 static int et_rx_intr_delay = 20; /* x10 usec */ 192 static int et_tx_intr_nsegs = 126; 193 static uint32_t et_timer = 1000 * 1000 * 1000; /* nanosec */ 194 195 TUNABLE_INT("hw.et.timer", &et_timer); 196 TUNABLE_INT("hw.et.rx_intr_npkts", &et_rx_intr_npkts); 197 TUNABLE_INT("hw.et.rx_intr_delay", &et_rx_intr_delay); 198 TUNABLE_INT("hw.et.tx_intr_nsegs", &et_tx_intr_nsegs); 199 200 static int 201 et_probe(device_t dev) 202 { 203 const struct et_dev *d; 204 uint16_t did, vid; 205 206 vid = pci_get_vendor(dev); 207 did = pci_get_device(dev); 208 209 for (d = et_devices; d->desc != NULL; ++d) { 210 if (vid == d->vid && did == d->did) { 211 device_set_desc(dev, d->desc); 212 return (BUS_PROBE_DEFAULT); 213 } 214 } 215 return (ENXIO); 216 } 217 218 static int 219 et_attach(device_t dev) 220 { 221 struct et_softc *sc; 222 if_t ifp; 223 uint8_t eaddr[ETHER_ADDR_LEN]; 224 uint32_t pmcfg; 225 int cap, error, msic; 226 227 sc = device_get_softc(dev); 228 sc->dev = dev; 229 mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 230 MTX_DEF); 231 callout_init_mtx(&sc->sc_tick, &sc->sc_mtx, 0); 232 233 ifp = sc->ifp = if_alloc(IFT_ETHER); 234 235 /* 236 * Initialize tunables 237 */ 238 sc->sc_rx_intr_npkts = et_rx_intr_npkts; 239 sc->sc_rx_intr_delay = et_rx_intr_delay; 240 sc->sc_tx_intr_nsegs = et_tx_intr_nsegs; 241 sc->sc_timer = et_timer; 242 243 /* Enable bus mastering */ 244 pci_enable_busmaster(dev); 245 246 /* 247 * Allocate IO memory 248 */ 249 sc->sc_mem_rid = PCIR_BAR(0); 250 sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 251 &sc->sc_mem_rid, RF_ACTIVE); 252 if (sc->sc_mem_res == NULL) { 253 device_printf(dev, "can't allocate IO memory\n"); 254 return (ENXIO); 255 } 256 257 msic = 0; 258 if (pci_find_cap(dev, PCIY_EXPRESS, &cap) == 0) { 259 sc->sc_expcap = cap; 260 sc->sc_flags |= ET_FLAG_PCIE; 261 msic = pci_msi_count(dev); 262 if (bootverbose) 263 device_printf(dev, "MSI count: %d\n", msic); 264 } 265 if (msic > 0 && msi_disable == 0) { 266 msic = 1; 267 if (pci_alloc_msi(dev, &msic) == 0) { 268 if (msic == 1) { 269 device_printf(dev, "Using %d MSI message\n", 270 msic); 271 sc->sc_flags |= ET_FLAG_MSI; 272 } else 273 pci_release_msi(dev); 274 } 275 } 276 277 /* 278 * Allocate IRQ 279 */ 280 if ((sc->sc_flags & ET_FLAG_MSI) == 0) { 281 sc->sc_irq_rid = 0; 282 sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, 283 &sc->sc_irq_rid, RF_SHAREABLE | RF_ACTIVE); 284 } else { 285 sc->sc_irq_rid = 1; 286 sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, 287 &sc->sc_irq_rid, RF_ACTIVE); 288 } 289 if (sc->sc_irq_res == NULL) { 290 device_printf(dev, "can't allocate irq\n"); 291 error = ENXIO; 292 goto fail; 293 } 294 295 if (pci_get_device(dev) == PCI_PRODUCT_LUCENT_ET1310_FAST) 296 sc->sc_flags |= ET_FLAG_FASTETHER; 297 298 error = et_bus_config(sc); 299 if (error) 300 goto fail; 301 302 et_get_eaddr(dev, eaddr); 303 
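        /*
         * Note: the station address just fetched comes from the
         * ET_PCIR_MAC_ADDR0/1 PCI configuration words (see et_get_eaddr());
         * the chip presumably latches them from its EEPROM at power-up.
         * The ET_PM write below enables the system/TX/RX clocks and, on
         * gigabit parts, powers up the gigabit PHY before the MAC is
         * touched.
         */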
304 /* Take PHY out of COMA and enable clocks. */ 305 pmcfg = ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE; 306 if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0) 307 pmcfg |= EM_PM_GIGEPHY_ENB; 308 CSR_WRITE_4(sc, ET_PM, pmcfg); 309 310 et_reset(sc); 311 312 error = et_dma_alloc(sc); 313 if (error) 314 goto fail; 315 316 if_setsoftc(ifp, sc); 317 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 318 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); 319 if_setinitfn(ifp, et_init); 320 if_setioctlfn(ifp, et_ioctl); 321 if_setstartfn(ifp, et_start); 322 if_setgetcounterfn(ifp, et_get_counter); 323 if_setcapabilities(ifp, IFCAP_TXCSUM | IFCAP_VLAN_MTU); 324 if_setcapenable(ifp, if_getcapabilities(ifp)); 325 if_setsendqlen(ifp, ET_TX_NDESC - 1); 326 if_setsendqready(ifp); 327 328 et_chip_attach(sc); 329 330 error = mii_attach(dev, &sc->sc_miibus, ifp, et_ifmedia_upd, 331 et_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 332 MIIF_DOPAUSE); 333 if (error) { 334 device_printf(dev, "attaching PHYs failed\n"); 335 goto fail; 336 } 337 338 ether_ifattach(ifp, eaddr); 339 340 /* Tell the upper layer(s) we support long frames. */ 341 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); 342 343 error = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_NET | INTR_MPSAFE, 344 NULL, et_intr, sc, &sc->sc_irq_handle); 345 if (error) { 346 ether_ifdetach(ifp); 347 device_printf(dev, "can't setup intr\n"); 348 goto fail; 349 } 350 351 et_add_sysctls(sc); 352 353 return (0); 354 fail: 355 et_detach(dev); 356 return (error); 357 } 358 359 static int 360 et_detach(device_t dev) 361 { 362 struct et_softc *sc; 363 364 sc = device_get_softc(dev); 365 if (device_is_attached(dev)) { 366 ether_ifdetach(sc->ifp); 367 ET_LOCK(sc); 368 et_stop(sc); 369 ET_UNLOCK(sc); 370 callout_drain(&sc->sc_tick); 371 } 372 373 if (sc->sc_miibus != NULL) 374 device_delete_child(dev, sc->sc_miibus); 375 bus_generic_detach(dev); 376 377 if (sc->sc_irq_handle != NULL) 378 bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_handle); 379 if (sc->sc_irq_res != NULL) 380 bus_release_resource(dev, SYS_RES_IRQ, 381 rman_get_rid(sc->sc_irq_res), sc->sc_irq_res); 382 if ((sc->sc_flags & ET_FLAG_MSI) != 0) 383 pci_release_msi(dev); 384 if (sc->sc_mem_res != NULL) 385 bus_release_resource(dev, SYS_RES_MEMORY, 386 rman_get_rid(sc->sc_mem_res), sc->sc_mem_res); 387 388 if (sc->ifp != NULL) 389 if_free(sc->ifp); 390 391 et_dma_free(sc); 392 393 mtx_destroy(&sc->sc_mtx); 394 395 return (0); 396 } 397 398 static int 399 et_shutdown(device_t dev) 400 { 401 struct et_softc *sc; 402 403 sc = device_get_softc(dev); 404 ET_LOCK(sc); 405 et_stop(sc); 406 ET_UNLOCK(sc); 407 return (0); 408 } 409 410 static int 411 et_miibus_readreg(device_t dev, int phy, int reg) 412 { 413 struct et_softc *sc; 414 uint32_t val; 415 int i, ret; 416 417 sc = device_get_softc(dev); 418 /* Stop any pending operations */ 419 CSR_WRITE_4(sc, ET_MII_CMD, 0); 420 421 val = (phy << ET_MII_ADDR_PHY_SHIFT) & ET_MII_ADDR_PHY_MASK; 422 val |= (reg << ET_MII_ADDR_REG_SHIFT) & ET_MII_ADDR_REG_MASK; 423 CSR_WRITE_4(sc, ET_MII_ADDR, val); 424 425 /* Start reading */ 426 CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ); 427 428 #define NRETRY 50 429 430 for (i = 0; i < NRETRY; ++i) { 431 val = CSR_READ_4(sc, ET_MII_IND); 432 if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0) 433 break; 434 DELAY(50); 435 } 436 if (i == NRETRY) { 437 if_printf(sc->ifp, 438 "read phy %d, reg %d timed out\n", phy, reg); 439 ret = 0; 440 goto back; 441 } 442 443 #undef 
NRETRY 444 445 val = CSR_READ_4(sc, ET_MII_STAT); 446 ret = val & ET_MII_STAT_VALUE_MASK; 447 448 back: 449 /* Make sure that the current operation is stopped */ 450 CSR_WRITE_4(sc, ET_MII_CMD, 0); 451 return (ret); 452 } 453 454 static int 455 et_miibus_writereg(device_t dev, int phy, int reg, int val0) 456 { 457 struct et_softc *sc; 458 uint32_t val; 459 int i; 460 461 sc = device_get_softc(dev); 462 /* Stop any pending operations */ 463 CSR_WRITE_4(sc, ET_MII_CMD, 0); 464 465 val = (phy << ET_MII_ADDR_PHY_SHIFT) & ET_MII_ADDR_PHY_MASK; 466 val |= (reg << ET_MII_ADDR_REG_SHIFT) & ET_MII_ADDR_REG_MASK; 467 CSR_WRITE_4(sc, ET_MII_ADDR, val); 468 469 /* Start writing */ 470 CSR_WRITE_4(sc, ET_MII_CTRL, 471 (val0 << ET_MII_CTRL_VALUE_SHIFT) & ET_MII_CTRL_VALUE_MASK); 472 473 #define NRETRY 100 474 475 for (i = 0; i < NRETRY; ++i) { 476 val = CSR_READ_4(sc, ET_MII_IND); 477 if ((val & ET_MII_IND_BUSY) == 0) 478 break; 479 DELAY(50); 480 } 481 if (i == NRETRY) { 482 if_printf(sc->ifp, 483 "write phy %d, reg %d timed out\n", phy, reg); 484 et_miibus_readreg(dev, phy, reg); 485 } 486 487 #undef NRETRY 488 489 /* Make sure that the current operation is stopped */ 490 CSR_WRITE_4(sc, ET_MII_CMD, 0); 491 return (0); 492 } 493 494 static void 495 et_miibus_statchg(device_t dev) 496 { 497 struct et_softc *sc; 498 struct mii_data *mii; 499 if_t ifp; 500 uint32_t cfg1, cfg2, ctrl; 501 int i; 502 503 sc = device_get_softc(dev); 504 505 mii = device_get_softc(sc->sc_miibus); 506 ifp = sc->ifp; 507 if (mii == NULL || ifp == NULL || 508 (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) 509 return; 510 511 sc->sc_flags &= ~ET_FLAG_LINK; 512 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 513 (IFM_ACTIVE | IFM_AVALID)) { 514 switch (IFM_SUBTYPE(mii->mii_media_active)) { 515 case IFM_10_T: 516 case IFM_100_TX: 517 sc->sc_flags |= ET_FLAG_LINK; 518 break; 519 case IFM_1000_T: 520 if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0) 521 sc->sc_flags |= ET_FLAG_LINK; 522 break; 523 } 524 } 525 526 /* XXX Stop TX/RX MAC? */ 527 if ((sc->sc_flags & ET_FLAG_LINK) == 0) 528 return; 529 530 /* Program MACs with resolved speed/duplex/flow-control. */ 531 ctrl = CSR_READ_4(sc, ET_MAC_CTRL); 532 ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII); 533 cfg1 = CSR_READ_4(sc, ET_MAC_CFG1); 534 cfg1 &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW | 535 ET_MAC_CFG1_LOOPBACK); 536 cfg2 = CSR_READ_4(sc, ET_MAC_CFG2); 537 cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII | 538 ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM); 539 cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC | 540 ((7 << ET_MAC_CFG2_PREAMBLE_LEN_SHIFT) & 541 ET_MAC_CFG2_PREAMBLE_LEN_MASK); 542 543 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) 544 cfg2 |= ET_MAC_CFG2_MODE_GMII; 545 else { 546 cfg2 |= ET_MAC_CFG2_MODE_MII; 547 ctrl |= ET_MAC_CTRL_MODE_MII; 548 } 549 550 if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) { 551 cfg2 |= ET_MAC_CFG2_FDX; 552 /* 553 * Controller lacks automatic TX pause frame 554 * generation so it should be handled by driver. 555 * Even though driver can send pause frame with 556 * arbitrary pause time, controller does not 557 * provide a way that tells how many free RX 558 * buffers are available in controller. This 559 * limitation makes it hard to generate XON frame 560 * in time on driver side so don't enable TX flow 561 * control. 
562 */ 563 #ifdef notyet 564 if (IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) 565 cfg1 |= ET_MAC_CFG1_TXFLOW; 566 #endif 567 if (IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) 568 cfg1 |= ET_MAC_CFG1_RXFLOW; 569 } else 570 ctrl |= ET_MAC_CTRL_GHDX; 571 572 CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl); 573 CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2); 574 cfg1 |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN; 575 CSR_WRITE_4(sc, ET_MAC_CFG1, cfg1); 576 577 #define NRETRY 50 578 579 for (i = 0; i < NRETRY; ++i) { 580 cfg1 = CSR_READ_4(sc, ET_MAC_CFG1); 581 if ((cfg1 & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) == 582 (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) 583 break; 584 DELAY(100); 585 } 586 if (i == NRETRY) 587 if_printf(ifp, "can't enable RX/TX\n"); 588 sc->sc_flags |= ET_FLAG_TXRX_ENABLED; 589 590 #undef NRETRY 591 } 592 593 static int 594 et_ifmedia_upd_locked(if_t ifp) 595 { 596 struct et_softc *sc; 597 struct mii_data *mii; 598 struct mii_softc *miisc; 599 600 sc = if_getsoftc(ifp); 601 mii = device_get_softc(sc->sc_miibus); 602 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 603 PHY_RESET(miisc); 604 return (mii_mediachg(mii)); 605 } 606 607 static int 608 et_ifmedia_upd(if_t ifp) 609 { 610 struct et_softc *sc; 611 int res; 612 613 sc = if_getsoftc(ifp); 614 ET_LOCK(sc); 615 res = et_ifmedia_upd_locked(ifp); 616 ET_UNLOCK(sc); 617 618 return (res); 619 } 620 621 static void 622 et_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) 623 { 624 struct et_softc *sc; 625 struct mii_data *mii; 626 627 sc = if_getsoftc(ifp); 628 ET_LOCK(sc); 629 if ((if_getflags(ifp) & IFF_UP) == 0) { 630 ET_UNLOCK(sc); 631 return; 632 } 633 634 mii = device_get_softc(sc->sc_miibus); 635 mii_pollstat(mii); 636 ifmr->ifm_active = mii->mii_media_active; 637 ifmr->ifm_status = mii->mii_media_status; 638 ET_UNLOCK(sc); 639 } 640 641 static void 642 et_stop(struct et_softc *sc) 643 { 644 if_t ifp; 645 646 ET_LOCK_ASSERT(sc); 647 648 ifp = sc->ifp; 649 callout_stop(&sc->sc_tick); 650 /* Disable interrupts. 
*/ 651 CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff); 652 653 CSR_WRITE_4(sc, ET_MAC_CFG1, CSR_READ_4(sc, ET_MAC_CFG1) & ~( 654 ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN)); 655 DELAY(100); 656 657 et_stop_rxdma(sc); 658 et_stop_txdma(sc); 659 et_stats_update(sc); 660 661 et_free_tx_ring(sc); 662 et_free_rx_ring(sc); 663 664 sc->sc_tx = 0; 665 sc->sc_tx_intr = 0; 666 sc->sc_flags &= ~ET_FLAG_TXRX_ENABLED; 667 668 sc->watchdog_timer = 0; 669 if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)); 670 } 671 672 static int 673 et_bus_config(struct et_softc *sc) 674 { 675 uint32_t val, max_plsz; 676 uint16_t ack_latency, replay_timer; 677 678 /* 679 * Test whether EEPROM is valid 680 * NOTE: Read twice to get the correct value 681 */ 682 pci_read_config(sc->dev, ET_PCIR_EEPROM_STATUS, 1); 683 val = pci_read_config(sc->dev, ET_PCIR_EEPROM_STATUS, 1); 684 if (val & ET_PCIM_EEPROM_STATUS_ERROR) { 685 device_printf(sc->dev, "EEPROM status error 0x%02x\n", val); 686 return (ENXIO); 687 } 688 689 /* TODO: LED */ 690 691 if ((sc->sc_flags & ET_FLAG_PCIE) == 0) 692 return (0); 693 694 /* 695 * Configure ACK latency and replay timer according to 696 * max playload size 697 */ 698 val = pci_read_config(sc->dev, 699 sc->sc_expcap + PCIER_DEVICE_CAP, 4); 700 max_plsz = val & PCIEM_CAP_MAX_PAYLOAD; 701 702 switch (max_plsz) { 703 case ET_PCIV_DEVICE_CAPS_PLSZ_128: 704 ack_latency = ET_PCIV_ACK_LATENCY_128; 705 replay_timer = ET_PCIV_REPLAY_TIMER_128; 706 break; 707 708 case ET_PCIV_DEVICE_CAPS_PLSZ_256: 709 ack_latency = ET_PCIV_ACK_LATENCY_256; 710 replay_timer = ET_PCIV_REPLAY_TIMER_256; 711 break; 712 713 default: 714 ack_latency = pci_read_config(sc->dev, ET_PCIR_ACK_LATENCY, 2); 715 replay_timer = pci_read_config(sc->dev, 716 ET_PCIR_REPLAY_TIMER, 2); 717 device_printf(sc->dev, "ack latency %u, replay timer %u\n", 718 ack_latency, replay_timer); 719 break; 720 } 721 if (ack_latency != 0) { 722 pci_write_config(sc->dev, ET_PCIR_ACK_LATENCY, ack_latency, 2); 723 pci_write_config(sc->dev, ET_PCIR_REPLAY_TIMER, replay_timer, 724 2); 725 } 726 727 /* 728 * Set L0s and L1 latency timer to 2us 729 */ 730 val = pci_read_config(sc->dev, ET_PCIR_L0S_L1_LATENCY, 4); 731 val &= ~(PCIEM_LINK_CAP_L0S_EXIT | PCIEM_LINK_CAP_L1_EXIT); 732 /* L0s exit latency : 2us */ 733 val |= 0x00005000; 734 /* L1 exit latency : 2us */ 735 val |= 0x00028000; 736 pci_write_config(sc->dev, ET_PCIR_L0S_L1_LATENCY, val, 4); 737 738 /* 739 * Set max read request size to 2048 bytes 740 */ 741 pci_set_max_read_req(sc->dev, 2048); 742 743 return (0); 744 } 745 746 static void 747 et_get_eaddr(device_t dev, uint8_t eaddr[]) 748 { 749 uint32_t val; 750 int i; 751 752 val = pci_read_config(dev, ET_PCIR_MAC_ADDR0, 4); 753 for (i = 0; i < 4; ++i) 754 eaddr[i] = (val >> (8 * i)) & 0xff; 755 756 val = pci_read_config(dev, ET_PCIR_MAC_ADDR1, 2); 757 for (; i < ETHER_ADDR_LEN; ++i) 758 eaddr[i] = (val >> (8 * (i - 4))) & 0xff; 759 } 760 761 static void 762 et_reset(struct et_softc *sc) 763 { 764 765 CSR_WRITE_4(sc, ET_MAC_CFG1, 766 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 767 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 768 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 769 770 CSR_WRITE_4(sc, ET_SWRST, 771 ET_SWRST_TXDMA | ET_SWRST_RXDMA | 772 ET_SWRST_TXMAC | ET_SWRST_RXMAC | 773 ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC); 774 775 CSR_WRITE_4(sc, ET_MAC_CFG1, 776 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 777 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC); 778 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 779 /* Disable interrupts. 
         */
        CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);
}

struct et_dmamap_arg {
        bus_addr_t      et_busaddr;
};

static void
et_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
        struct et_dmamap_arg *ctx;

        if (error)
                return;

        KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg));

        ctx = arg;
        ctx->et_busaddr = segs->ds_addr;
}

static int
et_dma_ring_alloc(struct et_softc *sc, bus_size_t alignment, bus_size_t maxsize,
    bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map, bus_addr_t *paddr,
    const char *msg)
{
        struct et_dmamap_arg ctx;
        int error;

        error = bus_dma_tag_create(sc->sc_dtag, alignment, 0, BUS_SPACE_MAXADDR,
            BUS_SPACE_MAXADDR, NULL, NULL, maxsize, 1, maxsize, 0, NULL, NULL,
            tag);
        if (error != 0) {
                device_printf(sc->dev, "could not create %s dma tag\n", msg);
                return (error);
        }
        /* Allocate DMA'able memory for ring. */
        error = bus_dmamem_alloc(*tag, (void **)ring,
            BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map);
        if (error != 0) {
                device_printf(sc->dev,
                    "could not allocate DMA'able memory for %s\n", msg);
                return (error);
        }
        /* Load the address of the ring. */
        ctx.et_busaddr = 0;
        error = bus_dmamap_load(*tag, *map, *ring, maxsize, et_dma_map_addr,
            &ctx, BUS_DMA_NOWAIT);
        if (error != 0) {
                device_printf(sc->dev,
                    "could not load DMA'able memory for %s\n", msg);
                return (error);
        }
        *paddr = ctx.et_busaddr;
        return (0);
}

static void
et_dma_ring_free(struct et_softc *sc, bus_dma_tag_t *tag, uint8_t **ring,
    bus_dmamap_t map, bus_addr_t *paddr)
{

        if (*paddr != 0) {
                bus_dmamap_unload(*tag, map);
                *paddr = 0;
        }
        if (*ring != NULL) {
                bus_dmamem_free(*tag, *ring, map);
                *ring = NULL;
        }
        if (*tag) {
                bus_dma_tag_destroy(*tag);
                *tag = NULL;
        }
}

static int
et_dma_alloc(struct et_softc *sc)
{
        struct et_txdesc_ring *tx_ring;
        struct et_rxdesc_ring *rx_ring;
        struct et_rxstat_ring *rxst_ring;
        struct et_rxstatus_data *rxsd;
        struct et_rxbuf_data *rbd;
        struct et_txbuf_data *tbd;
        struct et_txstatus_data *txsd;
        int i, error;

        error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
            BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
            BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
            &sc->sc_dtag);
        if (error != 0) {
                device_printf(sc->dev, "could not allocate parent dma tag\n");
                return (error);
        }

        /* TX ring. */
        tx_ring = &sc->sc_tx_ring;
        error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_TX_RING_SIZE,
            &tx_ring->tr_dtag, (uint8_t **)&tx_ring->tr_desc, &tx_ring->tr_dmap,
            &tx_ring->tr_paddr, "TX ring");
        if (error)
                return (error);

        /* TX status block. */
        txsd = &sc->sc_tx_status;
        error = et_dma_ring_alloc(sc, ET_STATUS_ALIGN, sizeof(uint32_t),
            &txsd->txsd_dtag, (uint8_t **)&txsd->txsd_status, &txsd->txsd_dmap,
            &txsd->txsd_paddr, "TX status block");
        if (error)
                return (error);

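        /*
         * Receive-side DMA layout allocated below: two descriptor rings
         * feed the chip free buffers -- ring 0 is backed by small
         * header-mbuf buffers for short frames, ring 1 by 2KB mbuf
         * clusters for normal sized frames.  Completed frames are
         * reported through the RX stat ring and the RX status block,
         * which et_rxeof() reads.
         */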
        /* RX ring 0, used to receive small sized frames. */
        rx_ring = &sc->sc_rx_ring[0];
        error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RX_RING_SIZE,
            &rx_ring->rr_dtag, (uint8_t **)&rx_ring->rr_desc, &rx_ring->rr_dmap,
            &rx_ring->rr_paddr, "RX ring 0");
        rx_ring->rr_posreg = ET_RX_RING0_POS;
        if (error)
                return (error);

        /* RX ring 1, used to store normal sized frames. */
        rx_ring = &sc->sc_rx_ring[1];
        error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RX_RING_SIZE,
            &rx_ring->rr_dtag, (uint8_t **)&rx_ring->rr_desc, &rx_ring->rr_dmap,
            &rx_ring->rr_paddr, "RX ring 1");
        rx_ring->rr_posreg = ET_RX_RING1_POS;
        if (error)
                return (error);

        /* RX stat ring. */
        rxst_ring = &sc->sc_rxstat_ring;
        error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RXSTAT_RING_SIZE,
            &rxst_ring->rsr_dtag, (uint8_t **)&rxst_ring->rsr_stat,
            &rxst_ring->rsr_dmap, &rxst_ring->rsr_paddr, "RX stat ring");
        if (error)
                return (error);

        /* RX status block. */
        rxsd = &sc->sc_rx_status;
        error = et_dma_ring_alloc(sc, ET_STATUS_ALIGN,
            sizeof(struct et_rxstatus), &rxsd->rxsd_dtag,
            (uint8_t **)&rxsd->rxsd_status, &rxsd->rxsd_dmap,
            &rxsd->rxsd_paddr, "RX status block");
        if (error)
                return (error);

        /* Create parent DMA tag for mbufs. */
        error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
            BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
            BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
            &sc->sc_mbuf_dtag);
        if (error != 0) {
                device_printf(sc->dev,
                    "could not allocate parent dma tag for mbuf\n");
                return (error);
        }

        /* Create DMA tag for mini RX mbufs to use RX ring 0. */
        error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0,
            BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MHLEN, 1,
            MHLEN, 0, NULL, NULL, &sc->sc_rx_mini_tag);
        if (error) {
                device_printf(sc->dev, "could not create mini RX dma tag\n");
                return (error);
        }

        /* Create DMA tag for standard RX mbufs to use RX ring 1. */
        error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0,
            BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
            MCLBYTES, 0, NULL, NULL, &sc->sc_rx_tag);
        if (error) {
                device_printf(sc->dev, "could not create RX dma tag\n");
                return (error);
        }

        /* Create DMA tag for TX mbufs. */
        error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0,
            BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
            MCLBYTES * ET_NSEG_MAX, ET_NSEG_MAX, MCLBYTES, 0, NULL, NULL,
            &sc->sc_tx_tag);
        if (error) {
                device_printf(sc->dev, "could not create TX dma tag\n");
                return (error);
        }

        /* Initialize RX ring 0. */
        rbd = &sc->sc_rx_data[0];
        rbd->rbd_bufsize = ET_RXDMA_CTRL_RING0_128;
        rbd->rbd_newbuf = et_newbuf_hdr;
        rbd->rbd_discard = et_rxbuf_discard;
        rbd->rbd_softc = sc;
        rbd->rbd_ring = &sc->sc_rx_ring[0];
        /* Create DMA maps for mini RX buffers, ring 0. */
        for (i = 0; i < ET_RX_NDESC; i++) {
                error = bus_dmamap_create(sc->sc_rx_mini_tag, 0,
                    &rbd->rbd_buf[i].rb_dmap);
                if (error) {
                        device_printf(sc->dev,
                            "could not create DMA map for mini RX mbufs\n");
                        return (error);
                }
        }

        /* Create a spare DMA map for mini RX buffers, ring 0. */
        error = bus_dmamap_create(sc->sc_rx_mini_tag, 0,
            &sc->sc_rx_mini_sparemap);
        if (error) {
                device_printf(sc->dev,
                    "could not create spare DMA map for mini RX mbuf\n");
                return (error);
        }

        /* Initialize RX ring 1. */
        rbd = &sc->sc_rx_data[1];
        rbd->rbd_bufsize = ET_RXDMA_CTRL_RING1_2048;
        rbd->rbd_newbuf = et_newbuf_cluster;
        rbd->rbd_discard = et_rxbuf_discard;
        rbd->rbd_softc = sc;
        rbd->rbd_ring = &sc->sc_rx_ring[1];
        /* Create DMA maps for standard RX buffers, ring 1. */
        for (i = 0; i < ET_RX_NDESC; i++) {
                error = bus_dmamap_create(sc->sc_rx_tag, 0,
                    &rbd->rbd_buf[i].rb_dmap);
                if (error) {
                        device_printf(sc->dev,
                            "could not create DMA map for RX mbufs\n");
                        return (error);
                }
        }

        /* Create a spare DMA map for standard RX buffers, ring 1. */
        error = bus_dmamap_create(sc->sc_rx_tag, 0, &sc->sc_rx_sparemap);
        if (error) {
                device_printf(sc->dev,
                    "could not create spare DMA map for RX mbuf\n");
                return (error);
        }

        /* Create DMA maps for TX buffers. */
        tbd = &sc->sc_tx_data;
        for (i = 0; i < ET_TX_NDESC; i++) {
                error = bus_dmamap_create(sc->sc_tx_tag, 0,
                    &tbd->tbd_buf[i].tb_dmap);
                if (error) {
                        device_printf(sc->dev,
                            "could not create DMA map for TX mbufs\n");
                        return (error);
                }
        }

        return (0);
}

static void
et_dma_free(struct et_softc *sc)
{
        struct et_txdesc_ring *tx_ring;
        struct et_rxdesc_ring *rx_ring;
        struct et_txstatus_data *txsd;
        struct et_rxstat_ring *rxst_ring;
        struct et_rxbuf_data *rbd;
        struct et_txbuf_data *tbd;
        int i;

        /* Destroy DMA maps for mini RX buffers, ring 0. */
        rbd = &sc->sc_rx_data[0];
        for (i = 0; i < ET_RX_NDESC; i++) {
                if (rbd->rbd_buf[i].rb_dmap) {
                        bus_dmamap_destroy(sc->sc_rx_mini_tag,
                            rbd->rbd_buf[i].rb_dmap);
                        rbd->rbd_buf[i].rb_dmap = NULL;
                }
        }
        if (sc->sc_rx_mini_sparemap) {
                bus_dmamap_destroy(sc->sc_rx_mini_tag, sc->sc_rx_mini_sparemap);
                sc->sc_rx_mini_sparemap = NULL;
        }
        if (sc->sc_rx_mini_tag) {
                bus_dma_tag_destroy(sc->sc_rx_mini_tag);
                sc->sc_rx_mini_tag = NULL;
        }

        /* Destroy DMA maps for standard RX buffers, ring 1. */
        rbd = &sc->sc_rx_data[1];
        for (i = 0; i < ET_RX_NDESC; i++) {
                if (rbd->rbd_buf[i].rb_dmap) {
                        bus_dmamap_destroy(sc->sc_rx_tag,
                            rbd->rbd_buf[i].rb_dmap);
                        rbd->rbd_buf[i].rb_dmap = NULL;
                }
        }
        if (sc->sc_rx_sparemap) {
                bus_dmamap_destroy(sc->sc_rx_tag, sc->sc_rx_sparemap);
                sc->sc_rx_sparemap = NULL;
        }
        if (sc->sc_rx_tag) {
                bus_dma_tag_destroy(sc->sc_rx_tag);
                sc->sc_rx_tag = NULL;
        }

        /* Destroy DMA maps for TX buffers. */
        tbd = &sc->sc_tx_data;
        for (i = 0; i < ET_TX_NDESC; i++) {
                if (tbd->tbd_buf[i].tb_dmap) {
                        bus_dmamap_destroy(sc->sc_tx_tag,
                            tbd->tbd_buf[i].tb_dmap);
                        tbd->tbd_buf[i].tb_dmap = NULL;
                }
        }
        if (sc->sc_tx_tag) {
                bus_dma_tag_destroy(sc->sc_tx_tag);
                sc->sc_tx_tag = NULL;
        }

        /* Destroy mini RX ring, ring 0. */
        rx_ring = &sc->sc_rx_ring[0];
        et_dma_ring_free(sc, &rx_ring->rr_dtag, (void *)&rx_ring->rr_desc,
            rx_ring->rr_dmap, &rx_ring->rr_paddr);
        /* Destroy standard RX ring, ring 1. */
        rx_ring = &sc->sc_rx_ring[1];
        et_dma_ring_free(sc, &rx_ring->rr_dtag, (void *)&rx_ring->rr_desc,
            rx_ring->rr_dmap, &rx_ring->rr_paddr);
        /* Destroy RX stat ring. */
        rxst_ring = &sc->sc_rxstat_ring;
        et_dma_ring_free(sc, &rxst_ring->rsr_dtag, (void *)&rxst_ring->rsr_stat,
            rxst_ring->rsr_dmap, &rxst_ring->rsr_paddr);
        /* Destroy RX status block. */
        et_dma_ring_free(sc, &sc->sc_rx_status.rxsd_dtag,
            (void *)&sc->sc_rx_status.rxsd_status, sc->sc_rx_status.rxsd_dmap,
            &sc->sc_rx_status.rxsd_paddr);
        /* Destroy TX ring. */
        tx_ring = &sc->sc_tx_ring;
        et_dma_ring_free(sc, &tx_ring->tr_dtag, (void *)&tx_ring->tr_desc,
            tx_ring->tr_dmap, &tx_ring->tr_paddr);
        /* Destroy TX status block. */
        txsd = &sc->sc_tx_status;
        et_dma_ring_free(sc, &txsd->txsd_dtag, (void *)&txsd->txsd_status,
            txsd->txsd_dmap, &txsd->txsd_paddr);

        /* Destroy the parent tag. */
        if (sc->sc_dtag) {
                bus_dma_tag_destroy(sc->sc_dtag);
                sc->sc_dtag = NULL;
        }
}

static void
et_chip_attach(struct et_softc *sc)
{
        uint32_t val;

        /*
         * Perform minimal initialization
         */

        /* Disable loopback */
        CSR_WRITE_4(sc, ET_LOOPBACK, 0);

        /* Reset MAC */
        CSR_WRITE_4(sc, ET_MAC_CFG1,
            ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
            ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
            ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

        /*
         * Setup half duplex mode
         */
        val = (10 << ET_MAC_HDX_ALT_BEB_TRUNC_SHIFT) |
            (15 << ET_MAC_HDX_REXMIT_MAX_SHIFT) |
            (55 << ET_MAC_HDX_COLLWIN_SHIFT) |
            ET_MAC_HDX_EXC_DEFER;
        CSR_WRITE_4(sc, ET_MAC_HDX, val);

        /* Clear MAC control */
        CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

        /* Reset MII */
        CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

        /* Bring MAC out of reset state */
        CSR_WRITE_4(sc, ET_MAC_CFG1, 0);

        /* Enable memory controllers */
        CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
}

static void
et_intr(void *xsc)
{
        struct et_softc *sc;
        if_t ifp;
        uint32_t status;

        sc = xsc;
        ET_LOCK(sc);
        ifp = sc->ifp;
        if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
                goto done;

        status = CSR_READ_4(sc, ET_INTR_STATUS);
        if ((status & ET_INTRS) == 0)
                goto done;

        /* Disable further interrupts.
*/ 1185 CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff); 1186 1187 if (status & (ET_INTR_RXDMA_ERROR | ET_INTR_TXDMA_ERROR)) { 1188 device_printf(sc->dev, "DMA error(0x%08x) -- resetting\n", 1189 status); 1190 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); 1191 et_init_locked(sc); 1192 ET_UNLOCK(sc); 1193 return; 1194 } 1195 if (status & ET_INTR_RXDMA) 1196 et_rxeof(sc); 1197 if (status & (ET_INTR_TXDMA | ET_INTR_TIMER)) 1198 et_txeof(sc); 1199 if (status & ET_INTR_TIMER) 1200 CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer); 1201 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 1202 CSR_WRITE_4(sc, ET_INTR_MASK, ~ET_INTRS); 1203 if (!if_sendq_empty(ifp)) 1204 et_start_locked(ifp); 1205 } 1206 done: 1207 ET_UNLOCK(sc); 1208 } 1209 1210 static void 1211 et_init_locked(struct et_softc *sc) 1212 { 1213 if_t ifp; 1214 int error; 1215 1216 ET_LOCK_ASSERT(sc); 1217 1218 ifp = sc->ifp; 1219 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) 1220 return; 1221 1222 et_stop(sc); 1223 et_reset(sc); 1224 1225 et_init_tx_ring(sc); 1226 error = et_init_rx_ring(sc); 1227 if (error) 1228 return; 1229 1230 error = et_chip_init(sc); 1231 if (error) 1232 goto fail; 1233 1234 /* 1235 * Start TX/RX DMA engine 1236 */ 1237 error = et_start_rxdma(sc); 1238 if (error) 1239 return; 1240 1241 error = et_start_txdma(sc); 1242 if (error) 1243 return; 1244 1245 /* Enable interrupts. */ 1246 CSR_WRITE_4(sc, ET_INTR_MASK, ~ET_INTRS); 1247 1248 CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer); 1249 1250 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); 1251 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); 1252 1253 sc->sc_flags &= ~ET_FLAG_LINK; 1254 et_ifmedia_upd_locked(ifp); 1255 1256 callout_reset(&sc->sc_tick, hz, et_tick, sc); 1257 1258 fail: 1259 if (error) 1260 et_stop(sc); 1261 } 1262 1263 static void 1264 et_init(void *xsc) 1265 { 1266 struct et_softc *sc = xsc; 1267 1268 ET_LOCK(sc); 1269 et_init_locked(sc); 1270 ET_UNLOCK(sc); 1271 } 1272 1273 static int 1274 et_ioctl(if_t ifp, u_long cmd, caddr_t data) 1275 { 1276 struct et_softc *sc; 1277 struct mii_data *mii; 1278 struct ifreq *ifr; 1279 int error, mask, max_framelen; 1280 1281 sc = if_getsoftc(ifp); 1282 ifr = (struct ifreq *)data; 1283 error = 0; 1284 1285 /* XXX LOCKSUSED */ 1286 switch (cmd) { 1287 case SIOCSIFFLAGS: 1288 ET_LOCK(sc); 1289 if (if_getflags(ifp) & IFF_UP) { 1290 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 1291 if ((if_getflags(ifp) ^ sc->sc_if_flags) & 1292 (IFF_ALLMULTI | IFF_PROMISC | IFF_BROADCAST)) 1293 et_setmulti(sc); 1294 } else { 1295 et_init_locked(sc); 1296 } 1297 } else { 1298 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) 1299 et_stop(sc); 1300 } 1301 sc->sc_if_flags = if_getflags(ifp); 1302 ET_UNLOCK(sc); 1303 break; 1304 1305 case SIOCSIFMEDIA: 1306 case SIOCGIFMEDIA: 1307 mii = device_get_softc(sc->sc_miibus); 1308 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 1309 break; 1310 1311 case SIOCADDMULTI: 1312 case SIOCDELMULTI: 1313 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 1314 ET_LOCK(sc); 1315 et_setmulti(sc); 1316 ET_UNLOCK(sc); 1317 } 1318 break; 1319 1320 case SIOCSIFMTU: 1321 ET_LOCK(sc); 1322 #if 0 1323 if (sc->sc_flags & ET_FLAG_JUMBO) 1324 max_framelen = ET_JUMBO_FRAMELEN; 1325 else 1326 #endif 1327 max_framelen = MCLBYTES - 1; 1328 1329 if (ET_FRAMELEN(ifr->ifr_mtu) > max_framelen) { 1330 error = EOPNOTSUPP; 1331 ET_UNLOCK(sc); 1332 break; 1333 } 1334 1335 if (if_getmtu(ifp) != ifr->ifr_mtu) { 1336 if_setmtu(ifp, ifr->ifr_mtu); 1337 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 1338 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); 1339 et_init_locked(sc); 1340 } 
1341 } 1342 ET_UNLOCK(sc); 1343 break; 1344 1345 case SIOCSIFCAP: 1346 ET_LOCK(sc); 1347 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); 1348 if ((mask & IFCAP_TXCSUM) != 0 && 1349 (IFCAP_TXCSUM & if_getcapabilities(ifp)) != 0) { 1350 if_togglecapenable(ifp, IFCAP_TXCSUM); 1351 if ((IFCAP_TXCSUM & if_getcapenable(ifp)) != 0) 1352 if_sethwassistbits(ifp, ET_CSUM_FEATURES, 0); 1353 else 1354 if_sethwassistbits(ifp, 0, ET_CSUM_FEATURES); 1355 } 1356 ET_UNLOCK(sc); 1357 break; 1358 1359 default: 1360 error = ether_ioctl(ifp, cmd, data); 1361 break; 1362 } 1363 return (error); 1364 } 1365 1366 static void 1367 et_start_locked(if_t ifp) 1368 { 1369 struct et_softc *sc; 1370 struct mbuf *m_head = NULL; 1371 struct et_txdesc_ring *tx_ring; 1372 struct et_txbuf_data *tbd; 1373 uint32_t tx_ready_pos; 1374 int enq; 1375 1376 sc = if_getsoftc(ifp); 1377 ET_LOCK_ASSERT(sc); 1378 1379 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1380 IFF_DRV_RUNNING || 1381 (sc->sc_flags & (ET_FLAG_LINK | ET_FLAG_TXRX_ENABLED)) != 1382 (ET_FLAG_LINK | ET_FLAG_TXRX_ENABLED)) 1383 return; 1384 1385 /* 1386 * Driver does not request TX completion interrupt for every 1387 * queued frames to prevent generating excessive interrupts. 1388 * This means driver may wait for TX completion interrupt even 1389 * though some frames were successfully transmitted. Reclaiming 1390 * transmitted frames will ensure driver see all available 1391 * descriptors. 1392 */ 1393 tbd = &sc->sc_tx_data; 1394 if (tbd->tbd_used > (ET_TX_NDESC * 2) / 3) 1395 et_txeof(sc); 1396 1397 for (enq = 0; !if_sendq_empty(ifp); ) { 1398 if (tbd->tbd_used + ET_NSEG_SPARE >= ET_TX_NDESC) { 1399 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); 1400 break; 1401 } 1402 1403 m_head = if_dequeue(ifp); 1404 if (m_head == NULL) 1405 break; 1406 1407 if (et_encap(sc, &m_head)) { 1408 if (m_head == NULL) { 1409 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1410 break; 1411 } 1412 if_sendq_prepend(ifp, m_head); 1413 if (tbd->tbd_used > 0) 1414 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); 1415 break; 1416 } 1417 enq++; 1418 ETHER_BPF_MTAP(ifp, m_head); 1419 } 1420 1421 if (enq > 0) { 1422 tx_ring = &sc->sc_tx_ring; 1423 bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap, 1424 BUS_DMASYNC_PREWRITE); 1425 tx_ready_pos = tx_ring->tr_ready_index & 1426 ET_TX_READY_POS_INDEX_MASK; 1427 if (tx_ring->tr_ready_wrap) 1428 tx_ready_pos |= ET_TX_READY_POS_WRAP; 1429 CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos); 1430 sc->watchdog_timer = 5; 1431 } 1432 } 1433 1434 static void 1435 et_start(if_t ifp) 1436 { 1437 struct et_softc *sc; 1438 1439 sc = if_getsoftc(ifp); 1440 ET_LOCK(sc); 1441 et_start_locked(ifp); 1442 ET_UNLOCK(sc); 1443 } 1444 1445 static int 1446 et_watchdog(struct et_softc *sc) 1447 { 1448 uint32_t status; 1449 1450 ET_LOCK_ASSERT(sc); 1451 1452 if (sc->watchdog_timer == 0 || --sc->watchdog_timer) 1453 return (0); 1454 1455 bus_dmamap_sync(sc->sc_tx_status.txsd_dtag, sc->sc_tx_status.txsd_dmap, 1456 BUS_DMASYNC_POSTREAD); 1457 status = le32toh(*(sc->sc_tx_status.txsd_status)); 1458 if_printf(sc->ifp, "watchdog timed out (0x%08x) -- resetting\n", 1459 status); 1460 1461 if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1); 1462 if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING); 1463 et_init_locked(sc); 1464 return (EJUSTRETURN); 1465 } 1466 1467 static int 1468 et_stop_rxdma(struct et_softc *sc) 1469 { 1470 1471 CSR_WRITE_4(sc, ET_RXDMA_CTRL, 1472 ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE); 1473 1474 DELAY(5); 1475 if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & 
ET_RXDMA_CTRL_HALTED) == 0) { 1476 if_printf(sc->ifp, "can't stop RX DMA engine\n"); 1477 return (ETIMEDOUT); 1478 } 1479 return (0); 1480 } 1481 1482 static int 1483 et_stop_txdma(struct et_softc *sc) 1484 { 1485 1486 CSR_WRITE_4(sc, ET_TXDMA_CTRL, 1487 ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT); 1488 return (0); 1489 } 1490 1491 static void 1492 et_free_tx_ring(struct et_softc *sc) 1493 { 1494 struct et_txbuf_data *tbd; 1495 struct et_txbuf *tb; 1496 int i; 1497 1498 tbd = &sc->sc_tx_data; 1499 for (i = 0; i < ET_TX_NDESC; ++i) { 1500 tb = &tbd->tbd_buf[i]; 1501 if (tb->tb_mbuf != NULL) { 1502 bus_dmamap_sync(sc->sc_tx_tag, tb->tb_dmap, 1503 BUS_DMASYNC_POSTWRITE); 1504 bus_dmamap_unload(sc->sc_mbuf_dtag, tb->tb_dmap); 1505 m_freem(tb->tb_mbuf); 1506 tb->tb_mbuf = NULL; 1507 } 1508 } 1509 } 1510 1511 static void 1512 et_free_rx_ring(struct et_softc *sc) 1513 { 1514 struct et_rxbuf_data *rbd; 1515 struct et_rxdesc_ring *rx_ring; 1516 struct et_rxbuf *rb; 1517 int i; 1518 1519 /* Ring 0 */ 1520 rx_ring = &sc->sc_rx_ring[0]; 1521 rbd = &sc->sc_rx_data[0]; 1522 for (i = 0; i < ET_RX_NDESC; ++i) { 1523 rb = &rbd->rbd_buf[i]; 1524 if (rb->rb_mbuf != NULL) { 1525 bus_dmamap_sync(sc->sc_rx_mini_tag, rx_ring->rr_dmap, 1526 BUS_DMASYNC_POSTREAD); 1527 bus_dmamap_unload(sc->sc_rx_mini_tag, rb->rb_dmap); 1528 m_freem(rb->rb_mbuf); 1529 rb->rb_mbuf = NULL; 1530 } 1531 } 1532 1533 /* Ring 1 */ 1534 rx_ring = &sc->sc_rx_ring[1]; 1535 rbd = &sc->sc_rx_data[1]; 1536 for (i = 0; i < ET_RX_NDESC; ++i) { 1537 rb = &rbd->rbd_buf[i]; 1538 if (rb->rb_mbuf != NULL) { 1539 bus_dmamap_sync(sc->sc_rx_tag, rx_ring->rr_dmap, 1540 BUS_DMASYNC_POSTREAD); 1541 bus_dmamap_unload(sc->sc_rx_tag, rb->rb_dmap); 1542 m_freem(rb->rb_mbuf); 1543 rb->rb_mbuf = NULL; 1544 } 1545 } 1546 } 1547 1548 static u_int 1549 et_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) 1550 { 1551 uint32_t h, *hp, *hash = arg; 1552 1553 h = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN); 1554 h = (h & 0x3f800000) >> 23; 1555 1556 hp = &hash[0]; 1557 if (h >= 32 && h < 64) { 1558 h -= 32; 1559 hp = &hash[1]; 1560 } else if (h >= 64 && h < 96) { 1561 h -= 64; 1562 hp = &hash[2]; 1563 } else if (h >= 96) { 1564 h -= 96; 1565 hp = &hash[3]; 1566 } 1567 *hp |= (1 << h); 1568 1569 return (1); 1570 } 1571 1572 static void 1573 et_setmulti(struct et_softc *sc) 1574 { 1575 if_t ifp; 1576 uint32_t hash[4] = { 0, 0, 0, 0 }; 1577 uint32_t rxmac_ctrl, pktfilt; 1578 int i, count; 1579 1580 ET_LOCK_ASSERT(sc); 1581 ifp = sc->ifp; 1582 1583 pktfilt = CSR_READ_4(sc, ET_PKTFILT); 1584 rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL); 1585 1586 pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST); 1587 if (if_getflags(ifp) & (IFF_PROMISC | IFF_ALLMULTI)) { 1588 rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT; 1589 goto back; 1590 } 1591 1592 count = if_foreach_llmaddr(ifp, et_hash_maddr, &hash); 1593 1594 for (i = 0; i < 4; ++i) 1595 CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]); 1596 1597 if (count > 0) 1598 pktfilt |= ET_PKTFILT_MCAST; 1599 rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT; 1600 back: 1601 CSR_WRITE_4(sc, ET_PKTFILT, pktfilt); 1602 CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl); 1603 } 1604 1605 static int 1606 et_chip_init(struct et_softc *sc) 1607 { 1608 if_t ifp; 1609 uint32_t rxq_end; 1610 int error, frame_len, rxmem_size; 1611 1612 ifp = sc->ifp; 1613 /* 1614 * Split 16Kbytes internal memory between TX and RX 1615 * according to frame length. 
1616 */ 1617 frame_len = ET_FRAMELEN(if_getmtu(ifp)); 1618 if (frame_len < 2048) { 1619 rxmem_size = ET_MEM_RXSIZE_DEFAULT; 1620 } else if (frame_len <= ET_RXMAC_CUT_THRU_FRMLEN) { 1621 rxmem_size = ET_MEM_SIZE / 2; 1622 } else { 1623 rxmem_size = ET_MEM_SIZE - 1624 roundup(frame_len + ET_MEM_TXSIZE_EX, ET_MEM_UNIT); 1625 } 1626 rxq_end = ET_QUEUE_ADDR(rxmem_size); 1627 1628 CSR_WRITE_4(sc, ET_RXQUEUE_START, ET_QUEUE_ADDR_START); 1629 CSR_WRITE_4(sc, ET_RXQUEUE_END, rxq_end); 1630 CSR_WRITE_4(sc, ET_TXQUEUE_START, rxq_end + 1); 1631 CSR_WRITE_4(sc, ET_TXQUEUE_END, ET_QUEUE_ADDR_END); 1632 1633 /* No loopback */ 1634 CSR_WRITE_4(sc, ET_LOOPBACK, 0); 1635 1636 /* Clear MSI configure */ 1637 if ((sc->sc_flags & ET_FLAG_MSI) == 0) 1638 CSR_WRITE_4(sc, ET_MSI_CFG, 0); 1639 1640 /* Disable timer */ 1641 CSR_WRITE_4(sc, ET_TIMER, 0); 1642 1643 /* Initialize MAC */ 1644 et_init_mac(sc); 1645 1646 /* Enable memory controllers */ 1647 CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE); 1648 1649 /* Initialize RX MAC */ 1650 et_init_rxmac(sc); 1651 1652 /* Initialize TX MAC */ 1653 et_init_txmac(sc); 1654 1655 /* Initialize RX DMA engine */ 1656 error = et_init_rxdma(sc); 1657 if (error) 1658 return (error); 1659 1660 /* Initialize TX DMA engine */ 1661 error = et_init_txdma(sc); 1662 if (error) 1663 return (error); 1664 1665 return (0); 1666 } 1667 1668 static void 1669 et_init_tx_ring(struct et_softc *sc) 1670 { 1671 struct et_txdesc_ring *tx_ring; 1672 struct et_txbuf_data *tbd; 1673 struct et_txstatus_data *txsd; 1674 1675 tx_ring = &sc->sc_tx_ring; 1676 bzero(tx_ring->tr_desc, ET_TX_RING_SIZE); 1677 bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap, 1678 BUS_DMASYNC_PREWRITE); 1679 1680 tbd = &sc->sc_tx_data; 1681 tbd->tbd_start_index = 0; 1682 tbd->tbd_start_wrap = 0; 1683 tbd->tbd_used = 0; 1684 1685 txsd = &sc->sc_tx_status; 1686 bzero(txsd->txsd_status, sizeof(uint32_t)); 1687 bus_dmamap_sync(txsd->txsd_dtag, txsd->txsd_dmap, 1688 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1689 } 1690 1691 static int 1692 et_init_rx_ring(struct et_softc *sc) 1693 { 1694 struct et_rxstatus_data *rxsd; 1695 struct et_rxstat_ring *rxst_ring; 1696 struct et_rxbuf_data *rbd; 1697 int i, error, n; 1698 1699 for (n = 0; n < ET_RX_NRING; ++n) { 1700 rbd = &sc->sc_rx_data[n]; 1701 for (i = 0; i < ET_RX_NDESC; ++i) { 1702 error = rbd->rbd_newbuf(rbd, i); 1703 if (error) { 1704 if_printf(sc->ifp, "%d ring %d buf, " 1705 "newbuf failed: %d\n", n, i, error); 1706 return (error); 1707 } 1708 } 1709 } 1710 1711 rxsd = &sc->sc_rx_status; 1712 bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus)); 1713 bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap, 1714 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1715 1716 rxst_ring = &sc->sc_rxstat_ring; 1717 bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE); 1718 bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap, 1719 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1720 1721 return (0); 1722 } 1723 1724 static int 1725 et_init_rxdma(struct et_softc *sc) 1726 { 1727 struct et_rxstatus_data *rxsd; 1728 struct et_rxstat_ring *rxst_ring; 1729 struct et_rxdesc_ring *rx_ring; 1730 int error; 1731 1732 error = et_stop_rxdma(sc); 1733 if (error) { 1734 if_printf(sc->ifp, "can't init RX DMA engine\n"); 1735 return (error); 1736 } 1737 1738 /* 1739 * Install RX status 1740 */ 1741 rxsd = &sc->sc_rx_status; 1742 CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr)); 1743 CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr)); 1744 1745 /* 1746 * Install RX stat ring 1747 */ 
1748 rxst_ring = &sc->sc_rxstat_ring; 1749 CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr)); 1750 CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr)); 1751 CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1); 1752 CSR_WRITE_4(sc, ET_RXSTAT_POS, 0); 1753 CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1); 1754 1755 /* Match ET_RXSTAT_POS */ 1756 rxst_ring->rsr_index = 0; 1757 rxst_ring->rsr_wrap = 0; 1758 1759 /* 1760 * Install the 2nd RX descriptor ring 1761 */ 1762 rx_ring = &sc->sc_rx_ring[1]; 1763 CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr)); 1764 CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr)); 1765 CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1); 1766 CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP); 1767 CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1); 1768 1769 /* Match ET_RX_RING1_POS */ 1770 rx_ring->rr_index = 0; 1771 rx_ring->rr_wrap = 1; 1772 1773 /* 1774 * Install the 1st RX descriptor ring 1775 */ 1776 rx_ring = &sc->sc_rx_ring[0]; 1777 CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr)); 1778 CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr)); 1779 CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1); 1780 CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP); 1781 CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1); 1782 1783 /* Match ET_RX_RING0_POS */ 1784 rx_ring->rr_index = 0; 1785 rx_ring->rr_wrap = 1; 1786 1787 /* 1788 * RX intr moderation 1789 */ 1790 CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts); 1791 CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay); 1792 1793 return (0); 1794 } 1795 1796 static int 1797 et_init_txdma(struct et_softc *sc) 1798 { 1799 struct et_txdesc_ring *tx_ring; 1800 struct et_txstatus_data *txsd; 1801 int error; 1802 1803 error = et_stop_txdma(sc); 1804 if (error) { 1805 if_printf(sc->ifp, "can't init TX DMA engine\n"); 1806 return (error); 1807 } 1808 1809 /* 1810 * Install TX descriptor ring 1811 */ 1812 tx_ring = &sc->sc_tx_ring; 1813 CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr)); 1814 CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr)); 1815 CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1); 1816 1817 /* 1818 * Install TX status 1819 */ 1820 txsd = &sc->sc_tx_status; 1821 CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr)); 1822 CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr)); 1823 1824 CSR_WRITE_4(sc, ET_TX_READY_POS, 0); 1825 1826 /* Match ET_TX_READY_POS */ 1827 tx_ring->tr_ready_index = 0; 1828 tx_ring->tr_ready_wrap = 0; 1829 1830 return (0); 1831 } 1832 1833 static void 1834 et_init_mac(struct et_softc *sc) 1835 { 1836 if_t ifp; 1837 const uint8_t *eaddr; 1838 uint32_t val; 1839 1840 /* Reset MAC */ 1841 CSR_WRITE_4(sc, ET_MAC_CFG1, 1842 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 1843 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 1844 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 1845 1846 /* 1847 * Setup inter packet gap 1848 */ 1849 val = (56 << ET_IPG_NONB2B_1_SHIFT) | 1850 (88 << ET_IPG_NONB2B_2_SHIFT) | 1851 (80 << ET_IPG_MINIFG_SHIFT) | 1852 (96 << ET_IPG_B2B_SHIFT); 1853 CSR_WRITE_4(sc, ET_IPG, val); 1854 1855 /* 1856 * Setup half duplex mode 1857 */ 1858 val = (10 << ET_MAC_HDX_ALT_BEB_TRUNC_SHIFT) | 1859 (15 << ET_MAC_HDX_REXMIT_MAX_SHIFT) | 1860 (55 << ET_MAC_HDX_COLLWIN_SHIFT) | 1861 ET_MAC_HDX_EXC_DEFER; 1862 CSR_WRITE_4(sc, ET_MAC_HDX, val); 1863 1864 /* Clear MAC control */ 1865 CSR_WRITE_4(sc, ET_MAC_CTRL, 0); 1866 
1867 /* Reset MII */ 1868 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST); 1869 1870 /* 1871 * Set MAC address 1872 */ 1873 ifp = sc->ifp; 1874 eaddr = if_getlladdr(ifp); 1875 val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24); 1876 CSR_WRITE_4(sc, ET_MAC_ADDR1, val); 1877 val = (eaddr[0] << 16) | (eaddr[1] << 24); 1878 CSR_WRITE_4(sc, ET_MAC_ADDR2, val); 1879 1880 /* Set max frame length */ 1881 CSR_WRITE_4(sc, ET_MAX_FRMLEN, ET_FRAMELEN(if_getmtu(ifp))); 1882 1883 /* Bring MAC out of reset state */ 1884 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 1885 } 1886 1887 static void 1888 et_init_rxmac(struct et_softc *sc) 1889 { 1890 if_t ifp; 1891 const uint8_t *eaddr; 1892 uint32_t val; 1893 int i; 1894 1895 /* Disable RX MAC and WOL */ 1896 CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE); 1897 1898 /* 1899 * Clear all WOL related registers 1900 */ 1901 for (i = 0; i < 3; ++i) 1902 CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0); 1903 for (i = 0; i < 20; ++i) 1904 CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0); 1905 1906 /* 1907 * Set WOL source address. XXX is this necessary? 1908 */ 1909 ifp = sc->ifp; 1910 eaddr = if_getlladdr(ifp); 1911 val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5]; 1912 CSR_WRITE_4(sc, ET_WOL_SA_LO, val); 1913 val = (eaddr[0] << 8) | eaddr[1]; 1914 CSR_WRITE_4(sc, ET_WOL_SA_HI, val); 1915 1916 /* Clear packet filters */ 1917 CSR_WRITE_4(sc, ET_PKTFILT, 0); 1918 1919 /* No ucast filtering */ 1920 CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0); 1921 CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0); 1922 CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0); 1923 1924 if (ET_FRAMELEN(if_getmtu(ifp)) > ET_RXMAC_CUT_THRU_FRMLEN) { 1925 /* 1926 * In order to transmit jumbo packets greater than 1927 * ET_RXMAC_CUT_THRU_FRMLEN bytes, the FIFO between 1928 * RX MAC and RX DMA needs to be reduced in size to 1929 * (ET_MEM_SIZE - ET_MEM_TXSIZE_EX - framelen). In 1930 * order to implement this, we must use "cut through" 1931 * mode in the RX MAC, which chops packets down into 1932 * segments. In this case we selected 256 bytes, 1933 * since this is the size of the PCI-Express TLP's 1934 * that the ET1310 uses. 1935 */ 1936 val = (ET_RXMAC_SEGSZ(256) & ET_RXMAC_MC_SEGSZ_MAX_MASK) | 1937 ET_RXMAC_MC_SEGSZ_ENABLE; 1938 } else { 1939 val = 0; 1940 } 1941 CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val); 1942 1943 CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0); 1944 1945 /* Initialize RX MAC management register */ 1946 CSR_WRITE_4(sc, ET_RXMAC_MGT, 0); 1947 1948 CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0); 1949 1950 CSR_WRITE_4(sc, ET_RXMAC_MGT, 1951 ET_RXMAC_MGT_PASS_ECRC | 1952 ET_RXMAC_MGT_PASS_ELEN | 1953 ET_RXMAC_MGT_PASS_ETRUNC | 1954 ET_RXMAC_MGT_CHECK_PKT); 1955 1956 /* 1957 * Configure runt filtering (may not work on certain chip generation) 1958 */ 1959 val = (ETHER_MIN_LEN << ET_PKTFILT_MINLEN_SHIFT) & 1960 ET_PKTFILT_MINLEN_MASK; 1961 val |= ET_PKTFILT_FRAG; 1962 CSR_WRITE_4(sc, ET_PKTFILT, val); 1963 1964 /* Enable RX MAC but leave WOL disabled */ 1965 CSR_WRITE_4(sc, ET_RXMAC_CTRL, 1966 ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE); 1967 1968 /* 1969 * Setup multicast hash and allmulti/promisc mode 1970 */ 1971 et_setmulti(sc); 1972 } 1973 1974 static void 1975 et_init_txmac(struct et_softc *sc) 1976 { 1977 1978 /* Disable TX MAC and FC(?) */ 1979 CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE); 1980 1981 /* 1982 * Initialize pause time. 1983 * This register should be set before XON/XOFF frame is 1984 * sent by driver. 
1985 */ 1986 CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0 << ET_TXMAC_FLOWCTRL_CFPT_SHIFT); 1987 1988 /* Enable TX MAC but leave FC(?) disabled */ 1989 CSR_WRITE_4(sc, ET_TXMAC_CTRL, 1990 ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE); 1991 } 1992 1993 static int 1994 et_start_rxdma(struct et_softc *sc) 1995 { 1996 uint32_t val; 1997 1998 val = (sc->sc_rx_data[0].rbd_bufsize & ET_RXDMA_CTRL_RING0_SIZE_MASK) | 1999 ET_RXDMA_CTRL_RING0_ENABLE; 2000 val |= (sc->sc_rx_data[1].rbd_bufsize & ET_RXDMA_CTRL_RING1_SIZE_MASK) | 2001 ET_RXDMA_CTRL_RING1_ENABLE; 2002 2003 CSR_WRITE_4(sc, ET_RXDMA_CTRL, val); 2004 2005 DELAY(5); 2006 2007 if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) { 2008 if_printf(sc->ifp, "can't start RX DMA engine\n"); 2009 return (ETIMEDOUT); 2010 } 2011 return (0); 2012 } 2013 2014 static int 2015 et_start_txdma(struct et_softc *sc) 2016 { 2017 2018 CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT); 2019 return (0); 2020 } 2021 2022 static void 2023 et_rxeof(struct et_softc *sc) 2024 { 2025 struct et_rxstatus_data *rxsd; 2026 struct et_rxstat_ring *rxst_ring; 2027 struct et_rxbuf_data *rbd; 2028 struct et_rxdesc_ring *rx_ring; 2029 struct et_rxstat *st; 2030 if_t ifp; 2031 struct mbuf *m; 2032 uint32_t rxstat_pos, rxring_pos; 2033 uint32_t rxst_info1, rxst_info2, rxs_stat_ring; 2034 int buflen, buf_idx, npost[2], ring_idx; 2035 int rxst_index, rxst_wrap; 2036 2037 ET_LOCK_ASSERT(sc); 2038 2039 ifp = sc->ifp; 2040 rxsd = &sc->sc_rx_status; 2041 rxst_ring = &sc->sc_rxstat_ring; 2042 2043 if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0) 2044 return; 2045 2046 bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap, 2047 BUS_DMASYNC_POSTREAD); 2048 bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap, 2049 BUS_DMASYNC_POSTREAD); 2050 2051 npost[0] = npost[1] = 0; 2052 rxs_stat_ring = le32toh(rxsd->rxsd_status->rxs_stat_ring); 2053 rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0; 2054 rxst_index = (rxs_stat_ring & ET_RXS_STATRING_INDEX_MASK) >> 2055 ET_RXS_STATRING_INDEX_SHIFT; 2056 2057 while (rxst_index != rxst_ring->rsr_index || 2058 rxst_wrap != rxst_ring->rsr_wrap) { 2059 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) 2060 break; 2061 2062 MPASS(rxst_ring->rsr_index < ET_RX_NSTAT); 2063 st = &rxst_ring->rsr_stat[rxst_ring->rsr_index]; 2064 rxst_info1 = le32toh(st->rxst_info1); 2065 rxst_info2 = le32toh(st->rxst_info2); 2066 buflen = (rxst_info2 & ET_RXST_INFO2_LEN_MASK) >> 2067 ET_RXST_INFO2_LEN_SHIFT; 2068 buf_idx = (rxst_info2 & ET_RXST_INFO2_BUFIDX_MASK) >> 2069 ET_RXST_INFO2_BUFIDX_SHIFT; 2070 ring_idx = (rxst_info2 & ET_RXST_INFO2_RINGIDX_MASK) >> 2071 ET_RXST_INFO2_RINGIDX_SHIFT; 2072 2073 if (++rxst_ring->rsr_index == ET_RX_NSTAT) { 2074 rxst_ring->rsr_index = 0; 2075 rxst_ring->rsr_wrap ^= 1; 2076 } 2077 rxstat_pos = rxst_ring->rsr_index & ET_RXSTAT_POS_INDEX_MASK; 2078 if (rxst_ring->rsr_wrap) 2079 rxstat_pos |= ET_RXSTAT_POS_WRAP; 2080 CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos); 2081 2082 if (ring_idx >= ET_RX_NRING) { 2083 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 2084 if_printf(ifp, "invalid ring index %d\n", ring_idx); 2085 continue; 2086 } 2087 if (buf_idx >= ET_RX_NDESC) { 2088 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 2089 if_printf(ifp, "invalid buf index %d\n", buf_idx); 2090 continue; 2091 } 2092 2093 rbd = &sc->sc_rx_data[ring_idx]; 2094 m = rbd->rbd_buf[buf_idx].rb_mbuf; 2095 if ((rxst_info1 & ET_RXST_INFO1_OK) == 0){ 2096 /* Discard errored frame. 
static void
et_rxeof(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd;
	struct et_rxstat_ring *rxst_ring;
	struct et_rxbuf_data *rbd;
	struct et_rxdesc_ring *rx_ring;
	struct et_rxstat *st;
	if_t ifp;
	struct mbuf *m;
	uint32_t rxstat_pos, rxring_pos;
	uint32_t rxst_info1, rxst_info2, rxs_stat_ring;
	int buflen, buf_idx, npost[2], ring_idx;
	int rxst_index, rxst_wrap;

	ET_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	rxsd = &sc->sc_rx_status;
	rxst_ring = &sc->sc_rxstat_ring;

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
	    BUS_DMASYNC_POSTREAD);

	npost[0] = npost[1] = 0;
	rxs_stat_ring = le32toh(rxsd->rxsd_status->rxs_stat_ring);
	rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
	rxst_index = (rxs_stat_ring & ET_RXS_STATRING_INDEX_MASK) >>
	    ET_RXS_STATRING_INDEX_SHIFT;

	while (rxst_index != rxst_ring->rsr_index ||
	    rxst_wrap != rxst_ring->rsr_wrap) {
		if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
			break;

		MPASS(rxst_ring->rsr_index < ET_RX_NSTAT);
		st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];
		rxst_info1 = le32toh(st->rxst_info1);
		rxst_info2 = le32toh(st->rxst_info2);
		buflen = (rxst_info2 & ET_RXST_INFO2_LEN_MASK) >>
		    ET_RXST_INFO2_LEN_SHIFT;
		buf_idx = (rxst_info2 & ET_RXST_INFO2_BUFIDX_MASK) >>
		    ET_RXST_INFO2_BUFIDX_SHIFT;
		ring_idx = (rxst_info2 & ET_RXST_INFO2_RINGIDX_MASK) >>
		    ET_RXST_INFO2_RINGIDX_SHIFT;

		if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
			rxst_ring->rsr_index = 0;
			rxst_ring->rsr_wrap ^= 1;
		}
		rxstat_pos = rxst_ring->rsr_index & ET_RXSTAT_POS_INDEX_MASK;
		if (rxst_ring->rsr_wrap)
			rxstat_pos |= ET_RXSTAT_POS_WRAP;
		CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);

		if (ring_idx >= ET_RX_NRING) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			if_printf(ifp, "invalid ring index %d\n", ring_idx);
			continue;
		}
		if (buf_idx >= ET_RX_NDESC) {
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			if_printf(ifp, "invalid buf index %d\n", buf_idx);
			continue;
		}

		rbd = &sc->sc_rx_data[ring_idx];
		m = rbd->rbd_buf[buf_idx].rb_mbuf;
		if ((rxst_info1 & ET_RXST_INFO1_OK) == 0) {
			/* Discard errored frame. */
			rbd->rbd_discard(rbd, buf_idx);
		} else if (rbd->rbd_newbuf(rbd, buf_idx) != 0) {
			/* No available mbufs, discard it. */
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			rbd->rbd_discard(rbd, buf_idx);
		} else {
			buflen -= ETHER_CRC_LEN;
			if (buflen < ETHER_HDR_LEN) {
				m_freem(m);
				if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			} else {
				m->m_pkthdr.len = m->m_len = buflen;
				m->m_pkthdr.rcvif = ifp;
				ET_UNLOCK(sc);
				if_input(ifp, m);
				ET_LOCK(sc);
			}
		}

		rx_ring = &sc->sc_rx_ring[ring_idx];
		if (buf_idx != rx_ring->rr_index) {
			if_printf(ifp,
			    "WARNING!! ring %d, buf_idx %d, rr_idx %d\n",
			    ring_idx, buf_idx, rx_ring->rr_index);
		}

		MPASS(rx_ring->rr_index < ET_RX_NDESC);
		if (++rx_ring->rr_index == ET_RX_NDESC) {
			rx_ring->rr_index = 0;
			rx_ring->rr_wrap ^= 1;
		}
		rxring_pos = rx_ring->rr_index & ET_RX_RING_POS_INDEX_MASK;
		if (rx_ring->rr_wrap)
			rxring_pos |= ET_RX_RING_POS_WRAP;
		CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
	}

	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
	    BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
	    BUS_DMASYNC_PREREAD);
}
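
/*
 * Map an outgoing mbuf chain into TX descriptors.  If the chain has more
 * than ET_NSEG_MAX fragments it is first collapsed.  A completion
 * interrupt is requested only every sc_tx_intr_nsegs segments, which
 * moderates the TX interrupt rate.
 */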
static int
et_encap(struct et_softc *sc, struct mbuf **m0)
{
	struct et_txdesc_ring *tx_ring;
	struct et_txbuf_data *tbd;
	struct et_txdesc *td;
	struct mbuf *m;
	bus_dma_segment_t segs[ET_NSEG_MAX];
	bus_dmamap_t map;
	uint32_t csum_flags, last_td_ctrl2;
	int error, i, idx, first_idx, last_idx, nsegs;

	tx_ring = &sc->sc_tx_ring;
	MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
	tbd = &sc->sc_tx_data;
	first_idx = tx_ring->tr_ready_index;
	map = tbd->tbd_buf[first_idx].tb_dmap;

	error = bus_dmamap_load_mbuf_sg(sc->sc_tx_tag, map, *m0, segs, &nsegs,
	    0);
	if (error == EFBIG) {
		m = m_collapse(*m0, M_NOWAIT, ET_NSEG_MAX);
		if (m == NULL) {
			m_freem(*m0);
			*m0 = NULL;
			return (ENOMEM);
		}
		*m0 = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_tx_tag, map, *m0, segs,
		    &nsegs, 0);
		if (error != 0) {
			m_freem(*m0);
			*m0 = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);

	/* Check for descriptor overruns. */
	if (tbd->tbd_used + nsegs > ET_TX_NDESC - 1) {
		bus_dmamap_unload(sc->sc_tx_tag, map);
		return (ENOBUFS);
	}
	bus_dmamap_sync(sc->sc_tx_tag, map, BUS_DMASYNC_PREWRITE);

	last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
	sc->sc_tx += nsegs;
	if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
		sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
		last_td_ctrl2 |= ET_TDCTRL2_INTR;
	}

	m = *m0;
	csum_flags = 0;
	if ((m->m_pkthdr.csum_flags & ET_CSUM_FEATURES) != 0) {
		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
			csum_flags |= ET_TDCTRL2_CSUM_IP;
		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
			csum_flags |= ET_TDCTRL2_CSUM_UDP;
		else if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
			csum_flags |= ET_TDCTRL2_CSUM_TCP;
	}
	last_idx = -1;
	for (i = 0; i < nsegs; ++i) {
		idx = (first_idx + i) % ET_TX_NDESC;
		td = &tx_ring->tr_desc[idx];
		td->td_addr_hi = htole32(ET_ADDR_HI(segs[i].ds_addr));
		td->td_addr_lo = htole32(ET_ADDR_LO(segs[i].ds_addr));
		td->td_ctrl1 = htole32(segs[i].ds_len & ET_TDCTRL1_LEN_MASK);
		if (i == nsegs - 1) {
			/* Last frag */
			td->td_ctrl2 = htole32(last_td_ctrl2 | csum_flags);
			last_idx = idx;
		} else
			td->td_ctrl2 = htole32(csum_flags);

		MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
		if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
			tx_ring->tr_ready_index = 0;
			tx_ring->tr_ready_wrap ^= 1;
		}
	}
	td = &tx_ring->tr_desc[first_idx];
	/* First frag */
	td->td_ctrl2 |= htole32(ET_TDCTRL2_FIRST_FRAG);

	MPASS(last_idx >= 0);
	tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
	tbd->tbd_buf[last_idx].tb_dmap = map;
	tbd->tbd_buf[last_idx].tb_mbuf = m;

	tbd->tbd_used += nsegs;
	MPASS(tbd->tbd_used <= ET_TX_NDESC);

	return (0);
}
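
/*
 * Reclaim transmitted descriptors.  ET_TX_DONE_POS reports the hardware's
 * consumer position as an index plus a wrap bit; everything between the
 * driver's start index and that position has been sent, so its mbufs and
 * DMA maps can be released.
 */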
static void
et_txeof(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring;
	struct et_txbuf_data *tbd;
	struct et_txbuf *tb;
	if_t ifp;
	uint32_t tx_done;
	int end, wrap;

	ET_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	tx_ring = &sc->sc_tx_ring;
	tbd = &sc->sc_tx_data;

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	if (tbd->tbd_used == 0)
		return;

	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
	    BUS_DMASYNC_POSTWRITE);

	tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
	end = tx_done & ET_TX_DONE_POS_INDEX_MASK;
	wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;

	while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
		MPASS(tbd->tbd_start_index < ET_TX_NDESC);
		tb = &tbd->tbd_buf[tbd->tbd_start_index];
		if (tb->tb_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_tx_tag, tb->tb_dmap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_tx_tag, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
		}

		if (++tbd->tbd_start_index == ET_TX_NDESC) {
			tbd->tbd_start_index = 0;
			tbd->tbd_start_wrap ^= 1;
		}

		MPASS(tbd->tbd_used > 0);
		tbd->tbd_used--;
	}

	if (tbd->tbd_used == 0)
		sc->watchdog_timer = 0;
	if (tbd->tbd_used + ET_NSEG_SPARE < ET_TX_NDESC)
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
}

static void
et_tick(void *xsc)
{
	struct et_softc *sc;
	struct mii_data *mii;

	sc = xsc;
	ET_LOCK_ASSERT(sc);
	mii = device_get_softc(sc->sc_miibus);

	mii_tick(mii);
	et_stats_update(sc);
	if (et_watchdog(sc) == EJUSTRETURN)
		return;
	callout_reset(&sc->sc_tick, hz, et_tick, sc);
}

static int
et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx)
{
	struct et_softc *sc;
	struct et_rxdesc *desc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t dmap;
	int nsegs;

	MPASS(buf_idx < ET_RX_NDESC);
	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, ETHER_ALIGN);

	sc = rbd->rbd_softc;
	rb = &rbd->rbd_buf[buf_idx];

	if (bus_dmamap_load_mbuf_sg(sc->sc_rx_tag, sc->sc_rx_sparemap, m,
	    segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	if (rb->rb_mbuf != NULL) {
		bus_dmamap_sync(sc->sc_rx_tag, rb->rb_dmap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rx_tag, rb->rb_dmap);
	}
	dmap = rb->rb_dmap;
	rb->rb_dmap = sc->sc_rx_sparemap;
	sc->sc_rx_sparemap = dmap;
	bus_dmamap_sync(sc->sc_rx_tag, rb->rb_dmap, BUS_DMASYNC_PREREAD);

	rb->rb_mbuf = m;
	desc = &rbd->rbd_ring->rr_desc[buf_idx];
	desc->rd_addr_hi = htole32(ET_ADDR_HI(segs[0].ds_addr));
	desc->rd_addr_lo = htole32(ET_ADDR_LO(segs[0].ds_addr));
	desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);
	bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap,
	    BUS_DMASYNC_PREWRITE);
	return (0);
}

static void
et_rxbuf_discard(struct et_rxbuf_data *rbd, int buf_idx)
{
	struct et_rxdesc *desc;

	desc = &rbd->rbd_ring->rr_desc[buf_idx];
	desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);
	bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap,
	    BUS_DMASYNC_PREWRITE);
}
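
/*
 * et_newbuf_hdr() is the small-buffer counterpart of et_newbuf_cluster():
 * one RX ring is stocked with MHLEN-sized mbufs for short frames while
 * the other uses full clusters.  Both use the same spare-map scheme: the
 * new mbuf is loaded into the spare DMA map first, so the old buffer is
 * only torn down once the replacement is known to be good.
 */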
static int
et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx)
{
	struct et_softc *sc;
	struct et_rxdesc *desc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t dmap;
	int nsegs;

	MPASS(buf_idx < ET_RX_NDESC);
	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MHLEN;
	m_adj(m, ETHER_ALIGN);

	sc = rbd->rbd_softc;
	rb = &rbd->rbd_buf[buf_idx];

	if (bus_dmamap_load_mbuf_sg(sc->sc_rx_mini_tag, sc->sc_rx_mini_sparemap,
	    m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	if (rb->rb_mbuf != NULL) {
		bus_dmamap_sync(sc->sc_rx_mini_tag, rb->rb_dmap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rx_mini_tag, rb->rb_dmap);
	}
	dmap = rb->rb_dmap;
	rb->rb_dmap = sc->sc_rx_mini_sparemap;
	sc->sc_rx_mini_sparemap = dmap;
	bus_dmamap_sync(sc->sc_rx_mini_tag, rb->rb_dmap, BUS_DMASYNC_PREREAD);

	rb->rb_mbuf = m;
	desc = &rbd->rbd_ring->rr_desc[buf_idx];
	desc->rd_addr_hi = htole32(ET_ADDR_HI(segs[0].ds_addr));
	desc->rd_addr_lo = htole32(ET_ADDR_LO(segs[0].ds_addr));
	desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);
	bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap,
	    BUS_DMASYNC_PREWRITE);
	return (0);
}

#define	ET_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
#define	ET_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
	SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)

/*
 * Create sysctl tree
 */
static void
et_add_sysctls(struct et_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children, *parent;
	struct sysctl_oid *tree;
	struct et_hw_stats *stats;

	ctx = device_get_sysctl_ctx(sc->dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_npkts",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
	    et_sysctl_rx_intr_npkts, "I", "RX IM, # packets per RX interrupt");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_delay",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
	    et_sysctl_rx_intr_delay, "I",
	    "RX IM, RX interrupt delay (x10 usec)");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_intr_nsegs",
	    CTLFLAG_RW, &sc->sc_tx_intr_nsegs, 0,
	    "TX IM, # segments per TX interrupt");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "timer",
	    CTLFLAG_RW, &sc->sc_timer, 0, "TX timer");

	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "ET statistics");
	parent = SYSCTL_CHILDREN(tree);

	/* TX/RX statistics. */
	stats = &sc->sc_stats;
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_64", &stats->pkts_64,
	    "0 to 64 bytes frames");
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_65_127", &stats->pkts_65,
	    "65 to 127 bytes frames");
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_128_255", &stats->pkts_128,
	    "128 to 255 bytes frames");
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_256_511", &stats->pkts_256,
	    "256 to 511 bytes frames");
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_512_1023", &stats->pkts_512,
	    "512 to 1023 bytes frames");
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_1024_1518", &stats->pkts_1024,
	    "1024 to 1518 bytes frames");
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_1519_1522", &stats->pkts_1519,
	    "1519 to 1522 bytes frames");
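
	/*
	 * The statistics nodes above and below are exported read-only under
	 * the device's sysctl tree; on a typical system they should appear
	 * as, e.g., "sysctl dev.et.0.stats.rx.frames" (unit 0 is only an
	 * example).
	 */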
	/* RX statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX MAC statistics");
	children = SYSCTL_CHILDREN(tree);
	ET_SYSCTL_STAT_ADD64(ctx, children, "bytes",
	    &stats->rx_bytes, "Good bytes");
	ET_SYSCTL_STAT_ADD64(ctx, children, "frames",
	    &stats->rx_frames, "Good frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "crc_errs",
	    &stats->rx_crcerrs, "CRC errors");
	ET_SYSCTL_STAT_ADD64(ctx, children, "mcast_frames",
	    &stats->rx_mcast, "Multicast frames");
	ET_SYSCTL_STAT_ADD64(ctx, children, "bcast_frames",
	    &stats->rx_bcast, "Broadcast frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "control",
	    &stats->rx_control, "Control frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "pause",
	    &stats->rx_pause, "Pause frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "unknown_control",
	    &stats->rx_unknown_control, "Unknown control frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "align_errs",
	    &stats->rx_alignerrs, "Alignment errors");
	ET_SYSCTL_STAT_ADD32(ctx, children, "len_errs",
	    &stats->rx_lenerrs, "Frames with mismatched length");
	ET_SYSCTL_STAT_ADD32(ctx, children, "code_errs",
	    &stats->rx_codeerrs, "Frames with code error");
	ET_SYSCTL_STAT_ADD32(ctx, children, "cs_errs",
	    &stats->rx_cserrs, "Frames with carrier sense error");
	ET_SYSCTL_STAT_ADD32(ctx, children, "runts",
	    &stats->rx_runts, "Too short frames");
	ET_SYSCTL_STAT_ADD64(ctx, children, "oversize",
	    &stats->rx_oversize, "Oversized frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "fragments",
	    &stats->rx_fragments, "Fragmented frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "jabbers",
	    &stats->rx_jabbers, "Frames with jabber error");
	ET_SYSCTL_STAT_ADD32(ctx, children, "drop",
	    &stats->rx_drop, "Dropped frames");
	/* TX statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TX MAC statistics");
	children = SYSCTL_CHILDREN(tree);
	ET_SYSCTL_STAT_ADD64(ctx, children, "bytes",
	    &stats->tx_bytes, "Good bytes");
	ET_SYSCTL_STAT_ADD64(ctx, children, "frames",
	    &stats->tx_frames, "Good frames");
	ET_SYSCTL_STAT_ADD64(ctx, children, "mcast_frames",
	    &stats->tx_mcast, "Multicast frames");
	ET_SYSCTL_STAT_ADD64(ctx, children, "bcast_frames",
	    &stats->tx_bcast, "Broadcast frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "pause",
	    &stats->tx_pause, "Pause frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "deferred",
	    &stats->tx_deferred, "Deferred frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "excess_deferred",
	    &stats->tx_excess_deferred, "Excessively deferred frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "single_colls",
	    &stats->tx_single_colls, "Single collisions");
	ET_SYSCTL_STAT_ADD32(ctx, children, "multi_colls",
	    &stats->tx_multi_colls, "Multiple collisions");
	ET_SYSCTL_STAT_ADD32(ctx, children, "late_colls",
	    &stats->tx_late_colls, "Late collisions");
	ET_SYSCTL_STAT_ADD32(ctx, children, "excess_colls",
	    &stats->tx_excess_colls, "Excess collisions");
	ET_SYSCTL_STAT_ADD32(ctx, children, "total_colls",
	    &stats->tx_total_colls, "Total collisions");
	ET_SYSCTL_STAT_ADD32(ctx, children, "pause_honored",
	    &stats->tx_pause_honored, "Honored pause frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "drop",
	    &stats->tx_drop, "Dropped frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "jabbers",
	    &stats->tx_jabbers, "Frames with jabber errors");
	ET_SYSCTL_STAT_ADD32(ctx, children, "crc_errs",
	    &stats->tx_crcerrs, "Frames with CRC errors");
	ET_SYSCTL_STAT_ADD32(ctx, children, "control",
	    &stats->tx_control, "Control frames");
	ET_SYSCTL_STAT_ADD64(ctx, children, "oversize",
	    &stats->tx_oversize, "Oversized frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "undersize",
	    &stats->tx_undersize, "Undersized frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "fragments",
	    &stats->tx_fragments, "Fragmented frames");
}

#undef ET_SYSCTL_STAT_ADD32
#undef ET_SYSCTL_STAT_ADD64

static int
et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS)
{
	struct et_softc *sc;
	if_t ifp;
	int error, v;

	sc = arg1;
	ifp = sc->ifp;
	v = sc->sc_rx_intr_npkts;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;
	if (v <= 0) {
		error = EINVAL;
		goto back;
	}

	if (sc->sc_rx_intr_npkts != v) {
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, v);
		sc->sc_rx_intr_npkts = v;
	}
back:
	return (error);
}

static int
et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS)
{
	struct et_softc *sc;
	if_t ifp;
	int error, v;

	sc = arg1;
	ifp = sc->ifp;
	v = sc->sc_rx_intr_delay;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;
	if (v <= 0) {
		error = EINVAL;
		goto back;
	}

	if (sc->sc_rx_intr_delay != v) {
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			CSR_WRITE_4(sc, ET_RX_INTR_DELAY, v);
		sc->sc_rx_intr_delay = v;
	}
back:
	return (error);
}
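
/*
 * Fold the MAC's hardware statistics counters into the 64-bit soft
 * counters kept in the softc.  This runs once per second from et_tick();
 * the accumulation suggests the hardware registers are read-and-clear,
 * but that is inferred from the code rather than documented here.
 */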
static void
et_stats_update(struct et_softc *sc)
{
	struct et_hw_stats *stats;

	stats = &sc->sc_stats;
	stats->pkts_64 += CSR_READ_4(sc, ET_STAT_PKTS_64);
	stats->pkts_65 += CSR_READ_4(sc, ET_STAT_PKTS_65_127);
	stats->pkts_128 += CSR_READ_4(sc, ET_STAT_PKTS_128_255);
	stats->pkts_256 += CSR_READ_4(sc, ET_STAT_PKTS_256_511);
	stats->pkts_512 += CSR_READ_4(sc, ET_STAT_PKTS_512_1023);
	stats->pkts_1024 += CSR_READ_4(sc, ET_STAT_PKTS_1024_1518);
	stats->pkts_1519 += CSR_READ_4(sc, ET_STAT_PKTS_1519_1522);

	stats->rx_bytes += CSR_READ_4(sc, ET_STAT_RX_BYTES);
	stats->rx_frames += CSR_READ_4(sc, ET_STAT_RX_FRAMES);
	stats->rx_crcerrs += CSR_READ_4(sc, ET_STAT_RX_CRC_ERR);
	stats->rx_mcast += CSR_READ_4(sc, ET_STAT_RX_MCAST);
	stats->rx_bcast += CSR_READ_4(sc, ET_STAT_RX_BCAST);
	stats->rx_control += CSR_READ_4(sc, ET_STAT_RX_CTL);
	stats->rx_pause += CSR_READ_4(sc, ET_STAT_RX_PAUSE);
	stats->rx_unknown_control += CSR_READ_4(sc, ET_STAT_RX_UNKNOWN_CTL);
	stats->rx_alignerrs += CSR_READ_4(sc, ET_STAT_RX_ALIGN_ERR);
	stats->rx_lenerrs += CSR_READ_4(sc, ET_STAT_RX_LEN_ERR);
	stats->rx_codeerrs += CSR_READ_4(sc, ET_STAT_RX_CODE_ERR);
	stats->rx_cserrs += CSR_READ_4(sc, ET_STAT_RX_CS_ERR);
	stats->rx_runts += CSR_READ_4(sc, ET_STAT_RX_RUNT);
	stats->rx_oversize += CSR_READ_4(sc, ET_STAT_RX_OVERSIZE);
	stats->rx_fragments += CSR_READ_4(sc, ET_STAT_RX_FRAG);
	stats->rx_jabbers += CSR_READ_4(sc, ET_STAT_RX_JABBER);
	stats->rx_drop += CSR_READ_4(sc, ET_STAT_RX_DROP);

	stats->tx_bytes += CSR_READ_4(sc, ET_STAT_TX_BYTES);
	stats->tx_frames += CSR_READ_4(sc, ET_STAT_TX_FRAMES);
	stats->tx_mcast += CSR_READ_4(sc, ET_STAT_TX_MCAST);
	stats->tx_bcast += CSR_READ_4(sc, ET_STAT_TX_BCAST);
	stats->tx_pause += CSR_READ_4(sc, ET_STAT_TX_PAUSE);
	stats->tx_deferred += CSR_READ_4(sc, ET_STAT_TX_DEFER);
	stats->tx_excess_deferred += CSR_READ_4(sc, ET_STAT_TX_EXCESS_DEFER);
	stats->tx_single_colls += CSR_READ_4(sc, ET_STAT_TX_SINGLE_COL);
	stats->tx_multi_colls += CSR_READ_4(sc, ET_STAT_TX_MULTI_COL);
	stats->tx_late_colls += CSR_READ_4(sc, ET_STAT_TX_LATE_COL);
	stats->tx_excess_colls += CSR_READ_4(sc, ET_STAT_TX_EXCESS_COL);
	stats->tx_total_colls += CSR_READ_4(sc, ET_STAT_TX_TOTAL_COL);
	stats->tx_pause_honored += CSR_READ_4(sc, ET_STAT_TX_PAUSE_HONOR);
	stats->tx_drop += CSR_READ_4(sc, ET_STAT_TX_DROP);
	stats->tx_jabbers += CSR_READ_4(sc, ET_STAT_TX_JABBER);
	stats->tx_crcerrs += CSR_READ_4(sc, ET_STAT_TX_CRC_ERR);
	stats->tx_control += CSR_READ_4(sc, ET_STAT_TX_CTL);
	stats->tx_oversize += CSR_READ_4(sc, ET_STAT_TX_OVERSIZE);
	stats->tx_undersize += CSR_READ_4(sc, ET_STAT_TX_UNDERSIZE);
	stats->tx_fragments += CSR_READ_4(sc, ET_STAT_TX_FRAG);
}
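
/*
 * if_get_counter method: interface-level statistics are derived from the
 * accumulated MAC counters above rather than per-packet software counts,
 * so the reported figures are only as fresh as the last et_stats_update()
 * call.
 */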
static uint64_t
et_get_counter(if_t ifp, ift_counter cnt)
{
	struct et_softc *sc;
	struct et_hw_stats *stats;

	sc = if_getsoftc(ifp);
	stats = &sc->sc_stats;

	switch (cnt) {
	case IFCOUNTER_OPACKETS:
		return (stats->tx_frames);
	case IFCOUNTER_COLLISIONS:
		return (stats->tx_total_colls);
	case IFCOUNTER_OERRORS:
		return (stats->tx_drop + stats->tx_jabbers +
		    stats->tx_crcerrs + stats->tx_excess_deferred +
		    stats->tx_late_colls);
	case IFCOUNTER_IPACKETS:
		return (stats->rx_frames);
	case IFCOUNTER_IERRORS:
		return (stats->rx_crcerrs + stats->rx_alignerrs +
		    stats->rx_lenerrs + stats->rx_codeerrs + stats->rx_cserrs +
		    stats->rx_runts + stats->rx_jabbers + stats->rx_drop);
	default:
		return (if_get_counter_default(ifp, cnt));
	}
}

static int
et_suspend(device_t dev)
{
	struct et_softc *sc;
	uint32_t pmcfg;

	sc = device_get_softc(dev);
	ET_LOCK(sc);
	if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) != 0)
		et_stop(sc);
	/* Disable all clocks and put PHY into COMA. */
	pmcfg = CSR_READ_4(sc, ET_PM);
	pmcfg &= ~(EM_PM_GIGEPHY_ENB | ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE |
	    ET_PM_RXCLK_GATE);
	pmcfg |= ET_PM_PHY_SW_COMA;
	CSR_WRITE_4(sc, ET_PM, pmcfg);
	ET_UNLOCK(sc);
	return (0);
}

static int
et_resume(device_t dev)
{
	struct et_softc *sc;
	uint32_t pmcfg;

	sc = device_get_softc(dev);
	ET_LOCK(sc);
	/* Take PHY out of COMA and enable clocks. */
	pmcfg = ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE;
	if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0)
		pmcfg |= EM_PM_GIGEPHY_ENB;
	CSR_WRITE_4(sc, ET_PM, pmcfg);
	if ((if_getflags(sc->ifp) & IFF_UP) != 0)
		et_init_locked(sc);
	ET_UNLOCK(sc);
	return (0);
}