/*-
 * Copyright (c) 2007 Sepherosa Ziehau.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Sepherosa Ziehau <sepherosa@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.10 2008/05/18 07:47:14 sephe Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/bpf.h>
#include <net/if_arp.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>

#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/et/if_etreg.h>
#include <dev/et/if_etvar.h>

#include "miibus_if.h"

MODULE_DEPEND(et, pci, 1, 1, 1);
MODULE_DEPEND(et, ether, 1, 1, 1);
MODULE_DEPEND(et, miibus, 1, 1, 1);

/* Tunables.
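 *
 * A minimal usage sketch (assumption, not taken from this file): the hw.et.*
 * knobs declared below are fetched with TUNABLE_INT(), so they can be set
 * from loader.conf(5) before the driver initializes, e.g.:
 *
 *     hw.et.msi_disable=1
 *     hw.et.rx_intr_npkts=64
 *
 * The values shown are illustrative only, not recommendations.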
*/ 81 static int msi_disable = 0; 82 TUNABLE_INT("hw.et.msi_disable", &msi_disable); 83 84 #define ET_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 85 86 static int et_probe(device_t); 87 static int et_attach(device_t); 88 static int et_detach(device_t); 89 static int et_shutdown(device_t); 90 static int et_suspend(device_t); 91 static int et_resume(device_t); 92 93 static int et_miibus_readreg(device_t, int, int); 94 static int et_miibus_writereg(device_t, int, int, int); 95 static void et_miibus_statchg(device_t); 96 97 static void et_init_locked(struct et_softc *); 98 static void et_init(void *); 99 static int et_ioctl(struct ifnet *, u_long, caddr_t); 100 static void et_start_locked(struct ifnet *); 101 static void et_start(struct ifnet *); 102 static int et_watchdog(struct et_softc *); 103 static int et_ifmedia_upd_locked(struct ifnet *); 104 static int et_ifmedia_upd(struct ifnet *); 105 static void et_ifmedia_sts(struct ifnet *, struct ifmediareq *); 106 107 static void et_add_sysctls(struct et_softc *); 108 static int et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS); 109 static int et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS); 110 111 static void et_intr(void *); 112 static void et_rxeof(struct et_softc *); 113 static void et_txeof(struct et_softc *); 114 115 static int et_dma_alloc(struct et_softc *); 116 static void et_dma_free(struct et_softc *); 117 static void et_dma_map_addr(void *, bus_dma_segment_t *, int, int); 118 static int et_dma_ring_alloc(struct et_softc *, bus_size_t, bus_size_t, 119 bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, 120 const char *); 121 static void et_dma_ring_free(struct et_softc *, bus_dma_tag_t *, uint8_t **, 122 bus_dmamap_t *); 123 static void et_init_tx_ring(struct et_softc *); 124 static int et_init_rx_ring(struct et_softc *); 125 static void et_free_tx_ring(struct et_softc *); 126 static void et_free_rx_ring(struct et_softc *); 127 static int et_encap(struct et_softc *, struct mbuf **); 128 static int et_newbuf_cluster(struct et_rxbuf_data *, int); 129 static int et_newbuf_hdr(struct et_rxbuf_data *, int); 130 static void et_rxbuf_discard(struct et_rxbuf_data *, int); 131 132 static void et_stop(struct et_softc *); 133 static int et_chip_init(struct et_softc *); 134 static void et_chip_attach(struct et_softc *); 135 static void et_init_mac(struct et_softc *); 136 static void et_init_rxmac(struct et_softc *); 137 static void et_init_txmac(struct et_softc *); 138 static int et_init_rxdma(struct et_softc *); 139 static int et_init_txdma(struct et_softc *); 140 static int et_start_rxdma(struct et_softc *); 141 static int et_start_txdma(struct et_softc *); 142 static int et_stop_rxdma(struct et_softc *); 143 static int et_stop_txdma(struct et_softc *); 144 static void et_reset(struct et_softc *); 145 static int et_bus_config(struct et_softc *); 146 static void et_get_eaddr(device_t, uint8_t[]); 147 static void et_setmulti(struct et_softc *); 148 static void et_tick(void *); 149 static void et_stats_update(struct et_softc *); 150 151 static const struct et_dev { 152 uint16_t vid; 153 uint16_t did; 154 const char *desc; 155 } et_devices[] = { 156 { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310, 157 "Agere ET1310 Gigabit Ethernet" }, 158 { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FAST, 159 "Agere ET1310 Fast Ethernet" }, 160 { 0, 0, NULL } 161 }; 162 163 static device_method_t et_methods[] = { 164 DEVMETHOD(device_probe, et_probe), 165 DEVMETHOD(device_attach, et_attach), 166 DEVMETHOD(device_detach, et_detach), 167 
DEVMETHOD(device_shutdown, et_shutdown), 168 DEVMETHOD(device_suspend, et_suspend), 169 DEVMETHOD(device_resume, et_resume), 170 171 DEVMETHOD(miibus_readreg, et_miibus_readreg), 172 DEVMETHOD(miibus_writereg, et_miibus_writereg), 173 DEVMETHOD(miibus_statchg, et_miibus_statchg), 174 175 DEVMETHOD_END 176 }; 177 178 static driver_t et_driver = { 179 "et", 180 et_methods, 181 sizeof(struct et_softc) 182 }; 183 184 static devclass_t et_devclass; 185 186 DRIVER_MODULE(et, pci, et_driver, et_devclass, 0, 0); 187 DRIVER_MODULE(miibus, et, miibus_driver, miibus_devclass, 0, 0); 188 189 static int et_rx_intr_npkts = 32; 190 static int et_rx_intr_delay = 20; /* x10 usec */ 191 static int et_tx_intr_nsegs = 126; 192 static uint32_t et_timer = 1000 * 1000 * 1000; /* nanosec */ 193 194 TUNABLE_INT("hw.et.timer", &et_timer); 195 TUNABLE_INT("hw.et.rx_intr_npkts", &et_rx_intr_npkts); 196 TUNABLE_INT("hw.et.rx_intr_delay", &et_rx_intr_delay); 197 TUNABLE_INT("hw.et.tx_intr_nsegs", &et_tx_intr_nsegs); 198 199 static int 200 et_probe(device_t dev) 201 { 202 const struct et_dev *d; 203 uint16_t did, vid; 204 205 vid = pci_get_vendor(dev); 206 did = pci_get_device(dev); 207 208 for (d = et_devices; d->desc != NULL; ++d) { 209 if (vid == d->vid && did == d->did) { 210 device_set_desc(dev, d->desc); 211 return (BUS_PROBE_DEFAULT); 212 } 213 } 214 return (ENXIO); 215 } 216 217 static int 218 et_attach(device_t dev) 219 { 220 struct et_softc *sc; 221 struct ifnet *ifp; 222 uint8_t eaddr[ETHER_ADDR_LEN]; 223 uint32_t pmcfg; 224 int cap, error, msic; 225 226 sc = device_get_softc(dev); 227 sc->dev = dev; 228 mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 229 MTX_DEF); 230 callout_init_mtx(&sc->sc_tick, &sc->sc_mtx, 0); 231 232 ifp = sc->ifp = if_alloc(IFT_ETHER); 233 if (ifp == NULL) { 234 device_printf(dev, "can not if_alloc()\n"); 235 error = ENOSPC; 236 goto fail; 237 } 238 239 /* 240 * Initialize tunables 241 */ 242 sc->sc_rx_intr_npkts = et_rx_intr_npkts; 243 sc->sc_rx_intr_delay = et_rx_intr_delay; 244 sc->sc_tx_intr_nsegs = et_tx_intr_nsegs; 245 sc->sc_timer = et_timer; 246 247 /* Enable bus mastering */ 248 pci_enable_busmaster(dev); 249 250 /* 251 * Allocate IO memory 252 */ 253 sc->sc_mem_rid = PCIR_BAR(0); 254 sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 255 &sc->sc_mem_rid, RF_ACTIVE); 256 if (sc->sc_mem_res == NULL) { 257 device_printf(dev, "can't allocate IO memory\n"); 258 return (ENXIO); 259 } 260 261 msic = 0; 262 if (pci_find_cap(dev, PCIY_EXPRESS, &cap) == 0) { 263 sc->sc_expcap = cap; 264 sc->sc_flags |= ET_FLAG_PCIE; 265 msic = pci_msi_count(dev); 266 if (bootverbose) 267 device_printf(dev, "MSI count: %d\n", msic); 268 } 269 if (msic > 0 && msi_disable == 0) { 270 msic = 1; 271 if (pci_alloc_msi(dev, &msic) == 0) { 272 if (msic == 1) { 273 device_printf(dev, "Using %d MSI message\n", 274 msic); 275 sc->sc_flags |= ET_FLAG_MSI; 276 } else 277 pci_release_msi(dev); 278 } 279 } 280 281 /* 282 * Allocate IRQ 283 */ 284 if ((sc->sc_flags & ET_FLAG_MSI) == 0) { 285 sc->sc_irq_rid = 0; 286 sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, 287 &sc->sc_irq_rid, RF_SHAREABLE | RF_ACTIVE); 288 } else { 289 sc->sc_irq_rid = 1; 290 sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, 291 &sc->sc_irq_rid, RF_ACTIVE); 292 } 293 if (sc->sc_irq_res == NULL) { 294 device_printf(dev, "can't allocate irq\n"); 295 error = ENXIO; 296 goto fail; 297 } 298 299 if (pci_get_device(dev) == PCI_PRODUCT_LUCENT_ET1310_FAST) 300 sc->sc_flags |= ET_FLAG_FASTETHER; 301 302 
error = et_bus_config(sc); 303 if (error) 304 goto fail; 305 306 et_get_eaddr(dev, eaddr); 307 308 /* Take PHY out of COMA and enable clocks. */ 309 pmcfg = ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE; 310 if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0) 311 pmcfg |= EM_PM_GIGEPHY_ENB; 312 CSR_WRITE_4(sc, ET_PM, pmcfg); 313 314 et_reset(sc); 315 316 error = et_dma_alloc(sc); 317 if (error) 318 goto fail; 319 320 ifp->if_softc = sc; 321 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 322 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 323 ifp->if_init = et_init; 324 ifp->if_ioctl = et_ioctl; 325 ifp->if_start = et_start; 326 ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_MTU; 327 ifp->if_capenable = ifp->if_capabilities; 328 ifp->if_snd.ifq_drv_maxlen = ET_TX_NDESC - 1; 329 IFQ_SET_MAXLEN(&ifp->if_snd, ET_TX_NDESC - 1); 330 IFQ_SET_READY(&ifp->if_snd); 331 332 et_chip_attach(sc); 333 334 error = mii_attach(dev, &sc->sc_miibus, ifp, et_ifmedia_upd, 335 et_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 336 MIIF_DOPAUSE); 337 if (error) { 338 device_printf(dev, "attaching PHYs failed\n"); 339 goto fail; 340 } 341 342 ether_ifattach(ifp, eaddr); 343 344 /* Tell the upper layer(s) we support long frames. */ 345 ifp->if_hdrlen = sizeof(struct ether_vlan_header); 346 347 error = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_NET | INTR_MPSAFE, 348 NULL, et_intr, sc, &sc->sc_irq_handle); 349 if (error) { 350 ether_ifdetach(ifp); 351 device_printf(dev, "can't setup intr\n"); 352 goto fail; 353 } 354 355 et_add_sysctls(sc); 356 357 return (0); 358 fail: 359 et_detach(dev); 360 return (error); 361 } 362 363 static int 364 et_detach(device_t dev) 365 { 366 struct et_softc *sc; 367 368 sc = device_get_softc(dev); 369 if (device_is_attached(dev)) { 370 ether_ifdetach(sc->ifp); 371 ET_LOCK(sc); 372 et_stop(sc); 373 ET_UNLOCK(sc); 374 callout_drain(&sc->sc_tick); 375 } 376 377 if (sc->sc_miibus != NULL) 378 device_delete_child(dev, sc->sc_miibus); 379 bus_generic_detach(dev); 380 381 if (sc->sc_irq_handle != NULL) 382 bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_handle); 383 if (sc->sc_irq_res != NULL) 384 bus_release_resource(dev, SYS_RES_IRQ, 385 rman_get_rid(sc->sc_irq_res), sc->sc_irq_res); 386 if ((sc->sc_flags & ET_FLAG_MSI) != 0) 387 pci_release_msi(dev); 388 if (sc->sc_mem_res != NULL) 389 bus_release_resource(dev, SYS_RES_MEMORY, 390 rman_get_rid(sc->sc_mem_res), sc->sc_mem_res); 391 392 if (sc->ifp != NULL) 393 if_free(sc->ifp); 394 395 et_dma_free(sc); 396 397 mtx_destroy(&sc->sc_mtx); 398 399 return (0); 400 } 401 402 static int 403 et_shutdown(device_t dev) 404 { 405 struct et_softc *sc; 406 407 sc = device_get_softc(dev); 408 ET_LOCK(sc); 409 et_stop(sc); 410 ET_UNLOCK(sc); 411 return (0); 412 } 413 414 static int 415 et_miibus_readreg(device_t dev, int phy, int reg) 416 { 417 struct et_softc *sc; 418 uint32_t val; 419 int i, ret; 420 421 sc = device_get_softc(dev); 422 /* Stop any pending operations */ 423 CSR_WRITE_4(sc, ET_MII_CMD, 0); 424 425 val = (phy << ET_MII_ADDR_PHY_SHIFT) & ET_MII_ADDR_PHY_MASK; 426 val |= (reg << ET_MII_ADDR_REG_SHIFT) & ET_MII_ADDR_REG_MASK; 427 CSR_WRITE_4(sc, ET_MII_ADDR, val); 428 429 /* Start reading */ 430 CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ); 431 432 #define NRETRY 50 433 434 for (i = 0; i < NRETRY; ++i) { 435 val = CSR_READ_4(sc, ET_MII_IND); 436 if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0) 437 break; 438 DELAY(50); 439 } 440 if (i == NRETRY) { 441 if_printf(sc->ifp, 442 "read phy %d, 
reg %d timed out\n", phy, reg); 443 ret = 0; 444 goto back; 445 } 446 447 #undef NRETRY 448 449 val = CSR_READ_4(sc, ET_MII_STAT); 450 ret = val & ET_MII_STAT_VALUE_MASK; 451 452 back: 453 /* Make sure that the current operation is stopped */ 454 CSR_WRITE_4(sc, ET_MII_CMD, 0); 455 return (ret); 456 } 457 458 static int 459 et_miibus_writereg(device_t dev, int phy, int reg, int val0) 460 { 461 struct et_softc *sc; 462 uint32_t val; 463 int i; 464 465 sc = device_get_softc(dev); 466 /* Stop any pending operations */ 467 CSR_WRITE_4(sc, ET_MII_CMD, 0); 468 469 val = (phy << ET_MII_ADDR_PHY_SHIFT) & ET_MII_ADDR_PHY_MASK; 470 val |= (reg << ET_MII_ADDR_REG_SHIFT) & ET_MII_ADDR_REG_MASK; 471 CSR_WRITE_4(sc, ET_MII_ADDR, val); 472 473 /* Start writing */ 474 CSR_WRITE_4(sc, ET_MII_CTRL, 475 (val0 << ET_MII_CTRL_VALUE_SHIFT) & ET_MII_CTRL_VALUE_MASK); 476 477 #define NRETRY 100 478 479 for (i = 0; i < NRETRY; ++i) { 480 val = CSR_READ_4(sc, ET_MII_IND); 481 if ((val & ET_MII_IND_BUSY) == 0) 482 break; 483 DELAY(50); 484 } 485 if (i == NRETRY) { 486 if_printf(sc->ifp, 487 "write phy %d, reg %d timed out\n", phy, reg); 488 et_miibus_readreg(dev, phy, reg); 489 } 490 491 #undef NRETRY 492 493 /* Make sure that the current operation is stopped */ 494 CSR_WRITE_4(sc, ET_MII_CMD, 0); 495 return (0); 496 } 497 498 static void 499 et_miibus_statchg(device_t dev) 500 { 501 struct et_softc *sc; 502 struct mii_data *mii; 503 struct ifnet *ifp; 504 uint32_t cfg1, cfg2, ctrl; 505 int i; 506 507 sc = device_get_softc(dev); 508 509 mii = device_get_softc(sc->sc_miibus); 510 ifp = sc->ifp; 511 if (mii == NULL || ifp == NULL || 512 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 513 return; 514 515 sc->sc_flags &= ~ET_FLAG_LINK; 516 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 517 (IFM_ACTIVE | IFM_AVALID)) { 518 switch (IFM_SUBTYPE(mii->mii_media_active)) { 519 case IFM_10_T: 520 case IFM_100_TX: 521 sc->sc_flags |= ET_FLAG_LINK; 522 break; 523 case IFM_1000_T: 524 if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0) 525 sc->sc_flags |= ET_FLAG_LINK; 526 break; 527 } 528 } 529 530 /* XXX Stop TX/RX MAC? */ 531 if ((sc->sc_flags & ET_FLAG_LINK) == 0) 532 return; 533 534 /* Program MACs with resolved speed/duplex/flow-control. */ 535 ctrl = CSR_READ_4(sc, ET_MAC_CTRL); 536 ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII); 537 cfg1 = CSR_READ_4(sc, ET_MAC_CFG1); 538 cfg1 &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW | 539 ET_MAC_CFG1_LOOPBACK); 540 cfg2 = CSR_READ_4(sc, ET_MAC_CFG2); 541 cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII | 542 ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM); 543 cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC | 544 ((7 << ET_MAC_CFG2_PREAMBLE_LEN_SHIFT) & 545 ET_MAC_CFG2_PREAMBLE_LEN_MASK); 546 547 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) 548 cfg2 |= ET_MAC_CFG2_MODE_GMII; 549 else { 550 cfg2 |= ET_MAC_CFG2_MODE_MII; 551 ctrl |= ET_MAC_CTRL_MODE_MII; 552 } 553 554 if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) { 555 cfg2 |= ET_MAC_CFG2_FDX; 556 /* 557 * Controller lacks automatic TX pause frame 558 * generation so it should be handled by driver. 559 * Even though driver can send pause frame with 560 * arbitrary pause time, controller does not 561 * provide a way that tells how many free RX 562 * buffers are available in controller. This 563 * limitation makes it hard to generate XON frame 564 * in time on driver side so don't enable TX flow 565 * control. 
566 */ 567 #ifdef notyet 568 if (IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) 569 cfg1 |= ET_MAC_CFG1_TXFLOW; 570 #endif 571 if (IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) 572 cfg1 |= ET_MAC_CFG1_RXFLOW; 573 } else 574 ctrl |= ET_MAC_CTRL_GHDX; 575 576 CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl); 577 CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2); 578 cfg1 |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN; 579 CSR_WRITE_4(sc, ET_MAC_CFG1, cfg1); 580 581 #define NRETRY 50 582 583 for (i = 0; i < NRETRY; ++i) { 584 cfg1 = CSR_READ_4(sc, ET_MAC_CFG1); 585 if ((cfg1 & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) == 586 (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) 587 break; 588 DELAY(100); 589 } 590 if (i == NRETRY) 591 if_printf(ifp, "can't enable RX/TX\n"); 592 sc->sc_flags |= ET_FLAG_TXRX_ENABLED; 593 594 #undef NRETRY 595 } 596 597 static int 598 et_ifmedia_upd_locked(struct ifnet *ifp) 599 { 600 struct et_softc *sc; 601 struct mii_data *mii; 602 struct mii_softc *miisc; 603 604 sc = ifp->if_softc; 605 mii = device_get_softc(sc->sc_miibus); 606 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 607 PHY_RESET(miisc); 608 return (mii_mediachg(mii)); 609 } 610 611 static int 612 et_ifmedia_upd(struct ifnet *ifp) 613 { 614 struct et_softc *sc; 615 int res; 616 617 sc = ifp->if_softc; 618 ET_LOCK(sc); 619 res = et_ifmedia_upd_locked(ifp); 620 ET_UNLOCK(sc); 621 622 return (res); 623 } 624 625 static void 626 et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 627 { 628 struct et_softc *sc; 629 struct mii_data *mii; 630 631 sc = ifp->if_softc; 632 ET_LOCK(sc); 633 if ((ifp->if_flags & IFF_UP) == 0) { 634 ET_UNLOCK(sc); 635 return; 636 } 637 638 mii = device_get_softc(sc->sc_miibus); 639 mii_pollstat(mii); 640 ifmr->ifm_active = mii->mii_media_active; 641 ifmr->ifm_status = mii->mii_media_status; 642 ET_UNLOCK(sc); 643 } 644 645 static void 646 et_stop(struct et_softc *sc) 647 { 648 struct ifnet *ifp; 649 650 ET_LOCK_ASSERT(sc); 651 652 ifp = sc->ifp; 653 callout_stop(&sc->sc_tick); 654 /* Disable interrupts. 
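 * Judging by how the driver uses this register, a set bit in ET_INTR_MASK
 * masks the corresponding source, so writing all ones below masks every
 * interrupt; et_init_locked() later re-enables the handled sources by
 * writing ~ET_INTRS.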
*/ 655 CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff); 656 657 CSR_WRITE_4(sc, ET_MAC_CFG1, CSR_READ_4(sc, ET_MAC_CFG1) & ~( 658 ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN)); 659 DELAY(100); 660 661 et_stop_rxdma(sc); 662 et_stop_txdma(sc); 663 et_stats_update(sc); 664 665 et_free_tx_ring(sc); 666 et_free_rx_ring(sc); 667 668 sc->sc_tx = 0; 669 sc->sc_tx_intr = 0; 670 sc->sc_flags &= ~ET_FLAG_TXRX_ENABLED; 671 672 sc->watchdog_timer = 0; 673 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 674 } 675 676 static int 677 et_bus_config(struct et_softc *sc) 678 { 679 uint32_t val, max_plsz; 680 uint16_t ack_latency, replay_timer; 681 682 /* 683 * Test whether EEPROM is valid 684 * NOTE: Read twice to get the correct value 685 */ 686 pci_read_config(sc->dev, ET_PCIR_EEPROM_STATUS, 1); 687 val = pci_read_config(sc->dev, ET_PCIR_EEPROM_STATUS, 1); 688 if (val & ET_PCIM_EEPROM_STATUS_ERROR) { 689 device_printf(sc->dev, "EEPROM status error 0x%02x\n", val); 690 return (ENXIO); 691 } 692 693 /* TODO: LED */ 694 695 if ((sc->sc_flags & ET_FLAG_PCIE) == 0) 696 return (0); 697 698 /* 699 * Configure ACK latency and replay timer according to 700 * max playload size 701 */ 702 val = pci_read_config(sc->dev, 703 sc->sc_expcap + PCIER_DEVICE_CAP, 4); 704 max_plsz = val & PCIEM_CAP_MAX_PAYLOAD; 705 706 switch (max_plsz) { 707 case ET_PCIV_DEVICE_CAPS_PLSZ_128: 708 ack_latency = ET_PCIV_ACK_LATENCY_128; 709 replay_timer = ET_PCIV_REPLAY_TIMER_128; 710 break; 711 712 case ET_PCIV_DEVICE_CAPS_PLSZ_256: 713 ack_latency = ET_PCIV_ACK_LATENCY_256; 714 replay_timer = ET_PCIV_REPLAY_TIMER_256; 715 break; 716 717 default: 718 ack_latency = pci_read_config(sc->dev, ET_PCIR_ACK_LATENCY, 2); 719 replay_timer = pci_read_config(sc->dev, 720 ET_PCIR_REPLAY_TIMER, 2); 721 device_printf(sc->dev, "ack latency %u, replay timer %u\n", 722 ack_latency, replay_timer); 723 break; 724 } 725 if (ack_latency != 0) { 726 pci_write_config(sc->dev, ET_PCIR_ACK_LATENCY, ack_latency, 2); 727 pci_write_config(sc->dev, ET_PCIR_REPLAY_TIMER, replay_timer, 728 2); 729 } 730 731 /* 732 * Set L0s and L1 latency timer to 2us 733 */ 734 val = pci_read_config(sc->dev, ET_PCIR_L0S_L1_LATENCY, 4); 735 val &= ~(PCIEM_LINK_CAP_L0S_EXIT | PCIEM_LINK_CAP_L1_EXIT); 736 /* L0s exit latency : 2us */ 737 val |= 0x00005000; 738 /* L1 exit latency : 2us */ 739 val |= 0x00028000; 740 pci_write_config(sc->dev, ET_PCIR_L0S_L1_LATENCY, val, 4); 741 742 /* 743 * Set max read request size to 2048 bytes 744 */ 745 pci_set_max_read_req(sc->dev, 2048); 746 747 return (0); 748 } 749 750 static void 751 et_get_eaddr(device_t dev, uint8_t eaddr[]) 752 { 753 uint32_t val; 754 int i; 755 756 val = pci_read_config(dev, ET_PCIR_MAC_ADDR0, 4); 757 for (i = 0; i < 4; ++i) 758 eaddr[i] = (val >> (8 * i)) & 0xff; 759 760 val = pci_read_config(dev, ET_PCIR_MAC_ADDR1, 2); 761 for (; i < ETHER_ADDR_LEN; ++i) 762 eaddr[i] = (val >> (8 * (i - 4))) & 0xff; 763 } 764 765 static void 766 et_reset(struct et_softc *sc) 767 { 768 769 CSR_WRITE_4(sc, ET_MAC_CFG1, 770 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 771 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 772 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 773 774 CSR_WRITE_4(sc, ET_SWRST, 775 ET_SWRST_TXDMA | ET_SWRST_RXDMA | 776 ET_SWRST_TXMAC | ET_SWRST_RXMAC | 777 ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC); 778 779 CSR_WRITE_4(sc, ET_MAC_CFG1, 780 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 781 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC); 782 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 783 /* Disable interrupts. 
*/ 784 CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff); 785 } 786 787 struct et_dmamap_arg { 788 bus_addr_t et_busaddr; 789 }; 790 791 static void 792 et_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) 793 { 794 struct et_dmamap_arg *ctx; 795 796 if (error) 797 return; 798 799 KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg)); 800 801 ctx = arg; 802 ctx->et_busaddr = segs->ds_addr; 803 } 804 805 static int 806 et_dma_ring_alloc(struct et_softc *sc, bus_size_t alignment, bus_size_t maxsize, 807 bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map, bus_addr_t *paddr, 808 const char *msg) 809 { 810 struct et_dmamap_arg ctx; 811 int error; 812 813 error = bus_dma_tag_create(sc->sc_dtag, alignment, 0, BUS_SPACE_MAXADDR, 814 BUS_SPACE_MAXADDR, NULL, NULL, maxsize, 1, maxsize, 0, NULL, NULL, 815 tag); 816 if (error != 0) { 817 device_printf(sc->dev, "could not create %s dma tag\n", msg); 818 return (error); 819 } 820 /* Allocate DMA'able memory for ring. */ 821 error = bus_dmamem_alloc(*tag, (void **)ring, 822 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map); 823 if (error != 0) { 824 device_printf(sc->dev, 825 "could not allocate DMA'able memory for %s\n", msg); 826 return (error); 827 } 828 /* Load the address of the ring. */ 829 ctx.et_busaddr = 0; 830 error = bus_dmamap_load(*tag, *map, *ring, maxsize, et_dma_map_addr, 831 &ctx, BUS_DMA_NOWAIT); 832 if (error != 0) { 833 device_printf(sc->dev, 834 "could not load DMA'able memory for %s\n", msg); 835 return (error); 836 } 837 *paddr = ctx.et_busaddr; 838 return (0); 839 } 840 841 static void 842 et_dma_ring_free(struct et_softc *sc, bus_dma_tag_t *tag, uint8_t **ring, 843 bus_dmamap_t *map) 844 { 845 846 if (*map != NULL) 847 bus_dmamap_unload(*tag, *map); 848 if (*map != NULL && *ring != NULL) { 849 bus_dmamem_free(*tag, *ring, *map); 850 *ring = NULL; 851 *map = NULL; 852 } 853 if (*tag) { 854 bus_dma_tag_destroy(*tag); 855 *tag = NULL; 856 } 857 } 858 859 static int 860 et_dma_alloc(struct et_softc *sc) 861 { 862 struct et_txdesc_ring *tx_ring; 863 struct et_rxdesc_ring *rx_ring; 864 struct et_rxstat_ring *rxst_ring; 865 struct et_rxstatus_data *rxsd; 866 struct et_rxbuf_data *rbd; 867 struct et_txbuf_data *tbd; 868 struct et_txstatus_data *txsd; 869 int i, error; 870 871 error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0, 872 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 873 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, 874 &sc->sc_dtag); 875 if (error != 0) { 876 device_printf(sc->dev, "could not allocate parent dma tag\n"); 877 return (error); 878 } 879 880 /* TX ring. */ 881 tx_ring = &sc->sc_tx_ring; 882 error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_TX_RING_SIZE, 883 &tx_ring->tr_dtag, (uint8_t **)&tx_ring->tr_desc, &tx_ring->tr_dmap, 884 &tx_ring->tr_paddr, "TX ring"); 885 if (error) 886 return (error); 887 888 /* TX status block. */ 889 txsd = &sc->sc_tx_status; 890 error = et_dma_ring_alloc(sc, ET_STATUS_ALIGN, sizeof(uint32_t), 891 &txsd->txsd_dtag, (uint8_t **)&txsd->txsd_status, &txsd->txsd_dmap, 892 &txsd->txsd_paddr, "TX status block"); 893 if (error) 894 return (error); 895 896 /* RX ring 0, used as to recive small sized frames. 
*/ 897 rx_ring = &sc->sc_rx_ring[0]; 898 error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RX_RING_SIZE, 899 &rx_ring->rr_dtag, (uint8_t **)&rx_ring->rr_desc, &rx_ring->rr_dmap, 900 &rx_ring->rr_paddr, "RX ring 0"); 901 rx_ring->rr_posreg = ET_RX_RING0_POS; 902 if (error) 903 return (error); 904 905 /* RX ring 1, used as to store normal sized frames. */ 906 rx_ring = &sc->sc_rx_ring[1]; 907 error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RX_RING_SIZE, 908 &rx_ring->rr_dtag, (uint8_t **)&rx_ring->rr_desc, &rx_ring->rr_dmap, 909 &rx_ring->rr_paddr, "RX ring 1"); 910 rx_ring->rr_posreg = ET_RX_RING1_POS; 911 if (error) 912 return (error); 913 914 /* RX stat ring. */ 915 rxst_ring = &sc->sc_rxstat_ring; 916 error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RXSTAT_RING_SIZE, 917 &rxst_ring->rsr_dtag, (uint8_t **)&rxst_ring->rsr_stat, 918 &rxst_ring->rsr_dmap, &rxst_ring->rsr_paddr, "RX stat ring"); 919 if (error) 920 return (error); 921 922 /* RX status block. */ 923 rxsd = &sc->sc_rx_status; 924 error = et_dma_ring_alloc(sc, ET_STATUS_ALIGN, 925 sizeof(struct et_rxstatus), &rxsd->rxsd_dtag, 926 (uint8_t **)&rxsd->rxsd_status, &rxsd->rxsd_dmap, 927 &rxsd->rxsd_paddr, "RX status block"); 928 if (error) 929 return (error); 930 931 /* Create parent DMA tag for mbufs. */ 932 error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0, 933 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 934 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, 935 &sc->sc_mbuf_dtag); 936 if (error != 0) { 937 device_printf(sc->dev, 938 "could not allocate parent dma tag for mbuf\n"); 939 return (error); 940 } 941 942 /* Create DMA tag for mini RX mbufs to use RX ring 0. */ 943 error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0, 944 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MHLEN, 1, 945 MHLEN, 0, NULL, NULL, &sc->sc_rx_mini_tag); 946 if (error) { 947 device_printf(sc->dev, "could not create mini RX dma tag\n"); 948 return (error); 949 } 950 951 /* Create DMA tag for standard RX mbufs to use RX ring 1. */ 952 error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0, 953 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, 954 MCLBYTES, 0, NULL, NULL, &sc->sc_rx_tag); 955 if (error) { 956 device_printf(sc->dev, "could not create RX dma tag\n"); 957 return (error); 958 } 959 960 /* Create DMA tag for TX mbufs. */ 961 error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0, 962 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 963 MCLBYTES * ET_NSEG_MAX, ET_NSEG_MAX, MCLBYTES, 0, NULL, NULL, 964 &sc->sc_tx_tag); 965 if (error) { 966 device_printf(sc->dev, "could not create TX dma tag\n"); 967 return (error); 968 } 969 970 /* Initialize RX ring 0. */ 971 rbd = &sc->sc_rx_data[0]; 972 rbd->rbd_bufsize = ET_RXDMA_CTRL_RING0_128; 973 rbd->rbd_newbuf = et_newbuf_hdr; 974 rbd->rbd_discard = et_rxbuf_discard; 975 rbd->rbd_softc = sc; 976 rbd->rbd_ring = &sc->sc_rx_ring[0]; 977 /* Create DMA maps for mini RX buffers, ring 0. */ 978 for (i = 0; i < ET_RX_NDESC; i++) { 979 error = bus_dmamap_create(sc->sc_rx_mini_tag, 0, 980 &rbd->rbd_buf[i].rb_dmap); 981 if (error) { 982 device_printf(sc->dev, 983 "could not create DMA map for mini RX mbufs\n"); 984 return (error); 985 } 986 } 987 988 /* Create a spare DMA map for mini RX buffers, ring 0. */ 989 error = bus_dmamap_create(sc->sc_rx_mini_tag, 0, 990 &sc->sc_rx_mini_sparemap); 991 if (error) { 992 device_printf(sc->dev, 993 "could not create spare DMA map for mini RX mbuf\n"); 994 return (error); 995 } 996 997 /* Initialize RX ring 1. 
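 * Unlike ring 0 above, which is refilled with small header mbufs via
 * et_newbuf_hdr() for short frames, ring 1 is refilled with full 2KB
 * (MCLBYTES) clusters through et_newbuf_cluster().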
 */
        rbd = &sc->sc_rx_data[1];
        rbd->rbd_bufsize = ET_RXDMA_CTRL_RING1_2048;
        rbd->rbd_newbuf = et_newbuf_cluster;
        rbd->rbd_discard = et_rxbuf_discard;
        rbd->rbd_softc = sc;
        rbd->rbd_ring = &sc->sc_rx_ring[1];
        /* Create DMA maps for standard RX buffers, ring 1. */
        for (i = 0; i < ET_RX_NDESC; i++) {
                error = bus_dmamap_create(sc->sc_rx_tag, 0,
                    &rbd->rbd_buf[i].rb_dmap);
                if (error) {
                        device_printf(sc->dev,
                            "could not create DMA map for RX mbufs\n");
                        return (error);
                }
        }

        /* Create a spare DMA map for standard RX buffers, ring 1. */
        error = bus_dmamap_create(sc->sc_rx_tag, 0, &sc->sc_rx_sparemap);
        if (error) {
                device_printf(sc->dev,
                    "could not create spare DMA map for RX mbuf\n");
                return (error);
        }

        /* Create DMA maps for TX buffers. */
        tbd = &sc->sc_tx_data;
        for (i = 0; i < ET_TX_NDESC; i++) {
                error = bus_dmamap_create(sc->sc_tx_tag, 0,
                    &tbd->tbd_buf[i].tb_dmap);
                if (error) {
                        device_printf(sc->dev,
                            "could not create DMA map for TX mbufs\n");
                        return (error);
                }
        }

        return (0);
}

static void
et_dma_free(struct et_softc *sc)
{
        struct et_txdesc_ring *tx_ring;
        struct et_rxdesc_ring *rx_ring;
        struct et_txstatus_data *txsd;
        struct et_rxstat_ring *rxst_ring;
        struct et_rxstatus_data *rxsd;
        struct et_rxbuf_data *rbd;
        struct et_txbuf_data *tbd;
        int i;

        /* Destroy DMA maps for mini RX buffers, ring 0. */
        rbd = &sc->sc_rx_data[0];
        for (i = 0; i < ET_RX_NDESC; i++) {
                if (rbd->rbd_buf[i].rb_dmap) {
                        bus_dmamap_destroy(sc->sc_rx_mini_tag,
                            rbd->rbd_buf[i].rb_dmap);
                        rbd->rbd_buf[i].rb_dmap = NULL;
                }
        }
        if (sc->sc_rx_mini_sparemap) {
                bus_dmamap_destroy(sc->sc_rx_mini_tag, sc->sc_rx_mini_sparemap);
                sc->sc_rx_mini_sparemap = NULL;
        }
        if (sc->sc_rx_mini_tag) {
                bus_dma_tag_destroy(sc->sc_rx_mini_tag);
                sc->sc_rx_mini_tag = NULL;
        }

        /* Destroy DMA maps for standard RX buffers, ring 1. */
        rbd = &sc->sc_rx_data[1];
        for (i = 0; i < ET_RX_NDESC; i++) {
                if (rbd->rbd_buf[i].rb_dmap) {
                        bus_dmamap_destroy(sc->sc_rx_tag,
                            rbd->rbd_buf[i].rb_dmap);
                        rbd->rbd_buf[i].rb_dmap = NULL;
                }
        }
        if (sc->sc_rx_sparemap) {
                bus_dmamap_destroy(sc->sc_rx_tag, sc->sc_rx_sparemap);
                sc->sc_rx_sparemap = NULL;
        }
        if (sc->sc_rx_tag) {
                bus_dma_tag_destroy(sc->sc_rx_tag);
                sc->sc_rx_tag = NULL;
        }

        /* Destroy DMA maps for TX buffers. */
        tbd = &sc->sc_tx_data;
        for (i = 0; i < ET_TX_NDESC; i++) {
                if (tbd->tbd_buf[i].tb_dmap) {
                        bus_dmamap_destroy(sc->sc_tx_tag,
                            tbd->tbd_buf[i].tb_dmap);
                        tbd->tbd_buf[i].tb_dmap = NULL;
                }
        }
        if (sc->sc_tx_tag) {
                bus_dma_tag_destroy(sc->sc_tx_tag);
                sc->sc_tx_tag = NULL;
        }

        /* Destroy mini RX ring, ring 0. */
        rx_ring = &sc->sc_rx_ring[0];
        et_dma_ring_free(sc, &rx_ring->rr_dtag, (void *)&rx_ring->rr_desc,
            &rx_ring->rr_dmap);
        /* Destroy standard RX ring, ring 1. */
        rx_ring = &sc->sc_rx_ring[1];
        et_dma_ring_free(sc, &rx_ring->rr_dtag, (void *)&rx_ring->rr_desc,
            &rx_ring->rr_dmap);
        /* Destroy RX stat ring. */
        rxst_ring = &sc->sc_rxstat_ring;
        et_dma_ring_free(sc, &rxst_ring->rsr_dtag, (void *)&rxst_ring->rsr_stat,
            &rxst_ring->rsr_dmap);
        /* Destroy RX status block. */
        rxsd = &sc->sc_rx_status;
        et_dma_ring_free(sc, &rxsd->rxsd_dtag, (void *)&rxsd->rxsd_status,
            &rxsd->rxsd_dmap);
        /* Destroy TX ring. */
        tx_ring = &sc->sc_tx_ring;
        et_dma_ring_free(sc, &tx_ring->tr_dtag, (void *)&tx_ring->tr_desc,
            &tx_ring->tr_dmap);
        /* Destroy TX status block. */
        txsd = &sc->sc_tx_status;
        et_dma_ring_free(sc, &txsd->txsd_dtag, (void *)&txsd->txsd_status,
            &txsd->txsd_dmap);

        /* Destroy the parent tag. */
        if (sc->sc_dtag) {
                bus_dma_tag_destroy(sc->sc_dtag);
                sc->sc_dtag = NULL;
        }
}

static void
et_chip_attach(struct et_softc *sc)
{
        uint32_t val;

        /*
         * Perform minimal initialization
         */

        /* Disable loopback */
        CSR_WRITE_4(sc, ET_LOOPBACK, 0);

        /* Reset MAC */
        CSR_WRITE_4(sc, ET_MAC_CFG1,
            ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC |
            ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC |
            ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST);

        /*
         * Setup half duplex mode
         */
        val = (10 << ET_MAC_HDX_ALT_BEB_TRUNC_SHIFT) |
            (15 << ET_MAC_HDX_REXMIT_MAX_SHIFT) |
            (55 << ET_MAC_HDX_COLLWIN_SHIFT) |
            ET_MAC_HDX_EXC_DEFER;
        CSR_WRITE_4(sc, ET_MAC_HDX, val);

        /* Clear MAC control */
        CSR_WRITE_4(sc, ET_MAC_CTRL, 0);

        /* Reset MII */
        CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST);

        /* Bring MAC out of reset state */
        CSR_WRITE_4(sc, ET_MAC_CFG1, 0);

        /* Enable memory controllers */
        CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE);
}

static void
et_intr(void *xsc)
{
        struct et_softc *sc;
        struct ifnet *ifp;
        uint32_t status;

        sc = xsc;
        ET_LOCK(sc);
        ifp = sc->ifp;
        if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
                goto done;

        status = CSR_READ_4(sc, ET_INTR_STATUS);
        if ((status & ET_INTRS) == 0)
                goto done;

        /* Disable further interrupts. */
        CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff);

        if (status & (ET_INTR_RXDMA_ERROR | ET_INTR_TXDMA_ERROR)) {
                device_printf(sc->dev, "DMA error(0x%08x) -- resetting\n",
                    status);
                ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
                et_init_locked(sc);
                ET_UNLOCK(sc);
                return;
        }
        if (status & ET_INTR_RXDMA)
                et_rxeof(sc);
        if (status & (ET_INTR_TXDMA | ET_INTR_TIMER))
                et_txeof(sc);
        if (status & ET_INTR_TIMER)
                CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer);
        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                CSR_WRITE_4(sc, ET_INTR_MASK, ~ET_INTRS);
                if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
                        et_start_locked(ifp);
        }
done:
        ET_UNLOCK(sc);
}

static void
et_init_locked(struct et_softc *sc)
{
        struct ifnet *ifp;
        int error;

        ET_LOCK_ASSERT(sc);

        ifp = sc->ifp;
        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                return;

        et_stop(sc);
        et_reset(sc);

        et_init_tx_ring(sc);
        error = et_init_rx_ring(sc);
        if (error)
                return;

        error = et_chip_init(sc);
        if (error)
                goto fail;

        /*
         * Start TX/RX DMA engine
         */
        error = et_start_rxdma(sc);
        if (error)
                return;

        error = et_start_txdma(sc);
        if (error)
                return;

        /* Enable interrupts.
*/ 1251 CSR_WRITE_4(sc, ET_INTR_MASK, ~ET_INTRS); 1252 1253 CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer); 1254 1255 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1256 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1257 1258 sc->sc_flags &= ~ET_FLAG_LINK; 1259 et_ifmedia_upd_locked(ifp); 1260 1261 callout_reset(&sc->sc_tick, hz, et_tick, sc); 1262 1263 fail: 1264 if (error) 1265 et_stop(sc); 1266 } 1267 1268 static void 1269 et_init(void *xsc) 1270 { 1271 struct et_softc *sc = xsc; 1272 1273 ET_LOCK(sc); 1274 et_init_locked(sc); 1275 ET_UNLOCK(sc); 1276 } 1277 1278 static int 1279 et_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1280 { 1281 struct et_softc *sc; 1282 struct mii_data *mii; 1283 struct ifreq *ifr; 1284 int error, mask, max_framelen; 1285 1286 sc = ifp->if_softc; 1287 ifr = (struct ifreq *)data; 1288 error = 0; 1289 1290 /* XXX LOCKSUSED */ 1291 switch (cmd) { 1292 case SIOCSIFFLAGS: 1293 ET_LOCK(sc); 1294 if (ifp->if_flags & IFF_UP) { 1295 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1296 if ((ifp->if_flags ^ sc->sc_if_flags) & 1297 (IFF_ALLMULTI | IFF_PROMISC | IFF_BROADCAST)) 1298 et_setmulti(sc); 1299 } else { 1300 et_init_locked(sc); 1301 } 1302 } else { 1303 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1304 et_stop(sc); 1305 } 1306 sc->sc_if_flags = ifp->if_flags; 1307 ET_UNLOCK(sc); 1308 break; 1309 1310 case SIOCSIFMEDIA: 1311 case SIOCGIFMEDIA: 1312 mii = device_get_softc(sc->sc_miibus); 1313 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 1314 break; 1315 1316 case SIOCADDMULTI: 1317 case SIOCDELMULTI: 1318 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1319 ET_LOCK(sc); 1320 et_setmulti(sc); 1321 ET_UNLOCK(sc); 1322 } 1323 break; 1324 1325 case SIOCSIFMTU: 1326 ET_LOCK(sc); 1327 #if 0 1328 if (sc->sc_flags & ET_FLAG_JUMBO) 1329 max_framelen = ET_JUMBO_FRAMELEN; 1330 else 1331 #endif 1332 max_framelen = MCLBYTES - 1; 1333 1334 if (ET_FRAMELEN(ifr->ifr_mtu) > max_framelen) { 1335 error = EOPNOTSUPP; 1336 ET_UNLOCK(sc); 1337 break; 1338 } 1339 1340 if (ifp->if_mtu != ifr->ifr_mtu) { 1341 ifp->if_mtu = ifr->ifr_mtu; 1342 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1343 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1344 et_init_locked(sc); 1345 } 1346 } 1347 ET_UNLOCK(sc); 1348 break; 1349 1350 case SIOCSIFCAP: 1351 ET_LOCK(sc); 1352 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1353 if ((mask & IFCAP_TXCSUM) != 0 && 1354 (IFCAP_TXCSUM & ifp->if_capabilities) != 0) { 1355 ifp->if_capenable ^= IFCAP_TXCSUM; 1356 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0) 1357 ifp->if_hwassist |= ET_CSUM_FEATURES; 1358 else 1359 ifp->if_hwassist &= ~ET_CSUM_FEATURES; 1360 } 1361 ET_UNLOCK(sc); 1362 break; 1363 1364 default: 1365 error = ether_ioctl(ifp, cmd, data); 1366 break; 1367 } 1368 return (error); 1369 } 1370 1371 static void 1372 et_start_locked(struct ifnet *ifp) 1373 { 1374 struct et_softc *sc; 1375 struct mbuf *m_head = NULL; 1376 struct et_txdesc_ring *tx_ring; 1377 struct et_txbuf_data *tbd; 1378 uint32_t tx_ready_pos; 1379 int enq; 1380 1381 sc = ifp->if_softc; 1382 ET_LOCK_ASSERT(sc); 1383 1384 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1385 IFF_DRV_RUNNING || 1386 (sc->sc_flags & (ET_FLAG_LINK | ET_FLAG_TXRX_ENABLED)) != 1387 (ET_FLAG_LINK | ET_FLAG_TXRX_ENABLED)) 1388 return; 1389 1390 /* 1391 * Driver does not request TX completion interrupt for every 1392 * queued frames to prevent generating excessive interrupts. 1393 * This means driver may wait for TX completion interrupt even 1394 * though some frames were sucessfully transmitted. 
Reclaiming 1395 * transmitted frames will ensure driver see all available 1396 * descriptors. 1397 */ 1398 tbd = &sc->sc_tx_data; 1399 if (tbd->tbd_used > (ET_TX_NDESC * 2) / 3) 1400 et_txeof(sc); 1401 1402 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) { 1403 if (tbd->tbd_used + ET_NSEG_SPARE >= ET_TX_NDESC) { 1404 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1405 break; 1406 } 1407 1408 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 1409 if (m_head == NULL) 1410 break; 1411 1412 if (et_encap(sc, &m_head)) { 1413 if (m_head == NULL) { 1414 ifp->if_oerrors++; 1415 break; 1416 } 1417 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 1418 if (tbd->tbd_used > 0) 1419 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1420 break; 1421 } 1422 enq++; 1423 ETHER_BPF_MTAP(ifp, m_head); 1424 } 1425 1426 if (enq > 0) { 1427 tx_ring = &sc->sc_tx_ring; 1428 bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap, 1429 BUS_DMASYNC_PREWRITE); 1430 tx_ready_pos = tx_ring->tr_ready_index & 1431 ET_TX_READY_POS_INDEX_MASK; 1432 if (tx_ring->tr_ready_wrap) 1433 tx_ready_pos |= ET_TX_READY_POS_WRAP; 1434 CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos); 1435 sc->watchdog_timer = 5; 1436 } 1437 } 1438 1439 static void 1440 et_start(struct ifnet *ifp) 1441 { 1442 struct et_softc *sc; 1443 1444 sc = ifp->if_softc; 1445 ET_LOCK(sc); 1446 et_start_locked(ifp); 1447 ET_UNLOCK(sc); 1448 } 1449 1450 static int 1451 et_watchdog(struct et_softc *sc) 1452 { 1453 uint32_t status; 1454 1455 ET_LOCK_ASSERT(sc); 1456 1457 if (sc->watchdog_timer == 0 || --sc->watchdog_timer) 1458 return (0); 1459 1460 bus_dmamap_sync(sc->sc_tx_status.txsd_dtag, sc->sc_tx_status.txsd_dmap, 1461 BUS_DMASYNC_POSTREAD); 1462 status = le32toh(*(sc->sc_tx_status.txsd_status)); 1463 if_printf(sc->ifp, "watchdog timed out (0x%08x) -- resetting\n", 1464 status); 1465 1466 sc->ifp->if_oerrors++; 1467 sc->ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1468 et_init_locked(sc); 1469 return (EJUSTRETURN); 1470 } 1471 1472 static int 1473 et_stop_rxdma(struct et_softc *sc) 1474 { 1475 1476 CSR_WRITE_4(sc, ET_RXDMA_CTRL, 1477 ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE); 1478 1479 DELAY(5); 1480 if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) { 1481 if_printf(sc->ifp, "can't stop RX DMA engine\n"); 1482 return (ETIMEDOUT); 1483 } 1484 return (0); 1485 } 1486 1487 static int 1488 et_stop_txdma(struct et_softc *sc) 1489 { 1490 1491 CSR_WRITE_4(sc, ET_TXDMA_CTRL, 1492 ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT); 1493 return (0); 1494 } 1495 1496 static void 1497 et_free_tx_ring(struct et_softc *sc) 1498 { 1499 struct et_txdesc_ring *tx_ring; 1500 struct et_txbuf_data *tbd; 1501 struct et_txbuf *tb; 1502 int i; 1503 1504 tbd = &sc->sc_tx_data; 1505 tx_ring = &sc->sc_tx_ring; 1506 for (i = 0; i < ET_TX_NDESC; ++i) { 1507 tb = &tbd->tbd_buf[i]; 1508 if (tb->tb_mbuf != NULL) { 1509 bus_dmamap_sync(sc->sc_tx_tag, tb->tb_dmap, 1510 BUS_DMASYNC_POSTWRITE); 1511 bus_dmamap_unload(sc->sc_mbuf_dtag, tb->tb_dmap); 1512 m_freem(tb->tb_mbuf); 1513 tb->tb_mbuf = NULL; 1514 } 1515 } 1516 } 1517 1518 static void 1519 et_free_rx_ring(struct et_softc *sc) 1520 { 1521 struct et_rxbuf_data *rbd; 1522 struct et_rxdesc_ring *rx_ring; 1523 struct et_rxbuf *rb; 1524 int i; 1525 1526 /* Ring 0 */ 1527 rx_ring = &sc->sc_rx_ring[0]; 1528 rbd = &sc->sc_rx_data[0]; 1529 for (i = 0; i < ET_RX_NDESC; ++i) { 1530 rb = &rbd->rbd_buf[i]; 1531 if (rb->rb_mbuf != NULL) { 1532 bus_dmamap_sync(sc->sc_rx_mini_tag, rx_ring->rr_dmap, 1533 BUS_DMASYNC_POSTREAD); 1534 bus_dmamap_unload(sc->sc_rx_mini_tag, rb->rb_dmap); 1535 
m_freem(rb->rb_mbuf); 1536 rb->rb_mbuf = NULL; 1537 } 1538 } 1539 1540 /* Ring 1 */ 1541 rx_ring = &sc->sc_rx_ring[1]; 1542 rbd = &sc->sc_rx_data[1]; 1543 for (i = 0; i < ET_RX_NDESC; ++i) { 1544 rb = &rbd->rbd_buf[i]; 1545 if (rb->rb_mbuf != NULL) { 1546 bus_dmamap_sync(sc->sc_rx_tag, rx_ring->rr_dmap, 1547 BUS_DMASYNC_POSTREAD); 1548 bus_dmamap_unload(sc->sc_rx_tag, rb->rb_dmap); 1549 m_freem(rb->rb_mbuf); 1550 rb->rb_mbuf = NULL; 1551 } 1552 } 1553 } 1554 1555 static void 1556 et_setmulti(struct et_softc *sc) 1557 { 1558 struct ifnet *ifp; 1559 uint32_t hash[4] = { 0, 0, 0, 0 }; 1560 uint32_t rxmac_ctrl, pktfilt; 1561 struct ifmultiaddr *ifma; 1562 int i, count; 1563 1564 ET_LOCK_ASSERT(sc); 1565 ifp = sc->ifp; 1566 1567 pktfilt = CSR_READ_4(sc, ET_PKTFILT); 1568 rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL); 1569 1570 pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST); 1571 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) { 1572 rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT; 1573 goto back; 1574 } 1575 1576 count = 0; 1577 if_maddr_rlock(ifp); 1578 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1579 uint32_t *hp, h; 1580 1581 if (ifma->ifma_addr->sa_family != AF_LINK) 1582 continue; 1583 1584 h = ether_crc32_be(LLADDR((struct sockaddr_dl *) 1585 ifma->ifma_addr), ETHER_ADDR_LEN); 1586 h = (h & 0x3f800000) >> 23; 1587 1588 hp = &hash[0]; 1589 if (h >= 32 && h < 64) { 1590 h -= 32; 1591 hp = &hash[1]; 1592 } else if (h >= 64 && h < 96) { 1593 h -= 64; 1594 hp = &hash[2]; 1595 } else if (h >= 96) { 1596 h -= 96; 1597 hp = &hash[3]; 1598 } 1599 *hp |= (1 << h); 1600 1601 ++count; 1602 } 1603 if_maddr_runlock(ifp); 1604 1605 for (i = 0; i < 4; ++i) 1606 CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]); 1607 1608 if (count > 0) 1609 pktfilt |= ET_PKTFILT_MCAST; 1610 rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT; 1611 back: 1612 CSR_WRITE_4(sc, ET_PKTFILT, pktfilt); 1613 CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl); 1614 } 1615 1616 static int 1617 et_chip_init(struct et_softc *sc) 1618 { 1619 struct ifnet *ifp; 1620 uint32_t rxq_end; 1621 int error, frame_len, rxmem_size; 1622 1623 ifp = sc->ifp; 1624 /* 1625 * Split 16Kbytes internal memory between TX and RX 1626 * according to frame length. 
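 * For example, with the default 1500 byte MTU the computed frame length
 * stays below 2048, so the RX side keeps ET_MEM_RXSIZE_DEFAULT and the
 * remainder of the 16KB internal memory is left for TX.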
1627 */ 1628 frame_len = ET_FRAMELEN(ifp->if_mtu); 1629 if (frame_len < 2048) { 1630 rxmem_size = ET_MEM_RXSIZE_DEFAULT; 1631 } else if (frame_len <= ET_RXMAC_CUT_THRU_FRMLEN) { 1632 rxmem_size = ET_MEM_SIZE / 2; 1633 } else { 1634 rxmem_size = ET_MEM_SIZE - 1635 roundup(frame_len + ET_MEM_TXSIZE_EX, ET_MEM_UNIT); 1636 } 1637 rxq_end = ET_QUEUE_ADDR(rxmem_size); 1638 1639 CSR_WRITE_4(sc, ET_RXQUEUE_START, ET_QUEUE_ADDR_START); 1640 CSR_WRITE_4(sc, ET_RXQUEUE_END, rxq_end); 1641 CSR_WRITE_4(sc, ET_TXQUEUE_START, rxq_end + 1); 1642 CSR_WRITE_4(sc, ET_TXQUEUE_END, ET_QUEUE_ADDR_END); 1643 1644 /* No loopback */ 1645 CSR_WRITE_4(sc, ET_LOOPBACK, 0); 1646 1647 /* Clear MSI configure */ 1648 if ((sc->sc_flags & ET_FLAG_MSI) == 0) 1649 CSR_WRITE_4(sc, ET_MSI_CFG, 0); 1650 1651 /* Disable timer */ 1652 CSR_WRITE_4(sc, ET_TIMER, 0); 1653 1654 /* Initialize MAC */ 1655 et_init_mac(sc); 1656 1657 /* Enable memory controllers */ 1658 CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE); 1659 1660 /* Initialize RX MAC */ 1661 et_init_rxmac(sc); 1662 1663 /* Initialize TX MAC */ 1664 et_init_txmac(sc); 1665 1666 /* Initialize RX DMA engine */ 1667 error = et_init_rxdma(sc); 1668 if (error) 1669 return (error); 1670 1671 /* Initialize TX DMA engine */ 1672 error = et_init_txdma(sc); 1673 if (error) 1674 return (error); 1675 1676 return (0); 1677 } 1678 1679 static void 1680 et_init_tx_ring(struct et_softc *sc) 1681 { 1682 struct et_txdesc_ring *tx_ring; 1683 struct et_txbuf_data *tbd; 1684 struct et_txstatus_data *txsd; 1685 1686 tx_ring = &sc->sc_tx_ring; 1687 bzero(tx_ring->tr_desc, ET_TX_RING_SIZE); 1688 bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap, 1689 BUS_DMASYNC_PREWRITE); 1690 1691 tbd = &sc->sc_tx_data; 1692 tbd->tbd_start_index = 0; 1693 tbd->tbd_start_wrap = 0; 1694 tbd->tbd_used = 0; 1695 1696 txsd = &sc->sc_tx_status; 1697 bzero(txsd->txsd_status, sizeof(uint32_t)); 1698 bus_dmamap_sync(txsd->txsd_dtag, txsd->txsd_dmap, 1699 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1700 } 1701 1702 static int 1703 et_init_rx_ring(struct et_softc *sc) 1704 { 1705 struct et_rxstatus_data *rxsd; 1706 struct et_rxstat_ring *rxst_ring; 1707 struct et_rxbuf_data *rbd; 1708 int i, error, n; 1709 1710 for (n = 0; n < ET_RX_NRING; ++n) { 1711 rbd = &sc->sc_rx_data[n]; 1712 for (i = 0; i < ET_RX_NDESC; ++i) { 1713 error = rbd->rbd_newbuf(rbd, i); 1714 if (error) { 1715 if_printf(sc->ifp, "%d ring %d buf, " 1716 "newbuf failed: %d\n", n, i, error); 1717 return (error); 1718 } 1719 } 1720 } 1721 1722 rxsd = &sc->sc_rx_status; 1723 bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus)); 1724 bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap, 1725 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1726 1727 rxst_ring = &sc->sc_rxstat_ring; 1728 bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE); 1729 bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap, 1730 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1731 1732 return (0); 1733 } 1734 1735 static int 1736 et_init_rxdma(struct et_softc *sc) 1737 { 1738 struct et_rxstatus_data *rxsd; 1739 struct et_rxstat_ring *rxst_ring; 1740 struct et_rxdesc_ring *rx_ring; 1741 int error; 1742 1743 error = et_stop_rxdma(sc); 1744 if (error) { 1745 if_printf(sc->ifp, "can't init RX DMA engine\n"); 1746 return (error); 1747 } 1748 1749 /* 1750 * Install RX status 1751 */ 1752 rxsd = &sc->sc_rx_status; 1753 CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr)); 1754 CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr)); 1755 1756 /* 1757 * Install RX stat ring 1758 */ 1759 
rxst_ring = &sc->sc_rxstat_ring; 1760 CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr)); 1761 CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr)); 1762 CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1); 1763 CSR_WRITE_4(sc, ET_RXSTAT_POS, 0); 1764 CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1); 1765 1766 /* Match ET_RXSTAT_POS */ 1767 rxst_ring->rsr_index = 0; 1768 rxst_ring->rsr_wrap = 0; 1769 1770 /* 1771 * Install the 2nd RX descriptor ring 1772 */ 1773 rx_ring = &sc->sc_rx_ring[1]; 1774 CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr)); 1775 CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr)); 1776 CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1); 1777 CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP); 1778 CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1); 1779 1780 /* Match ET_RX_RING1_POS */ 1781 rx_ring->rr_index = 0; 1782 rx_ring->rr_wrap = 1; 1783 1784 /* 1785 * Install the 1st RX descriptor ring 1786 */ 1787 rx_ring = &sc->sc_rx_ring[0]; 1788 CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr)); 1789 CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr)); 1790 CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1); 1791 CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP); 1792 CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1); 1793 1794 /* Match ET_RX_RING0_POS */ 1795 rx_ring->rr_index = 0; 1796 rx_ring->rr_wrap = 1; 1797 1798 /* 1799 * RX intr moderation 1800 */ 1801 CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts); 1802 CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay); 1803 1804 return (0); 1805 } 1806 1807 static int 1808 et_init_txdma(struct et_softc *sc) 1809 { 1810 struct et_txdesc_ring *tx_ring; 1811 struct et_txstatus_data *txsd; 1812 int error; 1813 1814 error = et_stop_txdma(sc); 1815 if (error) { 1816 if_printf(sc->ifp, "can't init TX DMA engine\n"); 1817 return (error); 1818 } 1819 1820 /* 1821 * Install TX descriptor ring 1822 */ 1823 tx_ring = &sc->sc_tx_ring; 1824 CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr)); 1825 CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr)); 1826 CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1); 1827 1828 /* 1829 * Install TX status 1830 */ 1831 txsd = &sc->sc_tx_status; 1832 CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr)); 1833 CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr)); 1834 1835 CSR_WRITE_4(sc, ET_TX_READY_POS, 0); 1836 1837 /* Match ET_TX_READY_POS */ 1838 tx_ring->tr_ready_index = 0; 1839 tx_ring->tr_ready_wrap = 0; 1840 1841 return (0); 1842 } 1843 1844 static void 1845 et_init_mac(struct et_softc *sc) 1846 { 1847 struct ifnet *ifp; 1848 const uint8_t *eaddr; 1849 uint32_t val; 1850 1851 /* Reset MAC */ 1852 CSR_WRITE_4(sc, ET_MAC_CFG1, 1853 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 1854 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 1855 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 1856 1857 /* 1858 * Setup inter packet gap 1859 */ 1860 val = (56 << ET_IPG_NONB2B_1_SHIFT) | 1861 (88 << ET_IPG_NONB2B_2_SHIFT) | 1862 (80 << ET_IPG_MINIFG_SHIFT) | 1863 (96 << ET_IPG_B2B_SHIFT); 1864 CSR_WRITE_4(sc, ET_IPG, val); 1865 1866 /* 1867 * Setup half duplex mode 1868 */ 1869 val = (10 << ET_MAC_HDX_ALT_BEB_TRUNC_SHIFT) | 1870 (15 << ET_MAC_HDX_REXMIT_MAX_SHIFT) | 1871 (55 << ET_MAC_HDX_COLLWIN_SHIFT) | 1872 ET_MAC_HDX_EXC_DEFER; 1873 CSR_WRITE_4(sc, ET_MAC_HDX, val); 1874 1875 /* Clear MAC control */ 1876 CSR_WRITE_4(sc, ET_MAC_CTRL, 0); 
1877 1878 /* Reset MII */ 1879 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST); 1880 1881 /* 1882 * Set MAC address 1883 */ 1884 ifp = sc->ifp; 1885 eaddr = IF_LLADDR(ifp); 1886 val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24); 1887 CSR_WRITE_4(sc, ET_MAC_ADDR1, val); 1888 val = (eaddr[0] << 16) | (eaddr[1] << 24); 1889 CSR_WRITE_4(sc, ET_MAC_ADDR2, val); 1890 1891 /* Set max frame length */ 1892 CSR_WRITE_4(sc, ET_MAX_FRMLEN, ET_FRAMELEN(ifp->if_mtu)); 1893 1894 /* Bring MAC out of reset state */ 1895 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 1896 } 1897 1898 static void 1899 et_init_rxmac(struct et_softc *sc) 1900 { 1901 struct ifnet *ifp; 1902 const uint8_t *eaddr; 1903 uint32_t val; 1904 int i; 1905 1906 /* Disable RX MAC and WOL */ 1907 CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE); 1908 1909 /* 1910 * Clear all WOL related registers 1911 */ 1912 for (i = 0; i < 3; ++i) 1913 CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0); 1914 for (i = 0; i < 20; ++i) 1915 CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0); 1916 1917 /* 1918 * Set WOL source address. XXX is this necessary? 1919 */ 1920 ifp = sc->ifp; 1921 eaddr = IF_LLADDR(ifp); 1922 val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5]; 1923 CSR_WRITE_4(sc, ET_WOL_SA_LO, val); 1924 val = (eaddr[0] << 8) | eaddr[1]; 1925 CSR_WRITE_4(sc, ET_WOL_SA_HI, val); 1926 1927 /* Clear packet filters */ 1928 CSR_WRITE_4(sc, ET_PKTFILT, 0); 1929 1930 /* No ucast filtering */ 1931 CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0); 1932 CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0); 1933 CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0); 1934 1935 if (ET_FRAMELEN(ifp->if_mtu) > ET_RXMAC_CUT_THRU_FRMLEN) { 1936 /* 1937 * In order to transmit jumbo packets greater than 1938 * ET_RXMAC_CUT_THRU_FRMLEN bytes, the FIFO between 1939 * RX MAC and RX DMA needs to be reduced in size to 1940 * (ET_MEM_SIZE - ET_MEM_TXSIZE_EX - framelen). In 1941 * order to implement this, we must use "cut through" 1942 * mode in the RX MAC, which chops packets down into 1943 * segments. In this case we selected 256 bytes, 1944 * since this is the size of the PCI-Express TLP's 1945 * that the ET1310 uses. 1946 */ 1947 val = (ET_RXMAC_SEGSZ(256) & ET_RXMAC_MC_SEGSZ_MAX_MASK) | 1948 ET_RXMAC_MC_SEGSZ_ENABLE; 1949 } else { 1950 val = 0; 1951 } 1952 CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val); 1953 1954 CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0); 1955 1956 /* Initialize RX MAC management register */ 1957 CSR_WRITE_4(sc, ET_RXMAC_MGT, 0); 1958 1959 CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0); 1960 1961 CSR_WRITE_4(sc, ET_RXMAC_MGT, 1962 ET_RXMAC_MGT_PASS_ECRC | 1963 ET_RXMAC_MGT_PASS_ELEN | 1964 ET_RXMAC_MGT_PASS_ETRUNC | 1965 ET_RXMAC_MGT_CHECK_PKT); 1966 1967 /* 1968 * Configure runt filtering (may not work on certain chip generation) 1969 */ 1970 val = (ETHER_MIN_LEN << ET_PKTFILT_MINLEN_SHIFT) & 1971 ET_PKTFILT_MINLEN_MASK; 1972 val |= ET_PKTFILT_FRAG; 1973 CSR_WRITE_4(sc, ET_PKTFILT, val); 1974 1975 /* Enable RX MAC but leave WOL disabled */ 1976 CSR_WRITE_4(sc, ET_RXMAC_CTRL, 1977 ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE); 1978 1979 /* 1980 * Setup multicast hash and allmulti/promisc mode 1981 */ 1982 et_setmulti(sc); 1983 } 1984 1985 static void 1986 et_init_txmac(struct et_softc *sc) 1987 { 1988 1989 /* Disable TX MAC and FC(?) */ 1990 CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE); 1991 1992 /* 1993 * Initialize pause time. 1994 * This register should be set before XON/XOFF frame is 1995 * sent by driver. 
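 * The pause time is programmed as zero below, presumably because TX flow
 * control is left disabled in et_miibus_statchg() (see the note there
 * about the missing RX FIFO occupancy feedback).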
1996 */ 1997 CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0 << ET_TXMAC_FLOWCTRL_CFPT_SHIFT); 1998 1999 /* Enable TX MAC but leave FC(?) diabled */ 2000 CSR_WRITE_4(sc, ET_TXMAC_CTRL, 2001 ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE); 2002 } 2003 2004 static int 2005 et_start_rxdma(struct et_softc *sc) 2006 { 2007 uint32_t val; 2008 2009 val = (sc->sc_rx_data[0].rbd_bufsize & ET_RXDMA_CTRL_RING0_SIZE_MASK) | 2010 ET_RXDMA_CTRL_RING0_ENABLE; 2011 val |= (sc->sc_rx_data[1].rbd_bufsize & ET_RXDMA_CTRL_RING1_SIZE_MASK) | 2012 ET_RXDMA_CTRL_RING1_ENABLE; 2013 2014 CSR_WRITE_4(sc, ET_RXDMA_CTRL, val); 2015 2016 DELAY(5); 2017 2018 if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) { 2019 if_printf(sc->ifp, "can't start RX DMA engine\n"); 2020 return (ETIMEDOUT); 2021 } 2022 return (0); 2023 } 2024 2025 static int 2026 et_start_txdma(struct et_softc *sc) 2027 { 2028 2029 CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT); 2030 return (0); 2031 } 2032 2033 static void 2034 et_rxeof(struct et_softc *sc) 2035 { 2036 struct et_rxstatus_data *rxsd; 2037 struct et_rxstat_ring *rxst_ring; 2038 struct et_rxbuf_data *rbd; 2039 struct et_rxdesc_ring *rx_ring; 2040 struct et_rxstat *st; 2041 struct ifnet *ifp; 2042 struct mbuf *m; 2043 uint32_t rxstat_pos, rxring_pos; 2044 uint32_t rxst_info1, rxst_info2, rxs_stat_ring; 2045 int buflen, buf_idx, npost[2], ring_idx; 2046 int rxst_index, rxst_wrap; 2047 2048 ET_LOCK_ASSERT(sc); 2049 2050 ifp = sc->ifp; 2051 rxsd = &sc->sc_rx_status; 2052 rxst_ring = &sc->sc_rxstat_ring; 2053 2054 if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0) 2055 return; 2056 2057 bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap, 2058 BUS_DMASYNC_POSTREAD); 2059 bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap, 2060 BUS_DMASYNC_POSTREAD); 2061 2062 npost[0] = npost[1] = 0; 2063 rxs_stat_ring = le32toh(rxsd->rxsd_status->rxs_stat_ring); 2064 rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0; 2065 rxst_index = (rxs_stat_ring & ET_RXS_STATRING_INDEX_MASK) >> 2066 ET_RXS_STATRING_INDEX_SHIFT; 2067 2068 while (rxst_index != rxst_ring->rsr_index || 2069 rxst_wrap != rxst_ring->rsr_wrap) { 2070 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 2071 break; 2072 2073 MPASS(rxst_ring->rsr_index < ET_RX_NSTAT); 2074 st = &rxst_ring->rsr_stat[rxst_ring->rsr_index]; 2075 rxst_info1 = le32toh(st->rxst_info1); 2076 rxst_info2 = le32toh(st->rxst_info2); 2077 buflen = (rxst_info2 & ET_RXST_INFO2_LEN_MASK) >> 2078 ET_RXST_INFO2_LEN_SHIFT; 2079 buf_idx = (rxst_info2 & ET_RXST_INFO2_BUFIDX_MASK) >> 2080 ET_RXST_INFO2_BUFIDX_SHIFT; 2081 ring_idx = (rxst_info2 & ET_RXST_INFO2_RINGIDX_MASK) >> 2082 ET_RXST_INFO2_RINGIDX_SHIFT; 2083 2084 if (++rxst_ring->rsr_index == ET_RX_NSTAT) { 2085 rxst_ring->rsr_index = 0; 2086 rxst_ring->rsr_wrap ^= 1; 2087 } 2088 rxstat_pos = rxst_ring->rsr_index & ET_RXSTAT_POS_INDEX_MASK; 2089 if (rxst_ring->rsr_wrap) 2090 rxstat_pos |= ET_RXSTAT_POS_WRAP; 2091 CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos); 2092 2093 if (ring_idx >= ET_RX_NRING) { 2094 ifp->if_ierrors++; 2095 if_printf(ifp, "invalid ring index %d\n", ring_idx); 2096 continue; 2097 } 2098 if (buf_idx >= ET_RX_NDESC) { 2099 ifp->if_ierrors++; 2100 if_printf(ifp, "invalid buf index %d\n", buf_idx); 2101 continue; 2102 } 2103 2104 rbd = &sc->sc_rx_data[ring_idx]; 2105 m = rbd->rbd_buf[buf_idx].rb_mbuf; 2106 if ((rxst_info1 & ET_RXST_INFO1_OK) == 0){ 2107 /* Discard errored frame. 
static void
et_rxeof(struct et_softc *sc)
{
	struct et_rxstatus_data *rxsd;
	struct et_rxstat_ring *rxst_ring;
	struct et_rxbuf_data *rbd;
	struct et_rxdesc_ring *rx_ring;
	struct et_rxstat *st;
	struct ifnet *ifp;
	struct mbuf *m;
	uint32_t rxstat_pos, rxring_pos;
	uint32_t rxst_info1, rxst_info2, rxs_stat_ring;
	int buflen, buf_idx, npost[2], ring_idx;
	int rxst_index, rxst_wrap;

	ET_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	rxsd = &sc->sc_rx_status;
	rxst_ring = &sc->sc_rxstat_ring;

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
	    BUS_DMASYNC_POSTREAD);
	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
	    BUS_DMASYNC_POSTREAD);

	npost[0] = npost[1] = 0;
	rxs_stat_ring = le32toh(rxsd->rxsd_status->rxs_stat_ring);
	rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0;
	rxst_index = (rxs_stat_ring & ET_RXS_STATRING_INDEX_MASK) >>
	    ET_RXS_STATRING_INDEX_SHIFT;

	while (rxst_index != rxst_ring->rsr_index ||
	    rxst_wrap != rxst_ring->rsr_wrap) {
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;

		MPASS(rxst_ring->rsr_index < ET_RX_NSTAT);
		st = &rxst_ring->rsr_stat[rxst_ring->rsr_index];
		rxst_info1 = le32toh(st->rxst_info1);
		rxst_info2 = le32toh(st->rxst_info2);
		buflen = (rxst_info2 & ET_RXST_INFO2_LEN_MASK) >>
		    ET_RXST_INFO2_LEN_SHIFT;
		buf_idx = (rxst_info2 & ET_RXST_INFO2_BUFIDX_MASK) >>
		    ET_RXST_INFO2_BUFIDX_SHIFT;
		ring_idx = (rxst_info2 & ET_RXST_INFO2_RINGIDX_MASK) >>
		    ET_RXST_INFO2_RINGIDX_SHIFT;

		if (++rxst_ring->rsr_index == ET_RX_NSTAT) {
			rxst_ring->rsr_index = 0;
			rxst_ring->rsr_wrap ^= 1;
		}
		rxstat_pos = rxst_ring->rsr_index & ET_RXSTAT_POS_INDEX_MASK;
		if (rxst_ring->rsr_wrap)
			rxstat_pos |= ET_RXSTAT_POS_WRAP;
		CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos);

		if (ring_idx >= ET_RX_NRING) {
			ifp->if_ierrors++;
			if_printf(ifp, "invalid ring index %d\n", ring_idx);
			continue;
		}
		if (buf_idx >= ET_RX_NDESC) {
			ifp->if_ierrors++;
			if_printf(ifp, "invalid buf index %d\n", buf_idx);
			continue;
		}

		rbd = &sc->sc_rx_data[ring_idx];
		m = rbd->rbd_buf[buf_idx].rb_mbuf;
		if ((rxst_info1 & ET_RXST_INFO1_OK) == 0) {
			/* Discard errored frame. */
			rbd->rbd_discard(rbd, buf_idx);
		} else if (rbd->rbd_newbuf(rbd, buf_idx) != 0) {
			/* No available mbufs, discard it. */
			ifp->if_iqdrops++;
			rbd->rbd_discard(rbd, buf_idx);
		} else {
			buflen -= ETHER_CRC_LEN;
			if (buflen < ETHER_HDR_LEN) {
				m_freem(m);
				ifp->if_ierrors++;
			} else {
				m->m_pkthdr.len = m->m_len = buflen;
				m->m_pkthdr.rcvif = ifp;
				ET_UNLOCK(sc);
				ifp->if_input(ifp, m);
				ET_LOCK(sc);
			}
		}

		rx_ring = &sc->sc_rx_ring[ring_idx];
		if (buf_idx != rx_ring->rr_index) {
			if_printf(ifp,
			    "WARNING!! ring %d, buf_idx %d, rr_idx %d\n",
			    ring_idx, buf_idx, rx_ring->rr_index);
		}

		MPASS(rx_ring->rr_index < ET_RX_NDESC);
		if (++rx_ring->rr_index == ET_RX_NDESC) {
			rx_ring->rr_index = 0;
			rx_ring->rr_wrap ^= 1;
		}
		rxring_pos = rx_ring->rr_index & ET_RX_RING_POS_INDEX_MASK;
		if (rx_ring->rr_wrap)
			rxring_pos |= ET_RX_RING_POS_WRAP;
		CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos);
	}

	bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap,
	    BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap,
	    BUS_DMASYNC_PREREAD);
}
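
/*
 * Map an outgoing mbuf chain onto free TX descriptors.  The chain is
 * loaded with bus_dmamap_load_mbuf_sg(); if it has too many segments it
 * is first compacted with m_collapse().  Roughly one packet out of every
 * sc_tx_intr_nsegs segments has ET_TDCTRL2_INTR set on its last
 * descriptor, which is what keeps the TX completion interrupt rate
 * bounded.  Note that the DMA map taken from the first slot ends up
 * parked on the last slot together with the mbuf, so that et_txeof()
 * unloads the map that actually holds the mapping.
 */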
static int
et_encap(struct et_softc *sc, struct mbuf **m0)
{
	struct et_txdesc_ring *tx_ring;
	struct et_txbuf_data *tbd;
	struct et_txdesc *td;
	struct mbuf *m;
	bus_dma_segment_t segs[ET_NSEG_MAX];
	bus_dmamap_t map;
	uint32_t csum_flags, last_td_ctrl2;
	int error, i, idx, first_idx, last_idx, nsegs;

	tx_ring = &sc->sc_tx_ring;
	MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
	tbd = &sc->sc_tx_data;
	first_idx = tx_ring->tr_ready_index;
	map = tbd->tbd_buf[first_idx].tb_dmap;

	error = bus_dmamap_load_mbuf_sg(sc->sc_tx_tag, map, *m0, segs, &nsegs,
	    0);
	if (error == EFBIG) {
		m = m_collapse(*m0, M_NOWAIT, ET_NSEG_MAX);
		if (m == NULL) {
			m_freem(*m0);
			*m0 = NULL;
			return (ENOMEM);
		}
		*m0 = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_tx_tag, map, *m0, segs,
		    &nsegs, 0);
		if (error != 0) {
			m_freem(*m0);
			*m0 = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);

	/* Check for descriptor overruns. */
	if (tbd->tbd_used + nsegs > ET_TX_NDESC - 1) {
		bus_dmamap_unload(sc->sc_tx_tag, map);
		return (ENOBUFS);
	}
	bus_dmamap_sync(sc->sc_tx_tag, map, BUS_DMASYNC_PREWRITE);

	last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG;
	sc->sc_tx += nsegs;
	if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) {
		sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs;
		last_td_ctrl2 |= ET_TDCTRL2_INTR;
	}

	m = *m0;
	csum_flags = 0;
	if ((m->m_pkthdr.csum_flags & ET_CSUM_FEATURES) != 0) {
		if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0)
			csum_flags |= ET_TDCTRL2_CSUM_IP;
		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
			csum_flags |= ET_TDCTRL2_CSUM_UDP;
		else if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
			csum_flags |= ET_TDCTRL2_CSUM_TCP;
	}
	last_idx = -1;
	for (i = 0; i < nsegs; ++i) {
		idx = (first_idx + i) % ET_TX_NDESC;
		td = &tx_ring->tr_desc[idx];
		td->td_addr_hi = htole32(ET_ADDR_HI(segs[i].ds_addr));
		td->td_addr_lo = htole32(ET_ADDR_LO(segs[i].ds_addr));
		td->td_ctrl1 = htole32(segs[i].ds_len & ET_TDCTRL1_LEN_MASK);
		if (i == nsegs - 1) {
			/* Last frag */
			td->td_ctrl2 = htole32(last_td_ctrl2 | csum_flags);
			last_idx = idx;
		} else
			td->td_ctrl2 = htole32(csum_flags);

		MPASS(tx_ring->tr_ready_index < ET_TX_NDESC);
		if (++tx_ring->tr_ready_index == ET_TX_NDESC) {
			tx_ring->tr_ready_index = 0;
			tx_ring->tr_ready_wrap ^= 1;
		}
	}
	td = &tx_ring->tr_desc[first_idx];
	/* First frag */
	td->td_ctrl2 |= htole32(ET_TDCTRL2_FIRST_FRAG);

	MPASS(last_idx >= 0);
	tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap;
	tbd->tbd_buf[last_idx].tb_dmap = map;
	tbd->tbd_buf[last_idx].tb_mbuf = m;

	tbd->tbd_used += nsegs;
	MPASS(tbd->tbd_used <= ET_TX_NDESC);

	return (0);
}
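
/*
 * Reclaim transmitted descriptors.  The hardware exports its completion
 * point in ET_TX_DONE_POS as an index plus a wrap bit; the same
 * index/wrap pair is kept in software for the start of the busy region,
 * so the two can be compared directly even after the ring has wrapped.
 * Completed mbufs are unloaded and freed, the watchdog is disarmed once
 * the ring drains, and IFF_DRV_OACTIVE is cleared when enough slots are
 * free again.
 */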
static void
et_txeof(struct et_softc *sc)
{
	struct et_txdesc_ring *tx_ring;
	struct et_txbuf_data *tbd;
	struct et_txbuf *tb;
	struct ifnet *ifp;
	uint32_t tx_done;
	int end, wrap;

	ET_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	tx_ring = &sc->sc_tx_ring;
	tbd = &sc->sc_tx_data;

	if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0)
		return;

	if (tbd->tbd_used == 0)
		return;

	bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap,
	    BUS_DMASYNC_POSTWRITE);

	tx_done = CSR_READ_4(sc, ET_TX_DONE_POS);
	end = tx_done & ET_TX_DONE_POS_INDEX_MASK;
	wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 1 : 0;

	while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) {
		MPASS(tbd->tbd_start_index < ET_TX_NDESC);
		tb = &tbd->tbd_buf[tbd->tbd_start_index];
		if (tb->tb_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_tx_tag, tb->tb_dmap,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_tx_tag, tb->tb_dmap);
			m_freem(tb->tb_mbuf);
			tb->tb_mbuf = NULL;
		}

		if (++tbd->tbd_start_index == ET_TX_NDESC) {
			tbd->tbd_start_index = 0;
			tbd->tbd_start_wrap ^= 1;
		}

		MPASS(tbd->tbd_used > 0);
		tbd->tbd_used--;
	}

	if (tbd->tbd_used == 0)
		sc->watchdog_timer = 0;
	if (tbd->tbd_used + ET_NSEG_SPARE < ET_TX_NDESC)
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
}

static void
et_tick(void *xsc)
{
	struct et_softc *sc;
	struct ifnet *ifp;
	struct mii_data *mii;

	sc = xsc;
	ET_LOCK_ASSERT(sc);
	ifp = sc->ifp;
	mii = device_get_softc(sc->sc_miibus);

	mii_tick(mii);
	et_stats_update(sc);
	if (et_watchdog(sc) == EJUSTRETURN)
		return;
	callout_reset(&sc->sc_tick, hz, et_tick, sc);
}

static int
et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx)
{
	struct et_softc *sc;
	struct et_rxdesc *desc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t dmap;
	int nsegs;

	MPASS(buf_idx < ET_RX_NDESC);
	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, ETHER_ALIGN);

	sc = rbd->rbd_softc;
	rb = &rbd->rbd_buf[buf_idx];

	if (bus_dmamap_load_mbuf_sg(sc->sc_rx_tag, sc->sc_rx_sparemap, m,
	    segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	if (rb->rb_mbuf != NULL) {
		bus_dmamap_sync(sc->sc_rx_tag, rb->rb_dmap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rx_tag, rb->rb_dmap);
	}
	dmap = rb->rb_dmap;
	rb->rb_dmap = sc->sc_rx_sparemap;
	sc->sc_rx_sparemap = dmap;
	bus_dmamap_sync(sc->sc_rx_tag, rb->rb_dmap, BUS_DMASYNC_PREREAD);

	rb->rb_mbuf = m;
	desc = &rbd->rbd_ring->rr_desc[buf_idx];
	desc->rd_addr_hi = htole32(ET_ADDR_HI(segs[0].ds_addr));
	desc->rd_addr_lo = htole32(ET_ADDR_LO(segs[0].ds_addr));
	desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);
	bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap,
	    BUS_DMASYNC_PREWRITE);
	return (0);
}

static void
et_rxbuf_discard(struct et_rxbuf_data *rbd, int buf_idx)
{
	struct et_rxdesc *desc;

	desc = &rbd->rbd_ring->rr_desc[buf_idx];
	desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);
	bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap,
	    BUS_DMASYNC_PREWRITE);
}
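
/*
 * Refill a slot of the header ("mini") receive ring with a plain MHLEN
 * mbuf.  As in et_newbuf_cluster() above, the new mbuf is loaded into a
 * spare DMA map first; only on success are the maps swapped, so a failed
 * allocation or load leaves the old buffer and its mapping untouched.
 */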
static int
et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx)
{
	struct et_softc *sc;
	struct et_rxdesc *desc;
	struct et_rxbuf *rb;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t dmap;
	int nsegs;

	MPASS(buf_idx < ET_RX_NDESC);
	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MHLEN;
	m_adj(m, ETHER_ALIGN);

	sc = rbd->rbd_softc;
	rb = &rbd->rbd_buf[buf_idx];

	if (bus_dmamap_load_mbuf_sg(sc->sc_rx_mini_tag, sc->sc_rx_mini_sparemap,
	    m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	if (rb->rb_mbuf != NULL) {
		bus_dmamap_sync(sc->sc_rx_mini_tag, rb->rb_dmap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_rx_mini_tag, rb->rb_dmap);
	}
	dmap = rb->rb_dmap;
	rb->rb_dmap = sc->sc_rx_mini_sparemap;
	sc->sc_rx_mini_sparemap = dmap;
	bus_dmamap_sync(sc->sc_rx_mini_tag, rb->rb_dmap, BUS_DMASYNC_PREREAD);

	rb->rb_mbuf = m;
	desc = &rbd->rbd_ring->rr_desc[buf_idx];
	desc->rd_addr_hi = htole32(ET_ADDR_HI(segs[0].ds_addr));
	desc->rd_addr_lo = htole32(ET_ADDR_LO(segs[0].ds_addr));
	desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK);
	bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap,
	    BUS_DMASYNC_PREWRITE);
	return (0);
}

#define	ET_SYSCTL_STAT_ADD32(c, h, n, p, d)	\
	SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)
#define	ET_SYSCTL_STAT_ADD64(c, h, n, p, d)	\
	SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d)
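
/*
 * The helpers above just wrap the stock sysctl calls with OID_AUTO and
 * CTLFLAG_RD filled in; e.g.
 *	ET_SYSCTL_STAT_ADD32(ctx, children, "crc_errs",
 *	    &stats->rx_crcerrs, "CRC errors");
 * expands to
 *	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "crc_errs", CTLFLAG_RD,
 *	    &stats->rx_crcerrs, 0, "CRC errors");
 */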

/*
 * Create sysctl tree
 */
static void
et_add_sysctls(struct et_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children, *parent;
	struct sysctl_oid *tree;
	struct et_hw_stats *stats;

	ctx = device_get_sysctl_ctx(sc->dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_npkts",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, et_sysctl_rx_intr_npkts, "I",
	    "RX IM, # packets per RX interrupt");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_delay",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, et_sysctl_rx_intr_delay, "I",
	    "RX IM, RX interrupt delay (x10 usec)");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_intr_nsegs",
	    CTLFLAG_RW, &sc->sc_tx_intr_nsegs, 0,
	    "TX IM, # segments per TX interrupt");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "timer",
	    CTLFLAG_RW, &sc->sc_timer, 0, "TX timer");

	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "ET statistics");
	parent = SYSCTL_CHILDREN(tree);

	/* TX/RX statistics. */
	stats = &sc->sc_stats;
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_64", &stats->pkts_64,
	    "0 to 64 bytes frames");
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_65_127", &stats->pkts_65,
	    "65 to 127 bytes frames");
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_128_255", &stats->pkts_128,
	    "128 to 255 bytes frames");
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_256_511", &stats->pkts_256,
	    "256 to 511 bytes frames");
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_512_1023", &stats->pkts_512,
	    "512 to 1023 bytes frames");
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_1024_1518", &stats->pkts_1024,
	    "1024 to 1518 bytes frames");
	ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_1519_1522", &stats->pkts_1519,
	    "1519 to 1522 bytes frames");

	/* RX statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
	    NULL, "RX MAC statistics");
	children = SYSCTL_CHILDREN(tree);
	ET_SYSCTL_STAT_ADD64(ctx, children, "bytes",
	    &stats->rx_bytes, "Good bytes");
	ET_SYSCTL_STAT_ADD64(ctx, children, "frames",
	    &stats->rx_frames, "Good frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "crc_errs",
	    &stats->rx_crcerrs, "CRC errors");
	ET_SYSCTL_STAT_ADD64(ctx, children, "mcast_frames",
	    &stats->rx_mcast, "Multicast frames");
	ET_SYSCTL_STAT_ADD64(ctx, children, "bcast_frames",
	    &stats->rx_bcast, "Broadcast frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "control",
	    &stats->rx_control, "Control frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "pause",
	    &stats->rx_pause, "Pause frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "unknown_control",
	    &stats->rx_unknown_control, "Unknown control frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "align_errs",
	    &stats->rx_alignerrs, "Alignment errors");
	ET_SYSCTL_STAT_ADD32(ctx, children, "len_errs",
	    &stats->rx_lenerrs, "Frames with mismatched length");
	ET_SYSCTL_STAT_ADD32(ctx, children, "code_errs",
	    &stats->rx_codeerrs, "Frames with code error");
	ET_SYSCTL_STAT_ADD32(ctx, children, "cs_errs",
	    &stats->rx_cserrs, "Frames with carrier sense error");
	ET_SYSCTL_STAT_ADD32(ctx, children, "runts",
	    &stats->rx_runts, "Too short frames");
	ET_SYSCTL_STAT_ADD64(ctx, children, "oversize",
	    &stats->rx_oversize, "Oversized frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "fragments",
	    &stats->rx_fragments, "Fragmented frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "jabbers",
	    &stats->rx_jabbers, "Frames with jabber error");
	ET_SYSCTL_STAT_ADD32(ctx, children, "drop",
	    &stats->rx_drop, "Dropped frames");

	/* TX statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
	    NULL, "TX MAC statistics");
	children = SYSCTL_CHILDREN(tree);
	ET_SYSCTL_STAT_ADD64(ctx, children, "bytes",
	    &stats->tx_bytes, "Good bytes");
	ET_SYSCTL_STAT_ADD64(ctx, children, "frames",
	    &stats->tx_frames, "Good frames");
	ET_SYSCTL_STAT_ADD64(ctx, children, "mcast_frames",
	    &stats->tx_mcast, "Multicast frames");
	ET_SYSCTL_STAT_ADD64(ctx, children, "bcast_frames",
	    &stats->tx_bcast, "Broadcast frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "pause",
	    &stats->tx_pause, "Pause frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "deferred",
	    &stats->tx_deferred, "Deferred frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "excess_deferred",
	    &stats->tx_excess_deferred, "Excessively deferred frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "single_colls",
	    &stats->tx_single_colls, "Single collisions");
	ET_SYSCTL_STAT_ADD32(ctx, children, "multi_colls",
	    &stats->tx_multi_colls, "Multiple collisions");
	ET_SYSCTL_STAT_ADD32(ctx, children, "late_colls",
	    &stats->tx_late_colls, "Late collisions");
	ET_SYSCTL_STAT_ADD32(ctx, children, "excess_colls",
	    &stats->tx_excess_colls, "Excess collisions");
	ET_SYSCTL_STAT_ADD32(ctx, children, "total_colls",
	    &stats->tx_total_colls, "Total collisions");
	ET_SYSCTL_STAT_ADD32(ctx, children, "pause_honored",
	    &stats->tx_pause_honored, "Honored pause frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "drop",
	    &stats->tx_drop, "Dropped frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "jabbers",
	    &stats->tx_jabbers, "Frames with jabber errors");
	ET_SYSCTL_STAT_ADD32(ctx, children, "crc_errs",
	    &stats->tx_crcerrs, "Frames with CRC errors");
	ET_SYSCTL_STAT_ADD32(ctx, children, "control",
	    &stats->tx_control, "Control frames");
	ET_SYSCTL_STAT_ADD64(ctx, children, "oversize",
	    &stats->tx_oversize, "Oversized frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "undersize",
	    &stats->tx_undersize, "Undersized frames");
	ET_SYSCTL_STAT_ADD32(ctx, children, "fragments",
	    &stats->tx_fragments, "Fragmented frames");
}

#undef ET_SYSCTL_STAT_ADD32
#undef ET_SYSCTL_STAT_ADD64
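
/*
 * Handlers for the RX interrupt moderation knobs.  Reads return the
 * cached value; writes are validated (must be positive), stored in the
 * softc and, if the interface is running, programmed into the chip right
 * away.  With the usual newbus sysctl layout these should appear under
 * the device node, e.g. (unit 0, illustrative only):
 *
 *	sysctl dev.et.0.rx_intr_npkts=64
 *	sysctl dev.et.0.rx_intr_delay=10	(x10 usec, i.e. ~100 usec)
 */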
static int
et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS)
{
	struct et_softc *sc;
	struct ifnet *ifp;
	int error, v;

	sc = arg1;
	ifp = sc->ifp;
	v = sc->sc_rx_intr_npkts;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;
	if (v <= 0) {
		error = EINVAL;
		goto back;
	}

	if (sc->sc_rx_intr_npkts != v) {
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, v);
		sc->sc_rx_intr_npkts = v;
	}
back:
	return (error);
}

static int
et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS)
{
	struct et_softc *sc;
	struct ifnet *ifp;
	int error, v;

	sc = arg1;
	ifp = sc->ifp;
	v = sc->sc_rx_intr_delay;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error || req->newptr == NULL)
		goto back;
	if (v <= 0) {
		error = EINVAL;
		goto back;
	}

	if (sc->sc_rx_intr_delay != v) {
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			CSR_WRITE_4(sc, ET_RX_INTR_DELAY, v);
		sc->sc_rx_intr_delay = v;
	}
back:
	return (error);
}
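
/*
 * Fold the hardware MIB counters into the driver's software copies and
 * refresh the legacy ifnet counters from those totals.  Called
 * periodically from et_tick() above.
 */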
static void
et_stats_update(struct et_softc *sc)
{
	struct ifnet *ifp;
	struct et_hw_stats *stats;

	stats = &sc->sc_stats;
	stats->pkts_64 += CSR_READ_4(sc, ET_STAT_PKTS_64);
	stats->pkts_65 += CSR_READ_4(sc, ET_STAT_PKTS_65_127);
	stats->pkts_128 += CSR_READ_4(sc, ET_STAT_PKTS_128_255);
	stats->pkts_256 += CSR_READ_4(sc, ET_STAT_PKTS_256_511);
	stats->pkts_512 += CSR_READ_4(sc, ET_STAT_PKTS_512_1023);
	stats->pkts_1024 += CSR_READ_4(sc, ET_STAT_PKTS_1024_1518);
	stats->pkts_1519 += CSR_READ_4(sc, ET_STAT_PKTS_1519_1522);

	stats->rx_bytes += CSR_READ_4(sc, ET_STAT_RX_BYTES);
	stats->rx_frames += CSR_READ_4(sc, ET_STAT_RX_FRAMES);
	stats->rx_crcerrs += CSR_READ_4(sc, ET_STAT_RX_CRC_ERR);
	stats->rx_mcast += CSR_READ_4(sc, ET_STAT_RX_MCAST);
	stats->rx_bcast += CSR_READ_4(sc, ET_STAT_RX_BCAST);
	stats->rx_control += CSR_READ_4(sc, ET_STAT_RX_CTL);
	stats->rx_pause += CSR_READ_4(sc, ET_STAT_RX_PAUSE);
	stats->rx_unknown_control += CSR_READ_4(sc, ET_STAT_RX_UNKNOWN_CTL);
	stats->rx_alignerrs += CSR_READ_4(sc, ET_STAT_RX_ALIGN_ERR);
	stats->rx_lenerrs += CSR_READ_4(sc, ET_STAT_RX_LEN_ERR);
	stats->rx_codeerrs += CSR_READ_4(sc, ET_STAT_RX_CODE_ERR);
	stats->rx_cserrs += CSR_READ_4(sc, ET_STAT_RX_CS_ERR);
	stats->rx_runts += CSR_READ_4(sc, ET_STAT_RX_RUNT);
	stats->rx_oversize += CSR_READ_4(sc, ET_STAT_RX_OVERSIZE);
	stats->rx_fragments += CSR_READ_4(sc, ET_STAT_RX_FRAG);
	stats->rx_jabbers += CSR_READ_4(sc, ET_STAT_RX_JABBER);
	stats->rx_drop += CSR_READ_4(sc, ET_STAT_RX_DROP);

	stats->tx_bytes += CSR_READ_4(sc, ET_STAT_TX_BYTES);
	stats->tx_frames += CSR_READ_4(sc, ET_STAT_TX_FRAMES);
	stats->tx_mcast += CSR_READ_4(sc, ET_STAT_TX_MCAST);
	stats->tx_bcast += CSR_READ_4(sc, ET_STAT_TX_BCAST);
	stats->tx_pause += CSR_READ_4(sc, ET_STAT_TX_PAUSE);
	stats->tx_deferred += CSR_READ_4(sc, ET_STAT_TX_DEFER);
	stats->tx_excess_deferred += CSR_READ_4(sc, ET_STAT_TX_EXCESS_DEFER);
	stats->tx_single_colls += CSR_READ_4(sc, ET_STAT_TX_SINGLE_COL);
	stats->tx_multi_colls += CSR_READ_4(sc, ET_STAT_TX_MULTI_COL);
	stats->tx_late_colls += CSR_READ_4(sc, ET_STAT_TX_LATE_COL);
	stats->tx_excess_colls += CSR_READ_4(sc, ET_STAT_TX_EXCESS_COL);
	stats->tx_total_colls += CSR_READ_4(sc, ET_STAT_TX_TOTAL_COL);
	stats->tx_pause_honored += CSR_READ_4(sc, ET_STAT_TX_PAUSE_HONOR);
	stats->tx_drop += CSR_READ_4(sc, ET_STAT_TX_DROP);
	stats->tx_jabbers += CSR_READ_4(sc, ET_STAT_TX_JABBER);
	stats->tx_crcerrs += CSR_READ_4(sc, ET_STAT_TX_CRC_ERR);
	stats->tx_control += CSR_READ_4(sc, ET_STAT_TX_CTL);
	stats->tx_oversize += CSR_READ_4(sc, ET_STAT_TX_OVERSIZE);
	stats->tx_undersize += CSR_READ_4(sc, ET_STAT_TX_UNDERSIZE);
	stats->tx_fragments += CSR_READ_4(sc, ET_STAT_TX_FRAG);

	/* Update ifnet counters. */
	ifp = sc->ifp;
	ifp->if_opackets = (u_long)stats->tx_frames;
	ifp->if_collisions = stats->tx_total_colls;
	ifp->if_oerrors = stats->tx_drop + stats->tx_jabbers +
	    stats->tx_crcerrs + stats->tx_excess_deferred +
	    stats->tx_late_colls;
	ifp->if_ipackets = (u_long)stats->rx_frames;
	ifp->if_ierrors = stats->rx_crcerrs + stats->rx_alignerrs +
	    stats->rx_lenerrs + stats->rx_codeerrs + stats->rx_cserrs +
	    stats->rx_runts + stats->rx_jabbers + stats->rx_drop;
}
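
/*
 * Power management: suspend stops the interface if it is running, gates
 * the system/TX/RX clocks and forces the PHY into its COMA (low power)
 * state; resume ungates the clocks, re-enables the gigabit PHY on
 * non-Fast-Ethernet parts and reinitializes the interface if it was
 * marked up.
 */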
static int
et_suspend(device_t dev)
{
	struct et_softc *sc;
	uint32_t pmcfg;

	sc = device_get_softc(dev);
	ET_LOCK(sc);
	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		et_stop(sc);
	/* Disable all clocks and put PHY into COMA. */
	pmcfg = CSR_READ_4(sc, ET_PM);
	pmcfg &= ~(EM_PM_GIGEPHY_ENB | ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE |
	    ET_PM_RXCLK_GATE);
	pmcfg |= ET_PM_PHY_SW_COMA;
	CSR_WRITE_4(sc, ET_PM, pmcfg);
	ET_UNLOCK(sc);
	return (0);
}

static int
et_resume(device_t dev)
{
	struct et_softc *sc;
	uint32_t pmcfg;

	sc = device_get_softc(dev);
	ET_LOCK(sc);
	/* Take PHY out of COMA and enable clocks. */
	pmcfg = ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE;
	if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0)
		pmcfg |= EM_PM_GIGEPHY_ENB;
	CSR_WRITE_4(sc, ET_PM, pmcfg);
	if ((sc->ifp->if_flags & IFF_UP) != 0)
		et_init_locked(sc);
	ET_UNLOCK(sc);
	return (0);
}