1 /*- 2 * Copyright (c) 2007 Sepherosa Ziehau. All rights reserved. 3 * 4 * This code is derived from software contributed to The DragonFly Project 5 * by Sepherosa Ziehau <sepherosa@gmail.com> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * 3. Neither the name of The DragonFly Project nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific, prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * $DragonFly: src/sys/dev/netif/et/if_et.c,v 1.10 2008/05/18 07:47:14 sephe Exp $ 35 */ 36 37 #include <sys/cdefs.h> 38 __FBSDID("$FreeBSD$"); 39 40 #include <sys/param.h> 41 #include <sys/systm.h> 42 #include <sys/endian.h> 43 #include <sys/kernel.h> 44 #include <sys/bus.h> 45 #include <sys/malloc.h> 46 #include <sys/mbuf.h> 47 #include <sys/proc.h> 48 #include <sys/rman.h> 49 #include <sys/module.h> 50 #include <sys/socket.h> 51 #include <sys/sockio.h> 52 #include <sys/sysctl.h> 53 54 #include <net/ethernet.h> 55 #include <net/if.h> 56 #include <net/if_dl.h> 57 #include <net/if_types.h> 58 #include <net/bpf.h> 59 #include <net/if_arp.h> 60 #include <net/if_media.h> 61 #include <net/if_vlan_var.h> 62 63 #include <machine/bus.h> 64 65 #include <dev/mii/mii.h> 66 #include <dev/mii/miivar.h> 67 68 #include <dev/pci/pcireg.h> 69 #include <dev/pci/pcivar.h> 70 71 #include <dev/et/if_etreg.h> 72 #include <dev/et/if_etvar.h> 73 74 #include "miibus_if.h" 75 76 MODULE_DEPEND(et, pci, 1, 1, 1); 77 MODULE_DEPEND(et, ether, 1, 1, 1); 78 MODULE_DEPEND(et, miibus, 1, 1, 1); 79 80 /* Tunables. 
*/ 81 static int msi_disable = 0; 82 TUNABLE_INT("hw.et.msi_disable", &msi_disable); 83 84 #define ET_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 85 86 static int et_probe(device_t); 87 static int et_attach(device_t); 88 static int et_detach(device_t); 89 static int et_shutdown(device_t); 90 static int et_suspend(device_t); 91 static int et_resume(device_t); 92 93 static int et_miibus_readreg(device_t, int, int); 94 static int et_miibus_writereg(device_t, int, int, int); 95 static void et_miibus_statchg(device_t); 96 97 static void et_init_locked(struct et_softc *); 98 static void et_init(void *); 99 static int et_ioctl(struct ifnet *, u_long, caddr_t); 100 static void et_start_locked(struct ifnet *); 101 static void et_start(struct ifnet *); 102 static int et_watchdog(struct et_softc *); 103 static int et_ifmedia_upd_locked(struct ifnet *); 104 static int et_ifmedia_upd(struct ifnet *); 105 static void et_ifmedia_sts(struct ifnet *, struct ifmediareq *); 106 107 static void et_add_sysctls(struct et_softc *); 108 static int et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS); 109 static int et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS); 110 111 static void et_intr(void *); 112 static void et_rxeof(struct et_softc *); 113 static void et_txeof(struct et_softc *); 114 115 static int et_dma_alloc(struct et_softc *); 116 static void et_dma_free(struct et_softc *); 117 static void et_dma_map_addr(void *, bus_dma_segment_t *, int, int); 118 static int et_dma_ring_alloc(struct et_softc *, bus_size_t, bus_size_t, 119 bus_dma_tag_t *, uint8_t **, bus_dmamap_t *, bus_addr_t *, 120 const char *); 121 static void et_dma_ring_free(struct et_softc *, bus_dma_tag_t *, uint8_t **, 122 bus_dmamap_t *); 123 static void et_init_tx_ring(struct et_softc *); 124 static int et_init_rx_ring(struct et_softc *); 125 static void et_free_tx_ring(struct et_softc *); 126 static void et_free_rx_ring(struct et_softc *); 127 static int et_encap(struct et_softc *, struct mbuf **); 128 static int et_newbuf_cluster(struct et_rxbuf_data *, int); 129 static int et_newbuf_hdr(struct et_rxbuf_data *, int); 130 static void et_rxbuf_discard(struct et_rxbuf_data *, int); 131 132 static void et_stop(struct et_softc *); 133 static int et_chip_init(struct et_softc *); 134 static void et_chip_attach(struct et_softc *); 135 static void et_init_mac(struct et_softc *); 136 static void et_init_rxmac(struct et_softc *); 137 static void et_init_txmac(struct et_softc *); 138 static int et_init_rxdma(struct et_softc *); 139 static int et_init_txdma(struct et_softc *); 140 static int et_start_rxdma(struct et_softc *); 141 static int et_start_txdma(struct et_softc *); 142 static int et_stop_rxdma(struct et_softc *); 143 static int et_stop_txdma(struct et_softc *); 144 static void et_reset(struct et_softc *); 145 static int et_bus_config(struct et_softc *); 146 static void et_get_eaddr(device_t, uint8_t[]); 147 static void et_setmulti(struct et_softc *); 148 static void et_tick(void *); 149 static void et_stats_update(struct et_softc *); 150 151 static const struct et_dev { 152 uint16_t vid; 153 uint16_t did; 154 const char *desc; 155 } et_devices[] = { 156 { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310, 157 "Agere ET1310 Gigabit Ethernet" }, 158 { PCI_VENDOR_LUCENT, PCI_PRODUCT_LUCENT_ET1310_FAST, 159 "Agere ET1310 Fast Ethernet" }, 160 { 0, 0, NULL } 161 }; 162 163 static device_method_t et_methods[] = { 164 DEVMETHOD(device_probe, et_probe), 165 DEVMETHOD(device_attach, et_attach), 166 DEVMETHOD(device_detach, et_detach), 167 
DEVMETHOD(device_shutdown, et_shutdown), 168 DEVMETHOD(device_suspend, et_suspend), 169 DEVMETHOD(device_resume, et_resume), 170 171 DEVMETHOD(miibus_readreg, et_miibus_readreg), 172 DEVMETHOD(miibus_writereg, et_miibus_writereg), 173 DEVMETHOD(miibus_statchg, et_miibus_statchg), 174 175 DEVMETHOD_END 176 }; 177 178 static driver_t et_driver = { 179 "et", 180 et_methods, 181 sizeof(struct et_softc) 182 }; 183 184 static devclass_t et_devclass; 185 186 DRIVER_MODULE(et, pci, et_driver, et_devclass, 0, 0); 187 DRIVER_MODULE(miibus, et, miibus_driver, miibus_devclass, 0, 0); 188 189 static int et_rx_intr_npkts = 32; 190 static int et_rx_intr_delay = 20; /* x10 usec */ 191 static int et_tx_intr_nsegs = 126; 192 static uint32_t et_timer = 1000 * 1000 * 1000; /* nanosec */ 193 194 TUNABLE_INT("hw.et.timer", &et_timer); 195 TUNABLE_INT("hw.et.rx_intr_npkts", &et_rx_intr_npkts); 196 TUNABLE_INT("hw.et.rx_intr_delay", &et_rx_intr_delay); 197 TUNABLE_INT("hw.et.tx_intr_nsegs", &et_tx_intr_nsegs); 198 199 static int 200 et_probe(device_t dev) 201 { 202 const struct et_dev *d; 203 uint16_t did, vid; 204 205 vid = pci_get_vendor(dev); 206 did = pci_get_device(dev); 207 208 for (d = et_devices; d->desc != NULL; ++d) { 209 if (vid == d->vid && did == d->did) { 210 device_set_desc(dev, d->desc); 211 return (BUS_PROBE_DEFAULT); 212 } 213 } 214 return (ENXIO); 215 } 216 217 static int 218 et_attach(device_t dev) 219 { 220 struct et_softc *sc; 221 struct ifnet *ifp; 222 uint8_t eaddr[ETHER_ADDR_LEN]; 223 uint32_t pmcfg; 224 int cap, error, msic; 225 226 sc = device_get_softc(dev); 227 sc->dev = dev; 228 mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 229 MTX_DEF); 230 callout_init_mtx(&sc->sc_tick, &sc->sc_mtx, 0); 231 232 ifp = sc->ifp = if_alloc(IFT_ETHER); 233 if (ifp == NULL) { 234 device_printf(dev, "can not if_alloc()\n"); 235 error = ENOSPC; 236 goto fail; 237 } 238 239 /* 240 * Initialize tunables 241 */ 242 sc->sc_rx_intr_npkts = et_rx_intr_npkts; 243 sc->sc_rx_intr_delay = et_rx_intr_delay; 244 sc->sc_tx_intr_nsegs = et_tx_intr_nsegs; 245 sc->sc_timer = et_timer; 246 247 /* Enable bus mastering */ 248 pci_enable_busmaster(dev); 249 250 /* 251 * Allocate IO memory 252 */ 253 sc->sc_mem_rid = PCIR_BAR(0); 254 sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 255 &sc->sc_mem_rid, RF_ACTIVE); 256 if (sc->sc_mem_res == NULL) { 257 device_printf(dev, "can't allocate IO memory\n"); 258 return (ENXIO); 259 } 260 261 msic = 0; 262 if (pci_find_cap(dev, PCIY_EXPRESS, &cap) == 0) { 263 sc->sc_expcap = cap; 264 sc->sc_flags |= ET_FLAG_PCIE; 265 msic = pci_msi_count(dev); 266 if (bootverbose) 267 device_printf(dev, "MSI count: %d\n", msic); 268 } 269 if (msic > 0 && msi_disable == 0) { 270 msic = 1; 271 if (pci_alloc_msi(dev, &msic) == 0) { 272 if (msic == 1) { 273 device_printf(dev, "Using %d MSI message\n", 274 msic); 275 sc->sc_flags |= ET_FLAG_MSI; 276 } else 277 pci_release_msi(dev); 278 } 279 } 280 281 /* 282 * Allocate IRQ 283 */ 284 if ((sc->sc_flags & ET_FLAG_MSI) == 0) { 285 sc->sc_irq_rid = 0; 286 sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, 287 &sc->sc_irq_rid, RF_SHAREABLE | RF_ACTIVE); 288 } else { 289 sc->sc_irq_rid = 1; 290 sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, 291 &sc->sc_irq_rid, RF_ACTIVE); 292 } 293 if (sc->sc_irq_res == NULL) { 294 device_printf(dev, "can't allocate irq\n"); 295 error = ENXIO; 296 goto fail; 297 } 298 299 if (pci_get_device(dev) == PCI_PRODUCT_LUCENT_ET1310_FAST) 300 sc->sc_flags |= ET_FLAG_FASTETHER; 301 302 
error = et_bus_config(sc); 303 if (error) 304 goto fail; 305 306 et_get_eaddr(dev, eaddr); 307 308 /* Take PHY out of COMA and enable clocks. */ 309 pmcfg = ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE; 310 if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0) 311 pmcfg |= EM_PM_GIGEPHY_ENB; 312 CSR_WRITE_4(sc, ET_PM, pmcfg); 313 314 et_reset(sc); 315 316 error = et_dma_alloc(sc); 317 if (error) 318 goto fail; 319 320 ifp->if_softc = sc; 321 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 322 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 323 ifp->if_init = et_init; 324 ifp->if_ioctl = et_ioctl; 325 ifp->if_start = et_start; 326 ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_MTU; 327 ifp->if_capenable = ifp->if_capabilities; 328 ifp->if_snd.ifq_drv_maxlen = ET_TX_NDESC - 1; 329 IFQ_SET_MAXLEN(&ifp->if_snd, ET_TX_NDESC - 1); 330 IFQ_SET_READY(&ifp->if_snd); 331 332 et_chip_attach(sc); 333 334 error = mii_attach(dev, &sc->sc_miibus, ifp, et_ifmedia_upd, 335 et_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 336 MIIF_DOPAUSE); 337 if (error) { 338 device_printf(dev, "attaching PHYs failed\n"); 339 goto fail; 340 } 341 342 ether_ifattach(ifp, eaddr); 343 344 /* Tell the upper layer(s) we support long frames. */ 345 ifp->if_hdrlen = sizeof(struct ether_vlan_header); 346 347 error = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_NET | INTR_MPSAFE, 348 NULL, et_intr, sc, &sc->sc_irq_handle); 349 if (error) { 350 ether_ifdetach(ifp); 351 device_printf(dev, "can't setup intr\n"); 352 goto fail; 353 } 354 355 et_add_sysctls(sc); 356 357 return (0); 358 fail: 359 et_detach(dev); 360 return (error); 361 } 362 363 static int 364 et_detach(device_t dev) 365 { 366 struct et_softc *sc = device_get_softc(dev); 367 368 if (device_is_attached(dev)) { 369 ether_ifdetach(sc->ifp); 370 ET_LOCK(sc); 371 et_stop(sc); 372 ET_UNLOCK(sc); 373 callout_drain(&sc->sc_tick); 374 } 375 376 if (sc->sc_miibus != NULL) 377 device_delete_child(dev, sc->sc_miibus); 378 bus_generic_detach(dev); 379 380 if (sc->sc_irq_handle != NULL) 381 bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_handle); 382 if (sc->sc_irq_res != NULL) 383 bus_release_resource(dev, SYS_RES_IRQ, 384 rman_get_rid(sc->sc_irq_res), sc->sc_irq_res); 385 if ((sc->sc_flags & ET_FLAG_MSI) != 0) 386 pci_release_msi(dev); 387 if (sc->sc_mem_res != NULL) 388 bus_release_resource(dev, SYS_RES_MEMORY, 389 rman_get_rid(sc->sc_mem_res), sc->sc_mem_res); 390 391 if (sc->ifp != NULL) 392 if_free(sc->ifp); 393 394 et_dma_free(sc); 395 396 mtx_destroy(&sc->sc_mtx); 397 398 return (0); 399 } 400 401 static int 402 et_shutdown(device_t dev) 403 { 404 struct et_softc *sc = device_get_softc(dev); 405 406 ET_LOCK(sc); 407 et_stop(sc); 408 ET_UNLOCK(sc); 409 return (0); 410 } 411 412 static int 413 et_miibus_readreg(device_t dev, int phy, int reg) 414 { 415 struct et_softc *sc = device_get_softc(dev); 416 uint32_t val; 417 int i, ret; 418 419 /* Stop any pending operations */ 420 CSR_WRITE_4(sc, ET_MII_CMD, 0); 421 422 val = (phy << ET_MII_ADDR_PHY_SHIFT) & ET_MII_ADDR_PHY_MASK; 423 val |= (reg << ET_MII_ADDR_REG_SHIFT) & ET_MII_ADDR_REG_MASK; 424 CSR_WRITE_4(sc, ET_MII_ADDR, val); 425 426 /* Start reading */ 427 CSR_WRITE_4(sc, ET_MII_CMD, ET_MII_CMD_READ); 428 429 #define NRETRY 50 430 431 for (i = 0; i < NRETRY; ++i) { 432 val = CSR_READ_4(sc, ET_MII_IND); 433 if ((val & (ET_MII_IND_BUSY | ET_MII_IND_INVALID)) == 0) 434 break; 435 DELAY(50); 436 } 437 if (i == NRETRY) { 438 if_printf(sc->ifp, 439 "read phy %d, reg %d timed out\n", 
phy, reg); 440 ret = 0; 441 goto back; 442 } 443 444 #undef NRETRY 445 446 val = CSR_READ_4(sc, ET_MII_STAT); 447 ret = val & ET_MII_STAT_VALUE_MASK; 448 449 back: 450 /* Make sure that the current operation is stopped */ 451 CSR_WRITE_4(sc, ET_MII_CMD, 0); 452 return (ret); 453 } 454 455 static int 456 et_miibus_writereg(device_t dev, int phy, int reg, int val0) 457 { 458 struct et_softc *sc = device_get_softc(dev); 459 uint32_t val; 460 int i; 461 462 /* Stop any pending operations */ 463 CSR_WRITE_4(sc, ET_MII_CMD, 0); 464 465 val = (phy << ET_MII_ADDR_PHY_SHIFT) & ET_MII_ADDR_PHY_MASK; 466 val |= (reg << ET_MII_ADDR_REG_SHIFT) & ET_MII_ADDR_REG_MASK; 467 CSR_WRITE_4(sc, ET_MII_ADDR, val); 468 469 /* Start writing */ 470 CSR_WRITE_4(sc, ET_MII_CTRL, 471 (val0 << ET_MII_CTRL_VALUE_SHIFT) & ET_MII_CTRL_VALUE_MASK); 472 473 #define NRETRY 100 474 475 for (i = 0; i < NRETRY; ++i) { 476 val = CSR_READ_4(sc, ET_MII_IND); 477 if ((val & ET_MII_IND_BUSY) == 0) 478 break; 479 DELAY(50); 480 } 481 if (i == NRETRY) { 482 if_printf(sc->ifp, 483 "write phy %d, reg %d timed out\n", phy, reg); 484 et_miibus_readreg(dev, phy, reg); 485 } 486 487 #undef NRETRY 488 489 /* Make sure that the current operation is stopped */ 490 CSR_WRITE_4(sc, ET_MII_CMD, 0); 491 return (0); 492 } 493 494 static void 495 et_miibus_statchg(device_t dev) 496 { 497 struct et_softc *sc; 498 struct mii_data *mii; 499 struct ifnet *ifp; 500 uint32_t cfg1, cfg2, ctrl; 501 int i; 502 503 sc = device_get_softc(dev); 504 505 mii = device_get_softc(sc->sc_miibus); 506 ifp = sc->ifp; 507 if (mii == NULL || ifp == NULL || 508 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 509 return; 510 511 sc->sc_flags &= ~ET_FLAG_LINK; 512 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 513 (IFM_ACTIVE | IFM_AVALID)) { 514 switch (IFM_SUBTYPE(mii->mii_media_active)) { 515 case IFM_10_T: 516 case IFM_100_TX: 517 sc->sc_flags |= ET_FLAG_LINK; 518 break; 519 case IFM_1000_T: 520 if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0) 521 sc->sc_flags |= ET_FLAG_LINK; 522 break; 523 } 524 } 525 526 /* XXX Stop TX/RX MAC? */ 527 if ((sc->sc_flags & ET_FLAG_LINK) == 0) 528 return; 529 530 /* Program MACs with resolved speed/duplex/flow-control. */ 531 ctrl = CSR_READ_4(sc, ET_MAC_CTRL); 532 ctrl &= ~(ET_MAC_CTRL_GHDX | ET_MAC_CTRL_MODE_MII); 533 cfg1 = CSR_READ_4(sc, ET_MAC_CFG1); 534 cfg1 &= ~(ET_MAC_CFG1_TXFLOW | ET_MAC_CFG1_RXFLOW | 535 ET_MAC_CFG1_LOOPBACK); 536 cfg2 = CSR_READ_4(sc, ET_MAC_CFG2); 537 cfg2 &= ~(ET_MAC_CFG2_MODE_MII | ET_MAC_CFG2_MODE_GMII | 538 ET_MAC_CFG2_FDX | ET_MAC_CFG2_BIGFRM); 539 cfg2 |= ET_MAC_CFG2_LENCHK | ET_MAC_CFG2_CRC | ET_MAC_CFG2_PADCRC | 540 ((7 << ET_MAC_CFG2_PREAMBLE_LEN_SHIFT) & 541 ET_MAC_CFG2_PREAMBLE_LEN_MASK); 542 543 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) 544 cfg2 |= ET_MAC_CFG2_MODE_GMII; 545 else { 546 cfg2 |= ET_MAC_CFG2_MODE_MII; 547 ctrl |= ET_MAC_CTRL_MODE_MII; 548 } 549 550 if (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) { 551 cfg2 |= ET_MAC_CFG2_FDX; 552 /* 553 * Controller lacks automatic TX pause frame 554 * generation so it should be handled by driver. 555 * Even though driver can send pause frame with 556 * arbitrary pause time, controller does not 557 * provide a way that tells how many free RX 558 * buffers are available in controller. This 559 * limitation makes it hard to generate XON frame 560 * in time on driver side so don't enable TX flow 561 * control. 
562 */ 563 #ifdef notyet 564 if (IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) 565 cfg1 |= ET_MAC_CFG1_TXFLOW; 566 #endif 567 if (IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) 568 cfg1 |= ET_MAC_CFG1_RXFLOW; 569 } else 570 ctrl |= ET_MAC_CTRL_GHDX; 571 572 CSR_WRITE_4(sc, ET_MAC_CTRL, ctrl); 573 CSR_WRITE_4(sc, ET_MAC_CFG2, cfg2); 574 cfg1 |= ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN; 575 CSR_WRITE_4(sc, ET_MAC_CFG1, cfg1); 576 577 #define NRETRY 50 578 579 for (i = 0; i < NRETRY; ++i) { 580 cfg1 = CSR_READ_4(sc, ET_MAC_CFG1); 581 if ((cfg1 & (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) == 582 (ET_MAC_CFG1_SYNC_TXEN | ET_MAC_CFG1_SYNC_RXEN)) 583 break; 584 DELAY(100); 585 } 586 if (i == NRETRY) 587 if_printf(ifp, "can't enable RX/TX\n"); 588 sc->sc_flags |= ET_FLAG_TXRX_ENABLED; 589 590 #undef NRETRY 591 } 592 593 static int 594 et_ifmedia_upd_locked(struct ifnet *ifp) 595 { 596 struct et_softc *sc = ifp->if_softc; 597 struct mii_data *mii = device_get_softc(sc->sc_miibus); 598 struct mii_softc *miisc; 599 600 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 601 PHY_RESET(miisc); 602 return (mii_mediachg(mii)); 603 } 604 605 static int 606 et_ifmedia_upd(struct ifnet *ifp) 607 { 608 struct et_softc *sc = ifp->if_softc; 609 int res; 610 611 ET_LOCK(sc); 612 res = et_ifmedia_upd_locked(ifp); 613 ET_UNLOCK(sc); 614 615 return (res); 616 } 617 618 static void 619 et_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 620 { 621 struct et_softc *sc; 622 struct mii_data *mii; 623 624 sc = ifp->if_softc; 625 ET_LOCK(sc); 626 if ((ifp->if_flags & IFF_UP) == 0) { 627 ET_UNLOCK(sc); 628 return; 629 } 630 631 mii = device_get_softc(sc->sc_miibus); 632 mii_pollstat(mii); 633 ifmr->ifm_active = mii->mii_media_active; 634 ifmr->ifm_status = mii->mii_media_status; 635 ET_UNLOCK(sc); 636 } 637 638 static void 639 et_stop(struct et_softc *sc) 640 { 641 struct ifnet *ifp = sc->ifp; 642 643 ET_LOCK_ASSERT(sc); 644 645 callout_stop(&sc->sc_tick); 646 /* Disable interrupts. 
*/ 647 CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff); 648 649 CSR_WRITE_4(sc, ET_MAC_CFG1, CSR_READ_4(sc, ET_MAC_CFG1) & ~( 650 ET_MAC_CFG1_TXEN | ET_MAC_CFG1_RXEN)); 651 DELAY(100); 652 653 et_stop_rxdma(sc); 654 et_stop_txdma(sc); 655 et_stats_update(sc); 656 657 et_free_tx_ring(sc); 658 et_free_rx_ring(sc); 659 660 sc->sc_tx = 0; 661 sc->sc_tx_intr = 0; 662 sc->sc_flags &= ~ET_FLAG_TXRX_ENABLED; 663 664 sc->watchdog_timer = 0; 665 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 666 } 667 668 static int 669 et_bus_config(struct et_softc *sc) 670 { 671 uint32_t val, max_plsz; 672 uint16_t ack_latency, replay_timer; 673 674 /* 675 * Test whether EEPROM is valid 676 * NOTE: Read twice to get the correct value 677 */ 678 pci_read_config(sc->dev, ET_PCIR_EEPROM_STATUS, 1); 679 val = pci_read_config(sc->dev, ET_PCIR_EEPROM_STATUS, 1); 680 if (val & ET_PCIM_EEPROM_STATUS_ERROR) { 681 device_printf(sc->dev, "EEPROM status error 0x%02x\n", val); 682 return (ENXIO); 683 } 684 685 /* TODO: LED */ 686 687 if ((sc->sc_flags & ET_FLAG_PCIE) == 0) 688 return (0); 689 690 /* 691 * Configure ACK latency and replay timer according to 692 * max playload size 693 */ 694 val = pci_read_config(sc->dev, 695 sc->sc_expcap + PCIR_EXPRESS_DEVICE_CAP, 4); 696 max_plsz = val & PCIM_EXP_CAP_MAX_PAYLOAD; 697 698 switch (max_plsz) { 699 case ET_PCIV_DEVICE_CAPS_PLSZ_128: 700 ack_latency = ET_PCIV_ACK_LATENCY_128; 701 replay_timer = ET_PCIV_REPLAY_TIMER_128; 702 break; 703 704 case ET_PCIV_DEVICE_CAPS_PLSZ_256: 705 ack_latency = ET_PCIV_ACK_LATENCY_256; 706 replay_timer = ET_PCIV_REPLAY_TIMER_256; 707 break; 708 709 default: 710 ack_latency = pci_read_config(sc->dev, ET_PCIR_ACK_LATENCY, 2); 711 replay_timer = pci_read_config(sc->dev, 712 ET_PCIR_REPLAY_TIMER, 2); 713 device_printf(sc->dev, "ack latency %u, replay timer %u\n", 714 ack_latency, replay_timer); 715 break; 716 } 717 if (ack_latency != 0) { 718 pci_write_config(sc->dev, ET_PCIR_ACK_LATENCY, ack_latency, 2); 719 pci_write_config(sc->dev, ET_PCIR_REPLAY_TIMER, replay_timer, 720 2); 721 } 722 723 /* 724 * Set L0s and L1 latency timer to 2us 725 */ 726 val = pci_read_config(sc->dev, ET_PCIR_L0S_L1_LATENCY, 4); 727 val &= ~(PCIM_LINK_CAP_L0S_EXIT | PCIM_LINK_CAP_L1_EXIT); 728 /* L0s exit latency : 2us */ 729 val |= 0x00005000; 730 /* L1 exit latency : 2us */ 731 val |= 0x00028000; 732 pci_write_config(sc->dev, ET_PCIR_L0S_L1_LATENCY, val, 4); 733 734 /* 735 * Set max read request size to 2048 bytes 736 */ 737 pci_set_max_read_req(sc->dev, 2048); 738 739 return (0); 740 } 741 742 static void 743 et_get_eaddr(device_t dev, uint8_t eaddr[]) 744 { 745 uint32_t val; 746 int i; 747 748 val = pci_read_config(dev, ET_PCIR_MAC_ADDR0, 4); 749 for (i = 0; i < 4; ++i) 750 eaddr[i] = (val >> (8 * i)) & 0xff; 751 752 val = pci_read_config(dev, ET_PCIR_MAC_ADDR1, 2); 753 for (; i < ETHER_ADDR_LEN; ++i) 754 eaddr[i] = (val >> (8 * (i - 4))) & 0xff; 755 } 756 757 static void 758 et_reset(struct et_softc *sc) 759 { 760 CSR_WRITE_4(sc, ET_MAC_CFG1, 761 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 762 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 763 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 764 765 CSR_WRITE_4(sc, ET_SWRST, 766 ET_SWRST_TXDMA | ET_SWRST_RXDMA | 767 ET_SWRST_TXMAC | ET_SWRST_RXMAC | 768 ET_SWRST_MAC | ET_SWRST_MAC_STAT | ET_SWRST_MMC); 769 770 CSR_WRITE_4(sc, ET_MAC_CFG1, 771 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 772 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC); 773 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 774 /* Disable interrupts. 
*/ 775 CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff); 776 } 777 778 struct et_dmamap_arg { 779 bus_addr_t et_busaddr; 780 }; 781 782 static void 783 et_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) 784 { 785 struct et_dmamap_arg *ctx; 786 787 if (error) 788 return; 789 790 KASSERT(nseg == 1, ("%s: %d segments returned!", __func__, nseg)); 791 792 ctx = arg; 793 ctx->et_busaddr = segs->ds_addr; 794 } 795 796 static int 797 et_dma_ring_alloc(struct et_softc *sc, bus_size_t alignment, bus_size_t maxsize, 798 bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map, bus_addr_t *paddr, 799 const char *msg) 800 { 801 struct et_dmamap_arg ctx; 802 int error; 803 804 error = bus_dma_tag_create(sc->sc_dtag, alignment, 0, BUS_SPACE_MAXADDR, 805 BUS_SPACE_MAXADDR, NULL, NULL, maxsize, 1, maxsize, 0, NULL, NULL, 806 tag); 807 if (error != 0) { 808 device_printf(sc->dev, "could not create %s dma tag\n", msg); 809 return (error); 810 } 811 /* Allocate DMA'able memory for ring. */ 812 error = bus_dmamem_alloc(*tag, (void **)ring, 813 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, map); 814 if (error != 0) { 815 device_printf(sc->dev, 816 "could not allocate DMA'able memory for %s\n", msg); 817 return (error); 818 } 819 /* Load the address of the ring. */ 820 ctx.et_busaddr = 0; 821 error = bus_dmamap_load(*tag, *map, *ring, maxsize, et_dma_map_addr, 822 &ctx, BUS_DMA_NOWAIT); 823 if (error != 0) { 824 device_printf(sc->dev, 825 "could not load DMA'able memory for %s\n", msg); 826 return (error); 827 } 828 *paddr = ctx.et_busaddr; 829 return (0); 830 } 831 832 static void 833 et_dma_ring_free(struct et_softc *sc, bus_dma_tag_t *tag, uint8_t **ring, 834 bus_dmamap_t *map) 835 { 836 837 if (*map != NULL) 838 bus_dmamap_unload(*tag, *map); 839 if (*map != NULL && *ring != NULL) { 840 bus_dmamem_free(*tag, *ring, *map); 841 *ring = NULL; 842 *map = NULL; 843 } 844 if (*tag) { 845 bus_dma_tag_destroy(*tag); 846 *tag = NULL; 847 } 848 } 849 850 static int 851 et_dma_alloc(struct et_softc *sc) 852 { 853 struct et_txdesc_ring *tx_ring; 854 struct et_rxdesc_ring *rx_ring; 855 struct et_rxstat_ring *rxst_ring; 856 struct et_rxstatus_data *rxsd; 857 struct et_rxbuf_data *rbd; 858 struct et_txbuf_data *tbd; 859 struct et_txstatus_data *txsd; 860 int i, error; 861 862 error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0, 863 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 864 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, 865 &sc->sc_dtag); 866 if (error != 0) { 867 device_printf(sc->dev, "could not allocate parent dma tag\n"); 868 return (error); 869 } 870 871 /* TX ring. */ 872 tx_ring = &sc->sc_tx_ring; 873 error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_TX_RING_SIZE, 874 &tx_ring->tr_dtag, (uint8_t **)&tx_ring->tr_desc, &tx_ring->tr_dmap, 875 &tx_ring->tr_paddr, "TX ring"); 876 if (error) 877 return (error); 878 879 /* TX status block. */ 880 txsd = &sc->sc_tx_status; 881 error = et_dma_ring_alloc(sc, ET_STATUS_ALIGN, sizeof(uint32_t), 882 &txsd->txsd_dtag, (uint8_t **)&txsd->txsd_status, &txsd->txsd_dmap, 883 &txsd->txsd_paddr, "TX status block"); 884 if (error) 885 return (error); 886 887 /* RX ring 0, used as to recive small sized frames. 
*/ 888 rx_ring = &sc->sc_rx_ring[0]; 889 error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RX_RING_SIZE, 890 &rx_ring->rr_dtag, (uint8_t **)&rx_ring->rr_desc, &rx_ring->rr_dmap, 891 &rx_ring->rr_paddr, "RX ring 0"); 892 rx_ring->rr_posreg = ET_RX_RING0_POS; 893 if (error) 894 return (error); 895 896 /* RX ring 1, used as to store normal sized frames. */ 897 rx_ring = &sc->sc_rx_ring[1]; 898 error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RX_RING_SIZE, 899 &rx_ring->rr_dtag, (uint8_t **)&rx_ring->rr_desc, &rx_ring->rr_dmap, 900 &rx_ring->rr_paddr, "RX ring 1"); 901 rx_ring->rr_posreg = ET_RX_RING1_POS; 902 if (error) 903 return (error); 904 905 /* RX stat ring. */ 906 rxst_ring = &sc->sc_rxstat_ring; 907 error = et_dma_ring_alloc(sc, ET_RING_ALIGN, ET_RXSTAT_RING_SIZE, 908 &rxst_ring->rsr_dtag, (uint8_t **)&rxst_ring->rsr_stat, 909 &rxst_ring->rsr_dmap, &rxst_ring->rsr_paddr, "RX stat ring"); 910 if (error) 911 return (error); 912 913 /* RX status block. */ 914 rxsd = &sc->sc_rx_status; 915 error = et_dma_ring_alloc(sc, ET_STATUS_ALIGN, 916 sizeof(struct et_rxstatus), &rxsd->rxsd_dtag, 917 (uint8_t **)&rxsd->rxsd_status, &rxsd->rxsd_dmap, 918 &rxsd->rxsd_paddr, "RX status block"); 919 if (error) 920 return (error); 921 922 /* Create parent DMA tag for mbufs. */ 923 error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0, 924 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 925 BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, 926 &sc->sc_mbuf_dtag); 927 if (error != 0) { 928 device_printf(sc->dev, 929 "could not allocate parent dma tag for mbuf\n"); 930 return (error); 931 } 932 933 /* Create DMA tag for mini RX mbufs to use RX ring 0. */ 934 error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0, 935 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MHLEN, 1, 936 MHLEN, 0, NULL, NULL, &sc->sc_rx_mini_tag); 937 if (error) { 938 device_printf(sc->dev, "could not create mini RX dma tag\n"); 939 return (error); 940 } 941 942 /* Create DMA tag for standard RX mbufs to use RX ring 1. */ 943 error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0, 944 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, 945 MCLBYTES, 0, NULL, NULL, &sc->sc_rx_tag); 946 if (error) { 947 device_printf(sc->dev, "could not create RX dma tag\n"); 948 return (error); 949 } 950 951 /* Create DMA tag for TX mbufs. */ 952 error = bus_dma_tag_create(sc->sc_mbuf_dtag, 1, 0, 953 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 954 MCLBYTES * ET_NSEG_MAX, ET_NSEG_MAX, MCLBYTES, 0, NULL, NULL, 955 &sc->sc_tx_tag); 956 if (error) { 957 device_printf(sc->dev, "could not create TX dma tag\n"); 958 return (error); 959 } 960 961 /* Initialize RX ring 0. */ 962 rbd = &sc->sc_rx_data[0]; 963 rbd->rbd_bufsize = ET_RXDMA_CTRL_RING0_128; 964 rbd->rbd_newbuf = et_newbuf_hdr; 965 rbd->rbd_discard = et_rxbuf_discard; 966 rbd->rbd_softc = sc; 967 rbd->rbd_ring = &sc->sc_rx_ring[0]; 968 /* Create DMA maps for mini RX buffers, ring 0. */ 969 for (i = 0; i < ET_RX_NDESC; i++) { 970 error = bus_dmamap_create(sc->sc_rx_mini_tag, 0, 971 &rbd->rbd_buf[i].rb_dmap); 972 if (error) { 973 device_printf(sc->dev, 974 "could not create DMA map for mini RX mbufs\n"); 975 return (error); 976 } 977 } 978 979 /* Create a spare DMA map for mini RX buffers, ring 0. */ 980 error = bus_dmamap_create(sc->sc_rx_mini_tag, 0, 981 &sc->sc_rx_mini_sparemap); 982 if (error) { 983 device_printf(sc->dev, 984 "could not create spare DMA map for mini RX mbuf\n"); 985 return (error); 986 } 987 988 /* Initialize RX ring 1. 
*/ 989 rbd = &sc->sc_rx_data[1]; 990 rbd->rbd_bufsize = ET_RXDMA_CTRL_RING1_2048; 991 rbd->rbd_newbuf = et_newbuf_cluster; 992 rbd->rbd_discard = et_rxbuf_discard; 993 rbd->rbd_softc = sc; 994 rbd->rbd_ring = &sc->sc_rx_ring[1]; 995 /* Create DMA maps for standard RX buffers, ring 1. */ 996 for (i = 0; i < ET_RX_NDESC; i++) { 997 error = bus_dmamap_create(sc->sc_rx_tag, 0, 998 &rbd->rbd_buf[i].rb_dmap); 999 if (error) { 1000 device_printf(sc->dev, 1001 "could not create DMA map for mini RX mbufs\n"); 1002 return (error); 1003 } 1004 } 1005 1006 /* Create a spare DMA map for standard RX buffers, ring 1. */ 1007 error = bus_dmamap_create(sc->sc_rx_tag, 0, &sc->sc_rx_sparemap); 1008 if (error) { 1009 device_printf(sc->dev, 1010 "could not create spare DMA map for RX mbuf\n"); 1011 return (error); 1012 } 1013 1014 /* Create DMA maps for TX buffers. */ 1015 tbd = &sc->sc_tx_data; 1016 for (i = 0; i < ET_TX_NDESC; i++) { 1017 error = bus_dmamap_create(sc->sc_tx_tag, 0, 1018 &tbd->tbd_buf[i].tb_dmap); 1019 if (error) { 1020 device_printf(sc->dev, 1021 "could not create DMA map for TX mbufs\n"); 1022 return (error); 1023 } 1024 } 1025 1026 return (0); 1027 } 1028 1029 static void 1030 et_dma_free(struct et_softc *sc) 1031 { 1032 struct et_txdesc_ring *tx_ring; 1033 struct et_rxdesc_ring *rx_ring; 1034 struct et_txstatus_data *txsd; 1035 struct et_rxstat_ring *rxst_ring; 1036 struct et_rxstatus_data *rxsd; 1037 struct et_rxbuf_data *rbd; 1038 struct et_txbuf_data *tbd; 1039 int i; 1040 1041 /* Destroy DMA maps for mini RX buffers, ring 0. */ 1042 rbd = &sc->sc_rx_data[0]; 1043 for (i = 0; i < ET_RX_NDESC; i++) { 1044 if (rbd->rbd_buf[i].rb_dmap) { 1045 bus_dmamap_destroy(sc->sc_rx_mini_tag, 1046 rbd->rbd_buf[i].rb_dmap); 1047 rbd->rbd_buf[i].rb_dmap = NULL; 1048 } 1049 } 1050 if (sc->sc_rx_mini_sparemap) { 1051 bus_dmamap_destroy(sc->sc_rx_mini_tag, sc->sc_rx_mini_sparemap); 1052 sc->sc_rx_mini_sparemap = NULL; 1053 } 1054 if (sc->sc_rx_mini_tag) { 1055 bus_dma_tag_destroy(sc->sc_rx_mini_tag); 1056 sc->sc_rx_mini_tag = NULL; 1057 } 1058 1059 /* Destroy DMA maps for standard RX buffers, ring 1. */ 1060 rbd = &sc->sc_rx_data[1]; 1061 for (i = 0; i < ET_RX_NDESC; i++) { 1062 if (rbd->rbd_buf[i].rb_dmap) { 1063 bus_dmamap_destroy(sc->sc_rx_tag, 1064 rbd->rbd_buf[i].rb_dmap); 1065 rbd->rbd_buf[i].rb_dmap = NULL; 1066 } 1067 } 1068 if (sc->sc_rx_sparemap) { 1069 bus_dmamap_destroy(sc->sc_rx_tag, sc->sc_rx_sparemap); 1070 sc->sc_rx_sparemap = NULL; 1071 } 1072 if (sc->sc_rx_tag) { 1073 bus_dma_tag_destroy(sc->sc_rx_tag); 1074 sc->sc_rx_tag = NULL; 1075 } 1076 1077 /* Destroy DMA maps for TX buffers. */ 1078 tbd = &sc->sc_tx_data; 1079 for (i = 0; i < ET_TX_NDESC; i++) { 1080 if (tbd->tbd_buf[i].tb_dmap) { 1081 bus_dmamap_destroy(sc->sc_tx_tag, 1082 tbd->tbd_buf[i].tb_dmap); 1083 tbd->tbd_buf[i].tb_dmap = NULL; 1084 } 1085 } 1086 if (sc->sc_tx_tag) { 1087 bus_dma_tag_destroy(sc->sc_tx_tag); 1088 sc->sc_tx_tag = NULL; 1089 } 1090 1091 /* Destroy mini RX ring, ring 0. */ 1092 rx_ring = &sc->sc_rx_ring[0]; 1093 et_dma_ring_free(sc, &rx_ring->rr_dtag, (void *)&rx_ring->rr_desc, 1094 &rx_ring->rr_dmap); 1095 /* Destroy standard RX ring, ring 1. */ 1096 rx_ring = &sc->sc_rx_ring[1]; 1097 et_dma_ring_free(sc, &rx_ring->rr_dtag, (void *)&rx_ring->rr_desc, 1098 &rx_ring->rr_dmap); 1099 /* Destroy RX stat ring. */ 1100 rxst_ring = &sc->sc_rxstat_ring; 1101 et_dma_ring_free(sc, &rxst_ring->rsr_dtag, (void *)&rxst_ring->rsr_stat, 1102 &rxst_ring->rsr_dmap); 1103 /* Destroy RX status block. 
*/ 1104 rxsd = &sc->sc_rx_status; 1105 et_dma_ring_free(sc, &rxst_ring->rsr_dtag, (void *)&rxst_ring->rsr_stat, 1106 &rxst_ring->rsr_dmap); 1107 /* Destroy TX ring. */ 1108 tx_ring = &sc->sc_tx_ring; 1109 et_dma_ring_free(sc, &tx_ring->tr_dtag, (void *)&tx_ring->tr_desc, 1110 &tx_ring->tr_dmap); 1111 /* Destroy TX status block. */ 1112 txsd = &sc->sc_tx_status; 1113 et_dma_ring_free(sc, &txsd->txsd_dtag, (void *)&txsd->txsd_status, 1114 &txsd->txsd_dmap); 1115 1116 /* Destroy the parent tag. */ 1117 if (sc->sc_dtag) { 1118 bus_dma_tag_destroy(sc->sc_dtag); 1119 sc->sc_dtag = NULL; 1120 } 1121 } 1122 1123 static void 1124 et_chip_attach(struct et_softc *sc) 1125 { 1126 uint32_t val; 1127 1128 /* 1129 * Perform minimal initialization 1130 */ 1131 1132 /* Disable loopback */ 1133 CSR_WRITE_4(sc, ET_LOOPBACK, 0); 1134 1135 /* Reset MAC */ 1136 CSR_WRITE_4(sc, ET_MAC_CFG1, 1137 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 1138 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 1139 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 1140 1141 /* 1142 * Setup half duplex mode 1143 */ 1144 val = (10 << ET_MAC_HDX_ALT_BEB_TRUNC_SHIFT) | 1145 (15 << ET_MAC_HDX_REXMIT_MAX_SHIFT) | 1146 (55 << ET_MAC_HDX_COLLWIN_SHIFT) | 1147 ET_MAC_HDX_EXC_DEFER; 1148 CSR_WRITE_4(sc, ET_MAC_HDX, val); 1149 1150 /* Clear MAC control */ 1151 CSR_WRITE_4(sc, ET_MAC_CTRL, 0); 1152 1153 /* Reset MII */ 1154 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST); 1155 1156 /* Bring MAC out of reset state */ 1157 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 1158 1159 /* Enable memory controllers */ 1160 CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE); 1161 } 1162 1163 static void 1164 et_intr(void *xsc) 1165 { 1166 struct et_softc *sc = xsc; 1167 struct ifnet *ifp; 1168 uint32_t status; 1169 1170 ET_LOCK(sc); 1171 ifp = sc->ifp; 1172 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 1173 goto done; 1174 1175 status = CSR_READ_4(sc, ET_INTR_STATUS); 1176 if ((status & ET_INTRS) == 0) 1177 goto done; 1178 1179 /* Disable further interrupts. */ 1180 CSR_WRITE_4(sc, ET_INTR_MASK, 0xffffffff); 1181 1182 if (status & (ET_INTR_RXDMA_ERROR | ET_INTR_TXDMA_ERROR)) { 1183 device_printf(sc->dev, "DMA error(0x%08x) -- resetting\n", 1184 status); 1185 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1186 et_init_locked(sc); 1187 ET_UNLOCK(sc); 1188 return; 1189 } 1190 if (status & ET_INTR_RXDMA) 1191 et_rxeof(sc); 1192 if (status & (ET_INTR_TXDMA | ET_INTR_TIMER)) 1193 et_txeof(sc); 1194 if (status & ET_INTR_TIMER) 1195 CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer); 1196 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1197 CSR_WRITE_4(sc, ET_INTR_MASK, ~ET_INTRS); 1198 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 1199 et_start_locked(ifp); 1200 } 1201 done: 1202 ET_UNLOCK(sc); 1203 } 1204 1205 static void 1206 et_init_locked(struct et_softc *sc) 1207 { 1208 struct ifnet *ifp; 1209 int error; 1210 1211 ET_LOCK_ASSERT(sc); 1212 1213 ifp = sc->ifp; 1214 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1215 return; 1216 1217 et_stop(sc); 1218 et_reset(sc); 1219 1220 et_init_tx_ring(sc); 1221 error = et_init_rx_ring(sc); 1222 if (error) 1223 return; 1224 1225 error = et_chip_init(sc); 1226 if (error) 1227 goto fail; 1228 1229 /* 1230 * Start TX/RX DMA engine 1231 */ 1232 error = et_start_rxdma(sc); 1233 if (error) 1234 return; 1235 1236 error = et_start_txdma(sc); 1237 if (error) 1238 return; 1239 1240 /* Enable interrupts. 
*/ 1241 CSR_WRITE_4(sc, ET_INTR_MASK, ~ET_INTRS); 1242 1243 CSR_WRITE_4(sc, ET_TIMER, sc->sc_timer); 1244 1245 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1246 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1247 1248 sc->sc_flags &= ~ET_FLAG_LINK; 1249 et_ifmedia_upd_locked(ifp); 1250 1251 callout_reset(&sc->sc_tick, hz, et_tick, sc); 1252 1253 fail: 1254 if (error) 1255 et_stop(sc); 1256 } 1257 1258 static void 1259 et_init(void *xsc) 1260 { 1261 struct et_softc *sc = xsc; 1262 1263 ET_LOCK(sc); 1264 et_init_locked(sc); 1265 ET_UNLOCK(sc); 1266 } 1267 1268 static int 1269 et_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 1270 { 1271 struct et_softc *sc = ifp->if_softc; 1272 struct mii_data *mii = device_get_softc(sc->sc_miibus); 1273 struct ifreq *ifr = (struct ifreq *)data; 1274 int error = 0, mask, max_framelen; 1275 1276 /* XXX LOCKSUSED */ 1277 switch (cmd) { 1278 case SIOCSIFFLAGS: 1279 ET_LOCK(sc); 1280 if (ifp->if_flags & IFF_UP) { 1281 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1282 if ((ifp->if_flags ^ sc->sc_if_flags) & 1283 (IFF_ALLMULTI | IFF_PROMISC | IFF_BROADCAST)) 1284 et_setmulti(sc); 1285 } else { 1286 et_init_locked(sc); 1287 } 1288 } else { 1289 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 1290 et_stop(sc); 1291 } 1292 sc->sc_if_flags = ifp->if_flags; 1293 ET_UNLOCK(sc); 1294 break; 1295 1296 case SIOCSIFMEDIA: 1297 case SIOCGIFMEDIA: 1298 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 1299 break; 1300 1301 case SIOCADDMULTI: 1302 case SIOCDELMULTI: 1303 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1304 ET_LOCK(sc); 1305 et_setmulti(sc); 1306 ET_UNLOCK(sc); 1307 } 1308 break; 1309 1310 case SIOCSIFMTU: 1311 ET_LOCK(sc); 1312 #if 0 1313 if (sc->sc_flags & ET_FLAG_JUMBO) 1314 max_framelen = ET_JUMBO_FRAMELEN; 1315 else 1316 #endif 1317 max_framelen = MCLBYTES - 1; 1318 1319 if (ET_FRAMELEN(ifr->ifr_mtu) > max_framelen) { 1320 error = EOPNOTSUPP; 1321 ET_UNLOCK(sc); 1322 break; 1323 } 1324 1325 if (ifp->if_mtu != ifr->ifr_mtu) { 1326 ifp->if_mtu = ifr->ifr_mtu; 1327 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 1328 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1329 et_init_locked(sc); 1330 } 1331 } 1332 ET_UNLOCK(sc); 1333 break; 1334 1335 case SIOCSIFCAP: 1336 ET_LOCK(sc); 1337 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 1338 if ((mask & IFCAP_TXCSUM) != 0 && 1339 (IFCAP_TXCSUM & ifp->if_capabilities) != 0) { 1340 ifp->if_capenable ^= IFCAP_TXCSUM; 1341 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0) 1342 ifp->if_hwassist |= ET_CSUM_FEATURES; 1343 else 1344 ifp->if_hwassist &= ~ET_CSUM_FEATURES; 1345 } 1346 ET_UNLOCK(sc); 1347 break; 1348 1349 default: 1350 error = ether_ioctl(ifp, cmd, data); 1351 break; 1352 } 1353 return (error); 1354 } 1355 1356 static void 1357 et_start_locked(struct ifnet *ifp) 1358 { 1359 struct et_softc *sc; 1360 struct mbuf *m_head = NULL; 1361 struct et_txdesc_ring *tx_ring; 1362 struct et_txbuf_data *tbd; 1363 uint32_t tx_ready_pos; 1364 int enq; 1365 1366 sc = ifp->if_softc; 1367 ET_LOCK_ASSERT(sc); 1368 1369 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1370 IFF_DRV_RUNNING || 1371 (sc->sc_flags & (ET_FLAG_LINK | ET_FLAG_TXRX_ENABLED)) != 1372 (ET_FLAG_LINK | ET_FLAG_TXRX_ENABLED)) 1373 return; 1374 1375 /* 1376 * Driver does not request TX completion interrupt for every 1377 * queued frames to prevent generating excessive interrupts. 1378 * This means driver may wait for TX completion interrupt even 1379 * though some frames were sucessfully transmitted. 
Reclaiming 1380 * transmitted frames will ensure driver see all available 1381 * descriptors. 1382 */ 1383 tbd = &sc->sc_tx_data; 1384 if (tbd->tbd_used > (ET_TX_NDESC * 2) / 3) 1385 et_txeof(sc); 1386 1387 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) { 1388 if (tbd->tbd_used + ET_NSEG_SPARE >= ET_TX_NDESC) { 1389 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1390 break; 1391 } 1392 1393 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 1394 if (m_head == NULL) 1395 break; 1396 1397 if (et_encap(sc, &m_head)) { 1398 if (m_head == NULL) { 1399 ifp->if_oerrors++; 1400 break; 1401 } 1402 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 1403 if (tbd->tbd_used > 0) 1404 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1405 break; 1406 } 1407 enq++; 1408 ETHER_BPF_MTAP(ifp, m_head); 1409 } 1410 1411 if (enq > 0) { 1412 tx_ring = &sc->sc_tx_ring; 1413 bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap, 1414 BUS_DMASYNC_PREWRITE); 1415 tx_ready_pos = tx_ring->tr_ready_index & 1416 ET_TX_READY_POS_INDEX_MASK; 1417 if (tx_ring->tr_ready_wrap) 1418 tx_ready_pos |= ET_TX_READY_POS_WRAP; 1419 CSR_WRITE_4(sc, ET_TX_READY_POS, tx_ready_pos); 1420 sc->watchdog_timer = 5; 1421 } 1422 } 1423 1424 static void 1425 et_start(struct ifnet *ifp) 1426 { 1427 struct et_softc *sc = ifp->if_softc; 1428 1429 ET_LOCK(sc); 1430 et_start_locked(ifp); 1431 ET_UNLOCK(sc); 1432 } 1433 1434 static int 1435 et_watchdog(struct et_softc *sc) 1436 { 1437 uint32_t status; 1438 1439 ET_LOCK_ASSERT(sc); 1440 1441 if (sc->watchdog_timer == 0 || --sc->watchdog_timer) 1442 return (0); 1443 1444 bus_dmamap_sync(sc->sc_tx_status.txsd_dtag, sc->sc_tx_status.txsd_dmap, 1445 BUS_DMASYNC_POSTREAD); 1446 status = le32toh(*(sc->sc_tx_status.txsd_status)); 1447 if_printf(sc->ifp, "watchdog timed out (0x%08x) -- resetting\n", 1448 status); 1449 1450 sc->ifp->if_oerrors++; 1451 sc->ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1452 et_init_locked(sc); 1453 return (EJUSTRETURN); 1454 } 1455 1456 static int 1457 et_stop_rxdma(struct et_softc *sc) 1458 { 1459 CSR_WRITE_4(sc, ET_RXDMA_CTRL, 1460 ET_RXDMA_CTRL_HALT | ET_RXDMA_CTRL_RING1_ENABLE); 1461 1462 DELAY(5); 1463 if ((CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) == 0) { 1464 if_printf(sc->ifp, "can't stop RX DMA engine\n"); 1465 return (ETIMEDOUT); 1466 } 1467 return (0); 1468 } 1469 1470 static int 1471 et_stop_txdma(struct et_softc *sc) 1472 { 1473 CSR_WRITE_4(sc, ET_TXDMA_CTRL, 1474 ET_TXDMA_CTRL_HALT | ET_TXDMA_CTRL_SINGLE_EPKT); 1475 return (0); 1476 } 1477 1478 static void 1479 et_free_tx_ring(struct et_softc *sc) 1480 { 1481 struct et_txdesc_ring *tx_ring; 1482 struct et_txbuf_data *tbd; 1483 struct et_txbuf *tb; 1484 int i; 1485 1486 tbd = &sc->sc_tx_data; 1487 tx_ring = &sc->sc_tx_ring; 1488 for (i = 0; i < ET_TX_NDESC; ++i) { 1489 tb = &tbd->tbd_buf[i]; 1490 if (tb->tb_mbuf != NULL) { 1491 bus_dmamap_sync(sc->sc_tx_tag, tb->tb_dmap, 1492 BUS_DMASYNC_POSTWRITE); 1493 bus_dmamap_unload(sc->sc_mbuf_dtag, tb->tb_dmap); 1494 m_freem(tb->tb_mbuf); 1495 tb->tb_mbuf = NULL; 1496 } 1497 } 1498 } 1499 1500 static void 1501 et_free_rx_ring(struct et_softc *sc) 1502 { 1503 struct et_rxbuf_data *rbd; 1504 struct et_rxdesc_ring *rx_ring; 1505 struct et_rxbuf *rb; 1506 int i; 1507 1508 /* Ring 0 */ 1509 rx_ring = &sc->sc_rx_ring[0]; 1510 rbd = &sc->sc_rx_data[0]; 1511 for (i = 0; i < ET_RX_NDESC; ++i) { 1512 rb = &rbd->rbd_buf[i]; 1513 if (rb->rb_mbuf != NULL) { 1514 bus_dmamap_sync(sc->sc_rx_mini_tag, rx_ring->rr_dmap, 1515 BUS_DMASYNC_POSTREAD); 1516 bus_dmamap_unload(sc->sc_rx_mini_tag, rb->rb_dmap); 1517 m_freem(rb->rb_mbuf); 
1518 rb->rb_mbuf = NULL; 1519 } 1520 } 1521 1522 /* Ring 1 */ 1523 rx_ring = &sc->sc_rx_ring[1]; 1524 rbd = &sc->sc_rx_data[1]; 1525 for (i = 0; i < ET_RX_NDESC; ++i) { 1526 rb = &rbd->rbd_buf[i]; 1527 if (rb->rb_mbuf != NULL) { 1528 bus_dmamap_sync(sc->sc_rx_tag, rx_ring->rr_dmap, 1529 BUS_DMASYNC_POSTREAD); 1530 bus_dmamap_unload(sc->sc_rx_tag, rb->rb_dmap); 1531 m_freem(rb->rb_mbuf); 1532 rb->rb_mbuf = NULL; 1533 } 1534 } 1535 } 1536 1537 static void 1538 et_setmulti(struct et_softc *sc) 1539 { 1540 struct ifnet *ifp; 1541 uint32_t hash[4] = { 0, 0, 0, 0 }; 1542 uint32_t rxmac_ctrl, pktfilt; 1543 struct ifmultiaddr *ifma; 1544 int i, count; 1545 1546 ET_LOCK_ASSERT(sc); 1547 ifp = sc->ifp; 1548 1549 pktfilt = CSR_READ_4(sc, ET_PKTFILT); 1550 rxmac_ctrl = CSR_READ_4(sc, ET_RXMAC_CTRL); 1551 1552 pktfilt &= ~(ET_PKTFILT_BCAST | ET_PKTFILT_MCAST | ET_PKTFILT_UCAST); 1553 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) { 1554 rxmac_ctrl |= ET_RXMAC_CTRL_NO_PKTFILT; 1555 goto back; 1556 } 1557 1558 count = 0; 1559 if_maddr_rlock(ifp); 1560 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1561 uint32_t *hp, h; 1562 1563 if (ifma->ifma_addr->sa_family != AF_LINK) 1564 continue; 1565 1566 h = ether_crc32_be(LLADDR((struct sockaddr_dl *) 1567 ifma->ifma_addr), ETHER_ADDR_LEN); 1568 h = (h & 0x3f800000) >> 23; 1569 1570 hp = &hash[0]; 1571 if (h >= 32 && h < 64) { 1572 h -= 32; 1573 hp = &hash[1]; 1574 } else if (h >= 64 && h < 96) { 1575 h -= 64; 1576 hp = &hash[2]; 1577 } else if (h >= 96) { 1578 h -= 96; 1579 hp = &hash[3]; 1580 } 1581 *hp |= (1 << h); 1582 1583 ++count; 1584 } 1585 if_maddr_runlock(ifp); 1586 1587 for (i = 0; i < 4; ++i) 1588 CSR_WRITE_4(sc, ET_MULTI_HASH + (i * 4), hash[i]); 1589 1590 if (count > 0) 1591 pktfilt |= ET_PKTFILT_MCAST; 1592 rxmac_ctrl &= ~ET_RXMAC_CTRL_NO_PKTFILT; 1593 back: 1594 CSR_WRITE_4(sc, ET_PKTFILT, pktfilt); 1595 CSR_WRITE_4(sc, ET_RXMAC_CTRL, rxmac_ctrl); 1596 } 1597 1598 static int 1599 et_chip_init(struct et_softc *sc) 1600 { 1601 struct ifnet *ifp = sc->ifp; 1602 uint32_t rxq_end; 1603 int error, frame_len, rxmem_size; 1604 1605 /* 1606 * Split 16Kbytes internal memory between TX and RX 1607 * according to frame length. 
1608 */ 1609 frame_len = ET_FRAMELEN(ifp->if_mtu); 1610 if (frame_len < 2048) { 1611 rxmem_size = ET_MEM_RXSIZE_DEFAULT; 1612 } else if (frame_len <= ET_RXMAC_CUT_THRU_FRMLEN) { 1613 rxmem_size = ET_MEM_SIZE / 2; 1614 } else { 1615 rxmem_size = ET_MEM_SIZE - 1616 roundup(frame_len + ET_MEM_TXSIZE_EX, ET_MEM_UNIT); 1617 } 1618 rxq_end = ET_QUEUE_ADDR(rxmem_size); 1619 1620 CSR_WRITE_4(sc, ET_RXQUEUE_START, ET_QUEUE_ADDR_START); 1621 CSR_WRITE_4(sc, ET_RXQUEUE_END, rxq_end); 1622 CSR_WRITE_4(sc, ET_TXQUEUE_START, rxq_end + 1); 1623 CSR_WRITE_4(sc, ET_TXQUEUE_END, ET_QUEUE_ADDR_END); 1624 1625 /* No loopback */ 1626 CSR_WRITE_4(sc, ET_LOOPBACK, 0); 1627 1628 /* Clear MSI configure */ 1629 if ((sc->sc_flags & ET_FLAG_MSI) == 0) 1630 CSR_WRITE_4(sc, ET_MSI_CFG, 0); 1631 1632 /* Disable timer */ 1633 CSR_WRITE_4(sc, ET_TIMER, 0); 1634 1635 /* Initialize MAC */ 1636 et_init_mac(sc); 1637 1638 /* Enable memory controllers */ 1639 CSR_WRITE_4(sc, ET_MMC_CTRL, ET_MMC_CTRL_ENABLE); 1640 1641 /* Initialize RX MAC */ 1642 et_init_rxmac(sc); 1643 1644 /* Initialize TX MAC */ 1645 et_init_txmac(sc); 1646 1647 /* Initialize RX DMA engine */ 1648 error = et_init_rxdma(sc); 1649 if (error) 1650 return (error); 1651 1652 /* Initialize TX DMA engine */ 1653 error = et_init_txdma(sc); 1654 if (error) 1655 return (error); 1656 1657 return (0); 1658 } 1659 1660 static void 1661 et_init_tx_ring(struct et_softc *sc) 1662 { 1663 struct et_txdesc_ring *tx_ring; 1664 struct et_txbuf_data *tbd; 1665 struct et_txstatus_data *txsd; 1666 1667 tx_ring = &sc->sc_tx_ring; 1668 bzero(tx_ring->tr_desc, ET_TX_RING_SIZE); 1669 bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap, 1670 BUS_DMASYNC_PREWRITE); 1671 1672 tbd = &sc->sc_tx_data; 1673 tbd->tbd_start_index = 0; 1674 tbd->tbd_start_wrap = 0; 1675 tbd->tbd_used = 0; 1676 1677 txsd = &sc->sc_tx_status; 1678 bzero(txsd->txsd_status, sizeof(uint32_t)); 1679 bus_dmamap_sync(txsd->txsd_dtag, txsd->txsd_dmap, 1680 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1681 } 1682 1683 static int 1684 et_init_rx_ring(struct et_softc *sc) 1685 { 1686 struct et_rxstatus_data *rxsd; 1687 struct et_rxstat_ring *rxst_ring; 1688 struct et_rxbuf_data *rbd; 1689 int i, error, n; 1690 1691 for (n = 0; n < ET_RX_NRING; ++n) { 1692 rbd = &sc->sc_rx_data[n]; 1693 for (i = 0; i < ET_RX_NDESC; ++i) { 1694 error = rbd->rbd_newbuf(rbd, i); 1695 if (error) { 1696 if_printf(sc->ifp, "%d ring %d buf, " 1697 "newbuf failed: %d\n", n, i, error); 1698 return (error); 1699 } 1700 } 1701 } 1702 1703 rxsd = &sc->sc_rx_status; 1704 bzero(rxsd->rxsd_status, sizeof(struct et_rxstatus)); 1705 bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap, 1706 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1707 1708 rxst_ring = &sc->sc_rxstat_ring; 1709 bzero(rxst_ring->rsr_stat, ET_RXSTAT_RING_SIZE); 1710 bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap, 1711 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1712 1713 return (0); 1714 } 1715 1716 static int 1717 et_init_rxdma(struct et_softc *sc) 1718 { 1719 struct et_rxstatus_data *rxsd = &sc->sc_rx_status; 1720 struct et_rxstat_ring *rxst_ring = &sc->sc_rxstat_ring; 1721 struct et_rxdesc_ring *rx_ring; 1722 int error; 1723 1724 error = et_stop_rxdma(sc); 1725 if (error) { 1726 if_printf(sc->ifp, "can't init RX DMA engine\n"); 1727 return (error); 1728 } 1729 1730 /* 1731 * Install RX status 1732 */ 1733 CSR_WRITE_4(sc, ET_RX_STATUS_HI, ET_ADDR_HI(rxsd->rxsd_paddr)); 1734 CSR_WRITE_4(sc, ET_RX_STATUS_LO, ET_ADDR_LO(rxsd->rxsd_paddr)); 1735 1736 /* 1737 * Install RX stat ring 
1738 */ 1739 CSR_WRITE_4(sc, ET_RXSTAT_HI, ET_ADDR_HI(rxst_ring->rsr_paddr)); 1740 CSR_WRITE_4(sc, ET_RXSTAT_LO, ET_ADDR_LO(rxst_ring->rsr_paddr)); 1741 CSR_WRITE_4(sc, ET_RXSTAT_CNT, ET_RX_NSTAT - 1); 1742 CSR_WRITE_4(sc, ET_RXSTAT_POS, 0); 1743 CSR_WRITE_4(sc, ET_RXSTAT_MINCNT, ((ET_RX_NSTAT * 15) / 100) - 1); 1744 1745 /* Match ET_RXSTAT_POS */ 1746 rxst_ring->rsr_index = 0; 1747 rxst_ring->rsr_wrap = 0; 1748 1749 /* 1750 * Install the 2nd RX descriptor ring 1751 */ 1752 rx_ring = &sc->sc_rx_ring[1]; 1753 CSR_WRITE_4(sc, ET_RX_RING1_HI, ET_ADDR_HI(rx_ring->rr_paddr)); 1754 CSR_WRITE_4(sc, ET_RX_RING1_LO, ET_ADDR_LO(rx_ring->rr_paddr)); 1755 CSR_WRITE_4(sc, ET_RX_RING1_CNT, ET_RX_NDESC - 1); 1756 CSR_WRITE_4(sc, ET_RX_RING1_POS, ET_RX_RING1_POS_WRAP); 1757 CSR_WRITE_4(sc, ET_RX_RING1_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1); 1758 1759 /* Match ET_RX_RING1_POS */ 1760 rx_ring->rr_index = 0; 1761 rx_ring->rr_wrap = 1; 1762 1763 /* 1764 * Install the 1st RX descriptor ring 1765 */ 1766 rx_ring = &sc->sc_rx_ring[0]; 1767 CSR_WRITE_4(sc, ET_RX_RING0_HI, ET_ADDR_HI(rx_ring->rr_paddr)); 1768 CSR_WRITE_4(sc, ET_RX_RING0_LO, ET_ADDR_LO(rx_ring->rr_paddr)); 1769 CSR_WRITE_4(sc, ET_RX_RING0_CNT, ET_RX_NDESC - 1); 1770 CSR_WRITE_4(sc, ET_RX_RING0_POS, ET_RX_RING0_POS_WRAP); 1771 CSR_WRITE_4(sc, ET_RX_RING0_MINCNT, ((ET_RX_NDESC * 15) / 100) - 1); 1772 1773 /* Match ET_RX_RING0_POS */ 1774 rx_ring->rr_index = 0; 1775 rx_ring->rr_wrap = 1; 1776 1777 /* 1778 * RX intr moderation 1779 */ 1780 CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, sc->sc_rx_intr_npkts); 1781 CSR_WRITE_4(sc, ET_RX_INTR_DELAY, sc->sc_rx_intr_delay); 1782 1783 return (0); 1784 } 1785 1786 static int 1787 et_init_txdma(struct et_softc *sc) 1788 { 1789 struct et_txdesc_ring *tx_ring = &sc->sc_tx_ring; 1790 struct et_txstatus_data *txsd = &sc->sc_tx_status; 1791 int error; 1792 1793 error = et_stop_txdma(sc); 1794 if (error) { 1795 if_printf(sc->ifp, "can't init TX DMA engine\n"); 1796 return (error); 1797 } 1798 1799 /* 1800 * Install TX descriptor ring 1801 */ 1802 CSR_WRITE_4(sc, ET_TX_RING_HI, ET_ADDR_HI(tx_ring->tr_paddr)); 1803 CSR_WRITE_4(sc, ET_TX_RING_LO, ET_ADDR_LO(tx_ring->tr_paddr)); 1804 CSR_WRITE_4(sc, ET_TX_RING_CNT, ET_TX_NDESC - 1); 1805 1806 /* 1807 * Install TX status 1808 */ 1809 CSR_WRITE_4(sc, ET_TX_STATUS_HI, ET_ADDR_HI(txsd->txsd_paddr)); 1810 CSR_WRITE_4(sc, ET_TX_STATUS_LO, ET_ADDR_LO(txsd->txsd_paddr)); 1811 1812 CSR_WRITE_4(sc, ET_TX_READY_POS, 0); 1813 1814 /* Match ET_TX_READY_POS */ 1815 tx_ring->tr_ready_index = 0; 1816 tx_ring->tr_ready_wrap = 0; 1817 1818 return (0); 1819 } 1820 1821 static void 1822 et_init_mac(struct et_softc *sc) 1823 { 1824 struct ifnet *ifp = sc->ifp; 1825 const uint8_t *eaddr = IF_LLADDR(ifp); 1826 uint32_t val; 1827 1828 /* Reset MAC */ 1829 CSR_WRITE_4(sc, ET_MAC_CFG1, 1830 ET_MAC_CFG1_RST_TXFUNC | ET_MAC_CFG1_RST_RXFUNC | 1831 ET_MAC_CFG1_RST_TXMC | ET_MAC_CFG1_RST_RXMC | 1832 ET_MAC_CFG1_SIM_RST | ET_MAC_CFG1_SOFT_RST); 1833 1834 /* 1835 * Setup inter packet gap 1836 */ 1837 val = (56 << ET_IPG_NONB2B_1_SHIFT) | 1838 (88 << ET_IPG_NONB2B_2_SHIFT) | 1839 (80 << ET_IPG_MINIFG_SHIFT) | 1840 (96 << ET_IPG_B2B_SHIFT); 1841 CSR_WRITE_4(sc, ET_IPG, val); 1842 1843 /* 1844 * Setup half duplex mode 1845 */ 1846 val = (10 << ET_MAC_HDX_ALT_BEB_TRUNC_SHIFT) | 1847 (15 << ET_MAC_HDX_REXMIT_MAX_SHIFT) | 1848 (55 << ET_MAC_HDX_COLLWIN_SHIFT) | 1849 ET_MAC_HDX_EXC_DEFER; 1850 CSR_WRITE_4(sc, ET_MAC_HDX, val); 1851 1852 /* Clear MAC control */ 1853 CSR_WRITE_4(sc, ET_MAC_CTRL, 0); 1854 1855 /* Reset MII 
*/ 1856 CSR_WRITE_4(sc, ET_MII_CFG, ET_MII_CFG_CLKRST); 1857 1858 /* 1859 * Set MAC address 1860 */ 1861 val = eaddr[2] | (eaddr[3] << 8) | (eaddr[4] << 16) | (eaddr[5] << 24); 1862 CSR_WRITE_4(sc, ET_MAC_ADDR1, val); 1863 val = (eaddr[0] << 16) | (eaddr[1] << 24); 1864 CSR_WRITE_4(sc, ET_MAC_ADDR2, val); 1865 1866 /* Set max frame length */ 1867 CSR_WRITE_4(sc, ET_MAX_FRMLEN, ET_FRAMELEN(ifp->if_mtu)); 1868 1869 /* Bring MAC out of reset state */ 1870 CSR_WRITE_4(sc, ET_MAC_CFG1, 0); 1871 } 1872 1873 static void 1874 et_init_rxmac(struct et_softc *sc) 1875 { 1876 struct ifnet *ifp = sc->ifp; 1877 const uint8_t *eaddr = IF_LLADDR(ifp); 1878 uint32_t val; 1879 int i; 1880 1881 /* Disable RX MAC and WOL */ 1882 CSR_WRITE_4(sc, ET_RXMAC_CTRL, ET_RXMAC_CTRL_WOL_DISABLE); 1883 1884 /* 1885 * Clear all WOL related registers 1886 */ 1887 for (i = 0; i < 3; ++i) 1888 CSR_WRITE_4(sc, ET_WOL_CRC + (i * 4), 0); 1889 for (i = 0; i < 20; ++i) 1890 CSR_WRITE_4(sc, ET_WOL_MASK + (i * 4), 0); 1891 1892 /* 1893 * Set WOL source address. XXX is this necessary? 1894 */ 1895 val = (eaddr[2] << 24) | (eaddr[3] << 16) | (eaddr[4] << 8) | eaddr[5]; 1896 CSR_WRITE_4(sc, ET_WOL_SA_LO, val); 1897 val = (eaddr[0] << 8) | eaddr[1]; 1898 CSR_WRITE_4(sc, ET_WOL_SA_HI, val); 1899 1900 /* Clear packet filters */ 1901 CSR_WRITE_4(sc, ET_PKTFILT, 0); 1902 1903 /* No ucast filtering */ 1904 CSR_WRITE_4(sc, ET_UCAST_FILTADDR1, 0); 1905 CSR_WRITE_4(sc, ET_UCAST_FILTADDR2, 0); 1906 CSR_WRITE_4(sc, ET_UCAST_FILTADDR3, 0); 1907 1908 if (ET_FRAMELEN(ifp->if_mtu) > ET_RXMAC_CUT_THRU_FRMLEN) { 1909 /* 1910 * In order to transmit jumbo packets greater than 1911 * ET_RXMAC_CUT_THRU_FRMLEN bytes, the FIFO between 1912 * RX MAC and RX DMA needs to be reduced in size to 1913 * (ET_MEM_SIZE - ET_MEM_TXSIZE_EX - framelen). In 1914 * order to implement this, we must use "cut through" 1915 * mode in the RX MAC, which chops packets down into 1916 * segments. In this case we selected 256 bytes, 1917 * since this is the size of the PCI-Express TLP's 1918 * that the ET1310 uses. 1919 */ 1920 val = (ET_RXMAC_SEGSZ(256) & ET_RXMAC_MC_SEGSZ_MAX_MASK) | 1921 ET_RXMAC_MC_SEGSZ_ENABLE; 1922 } else { 1923 val = 0; 1924 } 1925 CSR_WRITE_4(sc, ET_RXMAC_MC_SEGSZ, val); 1926 1927 CSR_WRITE_4(sc, ET_RXMAC_MC_WATERMARK, 0); 1928 1929 /* Initialize RX MAC management register */ 1930 CSR_WRITE_4(sc, ET_RXMAC_MGT, 0); 1931 1932 CSR_WRITE_4(sc, ET_RXMAC_SPACE_AVL, 0); 1933 1934 CSR_WRITE_4(sc, ET_RXMAC_MGT, 1935 ET_RXMAC_MGT_PASS_ECRC | 1936 ET_RXMAC_MGT_PASS_ELEN | 1937 ET_RXMAC_MGT_PASS_ETRUNC | 1938 ET_RXMAC_MGT_CHECK_PKT); 1939 1940 /* 1941 * Configure runt filtering (may not work on certain chip generation) 1942 */ 1943 val = (ETHER_MIN_LEN << ET_PKTFILT_MINLEN_SHIFT) & 1944 ET_PKTFILT_MINLEN_MASK; 1945 val |= ET_PKTFILT_FRAG; 1946 CSR_WRITE_4(sc, ET_PKTFILT, val); 1947 1948 /* Enable RX MAC but leave WOL disabled */ 1949 CSR_WRITE_4(sc, ET_RXMAC_CTRL, 1950 ET_RXMAC_CTRL_WOL_DISABLE | ET_RXMAC_CTRL_ENABLE); 1951 1952 /* 1953 * Setup multicast hash and allmulti/promisc mode 1954 */ 1955 et_setmulti(sc); 1956 } 1957 1958 static void 1959 et_init_txmac(struct et_softc *sc) 1960 { 1961 /* Disable TX MAC and FC(?) */ 1962 CSR_WRITE_4(sc, ET_TXMAC_CTRL, ET_TXMAC_CTRL_FC_DISABLE); 1963 1964 /* 1965 * Initialize pause time. 1966 * This register should be set before XON/XOFF frame is 1967 * sent by driver. 1968 */ 1969 CSR_WRITE_4(sc, ET_TXMAC_FLOWCTRL, 0 << ET_TXMAC_FLOWCTRL_CFPT_SHIFT); 1970 1971 /* Enable TX MAC but leave FC(?) 
diabled */ 1972 CSR_WRITE_4(sc, ET_TXMAC_CTRL, 1973 ET_TXMAC_CTRL_ENABLE | ET_TXMAC_CTRL_FC_DISABLE); 1974 } 1975 1976 static int 1977 et_start_rxdma(struct et_softc *sc) 1978 { 1979 uint32_t val = 0; 1980 1981 val |= (sc->sc_rx_data[0].rbd_bufsize & ET_RXDMA_CTRL_RING0_SIZE_MASK) | 1982 ET_RXDMA_CTRL_RING0_ENABLE; 1983 val |= (sc->sc_rx_data[1].rbd_bufsize & ET_RXDMA_CTRL_RING1_SIZE_MASK) | 1984 ET_RXDMA_CTRL_RING1_ENABLE; 1985 1986 CSR_WRITE_4(sc, ET_RXDMA_CTRL, val); 1987 1988 DELAY(5); 1989 1990 if (CSR_READ_4(sc, ET_RXDMA_CTRL) & ET_RXDMA_CTRL_HALTED) { 1991 if_printf(sc->ifp, "can't start RX DMA engine\n"); 1992 return (ETIMEDOUT); 1993 } 1994 return (0); 1995 } 1996 1997 static int 1998 et_start_txdma(struct et_softc *sc) 1999 { 2000 CSR_WRITE_4(sc, ET_TXDMA_CTRL, ET_TXDMA_CTRL_SINGLE_EPKT); 2001 return (0); 2002 } 2003 2004 static void 2005 et_rxeof(struct et_softc *sc) 2006 { 2007 struct et_rxstatus_data *rxsd; 2008 struct et_rxstat_ring *rxst_ring; 2009 struct et_rxbuf_data *rbd; 2010 struct et_rxdesc_ring *rx_ring; 2011 struct et_rxstat *st; 2012 struct ifnet *ifp; 2013 struct mbuf *m; 2014 uint32_t rxstat_pos, rxring_pos; 2015 uint32_t rxst_info1, rxst_info2, rxs_stat_ring; 2016 int buflen, buf_idx, npost[2], ring_idx; 2017 int rxst_index, rxst_wrap; 2018 2019 ET_LOCK_ASSERT(sc); 2020 2021 ifp = sc->ifp; 2022 rxsd = &sc->sc_rx_status; 2023 rxst_ring = &sc->sc_rxstat_ring; 2024 2025 if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0) 2026 return; 2027 2028 bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap, 2029 BUS_DMASYNC_POSTREAD); 2030 bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap, 2031 BUS_DMASYNC_POSTREAD); 2032 2033 npost[0] = npost[1] = 0; 2034 rxs_stat_ring = le32toh(rxsd->rxsd_status->rxs_stat_ring); 2035 rxst_wrap = (rxs_stat_ring & ET_RXS_STATRING_WRAP) ? 1 : 0; 2036 rxst_index = (rxs_stat_ring & ET_RXS_STATRING_INDEX_MASK) >> 2037 ET_RXS_STATRING_INDEX_SHIFT; 2038 2039 while (rxst_index != rxst_ring->rsr_index || 2040 rxst_wrap != rxst_ring->rsr_wrap) { 2041 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 2042 break; 2043 2044 MPASS(rxst_ring->rsr_index < ET_RX_NSTAT); 2045 st = &rxst_ring->rsr_stat[rxst_ring->rsr_index]; 2046 rxst_info1 = le32toh(st->rxst_info1); 2047 rxst_info2 = le32toh(st->rxst_info2); 2048 buflen = (rxst_info2 & ET_RXST_INFO2_LEN_MASK) >> 2049 ET_RXST_INFO2_LEN_SHIFT; 2050 buf_idx = (rxst_info2 & ET_RXST_INFO2_BUFIDX_MASK) >> 2051 ET_RXST_INFO2_BUFIDX_SHIFT; 2052 ring_idx = (rxst_info2 & ET_RXST_INFO2_RINGIDX_MASK) >> 2053 ET_RXST_INFO2_RINGIDX_SHIFT; 2054 2055 if (++rxst_ring->rsr_index == ET_RX_NSTAT) { 2056 rxst_ring->rsr_index = 0; 2057 rxst_ring->rsr_wrap ^= 1; 2058 } 2059 rxstat_pos = rxst_ring->rsr_index & ET_RXSTAT_POS_INDEX_MASK; 2060 if (rxst_ring->rsr_wrap) 2061 rxstat_pos |= ET_RXSTAT_POS_WRAP; 2062 CSR_WRITE_4(sc, ET_RXSTAT_POS, rxstat_pos); 2063 2064 if (ring_idx >= ET_RX_NRING) { 2065 ifp->if_ierrors++; 2066 if_printf(ifp, "invalid ring index %d\n", ring_idx); 2067 continue; 2068 } 2069 if (buf_idx >= ET_RX_NDESC) { 2070 ifp->if_ierrors++; 2071 if_printf(ifp, "invalid buf index %d\n", buf_idx); 2072 continue; 2073 } 2074 2075 rbd = &sc->sc_rx_data[ring_idx]; 2076 m = rbd->rbd_buf[buf_idx].rb_mbuf; 2077 if ((rxst_info1 & ET_RXST_INFO1_OK) == 0){ 2078 /* Discard errored frame. */ 2079 rbd->rbd_discard(rbd, buf_idx); 2080 } else if (rbd->rbd_newbuf(rbd, buf_idx) != 0) { 2081 /* No available mbufs, discard it. 
*/ 2082 ifp->if_iqdrops++; 2083 rbd->rbd_discard(rbd, buf_idx); 2084 } else { 2085 buflen -= ETHER_CRC_LEN; 2086 if (buflen < ETHER_HDR_LEN) { 2087 m_freem(m); 2088 ifp->if_ierrors++; 2089 } else { 2090 m->m_pkthdr.len = m->m_len = buflen; 2091 m->m_pkthdr.rcvif = ifp; 2092 ET_UNLOCK(sc); 2093 ifp->if_input(ifp, m); 2094 ET_LOCK(sc); 2095 } 2096 } 2097 2098 rx_ring = &sc->sc_rx_ring[ring_idx]; 2099 if (buf_idx != rx_ring->rr_index) { 2100 if_printf(ifp, 2101 "WARNING!! ring %d, buf_idx %d, rr_idx %d\n", 2102 ring_idx, buf_idx, rx_ring->rr_index); 2103 } 2104 2105 MPASS(rx_ring->rr_index < ET_RX_NDESC); 2106 if (++rx_ring->rr_index == ET_RX_NDESC) { 2107 rx_ring->rr_index = 0; 2108 rx_ring->rr_wrap ^= 1; 2109 } 2110 rxring_pos = rx_ring->rr_index & ET_RX_RING_POS_INDEX_MASK; 2111 if (rx_ring->rr_wrap) 2112 rxring_pos |= ET_RX_RING_POS_WRAP; 2113 CSR_WRITE_4(sc, rx_ring->rr_posreg, rxring_pos); 2114 } 2115 2116 bus_dmamap_sync(rxsd->rxsd_dtag, rxsd->rxsd_dmap, 2117 BUS_DMASYNC_PREREAD); 2118 bus_dmamap_sync(rxst_ring->rsr_dtag, rxst_ring->rsr_dmap, 2119 BUS_DMASYNC_PREREAD); 2120 } 2121 2122 static int 2123 et_encap(struct et_softc *sc, struct mbuf **m0) 2124 { 2125 struct et_txdesc_ring *tx_ring; 2126 struct et_txbuf_data *tbd; 2127 struct et_txdesc *td; 2128 struct mbuf *m; 2129 bus_dma_segment_t segs[ET_NSEG_MAX]; 2130 bus_dmamap_t map; 2131 uint32_t csum_flags, last_td_ctrl2; 2132 int error, i, idx, first_idx, last_idx, nsegs; 2133 2134 tx_ring = &sc->sc_tx_ring; 2135 MPASS(tx_ring->tr_ready_index < ET_TX_NDESC); 2136 tbd = &sc->sc_tx_data; 2137 first_idx = tx_ring->tr_ready_index; 2138 map = tbd->tbd_buf[first_idx].tb_dmap; 2139 2140 error = bus_dmamap_load_mbuf_sg(sc->sc_tx_tag, map, *m0, segs, &nsegs, 2141 0); 2142 if (error == EFBIG) { 2143 m = m_collapse(*m0, M_DONTWAIT, ET_NSEG_MAX); 2144 if (m == NULL) { 2145 m_freem(*m0); 2146 *m0 = NULL; 2147 return (ENOMEM); 2148 } 2149 *m0 = m; 2150 error = bus_dmamap_load_mbuf_sg(sc->sc_tx_tag, map, *m0, segs, 2151 &nsegs, 0); 2152 if (error != 0) { 2153 m_freem(*m0); 2154 *m0 = NULL; 2155 return (error); 2156 } 2157 } else if (error != 0) 2158 return (error); 2159 2160 /* Check for descriptor overruns. 
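If the frame would not leave at least one descriptor spare, unload the DMA map and return ENOBUFS.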
*/ 2161 if (tbd->tbd_used + nsegs > ET_TX_NDESC - 1) { 2162 bus_dmamap_unload(sc->sc_tx_tag, map); 2163 return (ENOBUFS); 2164 } 2165 bus_dmamap_sync(sc->sc_tx_tag, map, BUS_DMASYNC_PREWRITE); 2166 2167 last_td_ctrl2 = ET_TDCTRL2_LAST_FRAG; 2168 sc->sc_tx += nsegs; 2169 if (sc->sc_tx / sc->sc_tx_intr_nsegs != sc->sc_tx_intr) { 2170 sc->sc_tx_intr = sc->sc_tx / sc->sc_tx_intr_nsegs; 2171 last_td_ctrl2 |= ET_TDCTRL2_INTR; 2172 } 2173 2174 m = *m0; 2175 csum_flags = 0; 2176 if ((m->m_pkthdr.csum_flags & ET_CSUM_FEATURES) != 0) { 2177 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) 2178 csum_flags |= ET_TDCTRL2_CSUM_IP; 2179 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) 2180 csum_flags |= ET_TDCTRL2_CSUM_UDP; 2181 else if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0) 2182 csum_flags |= ET_TDCTRL2_CSUM_TCP; 2183 } 2184 last_idx = -1; 2185 for (i = 0; i < nsegs; ++i) { 2186 idx = (first_idx + i) % ET_TX_NDESC; 2187 td = &tx_ring->tr_desc[idx]; 2188 td->td_addr_hi = htole32(ET_ADDR_HI(segs[i].ds_addr)); 2189 td->td_addr_lo = htole32(ET_ADDR_LO(segs[i].ds_addr)); 2190 td->td_ctrl1 = htole32(segs[i].ds_len & ET_TDCTRL1_LEN_MASK); 2191 if (i == nsegs - 1) { 2192 /* Last frag */ 2193 td->td_ctrl2 = htole32(last_td_ctrl2 | csum_flags); 2194 last_idx = idx; 2195 } else 2196 td->td_ctrl2 = htole32(csum_flags); 2197 2198 MPASS(tx_ring->tr_ready_index < ET_TX_NDESC); 2199 if (++tx_ring->tr_ready_index == ET_TX_NDESC) { 2200 tx_ring->tr_ready_index = 0; 2201 tx_ring->tr_ready_wrap ^= 1; 2202 } 2203 } 2204 td = &tx_ring->tr_desc[first_idx]; 2205 /* First frag */ 2206 td->td_ctrl2 |= htole32(ET_TDCTRL2_FIRST_FRAG); 2207 2208 MPASS(last_idx >= 0); 2209 tbd->tbd_buf[first_idx].tb_dmap = tbd->tbd_buf[last_idx].tb_dmap; 2210 tbd->tbd_buf[last_idx].tb_dmap = map; 2211 tbd->tbd_buf[last_idx].tb_mbuf = m; 2212 2213 tbd->tbd_used += nsegs; 2214 MPASS(tbd->tbd_used <= ET_TX_NDESC); 2215 2216 return (0); 2217 } 2218 2219 static void 2220 et_txeof(struct et_softc *sc) 2221 { 2222 struct et_txdesc_ring *tx_ring; 2223 struct et_txbuf_data *tbd; 2224 struct et_txbuf *tb; 2225 struct ifnet *ifp; 2226 uint32_t tx_done; 2227 int end, wrap; 2228 2229 ET_LOCK_ASSERT(sc); 2230 2231 ifp = sc->ifp; 2232 tx_ring = &sc->sc_tx_ring; 2233 tbd = &sc->sc_tx_data; 2234 2235 if ((sc->sc_flags & ET_FLAG_TXRX_ENABLED) == 0) 2236 return; 2237 2238 if (tbd->tbd_used == 0) 2239 return; 2240 2241 bus_dmamap_sync(tx_ring->tr_dtag, tx_ring->tr_dmap, 2242 BUS_DMASYNC_POSTWRITE); 2243 2244 tx_done = CSR_READ_4(sc, ET_TX_DONE_POS); 2245 end = tx_done & ET_TX_DONE_POS_INDEX_MASK; 2246 wrap = (tx_done & ET_TX_DONE_POS_WRAP) ? 
1 : 0; 2247 2248 while (tbd->tbd_start_index != end || tbd->tbd_start_wrap != wrap) { 2249 MPASS(tbd->tbd_start_index < ET_TX_NDESC); 2250 tb = &tbd->tbd_buf[tbd->tbd_start_index]; 2251 if (tb->tb_mbuf != NULL) { 2252 bus_dmamap_sync(sc->sc_tx_tag, tb->tb_dmap, 2253 BUS_DMASYNC_POSTWRITE); 2254 bus_dmamap_unload(sc->sc_tx_tag, tb->tb_dmap); 2255 m_freem(tb->tb_mbuf); 2256 tb->tb_mbuf = NULL; 2257 } 2258 2259 if (++tbd->tbd_start_index == ET_TX_NDESC) { 2260 tbd->tbd_start_index = 0; 2261 tbd->tbd_start_wrap ^= 1; 2262 } 2263 2264 MPASS(tbd->tbd_used > 0); 2265 tbd->tbd_used--; 2266 } 2267 2268 if (tbd->tbd_used == 0) 2269 sc->watchdog_timer = 0; 2270 if (tbd->tbd_used + ET_NSEG_SPARE < ET_TX_NDESC) 2271 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2272 } 2273 2274 static void 2275 et_tick(void *xsc) 2276 { 2277 struct et_softc *sc = xsc; 2278 struct ifnet *ifp; 2279 struct mii_data *mii; 2280 2281 ET_LOCK_ASSERT(sc); 2282 ifp = sc->ifp; 2283 mii = device_get_softc(sc->sc_miibus); 2284 2285 mii_tick(mii); 2286 et_stats_update(sc); 2287 if (et_watchdog(sc) == EJUSTRETURN) 2288 return; 2289 callout_reset(&sc->sc_tick, hz, et_tick, sc); 2290 } 2291 2292 static int 2293 et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx) 2294 { 2295 struct et_softc *sc; 2296 struct et_rxdesc *desc; 2297 struct et_rxbuf *rb; 2298 struct mbuf *m; 2299 bus_dma_segment_t segs[1]; 2300 bus_dmamap_t dmap; 2301 int nsegs; 2302 2303 MPASS(buf_idx < ET_RX_NDESC); 2304 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 2305 if (m == NULL) 2306 return (ENOBUFS); 2307 m->m_len = m->m_pkthdr.len = MCLBYTES; 2308 m_adj(m, ETHER_ALIGN); 2309 2310 sc = rbd->rbd_softc; 2311 rb = &rbd->rbd_buf[buf_idx]; 2312 2313 if (bus_dmamap_load_mbuf_sg(sc->sc_rx_tag, sc->sc_rx_sparemap, m, 2314 segs, &nsegs, 0) != 0) { 2315 m_freem(m); 2316 return (ENOBUFS); 2317 } 2318 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 2319 2320 if (rb->rb_mbuf != NULL) { 2321 bus_dmamap_sync(sc->sc_rx_tag, rb->rb_dmap, 2322 BUS_DMASYNC_POSTREAD); 2323 bus_dmamap_unload(sc->sc_rx_tag, rb->rb_dmap); 2324 } 2325 dmap = rb->rb_dmap; 2326 rb->rb_dmap = sc->sc_rx_sparemap; 2327 sc->sc_rx_sparemap = dmap; 2328 bus_dmamap_sync(sc->sc_rx_tag, rb->rb_dmap, BUS_DMASYNC_PREREAD); 2329 2330 rb->rb_mbuf = m; 2331 desc = &rbd->rbd_ring->rr_desc[buf_idx]; 2332 desc->rd_addr_hi = htole32(ET_ADDR_HI(segs[0].ds_addr)); 2333 desc->rd_addr_lo = htole32(ET_ADDR_LO(segs[0].ds_addr)); 2334 desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK); 2335 bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap, 2336 BUS_DMASYNC_PREWRITE); 2337 return (0); 2338 } 2339 2340 static void 2341 et_rxbuf_discard(struct et_rxbuf_data *rbd, int buf_idx) 2342 { 2343 struct et_rxdesc *desc; 2344 2345 desc = &rbd->rbd_ring->rr_desc[buf_idx]; 2346 desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK); 2347 bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap, 2348 BUS_DMASYNC_PREWRITE); 2349 } 2350 2351 static int 2352 et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx) 2353 { 2354 struct et_softc *sc; 2355 struct et_rxdesc *desc; 2356 struct et_rxbuf *rb; 2357 struct mbuf *m; 2358 bus_dma_segment_t segs[1]; 2359 bus_dmamap_t dmap; 2360 int nsegs; 2361 2362 MPASS(buf_idx < ET_RX_NDESC); 2363 MGETHDR(m, M_DONTWAIT, MT_DATA); 2364 if (m == NULL) 2365 return (ENOBUFS); 2366 m->m_len = m->m_pkthdr.len = MHLEN; 2367 m_adj(m, ETHER_ALIGN); 2368 2369 sc = rbd->rbd_softc; 2370 rb = &rbd->rbd_buf[buf_idx]; 2371 2372 if (bus_dmamap_load_mbuf_sg(sc->sc_rx_mini_tag, 
sc->sc_rx_mini_sparemap, 2373 m, segs, &nsegs, 0) != 0) { 2374 m_freem(m); 2375 return (ENOBUFS); 2376 } 2377 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 2378 2379 if (rb->rb_mbuf != NULL) { 2380 bus_dmamap_sync(sc->sc_rx_mini_tag, rb->rb_dmap, 2381 BUS_DMASYNC_POSTREAD); 2382 bus_dmamap_unload(sc->sc_rx_mini_tag, rb->rb_dmap); 2383 } 2384 dmap = rb->rb_dmap; 2385 rb->rb_dmap = sc->sc_rx_mini_sparemap; 2386 sc->sc_rx_mini_sparemap = dmap; 2387 bus_dmamap_sync(sc->sc_rx_mini_tag, rb->rb_dmap, BUS_DMASYNC_PREREAD); 2388 2389 rb->rb_mbuf = m; 2390 desc = &rbd->rbd_ring->rr_desc[buf_idx]; 2391 desc->rd_addr_hi = htole32(ET_ADDR_HI(segs[0].ds_addr)); 2392 desc->rd_addr_lo = htole32(ET_ADDR_LO(segs[0].ds_addr)); 2393 desc->rd_ctrl = htole32(buf_idx & ET_RDCTRL_BUFIDX_MASK); 2394 bus_dmamap_sync(rbd->rbd_ring->rr_dtag, rbd->rbd_ring->rr_dmap, 2395 BUS_DMASYNC_PREWRITE); 2396 return (0); 2397 } 2398 2399 #define ET_SYSCTL_STAT_ADD32(c, h, n, p, d) \ 2400 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) 2401 #define ET_SYSCTL_STAT_ADD64(c, h, n, p, d) \ 2402 SYSCTL_ADD_UQUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d) 2403 2404 /* 2405 * Create sysctl tree 2406 */ 2407 static void 2408 et_add_sysctls(struct et_softc * sc) 2409 { 2410 struct sysctl_ctx_list *ctx; 2411 struct sysctl_oid_list *children, *parent; 2412 struct sysctl_oid *tree; 2413 struct et_hw_stats *stats; 2414 2415 ctx = device_get_sysctl_ctx(sc->dev); 2416 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); 2417 2418 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_npkts", 2419 CTLTYPE_INT | CTLFLAG_RW, sc, 0, et_sysctl_rx_intr_npkts, "I", 2420 "RX IM, # packets per RX interrupt"); 2421 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_intr_delay", 2422 CTLTYPE_INT | CTLFLAG_RW, sc, 0, et_sysctl_rx_intr_delay, "I", 2423 "RX IM, RX interrupt delay (x10 usec)"); 2424 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_intr_nsegs", 2425 CTLFLAG_RW, &sc->sc_tx_intr_nsegs, 0, 2426 "TX IM, # segments per TX interrupt"); 2427 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "timer", 2428 CTLFLAG_RW, &sc->sc_timer, 0, "TX timer"); 2429 2430 tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD, 2431 NULL, "ET statistics"); 2432 parent = SYSCTL_CHILDREN(tree); 2433 2434 /* TX/RX statistics. */ 2435 stats = &sc->sc_stats; 2436 ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_64", &stats->pkts_64, 2437 "0 to 64 bytes frames"); 2438 ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_65_127", &stats->pkts_65, 2439 "65 to 127 bytes frames"); 2440 ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_128_255", &stats->pkts_128, 2441 "128 to 255 bytes frames"); 2442 ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_256_511", &stats->pkts_256, 2443 "256 to 511 bytes frames"); 2444 ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_512_1023", &stats->pkts_512, 2445 "512 to 1023 bytes frames"); 2446 ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_1024_1518", &stats->pkts_1024, 2447 "1024 to 1518 bytes frames"); 2448 ET_SYSCTL_STAT_ADD64(ctx, parent, "frames_1519_1522", &stats->pkts_1519, 2449 "1519 to 1522 bytes frames"); 2450 2451 /* RX statistics. 
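These nodes export the RX MAC counters accumulated in et_stats_update().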
*/ 2452 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD, 2453 NULL, "RX MAC statistics"); 2454 children = SYSCTL_CHILDREN(tree); 2455 ET_SYSCTL_STAT_ADD64(ctx, children, "bytes", 2456 &stats->rx_bytes, "Good bytes"); 2457 ET_SYSCTL_STAT_ADD64(ctx, children, "frames", 2458 &stats->rx_frames, "Good frames"); 2459 ET_SYSCTL_STAT_ADD32(ctx, children, "crc_errs", 2460 &stats->rx_crcerrs, "CRC errors"); 2461 ET_SYSCTL_STAT_ADD64(ctx, children, "mcast_frames", 2462 &stats->rx_mcast, "Multicast frames"); 2463 ET_SYSCTL_STAT_ADD64(ctx, children, "bcast_frames", 2464 &stats->rx_bcast, "Broadcast frames"); 2465 ET_SYSCTL_STAT_ADD32(ctx, children, "control", 2466 &stats->rx_control, "Control frames"); 2467 ET_SYSCTL_STAT_ADD32(ctx, children, "pause", 2468 &stats->rx_pause, "Pause frames"); 2469 ET_SYSCTL_STAT_ADD32(ctx, children, "unknown_control", 2470 &stats->rx_unknown_control, "Unknown control frames"); 2471 ET_SYSCTL_STAT_ADD32(ctx, children, "align_errs", 2472 &stats->rx_alignerrs, "Alignment errors"); 2473 ET_SYSCTL_STAT_ADD32(ctx, children, "len_errs", 2474 &stats->rx_lenerrs, "Frames with length mismatched"); 2475 ET_SYSCTL_STAT_ADD32(ctx, children, "code_errs", 2476 &stats->rx_codeerrs, "Frames with code error"); 2477 ET_SYSCTL_STAT_ADD32(ctx, children, "cs_errs", 2478 &stats->rx_cserrs, "Frames with carrier sense error"); 2479 ET_SYSCTL_STAT_ADD32(ctx, children, "runts", 2480 &stats->rx_runts, "Too short frames"); 2481 ET_SYSCTL_STAT_ADD64(ctx, children, "oversize", 2482 &stats->rx_oversize, "Oversized frames"); 2483 ET_SYSCTL_STAT_ADD32(ctx, children, "fragments", 2484 &stats->rx_fragments, "Fragmented frames"); 2485 ET_SYSCTL_STAT_ADD32(ctx, children, "jabbers", 2486 &stats->rx_jabbers, "Frames with jabber error"); 2487 ET_SYSCTL_STAT_ADD32(ctx, children, "drop", 2488 &stats->rx_drop, "Dropped frames"); 2489 2490 /* TX statistics. 
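These nodes export the TX MAC counters accumulated in et_stats_update().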
*/ 2491 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD, 2492 NULL, "TX MAC statistics"); 2493 children = SYSCTL_CHILDREN(tree); 2494 ET_SYSCTL_STAT_ADD64(ctx, children, "bytes", 2495 &stats->tx_bytes, "Good bytes"); 2496 ET_SYSCTL_STAT_ADD64(ctx, children, "frames", 2497 &stats->tx_frames, "Good frames"); 2498 ET_SYSCTL_STAT_ADD64(ctx, children, "mcast_frames", 2499 &stats->tx_mcast, "Multicast frames"); 2500 ET_SYSCTL_STAT_ADD64(ctx, children, "bcast_frames", 2501 &stats->tx_bcast, "Broadcast frames"); 2502 ET_SYSCTL_STAT_ADD32(ctx, children, "pause", 2503 &stats->tx_pause, "Pause frames"); 2504 ET_SYSCTL_STAT_ADD32(ctx, children, "deferred", 2505 &stats->tx_deferred, "Deferred frames"); 2506 ET_SYSCTL_STAT_ADD32(ctx, children, "excess_deferred", 2507 &stats->tx_excess_deferred, "Excessively deferred frames"); 2508 ET_SYSCTL_STAT_ADD32(ctx, children, "single_colls", 2509 &stats->tx_single_colls, "Single collisions"); 2510 ET_SYSCTL_STAT_ADD32(ctx, children, "multi_colls", 2511 &stats->tx_multi_colls, "Multiple collisions"); 2512 ET_SYSCTL_STAT_ADD32(ctx, children, "late_colls", 2513 &stats->tx_late_colls, "Late collisions"); 2514 ET_SYSCTL_STAT_ADD32(ctx, children, "excess_colls", 2515 &stats->tx_excess_colls, "Excess collisions"); 2516 ET_SYSCTL_STAT_ADD32(ctx, children, "total_colls", 2517 &stats->tx_total_colls, "Total collisions"); 2518 ET_SYSCTL_STAT_ADD32(ctx, children, "pause_honored", 2519 &stats->tx_pause_honored, "Honored pause frames"); 2520 ET_SYSCTL_STAT_ADD32(ctx, children, "drop", 2521 &stats->tx_drop, "Dropped frames"); 2522 ET_SYSCTL_STAT_ADD32(ctx, children, "jabbers", 2523 &stats->tx_jabbers, "Frames with jabber errors"); 2524 ET_SYSCTL_STAT_ADD32(ctx, children, "crc_errs", 2525 &stats->tx_crcerrs, "Frames with CRC errors"); 2526 ET_SYSCTL_STAT_ADD32(ctx, children, "control", 2527 &stats->tx_control, "Control frames"); 2528 ET_SYSCTL_STAT_ADD64(ctx, children, "oversize", 2529 &stats->tx_oversize, "Oversized frames"); 2530 ET_SYSCTL_STAT_ADD32(ctx, children, "undersize", 2531 &stats->tx_undersize, "Undersized frames"); 2532 ET_SYSCTL_STAT_ADD32(ctx, children, "fragments", 2533 &stats->tx_fragments, "Fragmented frames"); 2534 } 2535 2536 #undef ET_SYSCTL_STAT_ADD32 2537 #undef ET_SYSCTL_STAT_ADD64 2538 2539 static int 2540 et_sysctl_rx_intr_npkts(SYSCTL_HANDLER_ARGS) 2541 { 2542 struct et_softc *sc = arg1; 2543 struct ifnet *ifp = sc->ifp; 2544 int error = 0, v; 2545 2546 v = sc->sc_rx_intr_npkts; 2547 error = sysctl_handle_int(oidp, &v, 0, req); 2548 if (error || req->newptr == NULL) 2549 goto back; 2550 if (v <= 0) { 2551 error = EINVAL; 2552 goto back; 2553 } 2554 2555 if (sc->sc_rx_intr_npkts != v) { 2556 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2557 CSR_WRITE_4(sc, ET_RX_INTR_NPKTS, v); 2558 sc->sc_rx_intr_npkts = v; 2559 } 2560 back: 2561 return (error); 2562 } 2563 2564 static int 2565 et_sysctl_rx_intr_delay(SYSCTL_HANDLER_ARGS) 2566 { 2567 struct et_softc *sc = arg1; 2568 struct ifnet *ifp = sc->ifp; 2569 int error = 0, v; 2570 2571 v = sc->sc_rx_intr_delay; 2572 error = sysctl_handle_int(oidp, &v, 0, req); 2573 if (error || req->newptr == NULL) 2574 goto back; 2575 if (v <= 0) { 2576 error = EINVAL; 2577 goto back; 2578 } 2579 2580 if (sc->sc_rx_intr_delay != v) { 2581 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2582 CSR_WRITE_4(sc, ET_RX_INTR_DELAY, v); 2583 sc->sc_rx_intr_delay = v; 2584 } 2585 back: 2586 return (error); 2587 } 2588 2589 static void 2590 et_stats_update(struct et_softc *sc) 2591 { 2592 struct ifnet *ifp; 2593 struct et_hw_stats 
*stats; 2594 2595 stats = &sc->sc_stats; 2596 stats->pkts_64 += CSR_READ_4(sc, ET_STAT_PKTS_64); 2597 stats->pkts_65 += CSR_READ_4(sc, ET_STAT_PKTS_65_127); 2598 stats->pkts_128 += CSR_READ_4(sc, ET_STAT_PKTS_128_255); 2599 stats->pkts_256 += CSR_READ_4(sc, ET_STAT_PKTS_256_511); 2600 stats->pkts_512 += CSR_READ_4(sc, ET_STAT_PKTS_512_1023); 2601 stats->pkts_1024 += CSR_READ_4(sc, ET_STAT_PKTS_1024_1518); 2602 stats->pkts_1519 += CSR_READ_4(sc, ET_STAT_PKTS_1519_1522); 2603 2604 stats->rx_bytes += CSR_READ_4(sc, ET_STAT_RX_BYTES); 2605 stats->rx_frames += CSR_READ_4(sc, ET_STAT_RX_FRAMES); 2606 stats->rx_crcerrs += CSR_READ_4(sc, ET_STAT_RX_CRC_ERR); 2607 stats->rx_mcast += CSR_READ_4(sc, ET_STAT_RX_MCAST); 2608 stats->rx_bcast += CSR_READ_4(sc, ET_STAT_RX_BCAST); 2609 stats->rx_control += CSR_READ_4(sc, ET_STAT_RX_CTL); 2610 stats->rx_pause += CSR_READ_4(sc, ET_STAT_RX_PAUSE); 2611 stats->rx_unknown_control += CSR_READ_4(sc, ET_STAT_RX_UNKNOWN_CTL); 2612 stats->rx_alignerrs += CSR_READ_4(sc, ET_STAT_RX_ALIGN_ERR); 2613 stats->rx_lenerrs += CSR_READ_4(sc, ET_STAT_RX_LEN_ERR); 2614 stats->rx_codeerrs += CSR_READ_4(sc, ET_STAT_RX_CODE_ERR); 2615 stats->rx_cserrs += CSR_READ_4(sc, ET_STAT_RX_CS_ERR); 2616 stats->rx_runts += CSR_READ_4(sc, ET_STAT_RX_RUNT); 2617 stats->rx_oversize += CSR_READ_4(sc, ET_STAT_RX_OVERSIZE); 2618 stats->rx_fragments += CSR_READ_4(sc, ET_STAT_RX_FRAG); 2619 stats->rx_jabbers += CSR_READ_4(sc, ET_STAT_RX_JABBER); 2620 stats->rx_drop += CSR_READ_4(sc, ET_STAT_RX_DROP); 2621 2622 stats->tx_bytes += CSR_READ_4(sc, ET_STAT_TX_BYTES); 2623 stats->tx_frames += CSR_READ_4(sc, ET_STAT_TX_FRAMES); 2624 stats->tx_mcast += CSR_READ_4(sc, ET_STAT_TX_MCAST); 2625 stats->tx_bcast += CSR_READ_4(sc, ET_STAT_TX_BCAST); 2626 stats->tx_pause += CSR_READ_4(sc, ET_STAT_TX_PAUSE); 2627 stats->tx_deferred += CSR_READ_4(sc, ET_STAT_TX_DEFER); 2628 stats->tx_excess_deferred += CSR_READ_4(sc, ET_STAT_TX_EXCESS_DEFER); 2629 stats->tx_single_colls += CSR_READ_4(sc, ET_STAT_TX_SINGLE_COL); 2630 stats->tx_multi_colls += CSR_READ_4(sc, ET_STAT_TX_MULTI_COL); 2631 stats->tx_late_colls += CSR_READ_4(sc, ET_STAT_TX_LATE_COL); 2632 stats->tx_excess_colls += CSR_READ_4(sc, ET_STAT_TX_EXCESS_COL); 2633 stats->tx_total_colls += CSR_READ_4(sc, ET_STAT_TX_TOTAL_COL); 2634 stats->tx_pause_honored += CSR_READ_4(sc, ET_STAT_TX_PAUSE_HONOR); 2635 stats->tx_drop += CSR_READ_4(sc, ET_STAT_TX_DROP); 2636 stats->tx_jabbers += CSR_READ_4(sc, ET_STAT_TX_JABBER); 2637 stats->tx_crcerrs += CSR_READ_4(sc, ET_STAT_TX_CRC_ERR); 2638 stats->tx_control += CSR_READ_4(sc, ET_STAT_TX_CTL); 2639 stats->tx_oversize += CSR_READ_4(sc, ET_STAT_TX_OVERSIZE); 2640 stats->tx_undersize += CSR_READ_4(sc, ET_STAT_TX_UNDERSIZE); 2641 stats->tx_fragments += CSR_READ_4(sc, ET_STAT_TX_FRAG); 2642 2643 /* Update ifnet counters. 
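The totals kept in sc_stats are copied into the interface packet, error, and collision counters.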
*/ 2644 ifp = sc->ifp; 2645 ifp->if_opackets = (u_long)stats->tx_frames; 2646 ifp->if_collisions = stats->tx_total_colls; 2647 ifp->if_oerrors = stats->tx_drop + stats->tx_jabbers + 2648 stats->tx_crcerrs + stats->tx_excess_deferred + 2649 stats->tx_late_colls; 2650 ifp->if_ipackets = (u_long)stats->rx_frames; 2651 ifp->if_ierrors = stats->rx_crcerrs + stats->rx_alignerrs + 2652 stats->rx_lenerrs + stats->rx_codeerrs + stats->rx_cserrs + 2653 stats->rx_runts + stats->rx_jabbers + stats->rx_drop; 2654 } 2655 2656 static int 2657 et_suspend(device_t dev) 2658 { 2659 struct et_softc *sc; 2660 uint32_t pmcfg; 2661 2662 sc = device_get_softc(dev); 2663 ET_LOCK(sc); 2664 if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 2665 et_stop(sc); 2666 /* Disable all clocks and put PHY into COMA. */ 2667 pmcfg = CSR_READ_4(sc, ET_PM); 2668 pmcfg &= ~(EM_PM_GIGEPHY_ENB | ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | 2669 ET_PM_RXCLK_GATE); 2670 pmcfg |= ET_PM_PHY_SW_COMA; 2671 CSR_WRITE_4(sc, ET_PM, pmcfg); 2672 ET_UNLOCK(sc); 2673 return (0); 2674 } 2675 2676 static int 2677 et_resume(device_t dev) 2678 { 2679 struct et_softc *sc; 2680 uint32_t pmcfg; 2681 2682 sc = device_get_softc(dev); 2683 ET_LOCK(sc); 2684 /* Take PHY out of COMA and enable clocks. */ 2685 pmcfg = ET_PM_SYSCLK_GATE | ET_PM_TXCLK_GATE | ET_PM_RXCLK_GATE; 2686 if ((sc->sc_flags & ET_FLAG_FASTETHER) == 0) 2687 pmcfg |= EM_PM_GIGEPHY_ENB; 2688 CSR_WRITE_4(sc, ET_PM, pmcfg); 2689 if ((sc->ifp->if_flags & IFF_UP) != 0) 2690 et_init_locked(sc); 2691 ET_UNLOCK(sc); 2692 return (0); 2693 } 2694