/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for Attansic Technology Corp. L1 Gigabit Ethernet. */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>
#include <machine/in_cksum.h>

#include <dev/age/if_agereg.h>
#include <dev/age/if_agevar.h>

/* "device miibus" required. See GENERIC if you get errors here. */
#include "miibus_if.h"

#define AGE_CSUM_FEATURES    (CSUM_TCP | CSUM_UDP)

MODULE_DEPEND(age, pci, 1, 1, 1);
MODULE_DEPEND(age, ether, 1, 1, 1);
MODULE_DEPEND(age, miibus, 1, 1, 1);

/* Tunables. */
static int msi_disable = 0;
static int msix_disable = 0;
TUNABLE_INT("hw.age.msi_disable", &msi_disable);
TUNABLE_INT("hw.age.msix_disable", &msix_disable);

/*
 * Devices supported by this driver.
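 * Only the entries in the age_devs[] table below are matched at probe
 * time; additional L1 variants could be supported by adding their PCI
 * vendor/device ID pairs to that table.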
91 */ 92 static struct age_dev { 93 uint16_t age_vendorid; 94 uint16_t age_deviceid; 95 const char *age_name; 96 } age_devs[] = { 97 { VENDORID_ATTANSIC, DEVICEID_ATTANSIC_L1, 98 "Attansic Technology Corp, L1 Gigabit Ethernet" }, 99 }; 100 101 static int age_miibus_readreg(device_t, int, int); 102 static int age_miibus_writereg(device_t, int, int, int); 103 static void age_miibus_statchg(device_t); 104 static void age_mediastatus(if_t, struct ifmediareq *); 105 static int age_mediachange(if_t); 106 static int age_probe(device_t); 107 static void age_get_macaddr(struct age_softc *); 108 static void age_phy_reset(struct age_softc *); 109 static int age_attach(device_t); 110 static int age_detach(device_t); 111 static void age_sysctl_node(struct age_softc *); 112 static void age_dmamap_cb(void *, bus_dma_segment_t *, int, int); 113 static int age_check_boundary(struct age_softc *); 114 static int age_dma_alloc(struct age_softc *); 115 static void age_dma_free(struct age_softc *); 116 static int age_shutdown(device_t); 117 static void age_setwol(struct age_softc *); 118 static int age_suspend(device_t); 119 static int age_resume(device_t); 120 static int age_encap(struct age_softc *, struct mbuf **); 121 static void age_start(if_t); 122 static void age_start_locked(if_t); 123 static void age_watchdog(struct age_softc *); 124 static int age_ioctl(if_t, u_long, caddr_t); 125 static void age_mac_config(struct age_softc *); 126 static void age_link_task(void *, int); 127 static void age_stats_update(struct age_softc *); 128 static int age_intr(void *); 129 static void age_int_task(void *, int); 130 static void age_txintr(struct age_softc *, int); 131 static void age_rxeof(struct age_softc *sc, struct rx_rdesc *); 132 static int age_rxintr(struct age_softc *, int, int); 133 static void age_tick(void *); 134 static void age_reset(struct age_softc *); 135 static void age_init(void *); 136 static void age_init_locked(struct age_softc *); 137 static void age_stop(struct age_softc *); 138 static void age_stop_txmac(struct age_softc *); 139 static void age_stop_rxmac(struct age_softc *); 140 static void age_init_tx_ring(struct age_softc *); 141 static int age_init_rx_ring(struct age_softc *); 142 static void age_init_rr_ring(struct age_softc *); 143 static void age_init_cmb_block(struct age_softc *); 144 static void age_init_smb_block(struct age_softc *); 145 #ifndef __NO_STRICT_ALIGNMENT 146 static struct mbuf *age_fixup_rx(if_t, struct mbuf *); 147 #endif 148 static int age_newbuf(struct age_softc *, struct age_rxdesc *); 149 static void age_rxvlan(struct age_softc *); 150 static void age_rxfilter(struct age_softc *); 151 static int sysctl_age_stats(SYSCTL_HANDLER_ARGS); 152 static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int); 153 static int sysctl_hw_age_proc_limit(SYSCTL_HANDLER_ARGS); 154 static int sysctl_hw_age_int_mod(SYSCTL_HANDLER_ARGS); 155 156 static device_method_t age_methods[] = { 157 /* Device interface. */ 158 DEVMETHOD(device_probe, age_probe), 159 DEVMETHOD(device_attach, age_attach), 160 DEVMETHOD(device_detach, age_detach), 161 DEVMETHOD(device_shutdown, age_shutdown), 162 DEVMETHOD(device_suspend, age_suspend), 163 DEVMETHOD(device_resume, age_resume), 164 165 /* MII interface. 
*/ 166 DEVMETHOD(miibus_readreg, age_miibus_readreg), 167 DEVMETHOD(miibus_writereg, age_miibus_writereg), 168 DEVMETHOD(miibus_statchg, age_miibus_statchg), 169 { NULL, NULL } 170 }; 171 172 static driver_t age_driver = { 173 "age", 174 age_methods, 175 sizeof(struct age_softc) 176 }; 177 178 DRIVER_MODULE(age, pci, age_driver, 0, 0); 179 MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, age, age_devs, 180 nitems(age_devs)); 181 DRIVER_MODULE(miibus, age, miibus_driver, 0, 0); 182 183 static struct resource_spec age_res_spec_mem[] = { 184 { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE }, 185 { -1, 0, 0 } 186 }; 187 188 static struct resource_spec age_irq_spec_legacy[] = { 189 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, 190 { -1, 0, 0 } 191 }; 192 193 static struct resource_spec age_irq_spec_msi[] = { 194 { SYS_RES_IRQ, 1, RF_ACTIVE }, 195 { -1, 0, 0 } 196 }; 197 198 static struct resource_spec age_irq_spec_msix[] = { 199 { SYS_RES_IRQ, 1, RF_ACTIVE }, 200 { -1, 0, 0 } 201 }; 202 203 /* 204 * Read a PHY register on the MII of the L1. 205 */ 206 static int 207 age_miibus_readreg(device_t dev, int phy, int reg) 208 { 209 struct age_softc *sc; 210 uint32_t v; 211 int i; 212 213 sc = device_get_softc(dev); 214 215 CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ | 216 MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg)); 217 for (i = AGE_PHY_TIMEOUT; i > 0; i--) { 218 DELAY(1); 219 v = CSR_READ_4(sc, AGE_MDIO); 220 if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0) 221 break; 222 } 223 224 if (i == 0) { 225 device_printf(sc->age_dev, "phy read timeout : %d\n", reg); 226 return (0); 227 } 228 229 return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT); 230 } 231 232 /* 233 * Write a PHY register on the MII of the L1. 234 */ 235 static int 236 age_miibus_writereg(device_t dev, int phy, int reg, int val) 237 { 238 struct age_softc *sc; 239 uint32_t v; 240 int i; 241 242 sc = device_get_softc(dev); 243 244 CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE | 245 (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT | 246 MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg)); 247 for (i = AGE_PHY_TIMEOUT; i > 0; i--) { 248 DELAY(1); 249 v = CSR_READ_4(sc, AGE_MDIO); 250 if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0) 251 break; 252 } 253 254 if (i == 0) 255 device_printf(sc->age_dev, "phy write timeout : %d\n", reg); 256 257 return (0); 258 } 259 260 /* 261 * Callback from MII layer when media changes. 262 */ 263 static void 264 age_miibus_statchg(device_t dev) 265 { 266 struct age_softc *sc; 267 268 sc = device_get_softc(dev); 269 taskqueue_enqueue(taskqueue_swi, &sc->age_link_task); 270 } 271 272 /* 273 * Get the current interface media status. 274 */ 275 static void 276 age_mediastatus(if_t ifp, struct ifmediareq *ifmr) 277 { 278 struct age_softc *sc; 279 struct mii_data *mii; 280 281 sc = if_getsoftc(ifp); 282 AGE_LOCK(sc); 283 mii = device_get_softc(sc->age_miibus); 284 285 mii_pollstat(mii); 286 ifmr->ifm_status = mii->mii_media_status; 287 ifmr->ifm_active = mii->mii_media_active; 288 AGE_UNLOCK(sc); 289 } 290 291 /* 292 * Set hardware to newly-selected media. 
293 */ 294 static int 295 age_mediachange(if_t ifp) 296 { 297 struct age_softc *sc; 298 struct mii_data *mii; 299 struct mii_softc *miisc; 300 int error; 301 302 sc = if_getsoftc(ifp); 303 AGE_LOCK(sc); 304 mii = device_get_softc(sc->age_miibus); 305 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 306 PHY_RESET(miisc); 307 error = mii_mediachg(mii); 308 AGE_UNLOCK(sc); 309 310 return (error); 311 } 312 313 static int 314 age_probe(device_t dev) 315 { 316 struct age_dev *sp; 317 int i; 318 uint16_t vendor, devid; 319 320 vendor = pci_get_vendor(dev); 321 devid = pci_get_device(dev); 322 sp = age_devs; 323 for (i = 0; i < nitems(age_devs); i++, sp++) { 324 if (vendor == sp->age_vendorid && 325 devid == sp->age_deviceid) { 326 device_set_desc(dev, sp->age_name); 327 return (BUS_PROBE_DEFAULT); 328 } 329 } 330 331 return (ENXIO); 332 } 333 334 static void 335 age_get_macaddr(struct age_softc *sc) 336 { 337 uint32_t ea[2], reg; 338 int i, vpdc; 339 340 reg = CSR_READ_4(sc, AGE_SPI_CTRL); 341 if ((reg & SPI_VPD_ENB) != 0) { 342 /* Get VPD stored in TWSI EEPROM. */ 343 reg &= ~SPI_VPD_ENB; 344 CSR_WRITE_4(sc, AGE_SPI_CTRL, reg); 345 } 346 347 if (pci_find_cap(sc->age_dev, PCIY_VPD, &vpdc) == 0) { 348 /* 349 * PCI VPD capability found, let TWSI reload EEPROM. 350 * This will set ethernet address of controller. 351 */ 352 CSR_WRITE_4(sc, AGE_TWSI_CTRL, CSR_READ_4(sc, AGE_TWSI_CTRL) | 353 TWSI_CTRL_SW_LD_START); 354 for (i = 100; i > 0; i--) { 355 DELAY(1000); 356 reg = CSR_READ_4(sc, AGE_TWSI_CTRL); 357 if ((reg & TWSI_CTRL_SW_LD_START) == 0) 358 break; 359 } 360 if (i == 0) 361 device_printf(sc->age_dev, 362 "reloading EEPROM timeout!\n"); 363 } else { 364 if (bootverbose) 365 device_printf(sc->age_dev, 366 "PCI VPD capability not found!\n"); 367 } 368 369 ea[0] = CSR_READ_4(sc, AGE_PAR0); 370 ea[1] = CSR_READ_4(sc, AGE_PAR1); 371 sc->age_eaddr[0] = (ea[1] >> 8) & 0xFF; 372 sc->age_eaddr[1] = (ea[1] >> 0) & 0xFF; 373 sc->age_eaddr[2] = (ea[0] >> 24) & 0xFF; 374 sc->age_eaddr[3] = (ea[0] >> 16) & 0xFF; 375 sc->age_eaddr[4] = (ea[0] >> 8) & 0xFF; 376 sc->age_eaddr[5] = (ea[0] >> 0) & 0xFF; 377 } 378 379 static void 380 age_phy_reset(struct age_softc *sc) 381 { 382 uint16_t reg, pn; 383 int i, linkup; 384 385 /* Reset PHY. */ 386 CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_RST); 387 DELAY(2000); 388 CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_CLR); 389 DELAY(2000); 390 391 #define ATPHY_DBG_ADDR 0x1D 392 #define ATPHY_DBG_DATA 0x1E 393 #define ATPHY_CDTC 0x16 394 #define PHY_CDTC_ENB 0x0001 395 #define PHY_CDTC_POFF 8 396 #define ATPHY_CDTS 0x1C 397 #define PHY_CDTS_STAT_OK 0x0000 398 #define PHY_CDTS_STAT_SHORT 0x0100 399 #define PHY_CDTS_STAT_OPEN 0x0200 400 #define PHY_CDTS_STAT_INVAL 0x0300 401 #define PHY_CDTS_STAT_MASK 0x0300 402 403 /* Check power saving mode. Magic from Linux. 
 */
    age_miibus_writereg(sc->age_dev, sc->age_phyaddr, MII_BMCR, BMCR_RESET);
    for (linkup = 0, pn = 0; pn < 4; pn++) {
        age_miibus_writereg(sc->age_dev, sc->age_phyaddr, ATPHY_CDTC,
            (pn << PHY_CDTC_POFF) | PHY_CDTC_ENB);
        for (i = 200; i > 0; i--) {
            DELAY(1000);
            reg = age_miibus_readreg(sc->age_dev, sc->age_phyaddr,
                ATPHY_CDTC);
            if ((reg & PHY_CDTC_ENB) == 0)
                break;
        }
        DELAY(1000);
        reg = age_miibus_readreg(sc->age_dev, sc->age_phyaddr,
            ATPHY_CDTS);
        if ((reg & PHY_CDTS_STAT_MASK) != PHY_CDTS_STAT_OPEN) {
            linkup++;
            break;
        }
    }
    age_miibus_writereg(sc->age_dev, sc->age_phyaddr, MII_BMCR,
        BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
    if (linkup == 0) {
        age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
            ATPHY_DBG_ADDR, 0);
        age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
            ATPHY_DBG_DATA, 0x124E);
        age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
            ATPHY_DBG_ADDR, 1);
        reg = age_miibus_readreg(sc->age_dev, sc->age_phyaddr,
            ATPHY_DBG_DATA);
        age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
            ATPHY_DBG_DATA, reg | 0x03);
        /* XXX */
        DELAY(1500 * 1000);
        age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
            ATPHY_DBG_ADDR, 0);
        age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
            ATPHY_DBG_DATA, 0x024E);
    }

#undef ATPHY_DBG_ADDR
#undef ATPHY_DBG_DATA
#undef ATPHY_CDTC
#undef PHY_CDTC_ENB
#undef PHY_CDTC_POFF
#undef ATPHY_CDTS
#undef PHY_CDTS_STAT_OK
#undef PHY_CDTS_STAT_SHORT
#undef PHY_CDTS_STAT_OPEN
#undef PHY_CDTS_STAT_INVAL
#undef PHY_CDTS_STAT_MASK
}

static int
age_attach(device_t dev)
{
    struct age_softc *sc;
    if_t ifp;
    uint16_t burst;
    int error, i, msic, msixc, pmc;

    error = 0;
    sc = device_get_softc(dev);
    sc->age_dev = dev;

    mtx_init(&sc->age_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
        MTX_DEF);
    callout_init_mtx(&sc->age_tick_ch, &sc->age_mtx, 0);
    TASK_INIT(&sc->age_int_task, 0, age_int_task, sc);
    TASK_INIT(&sc->age_link_task, 0, age_link_task, sc);

    /* Map the device. */
    pci_enable_busmaster(dev);
    sc->age_res_spec = age_res_spec_mem;
    sc->age_irq_spec = age_irq_spec_legacy;
    error = bus_alloc_resources(dev, sc->age_res_spec, sc->age_res);
    if (error != 0) {
        device_printf(dev, "cannot allocate memory resources.\n");
        goto fail;
    }

    /* Set PHY address. */
    sc->age_phyaddr = AGE_PHY_ADDR;

    /* Reset PHY. */
    age_phy_reset(sc);

    /* Reset the ethernet controller. */
    age_reset(sc);

    /* Get PCI and chip id/revision. */
    sc->age_rev = pci_get_revid(dev);
    sc->age_chip_rev = CSR_READ_4(sc, AGE_MASTER_CFG) >>
        MASTER_CHIP_REV_SHIFT;
    if (bootverbose) {
        device_printf(dev, "PCI device revision : 0x%04x\n",
            sc->age_rev);
        device_printf(dev, "Chip id/revision : 0x%04x\n",
            sc->age_chip_rev);
    }

    /*
     * XXX
     * Uninitialized hardware returns an invalid chip id/revision
     * as well as 0xFFFFFFFF for the Tx/Rx FIFO lengths. It seems
     * that an unplugged cable puts the hardware into an automatic
     * power-down mode, which in turn returns an invalid chip revision.
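     * When that happens, the check below sees an all-ones chip revision
     * and the attach is aborted with ENXIO rather than attempting to
     * program a powered-down controller.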
511 */ 512 if (sc->age_chip_rev == 0xFFFF) { 513 device_printf(dev,"invalid chip revision : 0x%04x -- " 514 "not initialized?\n", sc->age_chip_rev); 515 error = ENXIO; 516 goto fail; 517 } 518 519 device_printf(dev, "%d Tx FIFO, %d Rx FIFO\n", 520 CSR_READ_4(sc, AGE_SRAM_TX_FIFO_LEN), 521 CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN)); 522 523 /* Allocate IRQ resources. */ 524 msixc = pci_msix_count(dev); 525 msic = pci_msi_count(dev); 526 if (bootverbose) { 527 device_printf(dev, "MSIX count : %d\n", msixc); 528 device_printf(dev, "MSI count : %d\n", msic); 529 } 530 531 /* Prefer MSIX over MSI. */ 532 if (msix_disable == 0 || msi_disable == 0) { 533 if (msix_disable == 0 && msixc == AGE_MSIX_MESSAGES && 534 pci_alloc_msix(dev, &msixc) == 0) { 535 if (msic == AGE_MSIX_MESSAGES) { 536 device_printf(dev, "Using %d MSIX messages.\n", 537 msixc); 538 sc->age_flags |= AGE_FLAG_MSIX; 539 sc->age_irq_spec = age_irq_spec_msix; 540 } else 541 pci_release_msi(dev); 542 } 543 if (msi_disable == 0 && (sc->age_flags & AGE_FLAG_MSIX) == 0 && 544 msic == AGE_MSI_MESSAGES && 545 pci_alloc_msi(dev, &msic) == 0) { 546 if (msic == AGE_MSI_MESSAGES) { 547 device_printf(dev, "Using %d MSI messages.\n", 548 msic); 549 sc->age_flags |= AGE_FLAG_MSI; 550 sc->age_irq_spec = age_irq_spec_msi; 551 } else 552 pci_release_msi(dev); 553 } 554 } 555 556 error = bus_alloc_resources(dev, sc->age_irq_spec, sc->age_irq); 557 if (error != 0) { 558 device_printf(dev, "cannot allocate IRQ resources.\n"); 559 goto fail; 560 } 561 562 /* Get DMA parameters from PCIe device control register. */ 563 if (pci_find_cap(dev, PCIY_EXPRESS, &i) == 0) { 564 sc->age_flags |= AGE_FLAG_PCIE; 565 burst = pci_read_config(dev, i + 0x08, 2); 566 /* Max read request size. */ 567 sc->age_dma_rd_burst = ((burst >> 12) & 0x07) << 568 DMA_CFG_RD_BURST_SHIFT; 569 /* Max payload size. */ 570 sc->age_dma_wr_burst = ((burst >> 5) & 0x07) << 571 DMA_CFG_WR_BURST_SHIFT; 572 if (bootverbose) { 573 device_printf(dev, "Read request size : %d bytes.\n", 574 128 << ((burst >> 12) & 0x07)); 575 device_printf(dev, "TLP payload size : %d bytes.\n", 576 128 << ((burst >> 5) & 0x07)); 577 } 578 } else { 579 sc->age_dma_rd_burst = DMA_CFG_RD_BURST_128; 580 sc->age_dma_wr_burst = DMA_CFG_WR_BURST_128; 581 } 582 583 /* Create device sysctl node. */ 584 age_sysctl_node(sc); 585 586 if ((error = age_dma_alloc(sc)) != 0) 587 goto fail; 588 589 /* Load station address. */ 590 age_get_macaddr(sc); 591 592 ifp = sc->age_ifp = if_alloc(IFT_ETHER); 593 if_setsoftc(ifp, sc); 594 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 595 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); 596 if_setioctlfn(ifp, age_ioctl); 597 if_setstartfn(ifp, age_start); 598 if_setinitfn(ifp, age_init); 599 if_setsendqlen(ifp, AGE_TX_RING_CNT - 1); 600 if_setsendqready(ifp); 601 if_setcapabilities(ifp, IFCAP_HWCSUM | IFCAP_TSO4); 602 if_sethwassist(ifp, AGE_CSUM_FEATURES | CSUM_TSO); 603 if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0) { 604 sc->age_flags |= AGE_FLAG_PMCAP; 605 if_setcapabilitiesbit(ifp, IFCAP_WOL_MAGIC | IFCAP_WOL_MCAST, 0); 606 } 607 if_setcapenable(ifp, if_getcapabilities(ifp)); 608 609 /* Set up MII bus. */ 610 error = mii_attach(dev, &sc->age_miibus, ifp, age_mediachange, 611 age_mediastatus, BMSR_DEFCAPMASK, sc->age_phyaddr, MII_OFFSET_ANY, 612 0); 613 if (error != 0) { 614 device_printf(dev, "attaching PHYs failed\n"); 615 goto fail; 616 } 617 618 ether_ifattach(ifp, sc->age_eaddr); 619 620 /* VLAN capability setup. 
*/ 621 if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | 622 IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO, 0); 623 if_setcapenable(ifp, if_getcapabilities(ifp)); 624 625 /* Tell the upper layer(s) we support long frames. */ 626 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); 627 628 /* Create local taskq. */ 629 sc->age_tq = taskqueue_create_fast("age_taskq", M_WAITOK, 630 taskqueue_thread_enqueue, &sc->age_tq); 631 if (sc->age_tq == NULL) { 632 device_printf(dev, "could not create taskqueue.\n"); 633 ether_ifdetach(ifp); 634 error = ENXIO; 635 goto fail; 636 } 637 taskqueue_start_threads(&sc->age_tq, 1, PI_NET, "%s taskq", 638 device_get_nameunit(sc->age_dev)); 639 640 if ((sc->age_flags & AGE_FLAG_MSIX) != 0) 641 msic = AGE_MSIX_MESSAGES; 642 else if ((sc->age_flags & AGE_FLAG_MSI) != 0) 643 msic = AGE_MSI_MESSAGES; 644 else 645 msic = 1; 646 for (i = 0; i < msic; i++) { 647 error = bus_setup_intr(dev, sc->age_irq[i], 648 INTR_TYPE_NET | INTR_MPSAFE, age_intr, NULL, sc, 649 &sc->age_intrhand[i]); 650 if (error != 0) 651 break; 652 } 653 if (error != 0) { 654 device_printf(dev, "could not set up interrupt handler.\n"); 655 taskqueue_free(sc->age_tq); 656 sc->age_tq = NULL; 657 ether_ifdetach(ifp); 658 goto fail; 659 } 660 661 fail: 662 if (error != 0) 663 age_detach(dev); 664 665 return (error); 666 } 667 668 static int 669 age_detach(device_t dev) 670 { 671 struct age_softc *sc; 672 if_t ifp; 673 int i, msic; 674 675 sc = device_get_softc(dev); 676 677 ifp = sc->age_ifp; 678 if (device_is_attached(dev)) { 679 AGE_LOCK(sc); 680 sc->age_flags |= AGE_FLAG_DETACH; 681 age_stop(sc); 682 AGE_UNLOCK(sc); 683 callout_drain(&sc->age_tick_ch); 684 taskqueue_drain(sc->age_tq, &sc->age_int_task); 685 taskqueue_drain(taskqueue_swi, &sc->age_link_task); 686 ether_ifdetach(ifp); 687 } 688 689 if (sc->age_tq != NULL) { 690 taskqueue_drain(sc->age_tq, &sc->age_int_task); 691 taskqueue_free(sc->age_tq); 692 sc->age_tq = NULL; 693 } 694 695 if (sc->age_miibus != NULL) { 696 device_delete_child(dev, sc->age_miibus); 697 sc->age_miibus = NULL; 698 } 699 bus_generic_detach(dev); 700 age_dma_free(sc); 701 702 if (ifp != NULL) { 703 if_free(ifp); 704 sc->age_ifp = NULL; 705 } 706 707 if ((sc->age_flags & AGE_FLAG_MSIX) != 0) 708 msic = AGE_MSIX_MESSAGES; 709 else if ((sc->age_flags & AGE_FLAG_MSI) != 0) 710 msic = AGE_MSI_MESSAGES; 711 else 712 msic = 1; 713 for (i = 0; i < msic; i++) { 714 if (sc->age_intrhand[i] != NULL) { 715 bus_teardown_intr(dev, sc->age_irq[i], 716 sc->age_intrhand[i]); 717 sc->age_intrhand[i] = NULL; 718 } 719 } 720 721 bus_release_resources(dev, sc->age_irq_spec, sc->age_irq); 722 if ((sc->age_flags & (AGE_FLAG_MSI | AGE_FLAG_MSIX)) != 0) 723 pci_release_msi(dev); 724 bus_release_resources(dev, sc->age_res_spec, sc->age_res); 725 mtx_destroy(&sc->age_mtx); 726 727 return (0); 728 } 729 730 static void 731 age_sysctl_node(struct age_softc *sc) 732 { 733 int error; 734 735 SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->age_dev), 736 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->age_dev)), OID_AUTO, 737 "stats", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 738 sc, 0, sysctl_age_stats, "I", "Statistics"); 739 740 SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->age_dev), 741 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->age_dev)), OID_AUTO, 742 "int_mod", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 743 &sc->age_int_mod, 0, sysctl_hw_age_int_mod, "I", 744 "age interrupt moderation"); 745 746 /* Pull in device tunables. 
 */
    sc->age_int_mod = AGE_IM_TIMER_DEFAULT;
    error = resource_int_value(device_get_name(sc->age_dev),
        device_get_unit(sc->age_dev), "int_mod", &sc->age_int_mod);
    if (error == 0) {
        if (sc->age_int_mod < AGE_IM_TIMER_MIN ||
            sc->age_int_mod > AGE_IM_TIMER_MAX) {
            device_printf(sc->age_dev,
                "int_mod value out of range; using default: %d\n",
                AGE_IM_TIMER_DEFAULT);
            sc->age_int_mod = AGE_IM_TIMER_DEFAULT;
        }
    }

    SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->age_dev),
        SYSCTL_CHILDREN(device_get_sysctl_tree(sc->age_dev)), OID_AUTO,
        "process_limit", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
        &sc->age_process_limit, 0, sysctl_hw_age_proc_limit, "I",
        "max number of Rx events to process");

    /* Pull in device tunables. */
    sc->age_process_limit = AGE_PROC_DEFAULT;
    error = resource_int_value(device_get_name(sc->age_dev),
        device_get_unit(sc->age_dev), "process_limit",
        &sc->age_process_limit);
    if (error == 0) {
        if (sc->age_process_limit < AGE_PROC_MIN ||
            sc->age_process_limit > AGE_PROC_MAX) {
            device_printf(sc->age_dev,
                "process_limit value out of range; "
                "using default: %d\n", AGE_PROC_DEFAULT);
            sc->age_process_limit = AGE_PROC_DEFAULT;
        }
    }
}

struct age_dmamap_arg {
    bus_addr_t age_busaddr;
};

static void
age_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
    struct age_dmamap_arg *ctx;

    if (error != 0)
        return;

    KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

    ctx = (struct age_dmamap_arg *)arg;
    ctx->age_busaddr = segs[0].ds_addr;
}

/*
 * The Attansic L1 controller has a single register for the high 32 bits
 * of DMA block addresses, so all descriptor structures and DMA memory
 * blocks must share the same upper 32-bit address within the 4GB address
 * space (i.e. crossing a 4GB boundary is not allowed).
 */
static int
age_check_boundary(struct age_softc *sc)
{
    bus_addr_t rx_ring_end, rr_ring_end, tx_ring_end;
    bus_addr_t cmb_block_end, smb_block_end;

    /* Tx/Rx descriptor queues should reside within a single 4GB region.
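     * AGE_ADDR_HI() yields the upper 32 bits of a bus address. If any
     * ring or message block starts and ends with different upper halves,
     * or the rings do not all share the same upper half, EFBIG is
     * returned and age_dma_alloc() retries the allocations with DMA
     * addressing limited to 32 bits.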
*/ 813 tx_ring_end = sc->age_rdata.age_tx_ring_paddr + AGE_TX_RING_SZ; 814 rx_ring_end = sc->age_rdata.age_rx_ring_paddr + AGE_RX_RING_SZ; 815 rr_ring_end = sc->age_rdata.age_rr_ring_paddr + AGE_RR_RING_SZ; 816 cmb_block_end = sc->age_rdata.age_cmb_block_paddr + AGE_CMB_BLOCK_SZ; 817 smb_block_end = sc->age_rdata.age_smb_block_paddr + AGE_SMB_BLOCK_SZ; 818 819 if ((AGE_ADDR_HI(tx_ring_end) != 820 AGE_ADDR_HI(sc->age_rdata.age_tx_ring_paddr)) || 821 (AGE_ADDR_HI(rx_ring_end) != 822 AGE_ADDR_HI(sc->age_rdata.age_rx_ring_paddr)) || 823 (AGE_ADDR_HI(rr_ring_end) != 824 AGE_ADDR_HI(sc->age_rdata.age_rr_ring_paddr)) || 825 (AGE_ADDR_HI(cmb_block_end) != 826 AGE_ADDR_HI(sc->age_rdata.age_cmb_block_paddr)) || 827 (AGE_ADDR_HI(smb_block_end) != 828 AGE_ADDR_HI(sc->age_rdata.age_smb_block_paddr))) 829 return (EFBIG); 830 831 if ((AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(rx_ring_end)) || 832 (AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(rr_ring_end)) || 833 (AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(cmb_block_end)) || 834 (AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(smb_block_end))) 835 return (EFBIG); 836 837 return (0); 838 } 839 840 static int 841 age_dma_alloc(struct age_softc *sc) 842 { 843 struct age_txdesc *txd; 844 struct age_rxdesc *rxd; 845 bus_addr_t lowaddr; 846 struct age_dmamap_arg ctx; 847 int error, i; 848 849 lowaddr = BUS_SPACE_MAXADDR; 850 851 again: 852 /* Create parent ring/DMA block tag. */ 853 error = bus_dma_tag_create( 854 bus_get_dma_tag(sc->age_dev), /* parent */ 855 1, 0, /* alignment, boundary */ 856 lowaddr, /* lowaddr */ 857 BUS_SPACE_MAXADDR, /* highaddr */ 858 NULL, NULL, /* filter, filterarg */ 859 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 860 0, /* nsegments */ 861 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 862 0, /* flags */ 863 NULL, NULL, /* lockfunc, lockarg */ 864 &sc->age_cdata.age_parent_tag); 865 if (error != 0) { 866 device_printf(sc->age_dev, 867 "could not create parent DMA tag.\n"); 868 goto fail; 869 } 870 871 /* Create tag for Tx ring. */ 872 error = bus_dma_tag_create( 873 sc->age_cdata.age_parent_tag, /* parent */ 874 AGE_TX_RING_ALIGN, 0, /* alignment, boundary */ 875 BUS_SPACE_MAXADDR, /* lowaddr */ 876 BUS_SPACE_MAXADDR, /* highaddr */ 877 NULL, NULL, /* filter, filterarg */ 878 AGE_TX_RING_SZ, /* maxsize */ 879 1, /* nsegments */ 880 AGE_TX_RING_SZ, /* maxsegsize */ 881 0, /* flags */ 882 NULL, NULL, /* lockfunc, lockarg */ 883 &sc->age_cdata.age_tx_ring_tag); 884 if (error != 0) { 885 device_printf(sc->age_dev, 886 "could not create Tx ring DMA tag.\n"); 887 goto fail; 888 } 889 890 /* Create tag for Rx ring. */ 891 error = bus_dma_tag_create( 892 sc->age_cdata.age_parent_tag, /* parent */ 893 AGE_RX_RING_ALIGN, 0, /* alignment, boundary */ 894 BUS_SPACE_MAXADDR, /* lowaddr */ 895 BUS_SPACE_MAXADDR, /* highaddr */ 896 NULL, NULL, /* filter, filterarg */ 897 AGE_RX_RING_SZ, /* maxsize */ 898 1, /* nsegments */ 899 AGE_RX_RING_SZ, /* maxsegsize */ 900 0, /* flags */ 901 NULL, NULL, /* lockfunc, lockarg */ 902 &sc->age_cdata.age_rx_ring_tag); 903 if (error != 0) { 904 device_printf(sc->age_dev, 905 "could not create Rx ring DMA tag.\n"); 906 goto fail; 907 } 908 909 /* Create tag for Rx return ring. 
 */
    error = bus_dma_tag_create(
        sc->age_cdata.age_parent_tag,  /* parent */
        AGE_RR_RING_ALIGN, 0,          /* alignment, boundary */
        BUS_SPACE_MAXADDR,             /* lowaddr */
        BUS_SPACE_MAXADDR,             /* highaddr */
        NULL, NULL,                    /* filter, filterarg */
        AGE_RR_RING_SZ,                /* maxsize */
        1,                             /* nsegments */
        AGE_RR_RING_SZ,                /* maxsegsize */
        0,                             /* flags */
        NULL, NULL,                    /* lockfunc, lockarg */
        &sc->age_cdata.age_rr_ring_tag);
    if (error != 0) {
        device_printf(sc->age_dev,
            "could not create Rx return ring DMA tag.\n");
        goto fail;
    }

    /* Create tag for coalescing message block. */
    error = bus_dma_tag_create(
        sc->age_cdata.age_parent_tag,  /* parent */
        AGE_CMB_ALIGN, 0,              /* alignment, boundary */
        BUS_SPACE_MAXADDR,             /* lowaddr */
        BUS_SPACE_MAXADDR,             /* highaddr */
        NULL, NULL,                    /* filter, filterarg */
        AGE_CMB_BLOCK_SZ,              /* maxsize */
        1,                             /* nsegments */
        AGE_CMB_BLOCK_SZ,              /* maxsegsize */
        0,                             /* flags */
        NULL, NULL,                    /* lockfunc, lockarg */
        &sc->age_cdata.age_cmb_block_tag);
    if (error != 0) {
        device_printf(sc->age_dev,
            "could not create CMB DMA tag.\n");
        goto fail;
    }

    /* Create tag for statistics message block. */
    error = bus_dma_tag_create(
        sc->age_cdata.age_parent_tag,  /* parent */
        AGE_SMB_ALIGN, 0,              /* alignment, boundary */
        BUS_SPACE_MAXADDR,             /* lowaddr */
        BUS_SPACE_MAXADDR,             /* highaddr */
        NULL, NULL,                    /* filter, filterarg */
        AGE_SMB_BLOCK_SZ,              /* maxsize */
        1,                             /* nsegments */
        AGE_SMB_BLOCK_SZ,              /* maxsegsize */
        0,                             /* flags */
        NULL, NULL,                    /* lockfunc, lockarg */
        &sc->age_cdata.age_smb_block_tag);
    if (error != 0) {
        device_printf(sc->age_dev,
            "could not create SMB DMA tag.\n");
        goto fail;
    }

    /* Allocate DMA'able memory and load the DMA map.
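     * For each ring/block, bus_dmamem_alloc() obtains zeroed, coherent
     * memory for the tag and bus_dmamap_load() resolves its bus address
     * through age_dmamap_cb(), which records the single segment address
     * so the physical ring/block addresses can later be handed to the
     * hardware.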
*/ 967 error = bus_dmamem_alloc(sc->age_cdata.age_tx_ring_tag, 968 (void **)&sc->age_rdata.age_tx_ring, 969 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 970 &sc->age_cdata.age_tx_ring_map); 971 if (error != 0) { 972 device_printf(sc->age_dev, 973 "could not allocate DMA'able memory for Tx ring.\n"); 974 goto fail; 975 } 976 ctx.age_busaddr = 0; 977 error = bus_dmamap_load(sc->age_cdata.age_tx_ring_tag, 978 sc->age_cdata.age_tx_ring_map, sc->age_rdata.age_tx_ring, 979 AGE_TX_RING_SZ, age_dmamap_cb, &ctx, 0); 980 if (error != 0 || ctx.age_busaddr == 0) { 981 device_printf(sc->age_dev, 982 "could not load DMA'able memory for Tx ring.\n"); 983 goto fail; 984 } 985 sc->age_rdata.age_tx_ring_paddr = ctx.age_busaddr; 986 /* Rx ring */ 987 error = bus_dmamem_alloc(sc->age_cdata.age_rx_ring_tag, 988 (void **)&sc->age_rdata.age_rx_ring, 989 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 990 &sc->age_cdata.age_rx_ring_map); 991 if (error != 0) { 992 device_printf(sc->age_dev, 993 "could not allocate DMA'able memory for Rx ring.\n"); 994 goto fail; 995 } 996 ctx.age_busaddr = 0; 997 error = bus_dmamap_load(sc->age_cdata.age_rx_ring_tag, 998 sc->age_cdata.age_rx_ring_map, sc->age_rdata.age_rx_ring, 999 AGE_RX_RING_SZ, age_dmamap_cb, &ctx, 0); 1000 if (error != 0 || ctx.age_busaddr == 0) { 1001 device_printf(sc->age_dev, 1002 "could not load DMA'able memory for Rx ring.\n"); 1003 goto fail; 1004 } 1005 sc->age_rdata.age_rx_ring_paddr = ctx.age_busaddr; 1006 /* Rx return ring */ 1007 error = bus_dmamem_alloc(sc->age_cdata.age_rr_ring_tag, 1008 (void **)&sc->age_rdata.age_rr_ring, 1009 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 1010 &sc->age_cdata.age_rr_ring_map); 1011 if (error != 0) { 1012 device_printf(sc->age_dev, 1013 "could not allocate DMA'able memory for Rx return ring.\n"); 1014 goto fail; 1015 } 1016 ctx.age_busaddr = 0; 1017 error = bus_dmamap_load(sc->age_cdata.age_rr_ring_tag, 1018 sc->age_cdata.age_rr_ring_map, sc->age_rdata.age_rr_ring, 1019 AGE_RR_RING_SZ, age_dmamap_cb, 1020 &ctx, 0); 1021 if (error != 0 || ctx.age_busaddr == 0) { 1022 device_printf(sc->age_dev, 1023 "could not load DMA'able memory for Rx return ring.\n"); 1024 goto fail; 1025 } 1026 sc->age_rdata.age_rr_ring_paddr = ctx.age_busaddr; 1027 /* CMB block */ 1028 error = bus_dmamem_alloc(sc->age_cdata.age_cmb_block_tag, 1029 (void **)&sc->age_rdata.age_cmb_block, 1030 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 1031 &sc->age_cdata.age_cmb_block_map); 1032 if (error != 0) { 1033 device_printf(sc->age_dev, 1034 "could not allocate DMA'able memory for CMB block.\n"); 1035 goto fail; 1036 } 1037 ctx.age_busaddr = 0; 1038 error = bus_dmamap_load(sc->age_cdata.age_cmb_block_tag, 1039 sc->age_cdata.age_cmb_block_map, sc->age_rdata.age_cmb_block, 1040 AGE_CMB_BLOCK_SZ, age_dmamap_cb, &ctx, 0); 1041 if (error != 0 || ctx.age_busaddr == 0) { 1042 device_printf(sc->age_dev, 1043 "could not load DMA'able memory for CMB block.\n"); 1044 goto fail; 1045 } 1046 sc->age_rdata.age_cmb_block_paddr = ctx.age_busaddr; 1047 /* SMB block */ 1048 error = bus_dmamem_alloc(sc->age_cdata.age_smb_block_tag, 1049 (void **)&sc->age_rdata.age_smb_block, 1050 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 1051 &sc->age_cdata.age_smb_block_map); 1052 if (error != 0) { 1053 device_printf(sc->age_dev, 1054 "could not allocate DMA'able memory for SMB block.\n"); 1055 goto fail; 1056 } 1057 ctx.age_busaddr = 0; 1058 error = bus_dmamap_load(sc->age_cdata.age_smb_block_tag, 1059 sc->age_cdata.age_smb_block_map, 
sc->age_rdata.age_smb_block, 1060 AGE_SMB_BLOCK_SZ, age_dmamap_cb, &ctx, 0); 1061 if (error != 0 || ctx.age_busaddr == 0) { 1062 device_printf(sc->age_dev, 1063 "could not load DMA'able memory for SMB block.\n"); 1064 goto fail; 1065 } 1066 sc->age_rdata.age_smb_block_paddr = ctx.age_busaddr; 1067 1068 /* 1069 * All ring buffer and DMA blocks should have the same 1070 * high address part of 64bit DMA address space. 1071 */ 1072 if (lowaddr != BUS_SPACE_MAXADDR_32BIT && 1073 (error = age_check_boundary(sc)) != 0) { 1074 device_printf(sc->age_dev, "4GB boundary crossed, " 1075 "switching to 32bit DMA addressing mode.\n"); 1076 age_dma_free(sc); 1077 /* Limit DMA address space to 32bit and try again. */ 1078 lowaddr = BUS_SPACE_MAXADDR_32BIT; 1079 goto again; 1080 } 1081 1082 /* 1083 * Create Tx/Rx buffer parent tag. 1084 * L1 supports full 64bit DMA addressing in Tx/Rx buffers 1085 * so it needs separate parent DMA tag. 1086 * XXX 1087 * It seems enabling 64bit DMA causes data corruption. Limit 1088 * DMA address space to 32bit. 1089 */ 1090 error = bus_dma_tag_create( 1091 bus_get_dma_tag(sc->age_dev), /* parent */ 1092 1, 0, /* alignment, boundary */ 1093 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1094 BUS_SPACE_MAXADDR, /* highaddr */ 1095 NULL, NULL, /* filter, filterarg */ 1096 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 1097 0, /* nsegments */ 1098 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 1099 0, /* flags */ 1100 NULL, NULL, /* lockfunc, lockarg */ 1101 &sc->age_cdata.age_buffer_tag); 1102 if (error != 0) { 1103 device_printf(sc->age_dev, 1104 "could not create parent buffer DMA tag.\n"); 1105 goto fail; 1106 } 1107 1108 /* Create tag for Tx buffers. */ 1109 error = bus_dma_tag_create( 1110 sc->age_cdata.age_buffer_tag, /* parent */ 1111 1, 0, /* alignment, boundary */ 1112 BUS_SPACE_MAXADDR, /* lowaddr */ 1113 BUS_SPACE_MAXADDR, /* highaddr */ 1114 NULL, NULL, /* filter, filterarg */ 1115 AGE_TSO_MAXSIZE, /* maxsize */ 1116 AGE_MAXTXSEGS, /* nsegments */ 1117 AGE_TSO_MAXSEGSIZE, /* maxsegsize */ 1118 0, /* flags */ 1119 NULL, NULL, /* lockfunc, lockarg */ 1120 &sc->age_cdata.age_tx_tag); 1121 if (error != 0) { 1122 device_printf(sc->age_dev, "could not create Tx DMA tag.\n"); 1123 goto fail; 1124 } 1125 1126 /* Create tag for Rx buffers. */ 1127 error = bus_dma_tag_create( 1128 sc->age_cdata.age_buffer_tag, /* parent */ 1129 AGE_RX_BUF_ALIGN, 0, /* alignment, boundary */ 1130 BUS_SPACE_MAXADDR, /* lowaddr */ 1131 BUS_SPACE_MAXADDR, /* highaddr */ 1132 NULL, NULL, /* filter, filterarg */ 1133 MCLBYTES, /* maxsize */ 1134 1, /* nsegments */ 1135 MCLBYTES, /* maxsegsize */ 1136 0, /* flags */ 1137 NULL, NULL, /* lockfunc, lockarg */ 1138 &sc->age_cdata.age_rx_tag); 1139 if (error != 0) { 1140 device_printf(sc->age_dev, "could not create Rx DMA tag.\n"); 1141 goto fail; 1142 } 1143 1144 /* Create DMA maps for Tx buffers. */ 1145 for (i = 0; i < AGE_TX_RING_CNT; i++) { 1146 txd = &sc->age_cdata.age_txdesc[i]; 1147 txd->tx_m = NULL; 1148 txd->tx_dmamap = NULL; 1149 error = bus_dmamap_create(sc->age_cdata.age_tx_tag, 0, 1150 &txd->tx_dmamap); 1151 if (error != 0) { 1152 device_printf(sc->age_dev, 1153 "could not create Tx dmamap.\n"); 1154 goto fail; 1155 } 1156 } 1157 /* Create DMA maps for Rx buffers. 
*/ 1158 if ((error = bus_dmamap_create(sc->age_cdata.age_rx_tag, 0, 1159 &sc->age_cdata.age_rx_sparemap)) != 0) { 1160 device_printf(sc->age_dev, 1161 "could not create spare Rx dmamap.\n"); 1162 goto fail; 1163 } 1164 for (i = 0; i < AGE_RX_RING_CNT; i++) { 1165 rxd = &sc->age_cdata.age_rxdesc[i]; 1166 rxd->rx_m = NULL; 1167 rxd->rx_dmamap = NULL; 1168 error = bus_dmamap_create(sc->age_cdata.age_rx_tag, 0, 1169 &rxd->rx_dmamap); 1170 if (error != 0) { 1171 device_printf(sc->age_dev, 1172 "could not create Rx dmamap.\n"); 1173 goto fail; 1174 } 1175 } 1176 1177 fail: 1178 return (error); 1179 } 1180 1181 static void 1182 age_dma_free(struct age_softc *sc) 1183 { 1184 struct age_txdesc *txd; 1185 struct age_rxdesc *rxd; 1186 int i; 1187 1188 /* Tx buffers */ 1189 if (sc->age_cdata.age_tx_tag != NULL) { 1190 for (i = 0; i < AGE_TX_RING_CNT; i++) { 1191 txd = &sc->age_cdata.age_txdesc[i]; 1192 if (txd->tx_dmamap != NULL) { 1193 bus_dmamap_destroy(sc->age_cdata.age_tx_tag, 1194 txd->tx_dmamap); 1195 txd->tx_dmamap = NULL; 1196 } 1197 } 1198 bus_dma_tag_destroy(sc->age_cdata.age_tx_tag); 1199 sc->age_cdata.age_tx_tag = NULL; 1200 } 1201 /* Rx buffers */ 1202 if (sc->age_cdata.age_rx_tag != NULL) { 1203 for (i = 0; i < AGE_RX_RING_CNT; i++) { 1204 rxd = &sc->age_cdata.age_rxdesc[i]; 1205 if (rxd->rx_dmamap != NULL) { 1206 bus_dmamap_destroy(sc->age_cdata.age_rx_tag, 1207 rxd->rx_dmamap); 1208 rxd->rx_dmamap = NULL; 1209 } 1210 } 1211 if (sc->age_cdata.age_rx_sparemap != NULL) { 1212 bus_dmamap_destroy(sc->age_cdata.age_rx_tag, 1213 sc->age_cdata.age_rx_sparemap); 1214 sc->age_cdata.age_rx_sparemap = NULL; 1215 } 1216 bus_dma_tag_destroy(sc->age_cdata.age_rx_tag); 1217 sc->age_cdata.age_rx_tag = NULL; 1218 } 1219 /* Tx ring. */ 1220 if (sc->age_cdata.age_tx_ring_tag != NULL) { 1221 if (sc->age_rdata.age_tx_ring_paddr != 0) 1222 bus_dmamap_unload(sc->age_cdata.age_tx_ring_tag, 1223 sc->age_cdata.age_tx_ring_map); 1224 if (sc->age_rdata.age_tx_ring != NULL) 1225 bus_dmamem_free(sc->age_cdata.age_tx_ring_tag, 1226 sc->age_rdata.age_tx_ring, 1227 sc->age_cdata.age_tx_ring_map); 1228 sc->age_rdata.age_tx_ring_paddr = 0; 1229 sc->age_rdata.age_tx_ring = NULL; 1230 bus_dma_tag_destroy(sc->age_cdata.age_tx_ring_tag); 1231 sc->age_cdata.age_tx_ring_tag = NULL; 1232 } 1233 /* Rx ring. */ 1234 if (sc->age_cdata.age_rx_ring_tag != NULL) { 1235 if (sc->age_rdata.age_rx_ring_paddr != 0) 1236 bus_dmamap_unload(sc->age_cdata.age_rx_ring_tag, 1237 sc->age_cdata.age_rx_ring_map); 1238 if (sc->age_rdata.age_rx_ring != NULL) 1239 bus_dmamem_free(sc->age_cdata.age_rx_ring_tag, 1240 sc->age_rdata.age_rx_ring, 1241 sc->age_cdata.age_rx_ring_map); 1242 sc->age_rdata.age_rx_ring_paddr = 0; 1243 sc->age_rdata.age_rx_ring = NULL; 1244 bus_dma_tag_destroy(sc->age_cdata.age_rx_ring_tag); 1245 sc->age_cdata.age_rx_ring_tag = NULL; 1246 } 1247 /* Rx return ring. 
 */
    if (sc->age_cdata.age_rr_ring_tag != NULL) {
        if (sc->age_rdata.age_rr_ring_paddr != 0)
            bus_dmamap_unload(sc->age_cdata.age_rr_ring_tag,
                sc->age_cdata.age_rr_ring_map);
        if (sc->age_rdata.age_rr_ring != NULL)
            bus_dmamem_free(sc->age_cdata.age_rr_ring_tag,
                sc->age_rdata.age_rr_ring,
                sc->age_cdata.age_rr_ring_map);
        sc->age_rdata.age_rr_ring_paddr = 0;
        sc->age_rdata.age_rr_ring = NULL;
        bus_dma_tag_destroy(sc->age_cdata.age_rr_ring_tag);
        sc->age_cdata.age_rr_ring_tag = NULL;
    }
    /* CMB block */
    if (sc->age_cdata.age_cmb_block_tag != NULL) {
        if (sc->age_rdata.age_cmb_block_paddr != 0)
            bus_dmamap_unload(sc->age_cdata.age_cmb_block_tag,
                sc->age_cdata.age_cmb_block_map);
        if (sc->age_rdata.age_cmb_block != NULL)
            bus_dmamem_free(sc->age_cdata.age_cmb_block_tag,
                sc->age_rdata.age_cmb_block,
                sc->age_cdata.age_cmb_block_map);
        sc->age_rdata.age_cmb_block_paddr = 0;
        sc->age_rdata.age_cmb_block = NULL;
        bus_dma_tag_destroy(sc->age_cdata.age_cmb_block_tag);
        sc->age_cdata.age_cmb_block_tag = NULL;
    }
    /* SMB block */
    if (sc->age_cdata.age_smb_block_tag != NULL) {
        if (sc->age_rdata.age_smb_block_paddr != 0)
            bus_dmamap_unload(sc->age_cdata.age_smb_block_tag,
                sc->age_cdata.age_smb_block_map);
        if (sc->age_rdata.age_smb_block != NULL)
            bus_dmamem_free(sc->age_cdata.age_smb_block_tag,
                sc->age_rdata.age_smb_block,
                sc->age_cdata.age_smb_block_map);
        sc->age_rdata.age_smb_block_paddr = 0;
        sc->age_rdata.age_smb_block = NULL;
        bus_dma_tag_destroy(sc->age_cdata.age_smb_block_tag);
        sc->age_cdata.age_smb_block_tag = NULL;
    }

    if (sc->age_cdata.age_buffer_tag != NULL) {
        bus_dma_tag_destroy(sc->age_cdata.age_buffer_tag);
        sc->age_cdata.age_buffer_tag = NULL;
    }
    if (sc->age_cdata.age_parent_tag != NULL) {
        bus_dma_tag_destroy(sc->age_cdata.age_parent_tag);
        sc->age_cdata.age_parent_tag = NULL;
    }
}

/*
 * Make sure the interface is stopped at reboot time.
 */
static int
age_shutdown(device_t dev)
{

    return (age_suspend(dev));
}

static void
age_setwol(struct age_softc *sc)
{
    if_t ifp;
    struct mii_data *mii;
    uint32_t reg, pmcs;
    uint16_t pmstat;
    int aneg, i, pmc;

    AGE_LOCK_ASSERT(sc);

    if (pci_find_cap(sc->age_dev, PCIY_PMG, &pmc) != 0) {
        CSR_WRITE_4(sc, AGE_WOL_CFG, 0);
        /*
         * No PME capability, PHY power down.
         * XXX
         * For an unknown reason, powering down the PHY resulted in
         * unexpected behavior such as inaccessibility of the hardware
         * on a freshly rebooted system. Disable powering down the PHY
         * until more information on Attansic/Atheros PHY hardware is
         * available.
         */
#ifdef notyet
        age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
            MII_BMCR, BMCR_PDOWN);
#endif
        return;
    }

    ifp = sc->age_ifp;
    if ((if_getcapenable(ifp) & IFCAP_WOL) != 0) {
        /*
         * Note, this driver resets the link speed to 10/100Mbps with
         * auto-negotiation, but we don't know whether that operation
         * will succeed since the driver has no control after powering
         * off. If the renegotiation fails, WOL may not work. Running
         * at 1Gbps would draw more power than the 375mA at 3.3V allowed
         * by the PCI specification, which could result in power to the
         * ethernet controller being shut down entirely.
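         * To stay within that budget, the code below renegotiates a
         * 10/100 link (or, failing that, forces the MAC to 100Mbps
         * full-duplex) before arming WOL.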
1349 * 1350 * TODO 1351 * Save current negotiated media speed/duplex/flow-control 1352 * to softc and restore the same link again after resuming. 1353 * PHY handling such as power down/resetting to 100Mbps 1354 * may be better handled in suspend method in phy driver. 1355 */ 1356 mii = device_get_softc(sc->age_miibus); 1357 mii_pollstat(mii); 1358 aneg = 0; 1359 if ((mii->mii_media_status & IFM_AVALID) != 0) { 1360 switch IFM_SUBTYPE(mii->mii_media_active) { 1361 case IFM_10_T: 1362 case IFM_100_TX: 1363 goto got_link; 1364 case IFM_1000_T: 1365 aneg++; 1366 default: 1367 break; 1368 } 1369 } 1370 age_miibus_writereg(sc->age_dev, sc->age_phyaddr, 1371 MII_100T2CR, 0); 1372 age_miibus_writereg(sc->age_dev, sc->age_phyaddr, 1373 MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | 1374 ANAR_10 | ANAR_CSMA); 1375 age_miibus_writereg(sc->age_dev, sc->age_phyaddr, 1376 MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG); 1377 DELAY(1000); 1378 if (aneg != 0) { 1379 /* Poll link state until age(4) get a 10/100 link. */ 1380 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) { 1381 mii_pollstat(mii); 1382 if ((mii->mii_media_status & IFM_AVALID) != 0) { 1383 switch (IFM_SUBTYPE( 1384 mii->mii_media_active)) { 1385 case IFM_10_T: 1386 case IFM_100_TX: 1387 age_mac_config(sc); 1388 goto got_link; 1389 default: 1390 break; 1391 } 1392 } 1393 AGE_UNLOCK(sc); 1394 pause("agelnk", hz); 1395 AGE_LOCK(sc); 1396 } 1397 if (i == MII_ANEGTICKS_GIGE) 1398 device_printf(sc->age_dev, 1399 "establishing link failed, " 1400 "WOL may not work!"); 1401 } 1402 /* 1403 * No link, force MAC to have 100Mbps, full-duplex link. 1404 * This is the last resort and may/may not work. 1405 */ 1406 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE; 1407 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX; 1408 age_mac_config(sc); 1409 } 1410 1411 got_link: 1412 pmcs = 0; 1413 if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0) 1414 pmcs |= WOL_CFG_MAGIC | WOL_CFG_MAGIC_ENB; 1415 CSR_WRITE_4(sc, AGE_WOL_CFG, pmcs); 1416 reg = CSR_READ_4(sc, AGE_MAC_CFG); 1417 reg &= ~(MAC_CFG_DBG | MAC_CFG_PROMISC); 1418 reg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST); 1419 if ((if_getcapenable(ifp) & IFCAP_WOL_MCAST) != 0) 1420 reg |= MAC_CFG_ALLMULTI | MAC_CFG_BCAST; 1421 if ((if_getcapenable(ifp) & IFCAP_WOL) != 0) { 1422 reg |= MAC_CFG_RX_ENB; 1423 CSR_WRITE_4(sc, AGE_MAC_CFG, reg); 1424 } 1425 1426 /* Request PME. */ 1427 pmstat = pci_read_config(sc->age_dev, pmc + PCIR_POWER_STATUS, 2); 1428 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 1429 if ((if_getcapenable(ifp) & IFCAP_WOL) != 0) 1430 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 1431 pci_write_config(sc->age_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); 1432 #ifdef notyet 1433 /* See above for powering down PHY issues. */ 1434 if ((if_getcapenable(ifp) & IFCAP_WOL) == 0) { 1435 /* No WOL, PHY power down. 
*/ 1436 age_miibus_writereg(sc->age_dev, sc->age_phyaddr, 1437 MII_BMCR, BMCR_PDOWN); 1438 } 1439 #endif 1440 } 1441 1442 static int 1443 age_suspend(device_t dev) 1444 { 1445 struct age_softc *sc; 1446 1447 sc = device_get_softc(dev); 1448 1449 AGE_LOCK(sc); 1450 age_stop(sc); 1451 age_setwol(sc); 1452 AGE_UNLOCK(sc); 1453 1454 return (0); 1455 } 1456 1457 static int 1458 age_resume(device_t dev) 1459 { 1460 struct age_softc *sc; 1461 if_t ifp; 1462 1463 sc = device_get_softc(dev); 1464 1465 AGE_LOCK(sc); 1466 age_phy_reset(sc); 1467 ifp = sc->age_ifp; 1468 if ((if_getflags(ifp) & IFF_UP) != 0) 1469 age_init_locked(sc); 1470 1471 AGE_UNLOCK(sc); 1472 1473 return (0); 1474 } 1475 1476 static int 1477 age_encap(struct age_softc *sc, struct mbuf **m_head) 1478 { 1479 struct age_txdesc *txd, *txd_last; 1480 struct tx_desc *desc; 1481 struct mbuf *m; 1482 struct ip *ip; 1483 struct tcphdr *tcp; 1484 bus_dma_segment_t txsegs[AGE_MAXTXSEGS]; 1485 bus_dmamap_t map; 1486 uint32_t cflags, hdrlen, ip_off, poff, vtag; 1487 int error, i, nsegs, prod, si; 1488 1489 AGE_LOCK_ASSERT(sc); 1490 1491 M_ASSERTPKTHDR((*m_head)); 1492 1493 m = *m_head; 1494 ip = NULL; 1495 tcp = NULL; 1496 cflags = vtag = 0; 1497 ip_off = poff = 0; 1498 if ((m->m_pkthdr.csum_flags & (AGE_CSUM_FEATURES | CSUM_TSO)) != 0) { 1499 /* 1500 * L1 requires offset of TCP/UDP payload in its Tx 1501 * descriptor to perform hardware Tx checksum offload. 1502 * Additionally, TSO requires IP/TCP header size and 1503 * modification of IP/TCP header in order to make TSO 1504 * engine work. This kind of operation takes many CPU 1505 * cycles on FreeBSD so fast host CPU is needed to get 1506 * smooth TSO performance. 1507 */ 1508 struct ether_header *eh; 1509 1510 if (M_WRITABLE(m) == 0) { 1511 /* Get a writable copy. */ 1512 m = m_dup(*m_head, M_NOWAIT); 1513 /* Release original mbufs. */ 1514 m_freem(*m_head); 1515 if (m == NULL) { 1516 *m_head = NULL; 1517 return (ENOBUFS); 1518 } 1519 *m_head = m; 1520 } 1521 ip_off = sizeof(struct ether_header); 1522 m = m_pullup(m, ip_off); 1523 if (m == NULL) { 1524 *m_head = NULL; 1525 return (ENOBUFS); 1526 } 1527 eh = mtod(m, struct ether_header *); 1528 /* 1529 * Check if hardware VLAN insertion is off. 1530 * Additional check for LLC/SNAP frame? 1531 */ 1532 if (eh->ether_type == htons(ETHERTYPE_VLAN)) { 1533 ip_off = sizeof(struct ether_vlan_header); 1534 m = m_pullup(m, ip_off); 1535 if (m == NULL) { 1536 *m_head = NULL; 1537 return (ENOBUFS); 1538 } 1539 } 1540 m = m_pullup(m, ip_off + sizeof(struct ip)); 1541 if (m == NULL) { 1542 *m_head = NULL; 1543 return (ENOBUFS); 1544 } 1545 ip = (struct ip *)(mtod(m, char *) + ip_off); 1546 poff = ip_off + (ip->ip_hl << 2); 1547 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 1548 m = m_pullup(m, poff + sizeof(struct tcphdr)); 1549 if (m == NULL) { 1550 *m_head = NULL; 1551 return (ENOBUFS); 1552 } 1553 tcp = (struct tcphdr *)(mtod(m, char *) + poff); 1554 m = m_pullup(m, poff + (tcp->th_off << 2)); 1555 if (m == NULL) { 1556 *m_head = NULL; 1557 return (ENOBUFS); 1558 } 1559 /* 1560 * L1 requires IP/TCP header size and offset as 1561 * well as TCP pseudo checksum which complicates 1562 * TSO configuration. I guess this comes from the 1563 * adherence to Microsoft NDIS Large Send 1564 * specification which requires insertion of 1565 * pseudo checksum by upper stack. The pseudo 1566 * checksum that NDIS refers to doesn't include 1567 * TCP payload length so age(4) should recompute 1568 * the pseudo checksum here. 
Hopefully this wouldn't 1569 * be much burden on modern CPUs. 1570 * Reset IP checksum and recompute TCP pseudo 1571 * checksum as NDIS specification said. 1572 */ 1573 ip = (struct ip *)(mtod(m, char *) + ip_off); 1574 tcp = (struct tcphdr *)(mtod(m, char *) + poff); 1575 ip->ip_sum = 0; 1576 tcp->th_sum = in_pseudo(ip->ip_src.s_addr, 1577 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 1578 } 1579 *m_head = m; 1580 } 1581 1582 si = prod = sc->age_cdata.age_tx_prod; 1583 txd = &sc->age_cdata.age_txdesc[prod]; 1584 txd_last = txd; 1585 map = txd->tx_dmamap; 1586 1587 error = bus_dmamap_load_mbuf_sg(sc->age_cdata.age_tx_tag, map, 1588 *m_head, txsegs, &nsegs, 0); 1589 if (error == EFBIG) { 1590 m = m_collapse(*m_head, M_NOWAIT, AGE_MAXTXSEGS); 1591 if (m == NULL) { 1592 m_freem(*m_head); 1593 *m_head = NULL; 1594 return (ENOMEM); 1595 } 1596 *m_head = m; 1597 error = bus_dmamap_load_mbuf_sg(sc->age_cdata.age_tx_tag, map, 1598 *m_head, txsegs, &nsegs, 0); 1599 if (error != 0) { 1600 m_freem(*m_head); 1601 *m_head = NULL; 1602 return (error); 1603 } 1604 } else if (error != 0) 1605 return (error); 1606 if (nsegs == 0) { 1607 m_freem(*m_head); 1608 *m_head = NULL; 1609 return (EIO); 1610 } 1611 1612 /* Check descriptor overrun. */ 1613 if (sc->age_cdata.age_tx_cnt + nsegs >= AGE_TX_RING_CNT - 2) { 1614 bus_dmamap_unload(sc->age_cdata.age_tx_tag, map); 1615 return (ENOBUFS); 1616 } 1617 1618 m = *m_head; 1619 /* Configure VLAN hardware tag insertion. */ 1620 if ((m->m_flags & M_VLANTAG) != 0) { 1621 vtag = AGE_TX_VLAN_TAG(m->m_pkthdr.ether_vtag); 1622 vtag = ((vtag << AGE_TD_VLAN_SHIFT) & AGE_TD_VLAN_MASK); 1623 cflags |= AGE_TD_INSERT_VLAN_TAG; 1624 } 1625 1626 desc = NULL; 1627 i = 0; 1628 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 1629 /* Request TSO and set MSS. */ 1630 cflags |= AGE_TD_TSO_IPV4; 1631 cflags |= AGE_TD_IPCSUM | AGE_TD_TCPCSUM; 1632 cflags |= ((uint32_t)m->m_pkthdr.tso_segsz << 1633 AGE_TD_TSO_MSS_SHIFT); 1634 /* Set IP/TCP header size. */ 1635 cflags |= ip->ip_hl << AGE_TD_IPHDR_LEN_SHIFT; 1636 cflags |= tcp->th_off << AGE_TD_TSO_TCPHDR_LEN_SHIFT; 1637 /* 1638 * L1 requires the first buffer should only hold IP/TCP 1639 * header data. TCP payload should be handled in other 1640 * descriptors. 1641 */ 1642 hdrlen = poff + (tcp->th_off << 2); 1643 desc = &sc->age_rdata.age_tx_ring[prod]; 1644 desc->addr = htole64(txsegs[0].ds_addr); 1645 desc->len = htole32(AGE_TX_BYTES(hdrlen) | vtag); 1646 desc->flags = htole32(cflags); 1647 sc->age_cdata.age_tx_cnt++; 1648 AGE_DESC_INC(prod, AGE_TX_RING_CNT); 1649 if (m->m_len - hdrlen > 0) { 1650 /* Handle remaining payload of the 1st fragment. */ 1651 desc = &sc->age_rdata.age_tx_ring[prod]; 1652 desc->addr = htole64(txsegs[0].ds_addr + hdrlen); 1653 desc->len = htole32(AGE_TX_BYTES(m->m_len - hdrlen) | 1654 vtag); 1655 desc->flags = htole32(cflags); 1656 sc->age_cdata.age_tx_cnt++; 1657 AGE_DESC_INC(prod, AGE_TX_RING_CNT); 1658 } 1659 /* Handle remaining fragments. */ 1660 i = 1; 1661 } else if ((m->m_pkthdr.csum_flags & AGE_CSUM_FEATURES) != 0) { 1662 /* Configure Tx IP/TCP/UDP checksum offload. */ 1663 cflags |= AGE_TD_CSUM; 1664 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0) 1665 cflags |= AGE_TD_TCPCSUM; 1666 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) 1667 cflags |= AGE_TD_UDPCSUM; 1668 /* Set checksum start offset. */ 1669 cflags |= (poff << AGE_TD_CSUM_PLOADOFFSET_SHIFT); 1670 /* Set checksum insertion position of TCP/UDP. 
*/ 1671 cflags |= ((poff + m->m_pkthdr.csum_data) << 1672 AGE_TD_CSUM_XSUMOFFSET_SHIFT); 1673 } 1674 for (; i < nsegs; i++) { 1675 desc = &sc->age_rdata.age_tx_ring[prod]; 1676 desc->addr = htole64(txsegs[i].ds_addr); 1677 desc->len = htole32(AGE_TX_BYTES(txsegs[i].ds_len) | vtag); 1678 desc->flags = htole32(cflags); 1679 sc->age_cdata.age_tx_cnt++; 1680 AGE_DESC_INC(prod, AGE_TX_RING_CNT); 1681 } 1682 /* Update producer index. */ 1683 sc->age_cdata.age_tx_prod = prod; 1684 1685 /* Set EOP on the last descriptor. */ 1686 prod = (prod + AGE_TX_RING_CNT - 1) % AGE_TX_RING_CNT; 1687 desc = &sc->age_rdata.age_tx_ring[prod]; 1688 desc->flags |= htole32(AGE_TD_EOP); 1689 1690 /* Lastly set TSO header and modify IP/TCP header for TSO operation. */ 1691 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 1692 desc = &sc->age_rdata.age_tx_ring[si]; 1693 desc->flags |= htole32(AGE_TD_TSO_HDR); 1694 } 1695 1696 /* Swap dmamap of the first and the last. */ 1697 txd = &sc->age_cdata.age_txdesc[prod]; 1698 map = txd_last->tx_dmamap; 1699 txd_last->tx_dmamap = txd->tx_dmamap; 1700 txd->tx_dmamap = map; 1701 txd->tx_m = m; 1702 1703 /* Sync descriptors. */ 1704 bus_dmamap_sync(sc->age_cdata.age_tx_tag, map, BUS_DMASYNC_PREWRITE); 1705 bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag, 1706 sc->age_cdata.age_tx_ring_map, 1707 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1708 1709 return (0); 1710 } 1711 1712 static void 1713 age_start(if_t ifp) 1714 { 1715 struct age_softc *sc; 1716 1717 sc = if_getsoftc(ifp); 1718 AGE_LOCK(sc); 1719 age_start_locked(ifp); 1720 AGE_UNLOCK(sc); 1721 } 1722 1723 static void 1724 age_start_locked(if_t ifp) 1725 { 1726 struct age_softc *sc; 1727 struct mbuf *m_head; 1728 int enq; 1729 1730 sc = if_getsoftc(ifp); 1731 1732 AGE_LOCK_ASSERT(sc); 1733 1734 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1735 IFF_DRV_RUNNING || (sc->age_flags & AGE_FLAG_LINK) == 0) 1736 return; 1737 1738 for (enq = 0; !if_sendq_empty(ifp); ) { 1739 m_head = if_dequeue(ifp); 1740 if (m_head == NULL) 1741 break; 1742 /* 1743 * Pack the data into the transmit ring. If we 1744 * don't have room, set the OACTIVE flag and wait 1745 * for the NIC to drain the ring. 1746 */ 1747 if (age_encap(sc, &m_head)) { 1748 if (m_head == NULL) 1749 break; 1750 if_sendq_prepend(ifp, m_head); 1751 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); 1752 break; 1753 } 1754 1755 enq++; 1756 /* 1757 * If there's a BPF listener, bounce a copy of this frame 1758 * to him. 1759 */ 1760 ETHER_BPF_MTAP(ifp, m_head); 1761 } 1762 1763 if (enq > 0) { 1764 /* Update mbox. */ 1765 AGE_COMMIT_MBOX(sc); 1766 /* Set a timeout in case the chip goes out to lunch. 
*/ 1767 sc->age_watchdog_timer = AGE_TX_TIMEOUT; 1768 } 1769 } 1770 1771 static void 1772 age_watchdog(struct age_softc *sc) 1773 { 1774 if_t ifp; 1775 1776 AGE_LOCK_ASSERT(sc); 1777 1778 if (sc->age_watchdog_timer == 0 || --sc->age_watchdog_timer) 1779 return; 1780 1781 ifp = sc->age_ifp; 1782 if ((sc->age_flags & AGE_FLAG_LINK) == 0) { 1783 if_printf(sc->age_ifp, "watchdog timeout (missed link)\n"); 1784 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1785 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); 1786 age_init_locked(sc); 1787 return; 1788 } 1789 if (sc->age_cdata.age_tx_cnt == 0) { 1790 if_printf(sc->age_ifp, 1791 "watchdog timeout (missed Tx interrupts) -- recovering\n"); 1792 if (!if_sendq_empty(ifp)) 1793 age_start_locked(ifp); 1794 return; 1795 } 1796 if_printf(sc->age_ifp, "watchdog timeout\n"); 1797 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1798 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); 1799 age_init_locked(sc); 1800 if (!if_sendq_empty(ifp)) 1801 age_start_locked(ifp); 1802 } 1803 1804 static int 1805 age_ioctl(if_t ifp, u_long cmd, caddr_t data) 1806 { 1807 struct age_softc *sc; 1808 struct ifreq *ifr; 1809 struct mii_data *mii; 1810 uint32_t reg; 1811 int error, mask; 1812 1813 sc = if_getsoftc(ifp); 1814 ifr = (struct ifreq *)data; 1815 error = 0; 1816 switch (cmd) { 1817 case SIOCSIFMTU: 1818 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > AGE_JUMBO_MTU) 1819 error = EINVAL; 1820 else if (if_getmtu(ifp) != ifr->ifr_mtu) { 1821 AGE_LOCK(sc); 1822 if_setmtu(ifp, ifr->ifr_mtu); 1823 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { 1824 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); 1825 age_init_locked(sc); 1826 } 1827 AGE_UNLOCK(sc); 1828 } 1829 break; 1830 case SIOCSIFFLAGS: 1831 AGE_LOCK(sc); 1832 if ((if_getflags(ifp) & IFF_UP) != 0) { 1833 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { 1834 if (((if_getflags(ifp) ^ sc->age_if_flags) 1835 & (IFF_PROMISC | IFF_ALLMULTI)) != 0) 1836 age_rxfilter(sc); 1837 } else { 1838 if ((sc->age_flags & AGE_FLAG_DETACH) == 0) 1839 age_init_locked(sc); 1840 } 1841 } else { 1842 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) 1843 age_stop(sc); 1844 } 1845 sc->age_if_flags = if_getflags(ifp); 1846 AGE_UNLOCK(sc); 1847 break; 1848 case SIOCADDMULTI: 1849 case SIOCDELMULTI: 1850 AGE_LOCK(sc); 1851 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) 1852 age_rxfilter(sc); 1853 AGE_UNLOCK(sc); 1854 break; 1855 case SIOCSIFMEDIA: 1856 case SIOCGIFMEDIA: 1857 mii = device_get_softc(sc->age_miibus); 1858 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 1859 break; 1860 case SIOCSIFCAP: 1861 AGE_LOCK(sc); 1862 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); 1863 if ((mask & IFCAP_TXCSUM) != 0 && 1864 (if_getcapabilities(ifp) & IFCAP_TXCSUM) != 0) { 1865 if_togglecapenable(ifp, IFCAP_TXCSUM); 1866 if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0) 1867 if_sethwassistbits(ifp, AGE_CSUM_FEATURES, 0); 1868 else 1869 if_sethwassistbits(ifp, 0, AGE_CSUM_FEATURES); 1870 } 1871 if ((mask & IFCAP_RXCSUM) != 0 && 1872 (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0) { 1873 if_togglecapenable(ifp, IFCAP_RXCSUM); 1874 reg = CSR_READ_4(sc, AGE_MAC_CFG); 1875 reg &= ~MAC_CFG_RXCSUM_ENB; 1876 if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) 1877 reg |= MAC_CFG_RXCSUM_ENB; 1878 CSR_WRITE_4(sc, AGE_MAC_CFG, reg); 1879 } 1880 if ((mask & IFCAP_TSO4) != 0 && 1881 (if_getcapabilities(ifp) & IFCAP_TSO4) != 0) { 1882 if_togglecapenable(ifp, IFCAP_TSO4); 1883 if ((if_getcapenable(ifp) & IFCAP_TSO4) != 0) 1884 if_sethwassistbits(ifp, CSUM_TSO, 0); 1885 else 1886 
if_sethwassistbits(ifp, 0, CSUM_TSO); 1887 } 1888 1889 if ((mask & IFCAP_WOL_MCAST) != 0 && 1890 (if_getcapabilities(ifp) & IFCAP_WOL_MCAST) != 0) 1891 if_togglecapenable(ifp, IFCAP_WOL_MCAST); 1892 if ((mask & IFCAP_WOL_MAGIC) != 0 && 1893 (if_getcapabilities(ifp) & IFCAP_WOL_MAGIC) != 0) 1894 if_togglecapenable(ifp, IFCAP_WOL_MAGIC); 1895 if ((mask & IFCAP_VLAN_HWCSUM) != 0 && 1896 (if_getcapabilities(ifp) & IFCAP_VLAN_HWCSUM) != 0) 1897 if_togglecapenable(ifp, IFCAP_VLAN_HWCSUM); 1898 if ((mask & IFCAP_VLAN_HWTSO) != 0 && 1899 (if_getcapabilities(ifp) & IFCAP_VLAN_HWTSO) != 0) 1900 if_togglecapenable(ifp, IFCAP_VLAN_HWTSO); 1901 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && 1902 (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0) { 1903 if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING); 1904 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0) 1905 if_setcapenablebit(ifp, 0, IFCAP_VLAN_HWTSO); 1906 age_rxvlan(sc); 1907 } 1908 AGE_UNLOCK(sc); 1909 VLAN_CAPABILITIES(ifp); 1910 break; 1911 default: 1912 error = ether_ioctl(ifp, cmd, data); 1913 break; 1914 } 1915 1916 return (error); 1917 } 1918 1919 static void 1920 age_mac_config(struct age_softc *sc) 1921 { 1922 struct mii_data *mii; 1923 uint32_t reg; 1924 1925 AGE_LOCK_ASSERT(sc); 1926 1927 mii = device_get_softc(sc->age_miibus); 1928 reg = CSR_READ_4(sc, AGE_MAC_CFG); 1929 reg &= ~MAC_CFG_FULL_DUPLEX; 1930 reg &= ~(MAC_CFG_TX_FC | MAC_CFG_RX_FC); 1931 reg &= ~MAC_CFG_SPEED_MASK; 1932 /* Reprogram MAC with resolved speed/duplex. */ 1933 switch (IFM_SUBTYPE(mii->mii_media_active)) { 1934 case IFM_10_T: 1935 case IFM_100_TX: 1936 reg |= MAC_CFG_SPEED_10_100; 1937 break; 1938 case IFM_1000_T: 1939 reg |= MAC_CFG_SPEED_1000; 1940 break; 1941 } 1942 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 1943 reg |= MAC_CFG_FULL_DUPLEX; 1944 #ifdef notyet 1945 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) 1946 reg |= MAC_CFG_TX_FC; 1947 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) 1948 reg |= MAC_CFG_RX_FC; 1949 #endif 1950 } 1951 1952 CSR_WRITE_4(sc, AGE_MAC_CFG, reg); 1953 } 1954 1955 static void 1956 age_link_task(void *arg, int pending) 1957 { 1958 struct age_softc *sc; 1959 struct mii_data *mii; 1960 if_t ifp; 1961 uint32_t reg; 1962 1963 sc = (struct age_softc *)arg; 1964 1965 AGE_LOCK(sc); 1966 mii = device_get_softc(sc->age_miibus); 1967 ifp = sc->age_ifp; 1968 if (mii == NULL || ifp == NULL || 1969 (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) { 1970 AGE_UNLOCK(sc); 1971 return; 1972 } 1973 1974 sc->age_flags &= ~AGE_FLAG_LINK; 1975 if ((mii->mii_media_status & IFM_AVALID) != 0) { 1976 switch (IFM_SUBTYPE(mii->mii_media_active)) { 1977 case IFM_10_T: 1978 case IFM_100_TX: 1979 case IFM_1000_T: 1980 sc->age_flags |= AGE_FLAG_LINK; 1981 break; 1982 default: 1983 break; 1984 } 1985 } 1986 1987 /* Stop Rx/Tx MACs. */ 1988 age_stop_rxmac(sc); 1989 age_stop_txmac(sc); 1990 1991 /* Program MACs with resolved speed/duplex/flow-control. */ 1992 if ((sc->age_flags & AGE_FLAG_LINK) != 0) { 1993 age_mac_config(sc); 1994 reg = CSR_READ_4(sc, AGE_MAC_CFG); 1995 /* Restart DMA engine and Tx/Rx MAC. 
*/ 1996 CSR_WRITE_4(sc, AGE_DMA_CFG, CSR_READ_4(sc, AGE_DMA_CFG) | 1997 DMA_CFG_RD_ENB | DMA_CFG_WR_ENB); 1998 reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB; 1999 CSR_WRITE_4(sc, AGE_MAC_CFG, reg); 2000 } 2001 2002 AGE_UNLOCK(sc); 2003 } 2004 2005 static void 2006 age_stats_update(struct age_softc *sc) 2007 { 2008 struct age_stats *stat; 2009 struct smb *smb; 2010 if_t ifp; 2011 2012 AGE_LOCK_ASSERT(sc); 2013 2014 stat = &sc->age_stat; 2015 2016 bus_dmamap_sync(sc->age_cdata.age_smb_block_tag, 2017 sc->age_cdata.age_smb_block_map, 2018 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2019 2020 smb = sc->age_rdata.age_smb_block; 2021 if (smb->updated == 0) 2022 return; 2023 2024 ifp = sc->age_ifp; 2025 /* Rx stats. */ 2026 stat->rx_frames += smb->rx_frames; 2027 stat->rx_bcast_frames += smb->rx_bcast_frames; 2028 stat->rx_mcast_frames += smb->rx_mcast_frames; 2029 stat->rx_pause_frames += smb->rx_pause_frames; 2030 stat->rx_control_frames += smb->rx_control_frames; 2031 stat->rx_crcerrs += smb->rx_crcerrs; 2032 stat->rx_lenerrs += smb->rx_lenerrs; 2033 stat->rx_bytes += smb->rx_bytes; 2034 stat->rx_runts += smb->rx_runts; 2035 stat->rx_fragments += smb->rx_fragments; 2036 stat->rx_pkts_64 += smb->rx_pkts_64; 2037 stat->rx_pkts_65_127 += smb->rx_pkts_65_127; 2038 stat->rx_pkts_128_255 += smb->rx_pkts_128_255; 2039 stat->rx_pkts_256_511 += smb->rx_pkts_256_511; 2040 stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023; 2041 stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518; 2042 stat->rx_pkts_1519_max += smb->rx_pkts_1519_max; 2043 stat->rx_pkts_truncated += smb->rx_pkts_truncated; 2044 stat->rx_fifo_oflows += smb->rx_fifo_oflows; 2045 stat->rx_desc_oflows += smb->rx_desc_oflows; 2046 stat->rx_alignerrs += smb->rx_alignerrs; 2047 stat->rx_bcast_bytes += smb->rx_bcast_bytes; 2048 stat->rx_mcast_bytes += smb->rx_mcast_bytes; 2049 stat->rx_pkts_filtered += smb->rx_pkts_filtered; 2050 2051 /* Tx stats. */ 2052 stat->tx_frames += smb->tx_frames; 2053 stat->tx_bcast_frames += smb->tx_bcast_frames; 2054 stat->tx_mcast_frames += smb->tx_mcast_frames; 2055 stat->tx_pause_frames += smb->tx_pause_frames; 2056 stat->tx_excess_defer += smb->tx_excess_defer; 2057 stat->tx_control_frames += smb->tx_control_frames; 2058 stat->tx_deferred += smb->tx_deferred; 2059 stat->tx_bytes += smb->tx_bytes; 2060 stat->tx_pkts_64 += smb->tx_pkts_64; 2061 stat->tx_pkts_65_127 += smb->tx_pkts_65_127; 2062 stat->tx_pkts_128_255 += smb->tx_pkts_128_255; 2063 stat->tx_pkts_256_511 += smb->tx_pkts_256_511; 2064 stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023; 2065 stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518; 2066 stat->tx_pkts_1519_max += smb->tx_pkts_1519_max; 2067 stat->tx_single_colls += smb->tx_single_colls; 2068 stat->tx_multi_colls += smb->tx_multi_colls; 2069 stat->tx_late_colls += smb->tx_late_colls; 2070 stat->tx_excess_colls += smb->tx_excess_colls; 2071 stat->tx_underrun += smb->tx_underrun; 2072 stat->tx_desc_underrun += smb->tx_desc_underrun; 2073 stat->tx_lenerrs += smb->tx_lenerrs; 2074 stat->tx_pkts_truncated += smb->tx_pkts_truncated; 2075 stat->tx_bcast_bytes += smb->tx_bcast_bytes; 2076 stat->tx_mcast_bytes += smb->tx_mcast_bytes; 2077 2078 /* Update counters in ifnet. 
*/ 2079 if_inc_counter(ifp, IFCOUNTER_OPACKETS, smb->tx_frames); 2080 2081 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, smb->tx_single_colls + 2082 smb->tx_multi_colls + smb->tx_late_colls + 2083 smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT); 2084 2085 if_inc_counter(ifp, IFCOUNTER_OERRORS, smb->tx_excess_colls + 2086 smb->tx_late_colls + smb->tx_underrun + 2087 smb->tx_pkts_truncated); 2088 2089 if_inc_counter(ifp, IFCOUNTER_IPACKETS, smb->rx_frames); 2090 2091 if_inc_counter(ifp, IFCOUNTER_IERRORS, smb->rx_crcerrs + 2092 smb->rx_lenerrs + smb->rx_runts + smb->rx_pkts_truncated + 2093 smb->rx_fifo_oflows + smb->rx_desc_oflows + 2094 smb->rx_alignerrs); 2095 2096 /* Update done, clear. */ 2097 smb->updated = 0; 2098 2099 bus_dmamap_sync(sc->age_cdata.age_smb_block_tag, 2100 sc->age_cdata.age_smb_block_map, 2101 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2102 } 2103 2104 static int 2105 age_intr(void *arg) 2106 { 2107 struct age_softc *sc; 2108 uint32_t status; 2109 2110 sc = (struct age_softc *)arg; 2111 2112 status = CSR_READ_4(sc, AGE_INTR_STATUS); 2113 if (status == 0 || (status & AGE_INTRS) == 0) 2114 return (FILTER_STRAY); 2115 /* Disable interrupts. */ 2116 CSR_WRITE_4(sc, AGE_INTR_STATUS, status | INTR_DIS_INT); 2117 taskqueue_enqueue(sc->age_tq, &sc->age_int_task); 2118 2119 return (FILTER_HANDLED); 2120 } 2121 2122 static void 2123 age_int_task(void *arg, int pending) 2124 { 2125 struct age_softc *sc; 2126 if_t ifp; 2127 struct cmb *cmb; 2128 uint32_t status; 2129 2130 sc = (struct age_softc *)arg; 2131 2132 AGE_LOCK(sc); 2133 2134 bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag, 2135 sc->age_cdata.age_cmb_block_map, 2136 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2137 cmb = sc->age_rdata.age_cmb_block; 2138 status = le32toh(cmb->intr_status); 2139 if (sc->age_morework != 0) 2140 status |= INTR_CMB_RX; 2141 if ((status & AGE_INTRS) == 0) 2142 goto done; 2143 2144 sc->age_tpd_cons = (le32toh(cmb->tpd_cons) & TPD_CONS_MASK) >> 2145 TPD_CONS_SHIFT; 2146 sc->age_rr_prod = (le32toh(cmb->rprod_cons) & RRD_PROD_MASK) >> 2147 RRD_PROD_SHIFT; 2148 /* Let hardware know CMB was served. */ 2149 cmb->intr_status = 0; 2150 bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag, 2151 sc->age_cdata.age_cmb_block_map, 2152 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2153 2154 ifp = sc->age_ifp; 2155 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) { 2156 if ((status & INTR_CMB_RX) != 0) 2157 sc->age_morework = age_rxintr(sc, sc->age_rr_prod, 2158 sc->age_process_limit); 2159 if ((status & INTR_CMB_TX) != 0) 2160 age_txintr(sc, sc->age_tpd_cons); 2161 if ((status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) != 0) { 2162 if ((status & INTR_DMA_RD_TO_RST) != 0) 2163 device_printf(sc->age_dev, 2164 "DMA read error! -- resetting\n"); 2165 if ((status & INTR_DMA_WR_TO_RST) != 0) 2166 device_printf(sc->age_dev, 2167 "DMA write error! -- resetting\n"); 2168 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); 2169 age_init_locked(sc); 2170 } 2171 if (!if_sendq_empty(ifp)) 2172 age_start_locked(ifp); 2173 if ((status & INTR_SMB) != 0) 2174 age_stats_update(sc); 2175 } 2176 2177 /* Check whether CMB was updated while serving Tx/Rx/SMB handler. 
*/ 2178 bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag, 2179 sc->age_cdata.age_cmb_block_map, 2180 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2181 status = le32toh(cmb->intr_status); 2182 if (sc->age_morework != 0 || (status & AGE_INTRS) != 0) { 2183 taskqueue_enqueue(sc->age_tq, &sc->age_int_task); 2184 AGE_UNLOCK(sc); 2185 return; 2186 } 2187 2188 done: 2189 /* Re-enable interrupts. */ 2190 CSR_WRITE_4(sc, AGE_INTR_STATUS, 0); 2191 AGE_UNLOCK(sc); 2192 } 2193 2194 static void 2195 age_txintr(struct age_softc *sc, int tpd_cons) 2196 { 2197 if_t ifp; 2198 struct age_txdesc *txd; 2199 int cons, prog; 2200 2201 AGE_LOCK_ASSERT(sc); 2202 2203 ifp = sc->age_ifp; 2204 2205 bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag, 2206 sc->age_cdata.age_tx_ring_map, 2207 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2208 2209 /* 2210 * Go through our Tx list and free mbufs for those 2211 * frames which have been transmitted. 2212 */ 2213 cons = sc->age_cdata.age_tx_cons; 2214 for (prog = 0; cons != tpd_cons; AGE_DESC_INC(cons, AGE_TX_RING_CNT)) { 2215 if (sc->age_cdata.age_tx_cnt <= 0) 2216 break; 2217 prog++; 2218 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); 2219 sc->age_cdata.age_tx_cnt--; 2220 txd = &sc->age_cdata.age_txdesc[cons]; 2221 /* 2222 * Clear Tx descriptors, it's not required but would 2223 * help debugging in case of Tx issues. 2224 */ 2225 txd->tx_desc->addr = 0; 2226 txd->tx_desc->len = 0; 2227 txd->tx_desc->flags = 0; 2228 2229 if (txd->tx_m == NULL) 2230 continue; 2231 /* Reclaim transmitted mbufs. */ 2232 bus_dmamap_sync(sc->age_cdata.age_tx_tag, txd->tx_dmamap, 2233 BUS_DMASYNC_POSTWRITE); 2234 bus_dmamap_unload(sc->age_cdata.age_tx_tag, txd->tx_dmamap); 2235 m_freem(txd->tx_m); 2236 txd->tx_m = NULL; 2237 } 2238 2239 if (prog > 0) { 2240 sc->age_cdata.age_tx_cons = cons; 2241 2242 /* 2243 * Unarm watchdog timer only when there are no pending 2244 * Tx descriptors in queue. 2245 */ 2246 if (sc->age_cdata.age_tx_cnt == 0) 2247 sc->age_watchdog_timer = 0; 2248 bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag, 2249 sc->age_cdata.age_tx_ring_map, 2250 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2251 } 2252 } 2253 2254 #ifndef __NO_STRICT_ALIGNMENT 2255 static struct mbuf * 2256 age_fixup_rx(if_t ifp, struct mbuf *m) 2257 { 2258 struct mbuf *n; 2259 int i; 2260 uint16_t *src, *dst; 2261 2262 src = mtod(m, uint16_t *); 2263 dst = src - 3; 2264 2265 if (m->m_next == NULL) { 2266 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) 2267 *dst++ = *src++; 2268 m->m_data -= 6; 2269 return (m); 2270 } 2271 /* 2272 * Append a new mbuf to received mbuf chain and copy ethernet 2273 * header from the mbuf chain. This can save lots of CPU 2274 * cycles for jumbo frame. 2275 */ 2276 MGETHDR(n, M_NOWAIT, MT_DATA); 2277 if (n == NULL) { 2278 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 2279 m_freem(m); 2280 return (NULL); 2281 } 2282 bcopy(m->m_data, n->m_data, ETHER_HDR_LEN); 2283 m->m_data += ETHER_HDR_LEN; 2284 m->m_len -= ETHER_HDR_LEN; 2285 n->m_len = ETHER_HDR_LEN; 2286 M_MOVE_PKTHDR(n, m); 2287 n->m_next = m; 2288 return (n); 2289 } 2290 #endif 2291 2292 /* Receive a frame. 
*/ 2293 static void 2294 age_rxeof(struct age_softc *sc, struct rx_rdesc *rxrd) 2295 { 2296 struct age_rxdesc *rxd; 2297 if_t ifp; 2298 struct mbuf *mp, *m; 2299 uint32_t status, index, vtag; 2300 int count, nsegs; 2301 int rx_cons; 2302 2303 AGE_LOCK_ASSERT(sc); 2304 2305 ifp = sc->age_ifp; 2306 status = le32toh(rxrd->flags); 2307 index = le32toh(rxrd->index); 2308 rx_cons = AGE_RX_CONS(index); 2309 nsegs = AGE_RX_NSEGS(index); 2310 2311 sc->age_cdata.age_rxlen = AGE_RX_BYTES(le32toh(rxrd->len)); 2312 if ((status & (AGE_RRD_ERROR | AGE_RRD_LENGTH_NOK)) != 0) { 2313 /* 2314 * We want to pass the following frames to upper 2315 * layer regardless of error status of Rx return 2316 * ring. 2317 * 2318 * o IP/TCP/UDP checksum is bad. 2319 * o frame length and protocol specific length 2320 * does not match. 2321 */ 2322 status |= AGE_RRD_IPCSUM_NOK | AGE_RRD_TCP_UDPCSUM_NOK; 2323 if ((status & (AGE_RRD_CRC | AGE_RRD_CODE | AGE_RRD_DRIBBLE | 2324 AGE_RRD_RUNT | AGE_RRD_OFLOW | AGE_RRD_TRUNC)) != 0) 2325 return; 2326 } 2327 2328 for (count = 0; count < nsegs; count++, 2329 AGE_DESC_INC(rx_cons, AGE_RX_RING_CNT)) { 2330 rxd = &sc->age_cdata.age_rxdesc[rx_cons]; 2331 mp = rxd->rx_m; 2332 /* Add a new receive buffer to the ring. */ 2333 if (age_newbuf(sc, rxd) != 0) { 2334 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 2335 /* Reuse Rx buffers. */ 2336 if (sc->age_cdata.age_rxhead != NULL) 2337 m_freem(sc->age_cdata.age_rxhead); 2338 break; 2339 } 2340 2341 /* 2342 * Assume we've received a full sized frame. 2343 * Actual size is fixed when we encounter the end of 2344 * multi-segmented frame. 2345 */ 2346 mp->m_len = AGE_RX_BUF_SIZE; 2347 2348 /* Chain received mbufs. */ 2349 if (sc->age_cdata.age_rxhead == NULL) { 2350 sc->age_cdata.age_rxhead = mp; 2351 sc->age_cdata.age_rxtail = mp; 2352 } else { 2353 mp->m_flags &= ~M_PKTHDR; 2354 sc->age_cdata.age_rxprev_tail = 2355 sc->age_cdata.age_rxtail; 2356 sc->age_cdata.age_rxtail->m_next = mp; 2357 sc->age_cdata.age_rxtail = mp; 2358 } 2359 2360 if (count == nsegs - 1) { 2361 /* Last desc. for this frame. */ 2362 m = sc->age_cdata.age_rxhead; 2363 m->m_flags |= M_PKTHDR; 2364 /* 2365 * It seems that L1 controller has no way 2366 * to tell hardware to strip CRC bytes. 2367 */ 2368 m->m_pkthdr.len = sc->age_cdata.age_rxlen - 2369 ETHER_CRC_LEN; 2370 if (nsegs > 1) { 2371 /* Set last mbuf size. */ 2372 mp->m_len = sc->age_cdata.age_rxlen - 2373 ((nsegs - 1) * AGE_RX_BUF_SIZE); 2374 /* Remove the CRC bytes in chained mbufs. */ 2375 if (mp->m_len <= ETHER_CRC_LEN) { 2376 sc->age_cdata.age_rxtail = 2377 sc->age_cdata.age_rxprev_tail; 2378 sc->age_cdata.age_rxtail->m_len -= 2379 (ETHER_CRC_LEN - mp->m_len); 2380 sc->age_cdata.age_rxtail->m_next = NULL; 2381 m_freem(mp); 2382 } else { 2383 mp->m_len -= ETHER_CRC_LEN; 2384 } 2385 } else 2386 m->m_len = m->m_pkthdr.len; 2387 m->m_pkthdr.rcvif = ifp; 2388 /* 2389 * Set checksum information. 2390 * It seems that L1 controller can compute partial 2391 * checksum. The partial checksum value can be used 2392 * to accelerate checksum computation for fragmented 2393 * TCP/UDP packets. Upper network stack already 2394 * takes advantage of the partial checksum value in 2395 * IP reassembly stage. But I'm not sure the 2396 * correctness of the partial hardware checksum 2397 * assistance due to lack of data sheet. If it is 2398 * proven to work on L1 I'll enable it. 
2399 */ 2400 if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0 && 2401 (status & AGE_RRD_IPV4) != 0) { 2402 if ((status & AGE_RRD_IPCSUM_NOK) == 0) 2403 m->m_pkthdr.csum_flags |= 2404 CSUM_IP_CHECKED | CSUM_IP_VALID; 2405 if ((status & (AGE_RRD_TCP | AGE_RRD_UDP)) && 2406 (status & AGE_RRD_TCP_UDPCSUM_NOK) == 0) { 2407 m->m_pkthdr.csum_flags |= 2408 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 2409 m->m_pkthdr.csum_data = 0xffff; 2410 } 2411 /* 2412 * Don't mark bad checksum for TCP/UDP frames 2413 * as fragmented frames may always have set 2414 * bad checksummed bit of descriptor status. 2415 */ 2416 } 2417 2418 /* Check for VLAN tagged frames. */ 2419 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0 && 2420 (status & AGE_RRD_VLAN) != 0) { 2421 vtag = AGE_RX_VLAN(le32toh(rxrd->vtags)); 2422 m->m_pkthdr.ether_vtag = AGE_RX_VLAN_TAG(vtag); 2423 m->m_flags |= M_VLANTAG; 2424 } 2425 #ifndef __NO_STRICT_ALIGNMENT 2426 m = age_fixup_rx(ifp, m); 2427 if (m != NULL) 2428 #endif 2429 { 2430 /* Pass it on. */ 2431 AGE_UNLOCK(sc); 2432 if_input(ifp, m); 2433 AGE_LOCK(sc); 2434 } 2435 } 2436 } 2437 2438 /* Reset mbuf chains. */ 2439 AGE_RXCHAIN_RESET(sc); 2440 } 2441 2442 static int 2443 age_rxintr(struct age_softc *sc, int rr_prod, int count) 2444 { 2445 struct rx_rdesc *rxrd; 2446 int rr_cons, nsegs, pktlen, prog; 2447 2448 AGE_LOCK_ASSERT(sc); 2449 2450 rr_cons = sc->age_cdata.age_rr_cons; 2451 if (rr_cons == rr_prod) 2452 return (0); 2453 2454 bus_dmamap_sync(sc->age_cdata.age_rr_ring_tag, 2455 sc->age_cdata.age_rr_ring_map, 2456 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2457 bus_dmamap_sync(sc->age_cdata.age_rx_ring_tag, 2458 sc->age_cdata.age_rx_ring_map, BUS_DMASYNC_POSTWRITE); 2459 2460 for (prog = 0; rr_cons != rr_prod; prog++) { 2461 if (count-- <= 0) 2462 break; 2463 rxrd = &sc->age_rdata.age_rr_ring[rr_cons]; 2464 nsegs = AGE_RX_NSEGS(le32toh(rxrd->index)); 2465 if (nsegs == 0) 2466 break; 2467 /* 2468 * Check number of segments against received bytes. 2469 * Non-matching value would indicate that hardware 2470 * is still trying to update Rx return descriptors. 2471 * I'm not sure whether this check is really needed. 2472 */ 2473 pktlen = AGE_RX_BYTES(le32toh(rxrd->len)); 2474 if (nsegs != howmany(pktlen, AGE_RX_BUF_SIZE)) 2475 break; 2476 2477 /* Received a frame. */ 2478 age_rxeof(sc, rxrd); 2479 /* Clear return ring. */ 2480 rxrd->index = 0; 2481 AGE_DESC_INC(rr_cons, AGE_RR_RING_CNT); 2482 sc->age_cdata.age_rx_cons += nsegs; 2483 sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT; 2484 } 2485 2486 if (prog > 0) { 2487 /* Update the consumer index. */ 2488 sc->age_cdata.age_rr_cons = rr_cons; 2489 2490 bus_dmamap_sync(sc->age_cdata.age_rx_ring_tag, 2491 sc->age_cdata.age_rx_ring_map, BUS_DMASYNC_PREWRITE); 2492 /* Sync descriptors. */ 2493 bus_dmamap_sync(sc->age_cdata.age_rr_ring_tag, 2494 sc->age_cdata.age_rr_ring_map, 2495 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2496 2497 /* Notify hardware availability of new Rx buffers. */ 2498 AGE_COMMIT_MBOX(sc); 2499 } 2500 2501 return (count > 0 ? 
0 : EAGAIN); 2502 } 2503 2504 static void 2505 age_tick(void *arg) 2506 { 2507 struct age_softc *sc; 2508 struct mii_data *mii; 2509 2510 sc = (struct age_softc *)arg; 2511 2512 AGE_LOCK_ASSERT(sc); 2513 2514 mii = device_get_softc(sc->age_miibus); 2515 mii_tick(mii); 2516 age_watchdog(sc); 2517 callout_reset(&sc->age_tick_ch, hz, age_tick, sc); 2518 } 2519 2520 static void 2521 age_reset(struct age_softc *sc) 2522 { 2523 uint32_t reg; 2524 int i; 2525 2526 CSR_WRITE_4(sc, AGE_MASTER_CFG, MASTER_RESET); 2527 CSR_READ_4(sc, AGE_MASTER_CFG); 2528 DELAY(1000); 2529 for (i = AGE_RESET_TIMEOUT; i > 0; i--) { 2530 if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0) 2531 break; 2532 DELAY(10); 2533 } 2534 2535 if (i == 0) 2536 device_printf(sc->age_dev, "reset timeout(0x%08x)!\n", reg); 2537 /* Initialize PCIe module. From Linux. */ 2538 CSR_WRITE_4(sc, 0x12FC, 0x6500); 2539 CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000); 2540 } 2541 2542 static void 2543 age_init(void *xsc) 2544 { 2545 struct age_softc *sc; 2546 2547 sc = (struct age_softc *)xsc; 2548 AGE_LOCK(sc); 2549 age_init_locked(sc); 2550 AGE_UNLOCK(sc); 2551 } 2552 2553 static void 2554 age_init_locked(struct age_softc *sc) 2555 { 2556 if_t ifp; 2557 struct mii_data *mii; 2558 uint8_t eaddr[ETHER_ADDR_LEN]; 2559 bus_addr_t paddr; 2560 uint32_t reg, fsize; 2561 uint32_t rxf_hi, rxf_lo, rrd_hi, rrd_lo; 2562 int error; 2563 2564 AGE_LOCK_ASSERT(sc); 2565 2566 ifp = sc->age_ifp; 2567 mii = device_get_softc(sc->age_miibus); 2568 2569 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) 2570 return; 2571 2572 /* 2573 * Cancel any pending I/O. 2574 */ 2575 age_stop(sc); 2576 2577 /* 2578 * Reset the chip to a known state. 2579 */ 2580 age_reset(sc); 2581 2582 /* Initialize descriptors. */ 2583 error = age_init_rx_ring(sc); 2584 if (error != 0) { 2585 device_printf(sc->age_dev, "no memory for Rx buffers.\n"); 2586 age_stop(sc); 2587 return; 2588 } 2589 age_init_rr_ring(sc); 2590 age_init_tx_ring(sc); 2591 age_init_cmb_block(sc); 2592 age_init_smb_block(sc); 2593 2594 /* Reprogram the station address. */ 2595 bcopy(if_getlladdr(ifp), eaddr, ETHER_ADDR_LEN); 2596 CSR_WRITE_4(sc, AGE_PAR0, 2597 eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]); 2598 CSR_WRITE_4(sc, AGE_PAR1, eaddr[0] << 8 | eaddr[1]); 2599 2600 /* Set descriptor base addresses. */ 2601 paddr = sc->age_rdata.age_tx_ring_paddr; 2602 CSR_WRITE_4(sc, AGE_DESC_ADDR_HI, AGE_ADDR_HI(paddr)); 2603 paddr = sc->age_rdata.age_rx_ring_paddr; 2604 CSR_WRITE_4(sc, AGE_DESC_RD_ADDR_LO, AGE_ADDR_LO(paddr)); 2605 paddr = sc->age_rdata.age_rr_ring_paddr; 2606 CSR_WRITE_4(sc, AGE_DESC_RRD_ADDR_LO, AGE_ADDR_LO(paddr)); 2607 paddr = sc->age_rdata.age_tx_ring_paddr; 2608 CSR_WRITE_4(sc, AGE_DESC_TPD_ADDR_LO, AGE_ADDR_LO(paddr)); 2609 paddr = sc->age_rdata.age_cmb_block_paddr; 2610 CSR_WRITE_4(sc, AGE_DESC_CMB_ADDR_LO, AGE_ADDR_LO(paddr)); 2611 paddr = sc->age_rdata.age_smb_block_paddr; 2612 CSR_WRITE_4(sc, AGE_DESC_SMB_ADDR_LO, AGE_ADDR_LO(paddr)); 2613 /* Set Rx/Rx return descriptor counter. */ 2614 CSR_WRITE_4(sc, AGE_DESC_RRD_RD_CNT, 2615 ((AGE_RR_RING_CNT << DESC_RRD_CNT_SHIFT) & 2616 DESC_RRD_CNT_MASK) | 2617 ((AGE_RX_RING_CNT << DESC_RD_CNT_SHIFT) & DESC_RD_CNT_MASK)); 2618 /* Set Tx descriptor counter. */ 2619 CSR_WRITE_4(sc, AGE_DESC_TPD_CNT, 2620 (AGE_TX_RING_CNT << DESC_TPD_CNT_SHIFT) & DESC_TPD_CNT_MASK); 2621 2622 /* Tell hardware that we're ready to load descriptors. */ 2623 CSR_WRITE_4(sc, AGE_DMA_BLOCK, DMA_BLOCK_LOAD); 2624 2625 /* 2626 * Initialize mailbox register. 
	 * Updated producer/consumer index information is exchanged
	 * through this mailbox register.  However, the Tx producer and
	 * Rx return consumer/Rx producer indexes are all shared, so it's
	 * hard to separate the Tx and Rx code paths without locking.  If
	 * the L1 hardware had separate mailbox registers for Tx and Rx
	 * consumer/producer management we could have independent Tx/Rx
	 * handlers, and the Rx handler could then run without any locking.
	 */
	AGE_COMMIT_MBOX(sc);

	/* Configure IPG/IFG parameters. */
	CSR_WRITE_4(sc, AGE_IPG_IFG_CFG,
	    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK) |
	    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
	    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
	    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK));

	/* Set parameters for half-duplex media. */
	CSR_WRITE_4(sc, AGE_HDPX_CFG,
	    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
	    HDPX_CFG_LCOL_MASK) |
	    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
	    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
	    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
	    HDPX_CFG_ABEBT_MASK) |
	    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
	    HDPX_CFG_JAMIPG_MASK));

	/* Configure interrupt moderation timer. */
	CSR_WRITE_2(sc, AGE_IM_TIMER, AGE_USECS(sc->age_int_mod));
	reg = CSR_READ_4(sc, AGE_MASTER_CFG);
	reg &= ~MASTER_MTIMER_ENB;
	if (AGE_USECS(sc->age_int_mod) == 0)
		reg &= ~MASTER_ITIMER_ENB;
	else
		reg |= MASTER_ITIMER_ENB;
	CSR_WRITE_4(sc, AGE_MASTER_CFG, reg);
	if (bootverbose)
		device_printf(sc->age_dev, "interrupt moderation is %d us.\n",
		    sc->age_int_mod);
	CSR_WRITE_2(sc, AGE_INTR_CLR_TIMER, AGE_USECS(1000));

	/* Set maximum frame size; don't let MTU be less than ETHERMTU. */
	if (if_getmtu(ifp) < ETHERMTU)
		sc->age_max_frame_size = ETHERMTU;
	else
		sc->age_max_frame_size = if_getmtu(ifp);
	sc->age_max_frame_size += ETHER_HDR_LEN +
	    sizeof(struct ether_vlan_header) + ETHER_CRC_LEN;
	CSR_WRITE_4(sc, AGE_FRAME_SIZE, sc->age_max_frame_size);
	/* Configure jumbo frame. */
	fsize = roundup(sc->age_max_frame_size, sizeof(uint64_t));
	CSR_WRITE_4(sc, AGE_RXQ_JUMBO_CFG,
	    (((fsize / sizeof(uint64_t)) <<
	    RXQ_JUMBO_CFG_SZ_THRESH_SHIFT) & RXQ_JUMBO_CFG_SZ_THRESH_MASK) |
	    ((RXQ_JUMBO_CFG_LKAH_DEFAULT <<
	    RXQ_JUMBO_CFG_LKAH_SHIFT) & RXQ_JUMBO_CFG_LKAH_MASK) |
	    ((AGE_USECS(8) << RXQ_JUMBO_CFG_RRD_TIMER_SHIFT) &
	    RXQ_JUMBO_CFG_RRD_TIMER_MASK));

	/* Configure flow-control parameters. From Linux. */
	if ((sc->age_flags & AGE_FLAG_PCIE) != 0) {
		/*
		 * Magic workaround for old-L1.
		 * Don't know which hw revision requires this magic.
		 */
		CSR_WRITE_4(sc, 0x12FC, 0x6500);
		/*
		 * Another magic workaround for flow-control mode
		 * change. From Linux.
		 */
		CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
	}
	/*
	 * TODO
	 * Should understand pause parameter relationships between FIFO
	 * size and number of Rx descriptors and Rx return descriptors.
	 *
	 * Magic parameters came from Linux.
2707 */ 2708 switch (sc->age_chip_rev) { 2709 case 0x8001: 2710 case 0x9001: 2711 case 0x9002: 2712 case 0x9003: 2713 rxf_hi = AGE_RX_RING_CNT / 16; 2714 rxf_lo = (AGE_RX_RING_CNT * 7) / 8; 2715 rrd_hi = (AGE_RR_RING_CNT * 7) / 8; 2716 rrd_lo = AGE_RR_RING_CNT / 16; 2717 break; 2718 default: 2719 reg = CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN); 2720 rxf_lo = reg / 16; 2721 if (rxf_lo < 192) 2722 rxf_lo = 192; 2723 rxf_hi = (reg * 7) / 8; 2724 if (rxf_hi < rxf_lo) 2725 rxf_hi = rxf_lo + 16; 2726 reg = CSR_READ_4(sc, AGE_SRAM_RRD_LEN); 2727 rrd_lo = reg / 8; 2728 rrd_hi = (reg * 7) / 8; 2729 if (rrd_lo < 2) 2730 rrd_lo = 2; 2731 if (rrd_hi < rrd_lo) 2732 rrd_hi = rrd_lo + 3; 2733 break; 2734 } 2735 CSR_WRITE_4(sc, AGE_RXQ_FIFO_PAUSE_THRESH, 2736 ((rxf_lo << RXQ_FIFO_PAUSE_THRESH_LO_SHIFT) & 2737 RXQ_FIFO_PAUSE_THRESH_LO_MASK) | 2738 ((rxf_hi << RXQ_FIFO_PAUSE_THRESH_HI_SHIFT) & 2739 RXQ_FIFO_PAUSE_THRESH_HI_MASK)); 2740 CSR_WRITE_4(sc, AGE_RXQ_RRD_PAUSE_THRESH, 2741 ((rrd_lo << RXQ_RRD_PAUSE_THRESH_LO_SHIFT) & 2742 RXQ_RRD_PAUSE_THRESH_LO_MASK) | 2743 ((rrd_hi << RXQ_RRD_PAUSE_THRESH_HI_SHIFT) & 2744 RXQ_RRD_PAUSE_THRESH_HI_MASK)); 2745 2746 /* Configure RxQ. */ 2747 CSR_WRITE_4(sc, AGE_RXQ_CFG, 2748 ((RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) & 2749 RXQ_CFG_RD_BURST_MASK) | 2750 ((RXQ_CFG_RRD_BURST_THRESH_DEFAULT << 2751 RXQ_CFG_RRD_BURST_THRESH_SHIFT) & RXQ_CFG_RRD_BURST_THRESH_MASK) | 2752 ((RXQ_CFG_RD_PREF_MIN_IPG_DEFAULT << 2753 RXQ_CFG_RD_PREF_MIN_IPG_SHIFT) & RXQ_CFG_RD_PREF_MIN_IPG_MASK) | 2754 RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB); 2755 2756 /* Configure TxQ. */ 2757 CSR_WRITE_4(sc, AGE_TXQ_CFG, 2758 ((TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) & 2759 TXQ_CFG_TPD_BURST_MASK) | 2760 ((TXQ_CFG_TX_FIFO_BURST_DEFAULT << TXQ_CFG_TX_FIFO_BURST_SHIFT) & 2761 TXQ_CFG_TX_FIFO_BURST_MASK) | 2762 ((TXQ_CFG_TPD_FETCH_DEFAULT << 2763 TXQ_CFG_TPD_FETCH_THRESH_SHIFT) & TXQ_CFG_TPD_FETCH_THRESH_MASK) | 2764 TXQ_CFG_ENB); 2765 2766 CSR_WRITE_4(sc, AGE_TX_JUMBO_TPD_TH_IPG, 2767 (((fsize / sizeof(uint64_t) << TX_JUMBO_TPD_TH_SHIFT)) & 2768 TX_JUMBO_TPD_TH_MASK) | 2769 ((TX_JUMBO_TPD_IPG_DEFAULT << TX_JUMBO_TPD_IPG_SHIFT) & 2770 TX_JUMBO_TPD_IPG_MASK)); 2771 /* Configure DMA parameters. */ 2772 CSR_WRITE_4(sc, AGE_DMA_CFG, 2773 DMA_CFG_ENH_ORDER | DMA_CFG_RCB_64 | 2774 sc->age_dma_rd_burst | DMA_CFG_RD_ENB | 2775 sc->age_dma_wr_burst | DMA_CFG_WR_ENB); 2776 2777 /* Configure CMB DMA write threshold. */ 2778 CSR_WRITE_4(sc, AGE_CMB_WR_THRESH, 2779 ((CMB_WR_THRESH_RRD_DEFAULT << CMB_WR_THRESH_RRD_SHIFT) & 2780 CMB_WR_THRESH_RRD_MASK) | 2781 ((CMB_WR_THRESH_TPD_DEFAULT << CMB_WR_THRESH_TPD_SHIFT) & 2782 CMB_WR_THRESH_TPD_MASK)); 2783 2784 /* Set CMB/SMB timer and enable them. */ 2785 CSR_WRITE_4(sc, AGE_CMB_WR_TIMER, 2786 ((AGE_USECS(2) << CMB_WR_TIMER_TX_SHIFT) & CMB_WR_TIMER_TX_MASK) | 2787 ((AGE_USECS(2) << CMB_WR_TIMER_RX_SHIFT) & CMB_WR_TIMER_RX_MASK)); 2788 /* Request SMB updates for every seconds. */ 2789 CSR_WRITE_4(sc, AGE_SMB_TIMER, AGE_USECS(1000 * 1000)); 2790 CSR_WRITE_4(sc, AGE_CSMB_CTRL, CSMB_CTRL_SMB_ENB | CSMB_CTRL_CMB_ENB); 2791 2792 /* 2793 * Disable all WOL bits as WOL can interfere normal Rx 2794 * operation. 2795 */ 2796 CSR_WRITE_4(sc, AGE_WOL_CFG, 0); 2797 2798 /* 2799 * Configure Tx/Rx MACs. 2800 * - Auto-padding for short frames. 2801 * - Enable CRC generation. 2802 * Start with full-duplex/1000Mbps media. Actual reconfiguration 2803 * of MAC is followed after link establishment. 
2804 */ 2805 CSR_WRITE_4(sc, AGE_MAC_CFG, 2806 MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | 2807 MAC_CFG_FULL_DUPLEX | MAC_CFG_SPEED_1000 | 2808 ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) & 2809 MAC_CFG_PREAMBLE_MASK)); 2810 /* Set up the receive filter. */ 2811 age_rxfilter(sc); 2812 age_rxvlan(sc); 2813 2814 reg = CSR_READ_4(sc, AGE_MAC_CFG); 2815 if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) 2816 reg |= MAC_CFG_RXCSUM_ENB; 2817 2818 /* Ack all pending interrupts and clear it. */ 2819 CSR_WRITE_4(sc, AGE_INTR_STATUS, 0); 2820 CSR_WRITE_4(sc, AGE_INTR_MASK, AGE_INTRS); 2821 2822 /* Finally enable Tx/Rx MAC. */ 2823 CSR_WRITE_4(sc, AGE_MAC_CFG, reg | MAC_CFG_TX_ENB | MAC_CFG_RX_ENB); 2824 2825 sc->age_flags &= ~AGE_FLAG_LINK; 2826 /* Switch to the current media. */ 2827 mii_mediachg(mii); 2828 2829 callout_reset(&sc->age_tick_ch, hz, age_tick, sc); 2830 2831 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); 2832 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); 2833 } 2834 2835 static void 2836 age_stop(struct age_softc *sc) 2837 { 2838 if_t ifp; 2839 struct age_txdesc *txd; 2840 struct age_rxdesc *rxd; 2841 uint32_t reg; 2842 int i; 2843 2844 AGE_LOCK_ASSERT(sc); 2845 /* 2846 * Mark the interface down and cancel the watchdog timer. 2847 */ 2848 ifp = sc->age_ifp; 2849 if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)); 2850 sc->age_flags &= ~AGE_FLAG_LINK; 2851 callout_stop(&sc->age_tick_ch); 2852 sc->age_watchdog_timer = 0; 2853 2854 /* 2855 * Disable interrupts. 2856 */ 2857 CSR_WRITE_4(sc, AGE_INTR_MASK, 0); 2858 CSR_WRITE_4(sc, AGE_INTR_STATUS, 0xFFFFFFFF); 2859 /* Stop CMB/SMB updates. */ 2860 CSR_WRITE_4(sc, AGE_CSMB_CTRL, 0); 2861 /* Stop Rx/Tx MAC. */ 2862 age_stop_rxmac(sc); 2863 age_stop_txmac(sc); 2864 /* Stop DMA. */ 2865 CSR_WRITE_4(sc, AGE_DMA_CFG, 2866 CSR_READ_4(sc, AGE_DMA_CFG) & ~(DMA_CFG_RD_ENB | DMA_CFG_WR_ENB)); 2867 /* Stop TxQ/RxQ. */ 2868 CSR_WRITE_4(sc, AGE_TXQ_CFG, 2869 CSR_READ_4(sc, AGE_TXQ_CFG) & ~TXQ_CFG_ENB); 2870 CSR_WRITE_4(sc, AGE_RXQ_CFG, 2871 CSR_READ_4(sc, AGE_RXQ_CFG) & ~RXQ_CFG_ENB); 2872 for (i = AGE_RESET_TIMEOUT; i > 0; i--) { 2873 if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0) 2874 break; 2875 DELAY(10); 2876 } 2877 if (i == 0) 2878 device_printf(sc->age_dev, 2879 "stopping Rx/Tx MACs timed out(0x%08x)!\n", reg); 2880 2881 /* Reclaim Rx buffers that have been processed. */ 2882 if (sc->age_cdata.age_rxhead != NULL) 2883 m_freem(sc->age_cdata.age_rxhead); 2884 AGE_RXCHAIN_RESET(sc); 2885 /* 2886 * Free RX and TX mbufs still in the queues. 
2887 */ 2888 for (i = 0; i < AGE_RX_RING_CNT; i++) { 2889 rxd = &sc->age_cdata.age_rxdesc[i]; 2890 if (rxd->rx_m != NULL) { 2891 bus_dmamap_sync(sc->age_cdata.age_rx_tag, 2892 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 2893 bus_dmamap_unload(sc->age_cdata.age_rx_tag, 2894 rxd->rx_dmamap); 2895 m_freem(rxd->rx_m); 2896 rxd->rx_m = NULL; 2897 } 2898 } 2899 for (i = 0; i < AGE_TX_RING_CNT; i++) { 2900 txd = &sc->age_cdata.age_txdesc[i]; 2901 if (txd->tx_m != NULL) { 2902 bus_dmamap_sync(sc->age_cdata.age_tx_tag, 2903 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 2904 bus_dmamap_unload(sc->age_cdata.age_tx_tag, 2905 txd->tx_dmamap); 2906 m_freem(txd->tx_m); 2907 txd->tx_m = NULL; 2908 } 2909 } 2910 } 2911 2912 static void 2913 age_stop_txmac(struct age_softc *sc) 2914 { 2915 uint32_t reg; 2916 int i; 2917 2918 AGE_LOCK_ASSERT(sc); 2919 2920 reg = CSR_READ_4(sc, AGE_MAC_CFG); 2921 if ((reg & MAC_CFG_TX_ENB) != 0) { 2922 reg &= ~MAC_CFG_TX_ENB; 2923 CSR_WRITE_4(sc, AGE_MAC_CFG, reg); 2924 } 2925 /* Stop Tx DMA engine. */ 2926 reg = CSR_READ_4(sc, AGE_DMA_CFG); 2927 if ((reg & DMA_CFG_RD_ENB) != 0) { 2928 reg &= ~DMA_CFG_RD_ENB; 2929 CSR_WRITE_4(sc, AGE_DMA_CFG, reg); 2930 } 2931 for (i = AGE_RESET_TIMEOUT; i > 0; i--) { 2932 if ((CSR_READ_4(sc, AGE_IDLE_STATUS) & 2933 (IDLE_STATUS_TXMAC | IDLE_STATUS_DMARD)) == 0) 2934 break; 2935 DELAY(10); 2936 } 2937 if (i == 0) 2938 device_printf(sc->age_dev, "stopping TxMAC timeout!\n"); 2939 } 2940 2941 static void 2942 age_stop_rxmac(struct age_softc *sc) 2943 { 2944 uint32_t reg; 2945 int i; 2946 2947 AGE_LOCK_ASSERT(sc); 2948 2949 reg = CSR_READ_4(sc, AGE_MAC_CFG); 2950 if ((reg & MAC_CFG_RX_ENB) != 0) { 2951 reg &= ~MAC_CFG_RX_ENB; 2952 CSR_WRITE_4(sc, AGE_MAC_CFG, reg); 2953 } 2954 /* Stop Rx DMA engine. */ 2955 reg = CSR_READ_4(sc, AGE_DMA_CFG); 2956 if ((reg & DMA_CFG_WR_ENB) != 0) { 2957 reg &= ~DMA_CFG_WR_ENB; 2958 CSR_WRITE_4(sc, AGE_DMA_CFG, reg); 2959 } 2960 for (i = AGE_RESET_TIMEOUT; i > 0; i--) { 2961 if ((CSR_READ_4(sc, AGE_IDLE_STATUS) & 2962 (IDLE_STATUS_RXMAC | IDLE_STATUS_DMAWR)) == 0) 2963 break; 2964 DELAY(10); 2965 } 2966 if (i == 0) 2967 device_printf(sc->age_dev, "stopping RxMAC timeout!\n"); 2968 } 2969 2970 static void 2971 age_init_tx_ring(struct age_softc *sc) 2972 { 2973 struct age_ring_data *rd; 2974 struct age_txdesc *txd; 2975 int i; 2976 2977 AGE_LOCK_ASSERT(sc); 2978 2979 sc->age_cdata.age_tx_prod = 0; 2980 sc->age_cdata.age_tx_cons = 0; 2981 sc->age_cdata.age_tx_cnt = 0; 2982 2983 rd = &sc->age_rdata; 2984 bzero(rd->age_tx_ring, AGE_TX_RING_SZ); 2985 for (i = 0; i < AGE_TX_RING_CNT; i++) { 2986 txd = &sc->age_cdata.age_txdesc[i]; 2987 txd->tx_desc = &rd->age_tx_ring[i]; 2988 txd->tx_m = NULL; 2989 } 2990 2991 bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag, 2992 sc->age_cdata.age_tx_ring_map, 2993 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2994 } 2995 2996 static int 2997 age_init_rx_ring(struct age_softc *sc) 2998 { 2999 struct age_ring_data *rd; 3000 struct age_rxdesc *rxd; 3001 int i; 3002 3003 AGE_LOCK_ASSERT(sc); 3004 3005 sc->age_cdata.age_rx_cons = AGE_RX_RING_CNT - 1; 3006 sc->age_morework = 0; 3007 rd = &sc->age_rdata; 3008 bzero(rd->age_rx_ring, AGE_RX_RING_SZ); 3009 for (i = 0; i < AGE_RX_RING_CNT; i++) { 3010 rxd = &sc->age_cdata.age_rxdesc[i]; 3011 rxd->rx_m = NULL; 3012 rxd->rx_desc = &rd->age_rx_ring[i]; 3013 if (age_newbuf(sc, rxd) != 0) 3014 return (ENOBUFS); 3015 } 3016 3017 bus_dmamap_sync(sc->age_cdata.age_rx_ring_tag, 3018 sc->age_cdata.age_rx_ring_map, BUS_DMASYNC_PREWRITE); 3019 3020 return (0); 3021 } 3022 
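/*
 * Reset the Rx return ring: restart the consumer index at zero, reset the
 * in-progress Rx mbuf chain state, and clear and sync the ring memory for
 * the controller.
 */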
3023 static void 3024 age_init_rr_ring(struct age_softc *sc) 3025 { 3026 struct age_ring_data *rd; 3027 3028 AGE_LOCK_ASSERT(sc); 3029 3030 sc->age_cdata.age_rr_cons = 0; 3031 AGE_RXCHAIN_RESET(sc); 3032 3033 rd = &sc->age_rdata; 3034 bzero(rd->age_rr_ring, AGE_RR_RING_SZ); 3035 bus_dmamap_sync(sc->age_cdata.age_rr_ring_tag, 3036 sc->age_cdata.age_rr_ring_map, 3037 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3038 } 3039 3040 static void 3041 age_init_cmb_block(struct age_softc *sc) 3042 { 3043 struct age_ring_data *rd; 3044 3045 AGE_LOCK_ASSERT(sc); 3046 3047 rd = &sc->age_rdata; 3048 bzero(rd->age_cmb_block, AGE_CMB_BLOCK_SZ); 3049 bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag, 3050 sc->age_cdata.age_cmb_block_map, 3051 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3052 } 3053 3054 static void 3055 age_init_smb_block(struct age_softc *sc) 3056 { 3057 struct age_ring_data *rd; 3058 3059 AGE_LOCK_ASSERT(sc); 3060 3061 rd = &sc->age_rdata; 3062 bzero(rd->age_smb_block, AGE_SMB_BLOCK_SZ); 3063 bus_dmamap_sync(sc->age_cdata.age_smb_block_tag, 3064 sc->age_cdata.age_smb_block_map, 3065 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3066 } 3067 3068 static int 3069 age_newbuf(struct age_softc *sc, struct age_rxdesc *rxd) 3070 { 3071 struct rx_desc *desc; 3072 struct mbuf *m; 3073 bus_dma_segment_t segs[1]; 3074 bus_dmamap_t map; 3075 int nsegs; 3076 3077 AGE_LOCK_ASSERT(sc); 3078 3079 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 3080 if (m == NULL) 3081 return (ENOBUFS); 3082 m->m_len = m->m_pkthdr.len = MCLBYTES; 3083 #ifndef __NO_STRICT_ALIGNMENT 3084 m_adj(m, AGE_RX_BUF_ALIGN); 3085 #endif 3086 3087 if (bus_dmamap_load_mbuf_sg(sc->age_cdata.age_rx_tag, 3088 sc->age_cdata.age_rx_sparemap, m, segs, &nsegs, 0) != 0) { 3089 m_freem(m); 3090 return (ENOBUFS); 3091 } 3092 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 3093 3094 if (rxd->rx_m != NULL) { 3095 bus_dmamap_sync(sc->age_cdata.age_rx_tag, rxd->rx_dmamap, 3096 BUS_DMASYNC_POSTREAD); 3097 bus_dmamap_unload(sc->age_cdata.age_rx_tag, rxd->rx_dmamap); 3098 } 3099 map = rxd->rx_dmamap; 3100 rxd->rx_dmamap = sc->age_cdata.age_rx_sparemap; 3101 sc->age_cdata.age_rx_sparemap = map; 3102 bus_dmamap_sync(sc->age_cdata.age_rx_tag, rxd->rx_dmamap, 3103 BUS_DMASYNC_PREREAD); 3104 rxd->rx_m = m; 3105 3106 desc = rxd->rx_desc; 3107 desc->addr = htole64(segs[0].ds_addr); 3108 desc->len = htole32((segs[0].ds_len & AGE_RD_LEN_MASK) << 3109 AGE_RD_LEN_SHIFT); 3110 return (0); 3111 } 3112 3113 static void 3114 age_rxvlan(struct age_softc *sc) 3115 { 3116 if_t ifp; 3117 uint32_t reg; 3118 3119 AGE_LOCK_ASSERT(sc); 3120 3121 ifp = sc->age_ifp; 3122 reg = CSR_READ_4(sc, AGE_MAC_CFG); 3123 reg &= ~MAC_CFG_VLAN_TAG_STRIP; 3124 if ((if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) != 0) 3125 reg |= MAC_CFG_VLAN_TAG_STRIP; 3126 CSR_WRITE_4(sc, AGE_MAC_CFG, reg); 3127 } 3128 3129 static u_int 3130 age_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) 3131 { 3132 uint32_t *mchash = arg; 3133 uint32_t crc; 3134 3135 crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN); 3136 mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f); 3137 3138 return (1); 3139 } 3140 3141 static void 3142 age_rxfilter(struct age_softc *sc) 3143 { 3144 if_t ifp; 3145 uint32_t mchash[2]; 3146 uint32_t rxcfg; 3147 3148 AGE_LOCK_ASSERT(sc); 3149 3150 ifp = sc->age_ifp; 3151 3152 rxcfg = CSR_READ_4(sc, AGE_MAC_CFG); 3153 rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC); 3154 if ((if_getflags(ifp) & IFF_BROADCAST) != 0) 3155 rxcfg |= MAC_CFG_BCAST; 3156 if 
((if_getflags(ifp) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		if ((if_getflags(ifp) & IFF_PROMISC) != 0)
			rxcfg |= MAC_CFG_PROMISC;
		if ((if_getflags(ifp) & IFF_ALLMULTI) != 0)
			rxcfg |= MAC_CFG_ALLMULTI;
		CSR_WRITE_4(sc, AGE_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, AGE_MAR1, 0xFFFFFFFF);
		CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg);
		return;
	}

	/* Program new filter. */
	bzero(mchash, sizeof(mchash));
	if_foreach_llmaddr(ifp, age_hash_maddr, mchash);

	CSR_WRITE_4(sc, AGE_MAR0, mchash[0]);
	CSR_WRITE_4(sc, AGE_MAR1, mchash[1]);
	CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg);
}

static int
sysctl_age_stats(SYSCTL_HANDLER_ARGS)
{
	struct age_softc *sc;
	struct age_stats *stats;
	int error, result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error != 0 || req->newptr == NULL)
		return (error);

	if (result != 1)
		return (error);

	sc = (struct age_softc *)arg1;
	stats = &sc->age_stat;
	printf("%s statistics:\n", device_get_nameunit(sc->age_dev));
	printf("Transmit good frames : %ju\n",
	    (uintmax_t)stats->tx_frames);
	printf("Transmit good broadcast frames : %ju\n",
	    (uintmax_t)stats->tx_bcast_frames);
	printf("Transmit good multicast frames : %ju\n",
	    (uintmax_t)stats->tx_mcast_frames);
	printf("Transmit pause control frames : %u\n",
	    stats->tx_pause_frames);
	printf("Transmit control frames : %u\n",
	    stats->tx_control_frames);
	printf("Transmit frames with excessive deferrals : %u\n",
	    stats->tx_excess_defer);
	printf("Transmit deferrals : %u\n",
	    stats->tx_deferred);
	printf("Transmit good octets : %ju\n",
	    (uintmax_t)stats->tx_bytes);
	printf("Transmit good broadcast octets : %ju\n",
	    (uintmax_t)stats->tx_bcast_bytes);
	printf("Transmit good multicast octets : %ju\n",
	    (uintmax_t)stats->tx_mcast_bytes);
	printf("Transmit frames 64 bytes : %ju\n",
	    (uintmax_t)stats->tx_pkts_64);
	printf("Transmit frames 65 to 127 bytes : %ju\n",
	    (uintmax_t)stats->tx_pkts_65_127);
	printf("Transmit frames 128 to 255 bytes : %ju\n",
	    (uintmax_t)stats->tx_pkts_128_255);
	printf("Transmit frames 256 to 511 bytes : %ju\n",
	    (uintmax_t)stats->tx_pkts_256_511);
	printf("Transmit frames 512 to 1024 bytes : %ju\n",
	    (uintmax_t)stats->tx_pkts_512_1023);
	printf("Transmit frames 1024 to 1518 bytes : %ju\n",
	    (uintmax_t)stats->tx_pkts_1024_1518);
	printf("Transmit frames 1519 to MTU bytes : %ju\n",
	    (uintmax_t)stats->tx_pkts_1519_max);
	printf("Transmit single collisions : %u\n",
	    stats->tx_single_colls);
	printf("Transmit multiple collisions : %u\n",
	    stats->tx_multi_colls);
	printf("Transmit late collisions : %u\n",
	    stats->tx_late_colls);
	printf("Transmit abort due to excessive collisions : %u\n",
	    stats->tx_excess_colls);
	printf("Transmit underruns due to FIFO underruns : %u\n",
	    stats->tx_underrun);
	printf("Transmit descriptor write-back errors : %u\n",
	    stats->tx_desc_underrun);
	printf("Transmit frames with length mismatched frame size : %u\n",
	    stats->tx_lenerrs);
	printf("Transmit frames truncated due to MTU size : %u\n",
	    stats->tx_pkts_truncated);

	printf("Receive good frames : %ju\n",
	    (uintmax_t)stats->rx_frames);
	printf("Receive good broadcast frames : %ju\n",
	    (uintmax_t)stats->rx_bcast_frames);
	printf("Receive good
multicast frames : %ju\n", 3251 (uintmax_t)stats->rx_mcast_frames); 3252 printf("Receive pause control frames : %u\n", 3253 stats->rx_pause_frames); 3254 printf("Receive control frames : %u\n", 3255 stats->rx_control_frames); 3256 printf("Receive CRC errors : %u\n", 3257 stats->rx_crcerrs); 3258 printf("Receive frames with length errors : %u\n", 3259 stats->rx_lenerrs); 3260 printf("Receive good octets : %ju\n", 3261 (uintmax_t)stats->rx_bytes); 3262 printf("Receive good broadcast octets : %ju\n", 3263 (uintmax_t)stats->rx_bcast_bytes); 3264 printf("Receive good multicast octets : %ju\n", 3265 (uintmax_t)stats->rx_mcast_bytes); 3266 printf("Receive frames too short : %u\n", 3267 stats->rx_runts); 3268 printf("Receive fragmented frames : %ju\n", 3269 (uintmax_t)stats->rx_fragments); 3270 printf("Receive frames 64 bytes : %ju\n", 3271 (uintmax_t)stats->rx_pkts_64); 3272 printf("Receive frames 65 to 127 bytes : %ju\n", 3273 (uintmax_t)stats->rx_pkts_65_127); 3274 printf("Receive frames 128 to 255 bytes : %ju\n", 3275 (uintmax_t)stats->rx_pkts_128_255); 3276 printf("Receive frames 256 to 511 bytes : %ju\n", 3277 (uintmax_t)stats->rx_pkts_256_511); 3278 printf("Receive frames 512 to 1024 bytes : %ju\n", 3279 (uintmax_t)stats->rx_pkts_512_1023); 3280 printf("Receive frames 1024 to 1518 bytes : %ju\n", 3281 (uintmax_t)stats->rx_pkts_1024_1518); 3282 printf("Receive frames 1519 to MTU bytes : %ju\n", 3283 (uintmax_t)stats->rx_pkts_1519_max); 3284 printf("Receive frames too long : %ju\n", 3285 (uint64_t)stats->rx_pkts_truncated); 3286 printf("Receive frames with FIFO overflow : %u\n", 3287 stats->rx_fifo_oflows); 3288 printf("Receive frames with return descriptor overflow : %u\n", 3289 stats->rx_desc_oflows); 3290 printf("Receive frames with alignment errors : %u\n", 3291 stats->rx_alignerrs); 3292 printf("Receive frames dropped due to address filtering : %ju\n", 3293 (uint64_t)stats->rx_pkts_filtered); 3294 3295 return (error); 3296 } 3297 3298 static int 3299 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) 3300 { 3301 int error, value; 3302 3303 if (arg1 == NULL) 3304 return (EINVAL); 3305 value = *(int *)arg1; 3306 error = sysctl_handle_int(oidp, &value, 0, req); 3307 if (error || req->newptr == NULL) 3308 return (error); 3309 if (value < low || value > high) 3310 return (EINVAL); 3311 *(int *)arg1 = value; 3312 3313 return (0); 3314 } 3315 3316 static int 3317 sysctl_hw_age_proc_limit(SYSCTL_HANDLER_ARGS) 3318 { 3319 return (sysctl_int_range(oidp, arg1, arg2, req, 3320 AGE_PROC_MIN, AGE_PROC_MAX)); 3321 } 3322 3323 static int 3324 sysctl_hw_age_int_mod(SYSCTL_HANDLER_ARGS) 3325 { 3326 3327 return (sysctl_int_range(oidp, arg1, arg2, req, AGE_IM_TIMER_MIN, 3328 AGE_IM_TIMER_MAX)); 3329 } 3330
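/*
 * The two wrappers above only bind sysctl_int_range() to driver-specific
 * bounds (AGE_PROC_MIN/AGE_PROC_MAX and AGE_IM_TIMER_MIN/AGE_IM_TIMER_MAX);
 * a write outside the window is rejected with EINVAL.  The handlers are
 * attached to the device's sysctl tree elsewhere in the driver (see
 * age_sysctl_node()).  An illustrative registration, not copied from that
 * function, would look like:
 *
 *	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->age_dev),
 *	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->age_dev)), OID_AUTO,
 *	    "int_mod", CTLTYPE_INT | CTLFLAG_RW, &sc->age_int_mod, 0,
 *	    sysctl_hw_age_int_mod, "I", "age interrupt moderation");
 */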