/*-
 * Copyright (c) 2008, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for Attansic Technology Corp. L1 Gigabit Ethernet. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>
#include <machine/in_cksum.h>

#include <dev/age/if_agereg.h>
#include <dev/age/if_agevar.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#ifndef	IFCAP_VLAN_HWTSO
#define	IFCAP_VLAN_HWTSO	0
#endif
#define	AGE_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)

MODULE_DEPEND(age, pci, 1, 1, 1);
MODULE_DEPEND(age, ether, 1, 1, 1);
MODULE_DEPEND(age, miibus, 1, 1, 1);

/* Tunables. */
static int msi_disable = 0;
static int msix_disable = 0;
TUNABLE_INT("hw.age.msi_disable", &msi_disable);
TUNABLE_INT("hw.age.msix_disable", &msix_disable);

/*
 * Devices supported by this driver.
 */
static struct age_dev {
	uint16_t	age_vendorid;
	uint16_t	age_deviceid;
	const char	*age_name;
} age_devs[] = {
	{ VENDORID_ATTANSIC, DEVICEID_ATTANSIC_L1,
	    "Attansic Technology Corp, L1 Gigabit Ethernet" },
};

static int age_miibus_readreg(device_t, int, int);
static int age_miibus_writereg(device_t, int, int, int);
static void age_miibus_statchg(device_t);
static void age_mediastatus(struct ifnet *, struct ifmediareq *);
static int age_mediachange(struct ifnet *);
static int age_read_vpd_word(struct age_softc *, uint32_t, uint32_t,
    uint32_t *);
static int age_probe(device_t);
static void age_get_macaddr(struct age_softc *);
static void age_phy_reset(struct age_softc *);
static int age_attach(device_t);
static int age_detach(device_t);
static void age_sysctl_node(struct age_softc *);
static void age_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int age_check_boundary(struct age_softc *);
static int age_dma_alloc(struct age_softc *);
static void age_dma_free(struct age_softc *);
static int age_shutdown(device_t);
static void age_setwol(struct age_softc *);
static int age_suspend(device_t);
static int age_resume(device_t);
static int age_encap(struct age_softc *, struct mbuf **);
static void age_tx_task(void *, int);
static void age_start(struct ifnet *);
static void age_watchdog(struct age_softc *);
static int age_ioctl(struct ifnet *, u_long, caddr_t);
static void age_mac_config(struct age_softc *);
static void age_link_task(void *, int);
static void age_stats_update(struct age_softc *);
static int age_intr(void *);
static void age_int_task(void *, int);
static void age_txintr(struct age_softc *, int);
static void age_rxeof(struct age_softc *sc, struct rx_rdesc *);
static int age_rxintr(struct age_softc *, int, int);
static void age_tick(void *);
static void age_reset(struct age_softc *);
static void age_init(void *);
static void age_init_locked(struct age_softc *);
static void age_stop(struct age_softc *);
static void age_stop_txmac(struct age_softc *);
static void age_stop_rxmac(struct age_softc *);
static void age_init_tx_ring(struct age_softc *);
static int age_init_rx_ring(struct age_softc *);
static void age_init_rr_ring(struct age_softc *);
static void age_init_cmb_block(struct age_softc *);
static void age_init_smb_block(struct age_softc *);
static int age_newbuf(struct age_softc *, struct age_rxdesc *);
static void age_rxvlan(struct age_softc *);
static void age_rxfilter(struct age_softc *);
static int sysctl_age_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int sysctl_hw_age_proc_limit(SYSCTL_HANDLER_ARGS);
static int sysctl_hw_age_int_mod(SYSCTL_HANDLER_ARGS);

static device_method_t age_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe, age_probe),
	DEVMETHOD(device_attach, age_attach),
	DEVMETHOD(device_detach, age_detach),
	DEVMETHOD(device_shutdown, age_shutdown),
	DEVMETHOD(device_suspend, age_suspend),
	DEVMETHOD(device_resume, age_resume),

	/* MII interface. */
	DEVMETHOD(miibus_readreg, age_miibus_readreg),
	DEVMETHOD(miibus_writereg, age_miibus_writereg),
	DEVMETHOD(miibus_statchg, age_miibus_statchg),

	{ NULL, NULL }
};

static driver_t age_driver = {
	"age",
	age_methods,
	sizeof(struct age_softc)
};

static devclass_t age_devclass;

DRIVER_MODULE(age, pci, age_driver, age_devclass, 0, 0);
DRIVER_MODULE(miibus, age, miibus_driver, miibus_devclass, 0, 0);

static struct resource_spec age_res_spec_mem[] = {
	{ SYS_RES_MEMORY,	PCIR_BAR(0),	RF_ACTIVE },
	{ -1,			0,		0 }
};

static struct resource_spec age_irq_spec_legacy[] = {
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,		0 }
};

static struct resource_spec age_irq_spec_msi[] = {
	{ SYS_RES_IRQ,		1,		RF_ACTIVE },
	{ -1,			0,		0 }
};

static struct resource_spec age_irq_spec_msix[] = {
	{ SYS_RES_IRQ,		1,		RF_ACTIVE },
	{ -1,			0,		0 }
};

/*
 * Read a PHY register on the MII of the L1.
 */
static int
age_miibus_readreg(device_t dev, int phy, int reg)
{
	struct age_softc *sc;
	uint32_t v;
	int i;

	sc = device_get_softc(dev);
	if (phy != sc->age_phyaddr)
		return (0);

	CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		v = CSR_READ_4(sc, AGE_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->age_dev, "phy read timeout : %d\n", reg);
		return (0);
	}

	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}

/*
 * Write a PHY register on the MII of the L1.
 */
static int
age_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct age_softc *sc;
	uint32_t v;
	int i;

	sc = device_get_softc(dev);
	if (phy != sc->age_phyaddr)
		return (0);

	CSR_WRITE_4(sc, AGE_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = AGE_PHY_TIMEOUT; i > 0; i--) {
		DELAY(1);
		v = CSR_READ_4(sc, AGE_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0)
		device_printf(sc->age_dev, "phy write timeout : %d\n", reg);

	return (0);
}

/*
 * Callback from MII layer when media changes.
 */
static void
age_miibus_statchg(device_t dev)
{
	struct age_softc *sc;

	sc = device_get_softc(dev);
	taskqueue_enqueue(taskqueue_swi, &sc->age_link_task);
}

/*
 * Get the current interface media status.
 */
static void
age_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct age_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	AGE_LOCK(sc);
	mii = device_get_softc(sc->age_miibus);

	mii_pollstat(mii);
	AGE_UNLOCK(sc);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
}

/*
 * Set hardware to newly-selected media.
 */
static int
age_mediachange(struct ifnet *ifp)
{
	struct age_softc *sc;
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	sc = ifp->if_softc;
	AGE_LOCK(sc);
	mii = device_get_softc(sc->age_miibus);
	if (mii->mii_instance != 0) {
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	error = mii_mediachg(mii);
	AGE_UNLOCK(sc);

	return (error);
}
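
/*
 * Read one 32-bit word of VPD data at the given offset.  Per the PCI
 * VPD access protocol, the hardware sets bit 15 of the VPD address
 * register once the requested word has been latched into the VPD data
 * register.
 */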
static int
age_read_vpd_word(struct age_softc *sc, uint32_t vpdc, uint32_t offset,
    uint32_t *word)
{
	int i;

	pci_write_config(sc->age_dev, vpdc + PCIR_VPD_ADDR, offset, 2);
	for (i = AGE_TIMEOUT; i > 0; i--) {
		DELAY(10);
		if ((pci_read_config(sc->age_dev, vpdc + PCIR_VPD_ADDR, 2) &
		    0x8000) == 0x8000)
			break;
	}
	if (i == 0) {
		device_printf(sc->age_dev, "VPD read timeout!\n");
		*word = 0;
		return (ETIMEDOUT);
	}

	*word = pci_read_config(sc->age_dev, vpdc + PCIR_VPD_DATA, 4);
	return (0);
}

static int
age_probe(device_t dev)
{
	struct age_dev *sp;
	int i;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	sp = age_devs;
	for (i = 0; i < sizeof(age_devs) / sizeof(age_devs[0]);
	    i++, sp++) {
		if (vendor == sp->age_vendorid &&
		    devid == sp->age_deviceid) {
			device_set_desc(dev, sp->age_name);
			return (BUS_PROBE_DEFAULT);
		}
	}

	return (ENXIO);
}

static void
age_get_macaddr(struct age_softc *sc)
{
	uint32_t ea[2], off, reg, word;
	int vpd_error, match, vpdc;

	reg = CSR_READ_4(sc, AGE_SPI_CTRL);
	if ((reg & SPI_VPD_ENB) != 0) {
		/* Get VPD stored in TWSI EEPROM. */
		reg &= ~SPI_VPD_ENB;
		CSR_WRITE_4(sc, AGE_SPI_CTRL, reg);
	}

	vpd_error = 0;
	ea[0] = ea[1] = 0;
	if ((vpd_error = pci_find_extcap(sc->age_dev, PCIY_VPD, &vpdc)) == 0) {
		/*
		 * A PCI VPD capability exists, but it seems not to be
		 * in the standard form stated in the PCI VPD
		 * specification, so the driver cannot use
		 * pci_get_vpd_readonly(9) with the keyword 'NA'.
		 * Search the VPD data starting at address 0x0100.  The
		 * data should be used as initializers to set the
		 * AGE_PAR0 and AGE_PAR1 registers, among other PCI
		 * configuration registers.
		 */
		word = 0;
		match = 0;
		reg = 0;
		for (off = AGE_VPD_REG_CONF_START; off < AGE_VPD_REG_CONF_END;
		    off += sizeof(uint32_t)) {
			vpd_error = age_read_vpd_word(sc, vpdc, off, &word);
			if (vpd_error != 0)
				break;
			if (match != 0) {
				switch (reg) {
				case AGE_PAR0:
					ea[0] = word;
					break;
				case AGE_PAR1:
					ea[1] = word;
					break;
				default:
					break;
				}
				match = 0;
			} else if ((word & 0xFF) == AGE_VPD_REG_CONF_SIG) {
				match = 1;
				reg = word >> 16;
			} else
				break;
		}
		if (off >= AGE_VPD_REG_CONF_END)
			vpd_error = ENOENT;
		if (vpd_error == 0) {
			/*
			 * Don't blindly trust the ethernet address
			 * obtained from VPD; check whether it is a
			 * valid one.  Otherwise fall back to reading
			 * the PAR registers.
			 */
			ea[1] &= 0xFFFF;
			if ((ea[0] == 0 && ea[1] == 0) ||
			    (ea[0] == 0xFFFFFFFF && ea[1] == 0xFFFF)) {
				if (bootverbose)
					device_printf(sc->age_dev,
					    "invalid ethernet address "
					    "returned from VPD.\n");
				vpd_error = EINVAL;
			}
		}
		if (vpd_error != 0 && bootverbose)
			device_printf(sc->age_dev, "VPD access failure!\n");
	} else {
		if (bootverbose)
			device_printf(sc->age_dev,
			    "PCI VPD capability not found!\n");
	}

	/*
	 * It seems that the L1 also provides a way to extract the
	 * ethernet address via the SPI flash interface.  Because SPI
	 * flash memory devices of different vendors vary in their
	 * instruction codes for the read ID instruction, it's very
	 * hard to get the codes without detailed information on the
	 * flash memory device used on the ethernet controller.  To
	 * simplify the code, just read the AGE_PAR0/AGE_PAR1 registers
	 * to get the ethernet address, which is supposed to be set by
	 * hardware during power-on reset.
	 */
	if (vpd_error != 0) {
		/*
		 * VPD is mapped to SPI flash memory or BIOS set it.
		 */
		ea[0] = CSR_READ_4(sc, AGE_PAR0);
		ea[1] = CSR_READ_4(sc, AGE_PAR1);
	}

	ea[1] &= 0xFFFF;
	if ((ea[0] == 0 && ea[1] == 0) ||
	    (ea[0] == 0xFFFFFFFF && ea[1] == 0xFFFF)) {
		device_printf(sc->age_dev,
		    "generating fake ethernet address.\n");
		ea[0] = arc4random();
		/* Set OUI to ASUSTek COMPUTER INC. */
		sc->age_eaddr[0] = 0x00;
		sc->age_eaddr[1] = 0x1B;
		sc->age_eaddr[2] = 0xFC;
		sc->age_eaddr[3] = (ea[0] >> 16) & 0xFF;
		sc->age_eaddr[4] = (ea[0] >> 8) & 0xFF;
		sc->age_eaddr[5] = (ea[0] >> 0) & 0xFF;
	} else {
		sc->age_eaddr[0] = (ea[1] >> 8) & 0xFF;
		sc->age_eaddr[1] = (ea[1] >> 0) & 0xFF;
		sc->age_eaddr[2] = (ea[0] >> 24) & 0xFF;
		sc->age_eaddr[3] = (ea[0] >> 16) & 0xFF;
		sc->age_eaddr[4] = (ea[0] >> 8) & 0xFF;
		sc->age_eaddr[5] = (ea[0] >> 0) & 0xFF;
	}
}

static void
age_phy_reset(struct age_softc *sc)
{

	/* Reset PHY. */
	CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_RST);
	DELAY(1000);
	CSR_WRITE_4(sc, AGE_GPHY_CTRL, GPHY_CTRL_CLR);
	DELAY(1000);
}

static int
age_attach(device_t dev)
{
	struct age_softc *sc;
	struct ifnet *ifp;
	uint16_t burst;
	int error, i, msic, msixc, pmc;

	error = 0;
	sc = device_get_softc(dev);
	sc->age_dev = dev;

	mtx_init(&sc->age_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->age_tick_ch, &sc->age_mtx, 0);
	TASK_INIT(&sc->age_int_task, 0, age_int_task, sc);
	TASK_INIT(&sc->age_link_task, 0, age_link_task, sc);

	/* Map the device. */
	pci_enable_busmaster(dev);
	sc->age_res_spec = age_res_spec_mem;
	sc->age_irq_spec = age_irq_spec_legacy;
	error = bus_alloc_resources(dev, sc->age_res_spec, sc->age_res);
	if (error != 0) {
		device_printf(dev, "cannot allocate memory resources.\n");
		goto fail;
	}

	/* Set PHY address. */
	sc->age_phyaddr = AGE_PHY_ADDR;

	/* Reset PHY. */
	age_phy_reset(sc);

	/* Reset the ethernet controller. */
	age_reset(sc);

	/* Get PCI and chip id/revision. */
	sc->age_rev = pci_get_revid(dev);
	sc->age_chip_rev = CSR_READ_4(sc, AGE_MASTER_CFG) >>
	    MASTER_CHIP_REV_SHIFT;
	if (bootverbose) {
		device_printf(dev, "PCI device revision : 0x%04x\n",
		    sc->age_rev);
		device_printf(dev, "Chip id/revision : 0x%04x\n",
		    sc->age_chip_rev);
	}

	/*
	 * XXX
	 * Uninitialized hardware returns an invalid chip id/revision
	 * as well as 0xFFFFFFFF for Tx/Rx fifo length.  It seems that
	 * an unplugged cable results in putting the hardware into
	 * automatic power down mode, which in turn returns an invalid
	 * chip revision.
	 */
	if (sc->age_chip_rev == 0xFFFF) {
		device_printf(dev, "invalid chip revision : 0x%04x -- "
		    "not initialized?\n", sc->age_chip_rev);
		error = ENXIO;
		goto fail;
	}

	device_printf(dev, "%d Tx FIFO, %d Rx FIFO\n",
	    CSR_READ_4(sc, AGE_SRAM_TX_FIFO_LEN),
	    CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN));

	/* Allocate IRQ resources. */
	msixc = pci_msix_count(dev);
	msic = pci_msi_count(dev);
	if (bootverbose) {
		device_printf(dev, "MSIX count : %d\n", msixc);
		device_printf(dev, "MSI count : %d\n", msic);
	}

	/* Prefer MSIX over MSI. */
	if (msix_disable == 0 || msi_disable == 0) {
		if (msix_disable == 0 && msixc == AGE_MSIX_MESSAGES &&
		    pci_alloc_msix(dev, &msixc) == 0) {
			if (msixc == AGE_MSIX_MESSAGES) {
				device_printf(dev, "Using %d MSIX messages.\n",
				    msixc);
				sc->age_flags |= AGE_FLAG_MSIX;
				sc->age_irq_spec = age_irq_spec_msix;
			} else
				pci_release_msi(dev);
		}
		if (msi_disable == 0 && (sc->age_flags & AGE_FLAG_MSIX) == 0 &&
		    msic == AGE_MSI_MESSAGES &&
		    pci_alloc_msi(dev, &msic) == 0) {
			if (msic == AGE_MSI_MESSAGES) {
				device_printf(dev, "Using %d MSI messages.\n",
				    msic);
				sc->age_flags |= AGE_FLAG_MSI;
				sc->age_irq_spec = age_irq_spec_msi;
			} else
				pci_release_msi(dev);
		}
	}

	error = bus_alloc_resources(dev, sc->age_irq_spec, sc->age_irq);
	if (error != 0) {
		device_printf(dev, "cannot allocate IRQ resources.\n");
		goto fail;
	}

	/* Get DMA parameters from PCIe device control register. */
	if (pci_find_extcap(dev, PCIY_EXPRESS, &i) == 0) {
		sc->age_flags |= AGE_FLAG_PCIE;
		burst = pci_read_config(dev, i + 0x08, 2);
		/* Max read request size. */
		sc->age_dma_rd_burst = ((burst >> 12) & 0x07) <<
		    DMA_CFG_RD_BURST_SHIFT;
		/* Max payload size. */
		sc->age_dma_wr_burst = ((burst >> 5) & 0x07) <<
		    DMA_CFG_WR_BURST_SHIFT;
		if (bootverbose) {
			device_printf(dev, "Read request size : %d bytes.\n",
			    128 << ((burst >> 12) & 0x07));
			device_printf(dev, "TLP payload size : %d bytes.\n",
			    128 << ((burst >> 5) & 0x07));
		}
	} else {
		sc->age_dma_rd_burst = DMA_CFG_RD_BURST_128;
		sc->age_dma_wr_burst = DMA_CFG_WR_BURST_128;
	}

	/* Create device sysctl node. */
	age_sysctl_node(sc);

	if ((error = age_dma_alloc(sc)) != 0)
		goto fail;

	/* Load station address. */
	age_get_macaddr(sc);

	ifp = sc->age_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure.\n");
		error = ENXIO;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = age_ioctl;
	ifp->if_start = age_start;
	ifp->if_init = age_init;
	ifp->if_snd.ifq_drv_maxlen = AGE_TX_RING_CNT - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_TSO4;
	ifp->if_hwassist = AGE_CSUM_FEATURES | CSUM_TSO;
	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0) {
		sc->age_flags |= AGE_FLAG_PMCAP;
		ifp->if_capabilities |= IFCAP_WOL_MAGIC | IFCAP_WOL_MCAST;
	}
	ifp->if_capenable = ifp->if_capabilities;

	/* Set up MII bus. */
	if ((error = mii_phy_probe(dev, &sc->age_miibus, age_mediachange,
	    age_mediastatus)) != 0) {
		device_printf(dev, "no PHY found!\n");
		goto fail;
	}

	ether_ifattach(ifp, sc->age_eaddr);

	/* VLAN capability setup. */
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
	ifp->if_capenable = ifp->if_capabilities;

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/* Create local taskq. */
	TASK_INIT(&sc->age_tx_task, 1, age_tx_task, ifp);
	sc->age_tq = taskqueue_create_fast("age_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->age_tq);
	if (sc->age_tq == NULL) {
		device_printf(dev, "could not create taskqueue.\n");
		ether_ifdetach(ifp);
		error = ENXIO;
		goto fail;
	}
	taskqueue_start_threads(&sc->age_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->age_dev));

	if ((sc->age_flags & AGE_FLAG_MSIX) != 0)
		msic = AGE_MSIX_MESSAGES;
	else if ((sc->age_flags & AGE_FLAG_MSI) != 0)
		msic = AGE_MSI_MESSAGES;
	else
		msic = 1;
	for (i = 0; i < msic; i++) {
		error = bus_setup_intr(dev, sc->age_irq[i],
		    INTR_TYPE_NET | INTR_MPSAFE, age_intr, NULL, sc,
		    &sc->age_intrhand[i]);
		if (error != 0)
			break;
	}
	if (error != 0) {
		device_printf(dev, "could not set up interrupt handler.\n");
		taskqueue_free(sc->age_tq);
		sc->age_tq = NULL;
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error != 0)
		age_detach(dev);

	return (error);
}

static int
age_detach(device_t dev)
{
	struct age_softc *sc;
	struct ifnet *ifp;
	int i, msic;

	sc = device_get_softc(dev);

	ifp = sc->age_ifp;
	if (device_is_attached(dev)) {
		AGE_LOCK(sc);
		sc->age_flags |= AGE_FLAG_DETACH;
		age_stop(sc);
		AGE_UNLOCK(sc);
		callout_drain(&sc->age_tick_ch);
		taskqueue_drain(sc->age_tq, &sc->age_int_task);
		taskqueue_drain(sc->age_tq, &sc->age_tx_task);
		taskqueue_drain(taskqueue_swi, &sc->age_link_task);
		ether_ifdetach(ifp);
	}

	if (sc->age_tq != NULL) {
		taskqueue_drain(sc->age_tq, &sc->age_int_task);
		taskqueue_free(sc->age_tq);
		sc->age_tq = NULL;
	}

	if (sc->age_miibus != NULL) {
		device_delete_child(dev, sc->age_miibus);
		sc->age_miibus = NULL;
	}
	bus_generic_detach(dev);
	age_dma_free(sc);

	if (ifp != NULL) {
		if_free(ifp);
		sc->age_ifp = NULL;
	}

	if ((sc->age_flags & AGE_FLAG_MSIX) != 0)
		msic = AGE_MSIX_MESSAGES;
	else if ((sc->age_flags & AGE_FLAG_MSI) != 0)
		msic = AGE_MSI_MESSAGES;
	else
		msic = 1;
	for (i = 0; i < msic; i++) {
		if (sc->age_intrhand[i] != NULL) {
			bus_teardown_intr(dev, sc->age_irq[i],
			    sc->age_intrhand[i]);
			sc->age_intrhand[i] = NULL;
		}
	}

	bus_release_resources(dev, sc->age_irq_spec, sc->age_irq);
	if ((sc->age_flags & (AGE_FLAG_MSI | AGE_FLAG_MSIX)) != 0)
		pci_release_msi(dev);
	bus_release_resources(dev, sc->age_res_spec, sc->age_res);
	mtx_destroy(&sc->age_mtx);

	return (0);
}

static void
age_sysctl_node(struct age_softc *sc)
{
	int error;

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->age_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->age_dev)), OID_AUTO,
	    "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_age_stats,
	    "I", "Statistics");

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->age_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->age_dev)), OID_AUTO,
	    "int_mod", CTLTYPE_INT | CTLFLAG_RW, &sc->age_int_mod, 0,
	    sysctl_hw_age_int_mod, "I", "age interrupt moderation");

	/* Pull in device tunables. */
	sc->age_int_mod = AGE_IM_TIMER_DEFAULT;
	error = resource_int_value(device_get_name(sc->age_dev),
	    device_get_unit(sc->age_dev), "int_mod", &sc->age_int_mod);
	if (error == 0) {
		if (sc->age_int_mod < AGE_IM_TIMER_MIN ||
		    sc->age_int_mod > AGE_IM_TIMER_MAX) {
			device_printf(sc->age_dev,
			    "int_mod value out of range; using default: %d\n",
			    AGE_IM_TIMER_DEFAULT);
			sc->age_int_mod = AGE_IM_TIMER_DEFAULT;
		}
	}

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->age_dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->age_dev)), OID_AUTO,
	    "process_limit", CTLTYPE_INT | CTLFLAG_RW, &sc->age_process_limit,
	    0, sysctl_hw_age_proc_limit, "I",
	    "max number of Rx events to process");

	/* Pull in device tunables. */
	sc->age_process_limit = AGE_PROC_DEFAULT;
	error = resource_int_value(device_get_name(sc->age_dev),
	    device_get_unit(sc->age_dev), "process_limit",
	    &sc->age_process_limit);
	if (error == 0) {
		if (sc->age_process_limit < AGE_PROC_MIN ||
		    sc->age_process_limit > AGE_PROC_MAX) {
			device_printf(sc->age_dev,
			    "process_limit value out of range; "
			    "using default: %d\n", AGE_PROC_DEFAULT);
			sc->age_process_limit = AGE_PROC_DEFAULT;
		}
	}
}

struct age_dmamap_arg {
	bus_addr_t	age_busaddr;
};
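
/*
 * Common bus_dmamap_load(9) callback for the rings and message blocks
 * below.  Each load yields a single segment; its bus address is handed
 * back to the caller through struct age_dmamap_arg.
 */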
static void
age_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct age_dmamap_arg *ctx;

	if (error != 0)
		return;

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	ctx = (struct age_dmamap_arg *)arg;
	ctx->age_busaddr = segs[0].ds_addr;
}

/*
 * The Attansic L1 controller has a single register to specify the high
 * address part of DMA blocks, so all descriptor structures and DMA
 * memory blocks should share the same high 32 bits of the 64-bit
 * address space (i.e. crossing a 4GB boundary is not allowed).
 */
static int
age_check_boundary(struct age_softc *sc)
{
	bus_addr_t rx_ring_end, rr_ring_end, tx_ring_end;
	bus_addr_t cmb_block_end, smb_block_end;

	/* Tx/Rx descriptor queues should reside within the 4GB boundary. */
	tx_ring_end = sc->age_rdata.age_tx_ring_paddr + AGE_TX_RING_SZ;
	rx_ring_end = sc->age_rdata.age_rx_ring_paddr + AGE_RX_RING_SZ;
	rr_ring_end = sc->age_rdata.age_rr_ring_paddr + AGE_RR_RING_SZ;
	cmb_block_end = sc->age_rdata.age_cmb_block_paddr + AGE_CMB_BLOCK_SZ;
	smb_block_end = sc->age_rdata.age_smb_block_paddr + AGE_SMB_BLOCK_SZ;

	if ((AGE_ADDR_HI(tx_ring_end) !=
	    AGE_ADDR_HI(sc->age_rdata.age_tx_ring_paddr)) ||
	    (AGE_ADDR_HI(rx_ring_end) !=
	    AGE_ADDR_HI(sc->age_rdata.age_rx_ring_paddr)) ||
	    (AGE_ADDR_HI(rr_ring_end) !=
	    AGE_ADDR_HI(sc->age_rdata.age_rr_ring_paddr)) ||
	    (AGE_ADDR_HI(cmb_block_end) !=
	    AGE_ADDR_HI(sc->age_rdata.age_cmb_block_paddr)) ||
	    (AGE_ADDR_HI(smb_block_end) !=
	    AGE_ADDR_HI(sc->age_rdata.age_smb_block_paddr)))
		return (EFBIG);

	if ((AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(rx_ring_end)) ||
	    (AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(rr_ring_end)) ||
	    (AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(cmb_block_end)) ||
	    (AGE_ADDR_HI(tx_ring_end) != AGE_ADDR_HI(smb_block_end)))
		return (EFBIG);

	return (0);
}

static int
age_dma_alloc(struct age_softc *sc)
{
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	bus_addr_t lowaddr;
	struct age_dmamap_arg ctx;
	int error, i;

	lowaddr = BUS_SPACE_MAXADDR;

again:
	/* Create parent ring/DMA block tag. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->age_dev),	/* parent */
	    1, 0,				/* alignment, boundary */
	    lowaddr,				/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsize */
	    0,					/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->age_cdata.age_parent_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create parent DMA tag.\n");
		goto fail;
	}

	/* Create tag for Tx ring. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_parent_tag,	/* parent */
	    AGE_TX_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    AGE_TX_RING_SZ,			/* maxsize */
	    1,					/* nsegments */
	    AGE_TX_RING_SZ,			/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->age_cdata.age_tx_ring_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create Tx ring DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx ring. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_parent_tag,	/* parent */
	    AGE_RX_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    AGE_RX_RING_SZ,			/* maxsize */
	    1,					/* nsegments */
	    AGE_RX_RING_SZ,			/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->age_cdata.age_rx_ring_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create Rx ring DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx return ring. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_parent_tag,	/* parent */
	    AGE_RR_RING_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    AGE_RR_RING_SZ,			/* maxsize */
	    1,					/* nsegments */
	    AGE_RR_RING_SZ,			/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->age_cdata.age_rr_ring_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create Rx return ring DMA tag.\n");
		goto fail;
	}

	/* Create tag for coalescing message block. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_parent_tag,	/* parent */
	    AGE_CMB_ALIGN, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    AGE_CMB_BLOCK_SZ,			/* maxsize */
	    1,					/* nsegments */
	    AGE_CMB_BLOCK_SZ,			/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->age_cdata.age_cmb_block_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create CMB DMA tag.\n");
		goto fail;
	}

	/* Create tag for statistics message block. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_parent_tag,	/* parent */
	    AGE_SMB_ALIGN, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    AGE_SMB_BLOCK_SZ,			/* maxsize */
	    1,					/* nsegments */
	    AGE_SMB_BLOCK_SZ,			/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->age_cdata.age_smb_block_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create SMB DMA tag.\n");
		goto fail;
	}

	/* Allocate DMA'able memory and load the DMA map. */
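	/*
	 * Each ring/block below follows the same pattern:
	 * bus_dmamem_alloc(9) provides the zeroed backing memory and
	 * bus_dmamap_load(9) with age_dmamap_cb obtains its bus address.
	 */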
	error = bus_dmamem_alloc(sc->age_cdata.age_tx_ring_tag,
	    (void **)&sc->age_rdata.age_tx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->age_cdata.age_tx_ring_map);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not allocate DMA'able memory for Tx ring.\n");
		goto fail;
	}
	ctx.age_busaddr = 0;
	error = bus_dmamap_load(sc->age_cdata.age_tx_ring_tag,
	    sc->age_cdata.age_tx_ring_map, sc->age_rdata.age_tx_ring,
	    AGE_TX_RING_SZ, age_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.age_busaddr == 0) {
		device_printf(sc->age_dev,
		    "could not load DMA'able memory for Tx ring.\n");
		goto fail;
	}
	sc->age_rdata.age_tx_ring_paddr = ctx.age_busaddr;
	/* Rx ring */
	error = bus_dmamem_alloc(sc->age_cdata.age_rx_ring_tag,
	    (void **)&sc->age_rdata.age_rx_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->age_cdata.age_rx_ring_map);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not allocate DMA'able memory for Rx ring.\n");
		goto fail;
	}
	ctx.age_busaddr = 0;
	error = bus_dmamap_load(sc->age_cdata.age_rx_ring_tag,
	    sc->age_cdata.age_rx_ring_map, sc->age_rdata.age_rx_ring,
	    AGE_RX_RING_SZ, age_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.age_busaddr == 0) {
		device_printf(sc->age_dev,
		    "could not load DMA'able memory for Rx ring.\n");
		goto fail;
	}
	sc->age_rdata.age_rx_ring_paddr = ctx.age_busaddr;
	/* Rx return ring */
	error = bus_dmamem_alloc(sc->age_cdata.age_rr_ring_tag,
	    (void **)&sc->age_rdata.age_rr_ring,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->age_cdata.age_rr_ring_map);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not allocate DMA'able memory for Rx return ring.\n");
		goto fail;
	}
	ctx.age_busaddr = 0;
	error = bus_dmamap_load(sc->age_cdata.age_rr_ring_tag,
	    sc->age_cdata.age_rr_ring_map, sc->age_rdata.age_rr_ring,
	    AGE_RR_RING_SZ, age_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.age_busaddr == 0) {
		device_printf(sc->age_dev,
		    "could not load DMA'able memory for Rx return ring.\n");
		goto fail;
	}
	sc->age_rdata.age_rr_ring_paddr = ctx.age_busaddr;
	/* CMB block */
	error = bus_dmamem_alloc(sc->age_cdata.age_cmb_block_tag,
	    (void **)&sc->age_rdata.age_cmb_block,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->age_cdata.age_cmb_block_map);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not allocate DMA'able memory for CMB block.\n");
		goto fail;
	}
	ctx.age_busaddr = 0;
	error = bus_dmamap_load(sc->age_cdata.age_cmb_block_tag,
	    sc->age_cdata.age_cmb_block_map, sc->age_rdata.age_cmb_block,
	    AGE_CMB_BLOCK_SZ, age_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.age_busaddr == 0) {
		device_printf(sc->age_dev,
		    "could not load DMA'able memory for CMB block.\n");
		goto fail;
	}
	sc->age_rdata.age_cmb_block_paddr = ctx.age_busaddr;
	/* SMB block */
	error = bus_dmamem_alloc(sc->age_cdata.age_smb_block_tag,
	    (void **)&sc->age_rdata.age_smb_block,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->age_cdata.age_smb_block_map);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not allocate DMA'able memory for SMB block.\n");
		goto fail;
	}
	ctx.age_busaddr = 0;
	error = bus_dmamap_load(sc->age_cdata.age_smb_block_tag,
	    sc->age_cdata.age_smb_block_map, sc->age_rdata.age_smb_block,
	    AGE_SMB_BLOCK_SZ, age_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.age_busaddr == 0) {
		device_printf(sc->age_dev,
		    "could not load DMA'able memory for SMB block.\n");
		goto fail;
	}
	sc->age_rdata.age_smb_block_paddr = ctx.age_busaddr;

	/*
	 * All ring buffers and DMA blocks should have the same
	 * high address part of the 64bit DMA address space.
	 */
	if (lowaddr != BUS_SPACE_MAXADDR_32BIT &&
	    (error = age_check_boundary(sc)) != 0) {
		device_printf(sc->age_dev, "4GB boundary crossed, "
		    "switching to 32bit DMA addressing mode.\n");
		age_dma_free(sc);
		/* Limit DMA address space to 32bit and try again. */
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
		goto again;
	}

	/*
	 * Create Tx/Rx buffer parent tag.
	 * The L1 supports full 64bit DMA addressing for Tx/Rx buffers,
	 * so it needs a separate parent DMA tag.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->age_dev),	/* parent */
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsize */
	    0,					/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,		/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->age_cdata.age_buffer_tag);
	if (error != 0) {
		device_printf(sc->age_dev,
		    "could not create parent buffer DMA tag.\n");
		goto fail;
	}

	/* Create tag for Tx buffers. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_buffer_tag,	/* parent */
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    AGE_TSO_MAXSIZE,			/* maxsize */
	    AGE_MAXTXSEGS,			/* nsegments */
	    AGE_TSO_MAXSEGSIZE,			/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->age_cdata.age_tx_tag);
	if (error != 0) {
		device_printf(sc->age_dev, "could not create Tx DMA tag.\n");
		goto fail;
	}

	/* Create tag for Rx buffers. */
	error = bus_dma_tag_create(
	    sc->age_cdata.age_buffer_tag,	/* parent */
	    1, 0,				/* alignment, boundary */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    MCLBYTES,				/* maxsize */
	    1,					/* nsegments */
	    MCLBYTES,				/* maxsegsize */
	    0,					/* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->age_cdata.age_rx_tag);
	if (error != 0) {
		device_printf(sc->age_dev, "could not create Rx DMA tag.\n");
		goto fail;
	}

	/* Create DMA maps for Tx buffers. */
	for (i = 0; i < AGE_TX_RING_CNT; i++) {
		txd = &sc->age_cdata.age_txdesc[i];
		txd->tx_m = NULL;
		txd->tx_dmamap = NULL;
		error = bus_dmamap_create(sc->age_cdata.age_tx_tag, 0,
		    &txd->tx_dmamap);
		if (error != 0) {
			device_printf(sc->age_dev,
			    "could not create Tx dmamap.\n");
			goto fail;
		}
	}
	/* Create DMA maps for Rx buffers. */
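	/* A spare dmamap is kept for Rx buffer replacement in age_newbuf(). */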
	if ((error = bus_dmamap_create(sc->age_cdata.age_rx_tag, 0,
	    &sc->age_cdata.age_rx_sparemap)) != 0) {
		device_printf(sc->age_dev,
		    "could not create spare Rx dmamap.\n");
		goto fail;
	}
	for (i = 0; i < AGE_RX_RING_CNT; i++) {
		rxd = &sc->age_cdata.age_rxdesc[i];
		rxd->rx_m = NULL;
		rxd->rx_dmamap = NULL;
		error = bus_dmamap_create(sc->age_cdata.age_rx_tag, 0,
		    &rxd->rx_dmamap);
		if (error != 0) {
			device_printf(sc->age_dev,
			    "could not create Rx dmamap.\n");
			goto fail;
		}
	}

fail:
	return (error);
}

static void
age_dma_free(struct age_softc *sc)
{
	struct age_txdesc *txd;
	struct age_rxdesc *rxd;
	int i;

	/* Tx buffers */
	if (sc->age_cdata.age_tx_tag != NULL) {
		for (i = 0; i < AGE_TX_RING_CNT; i++) {
			txd = &sc->age_cdata.age_txdesc[i];
			if (txd->tx_dmamap != NULL) {
				bus_dmamap_destroy(sc->age_cdata.age_tx_tag,
				    txd->tx_dmamap);
				txd->tx_dmamap = NULL;
			}
		}
		bus_dma_tag_destroy(sc->age_cdata.age_tx_tag);
		sc->age_cdata.age_tx_tag = NULL;
	}
	/* Rx buffers */
	if (sc->age_cdata.age_rx_tag != NULL) {
		for (i = 0; i < AGE_RX_RING_CNT; i++) {
			rxd = &sc->age_cdata.age_rxdesc[i];
			if (rxd->rx_dmamap != NULL) {
				bus_dmamap_destroy(sc->age_cdata.age_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->age_cdata.age_rx_sparemap != NULL) {
			bus_dmamap_destroy(sc->age_cdata.age_rx_tag,
			    sc->age_cdata.age_rx_sparemap);
			sc->age_cdata.age_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->age_cdata.age_rx_tag);
		sc->age_cdata.age_rx_tag = NULL;
	}
	/* Tx ring. */
	if (sc->age_cdata.age_tx_ring_tag != NULL) {
		if (sc->age_cdata.age_tx_ring_map != NULL)
			bus_dmamap_unload(sc->age_cdata.age_tx_ring_tag,
			    sc->age_cdata.age_tx_ring_map);
		if (sc->age_cdata.age_tx_ring_map != NULL &&
		    sc->age_rdata.age_tx_ring != NULL)
			bus_dmamem_free(sc->age_cdata.age_tx_ring_tag,
			    sc->age_rdata.age_tx_ring,
			    sc->age_cdata.age_tx_ring_map);
		sc->age_rdata.age_tx_ring = NULL;
		sc->age_cdata.age_tx_ring_map = NULL;
		bus_dma_tag_destroy(sc->age_cdata.age_tx_ring_tag);
		sc->age_cdata.age_tx_ring_tag = NULL;
	}
	/* Rx ring. */
	if (sc->age_cdata.age_rx_ring_tag != NULL) {
		if (sc->age_cdata.age_rx_ring_map != NULL)
			bus_dmamap_unload(sc->age_cdata.age_rx_ring_tag,
			    sc->age_cdata.age_rx_ring_map);
		if (sc->age_cdata.age_rx_ring_map != NULL &&
		    sc->age_rdata.age_rx_ring != NULL)
			bus_dmamem_free(sc->age_cdata.age_rx_ring_tag,
			    sc->age_rdata.age_rx_ring,
			    sc->age_cdata.age_rx_ring_map);
		sc->age_rdata.age_rx_ring = NULL;
		sc->age_cdata.age_rx_ring_map = NULL;
		bus_dma_tag_destroy(sc->age_cdata.age_rx_ring_tag);
		sc->age_cdata.age_rx_ring_tag = NULL;
	}
	/* Rx return ring. */
	if (sc->age_cdata.age_rr_ring_tag != NULL) {
		if (sc->age_cdata.age_rr_ring_map != NULL)
			bus_dmamap_unload(sc->age_cdata.age_rr_ring_tag,
			    sc->age_cdata.age_rr_ring_map);
		if (sc->age_cdata.age_rr_ring_map != NULL &&
		    sc->age_rdata.age_rr_ring != NULL)
			bus_dmamem_free(sc->age_cdata.age_rr_ring_tag,
			    sc->age_rdata.age_rr_ring,
			    sc->age_cdata.age_rr_ring_map);
		sc->age_rdata.age_rr_ring = NULL;
		sc->age_cdata.age_rr_ring_map = NULL;
		bus_dma_tag_destroy(sc->age_cdata.age_rr_ring_tag);
		sc->age_cdata.age_rr_ring_tag = NULL;
	}
	/* CMB block */
	if (sc->age_cdata.age_cmb_block_tag != NULL) {
		if (sc->age_cdata.age_cmb_block_map != NULL)
			bus_dmamap_unload(sc->age_cdata.age_cmb_block_tag,
			    sc->age_cdata.age_cmb_block_map);
		if (sc->age_cdata.age_cmb_block_map != NULL &&
		    sc->age_rdata.age_cmb_block != NULL)
			bus_dmamem_free(sc->age_cdata.age_cmb_block_tag,
			    sc->age_rdata.age_cmb_block,
			    sc->age_cdata.age_cmb_block_map);
		sc->age_rdata.age_cmb_block = NULL;
		sc->age_cdata.age_cmb_block_map = NULL;
		bus_dma_tag_destroy(sc->age_cdata.age_cmb_block_tag);
		sc->age_cdata.age_cmb_block_tag = NULL;
	}
	/* SMB block */
	if (sc->age_cdata.age_smb_block_tag != NULL) {
		if (sc->age_cdata.age_smb_block_map != NULL)
			bus_dmamap_unload(sc->age_cdata.age_smb_block_tag,
			    sc->age_cdata.age_smb_block_map);
		if (sc->age_cdata.age_smb_block_map != NULL &&
		    sc->age_rdata.age_smb_block != NULL)
			bus_dmamem_free(sc->age_cdata.age_smb_block_tag,
			    sc->age_rdata.age_smb_block,
			    sc->age_cdata.age_smb_block_map);
		sc->age_rdata.age_smb_block = NULL;
		sc->age_cdata.age_smb_block_map = NULL;
		bus_dma_tag_destroy(sc->age_cdata.age_smb_block_tag);
		sc->age_cdata.age_smb_block_tag = NULL;
	}

	if (sc->age_cdata.age_buffer_tag != NULL) {
		bus_dma_tag_destroy(sc->age_cdata.age_buffer_tag);
		sc->age_cdata.age_buffer_tag = NULL;
	}
	if (sc->age_cdata.age_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->age_cdata.age_parent_tag);
		sc->age_cdata.age_parent_tag = NULL;
	}
}

/*
 * Make sure the interface is stopped at reboot time.
 */
static int
age_shutdown(device_t dev)
{

	return (age_suspend(dev));
}

static void
age_setwol(struct age_softc *sc)
{
	struct ifnet *ifp;
	struct mii_data *mii;
	uint32_t reg, pmcs;
	uint16_t pmstat;
	int aneg, i, pmc;

	AGE_LOCK_ASSERT(sc);

	if (pci_find_extcap(sc->age_dev, PCIY_PMG, &pmc) == 0) {
		CSR_WRITE_4(sc, AGE_WOL_CFG, 0);
		/*
		 * No PME capability, PHY power down.
		 * XXX
		 * For an unknown reason, powering down the PHY resulted
		 * in unexpected results such as inaccessibility of the
		 * hardware on a freshly rebooted system.  Disable
		 * powering down the PHY until more information on
		 * Attansic/Atheros PHY hardware is available.
		 */
#ifdef notyet
		age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
#endif
		return;
	}

	ifp = sc->age_ifp;
	if ((ifp->if_capenable & IFCAP_WOL) != 0) {
		/*
		 * Note, this driver resets the link speed to 10/100Mbps
		 * with auto-negotiation, but we don't know whether that
		 * operation will succeed, as the driver has no control
		 * after powering off.  If the renegotiation fails, WOL
		 * may not work.  Running at 1Gbps would draw more power
		 * than the 375mA at 3.3V specified in the PCI
		 * specification, which could result in a complete
		 * shutdown of power to the ethernet controller.
		 *
		 * TODO
		 * Save the currently negotiated media
		 * speed/duplex/flow-control in the softc and restore
		 * the same link again after resuming.  PHY handling
		 * such as powering down or resetting to 100Mbps may be
		 * better handled in the suspend method of the phy
		 * driver.
		 */
		mii = device_get_softc(sc->age_miibus);
		mii_pollstat(mii);
		aneg = 0;
		if ((mii->mii_media_status & IFM_AVALID) != 0) {
			switch (IFM_SUBTYPE(mii->mii_media_active)) {
			case IFM_10_T:
			case IFM_100_TX:
				goto got_link;
			case IFM_1000_T:
				aneg++;
			default:
				break;
			}
		}
		age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
		    MII_100T2CR, 0);
		age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
		    MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD |
		    ANAR_10 | ANAR_CSMA);
		age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
		    MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG);
		DELAY(1000);
		if (aneg != 0) {
			/* Poll link state until age(4) gets a 10/100 link. */
			for (i = 0; i < MII_ANEGTICKS_GIGE; i++) {
				mii_pollstat(mii);
				if ((mii->mii_media_status & IFM_AVALID) != 0) {
					switch (IFM_SUBTYPE(
					    mii->mii_media_active)) {
					case IFM_10_T:
					case IFM_100_TX:
						age_mac_config(sc);
						goto got_link;
					default:
						break;
					}
				}
				AGE_UNLOCK(sc);
				pause("agelnk", hz);
				AGE_LOCK(sc);
			}
			if (i == MII_ANEGTICKS_GIGE)
				device_printf(sc->age_dev,
				    "establishing link failed, "
				    "WOL may not work!\n");
		}
		/*
		 * No link, force the MAC to have a 100Mbps, full-duplex
		 * link.  This is the last resort and may/may not work.
		 */
		mii->mii_media_status = IFM_AVALID | IFM_ACTIVE;
		mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX;
		age_mac_config(sc);
	}

got_link:
	pmcs = 0;
	if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0)
		pmcs |= WOL_CFG_MAGIC | WOL_CFG_MAGIC_ENB;
	CSR_WRITE_4(sc, AGE_WOL_CFG, pmcs);
	reg = CSR_READ_4(sc, AGE_MAC_CFG);
	reg &= ~(MAC_CFG_DBG | MAC_CFG_PROMISC);
	reg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST);
	if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0)
		reg |= MAC_CFG_ALLMULTI | MAC_CFG_BCAST;
	if ((ifp->if_capenable & IFCAP_WOL) != 0) {
		reg |= MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
	}

	/* Request PME. */
	pmstat = pci_read_config(sc->age_dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->age_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
#ifdef notyet
	/* See above for powering down PHY issues. */
	if ((ifp->if_capenable & IFCAP_WOL) == 0) {
		/* No WOL, PHY power down. */
		age_miibus_writereg(sc->age_dev, sc->age_phyaddr,
		    MII_BMCR, BMCR_PDOWN);
	}
#endif
}

static int
age_suspend(device_t dev)
{
	struct age_softc *sc;

	sc = device_get_softc(dev);

	AGE_LOCK(sc);
	age_stop(sc);
	age_setwol(sc);
	AGE_UNLOCK(sc);

	return (0);
}

static int
age_resume(device_t dev)
{
	struct age_softc *sc;
	struct ifnet *ifp;
	uint16_t cmd;

	sc = device_get_softc(dev);

	AGE_LOCK(sc);
	/*
	 * Clear the INTx emulation disable bit that some hardware
	 * sets on resume.  From Linux.
	 */
	cmd = pci_read_config(sc->age_dev, PCIR_COMMAND, 2);
	if ((cmd & 0x0400) != 0) {
		cmd &= ~0x0400;
		pci_write_config(sc->age_dev, PCIR_COMMAND, cmd, 2);
	}
	ifp = sc->age_ifp;
	if ((ifp->if_flags & IFF_UP) != 0)
		age_init_locked(sc);

	AGE_UNLOCK(sc);

	return (0);
}

static int
age_encap(struct age_softc *sc, struct mbuf **m_head)
{
	struct age_txdesc *txd, *txd_last;
	struct tx_desc *desc;
	struct mbuf *m;
	struct ip *ip;
	struct tcphdr *tcp;
	bus_dma_segment_t txsegs[AGE_MAXTXSEGS];
	bus_dmamap_t map;
	uint32_t cflags, ip_off, poff, vtag;
	int error, i, nsegs, prod, si;

	AGE_LOCK_ASSERT(sc);

	M_ASSERTPKTHDR((*m_head));

	m = *m_head;
	ip = NULL;
	tcp = NULL;
	cflags = vtag = 0;
	ip_off = poff = 0;
	if ((m->m_pkthdr.csum_flags & (AGE_CSUM_FEATURES | CSUM_TSO)) != 0) {
		/*
		 * The L1 requires the offset of the TCP/UDP payload in
		 * its Tx descriptor to perform hardware Tx checksum
		 * offload.  Additionally, TSO requires the IP/TCP header
		 * size and modification of the IP/TCP header in order
		 * to make the TSO engine work.  This kind of operation
		 * takes many CPU cycles on FreeBSD, so a fast host CPU
		 * is needed for smooth TSO performance.
		 */
		struct ether_header *eh;

		if (M_WRITABLE(m) == 0) {
			/* Get a writable copy. */
			m = m_dup(*m_head, M_DONTWAIT);
			/* Release original mbufs. */
			m_freem(*m_head);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			*m_head = m;
		}
		ip_off = sizeof(struct ether_header);
		m = m_pullup(m, ip_off);
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		eh = mtod(m, struct ether_header *);
		/*
		 * Check if hardware VLAN insertion is off.
		 * Additional check for LLC/SNAP frame?
		 */
		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
			ip_off = sizeof(struct ether_vlan_header);
			m = m_pullup(m, ip_off);
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
		}
		m = m_pullup(m, ip_off + sizeof(struct ip));
		if (m == NULL) {
			*m_head = NULL;
			return (ENOBUFS);
		}
		ip = (struct ip *)(mtod(m, char *) + ip_off);
		poff = ip_off + (ip->ip_hl << 2);
		if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
			m = m_pullup(m, poff + sizeof(struct tcphdr));
			if (m == NULL) {
				*m_head = NULL;
				return (ENOBUFS);
			}
			tcp = (struct tcphdr *)(mtod(m, char *) + poff);
			/*
			 * The L1 requires the IP/TCP header size and
			 * offset as well as the TCP pseudo checksum,
			 * which complicates TSO configuration.  This
			 * presumably comes from adherence to the
			 * Microsoft NDIS Large Send specification,
			 * which requires insertion of the pseudo
			 * checksum by the upper stack.  The pseudo
			 * checksum that NDIS refers to doesn't include
			 * the TCP payload length, so age(4) should
			 * recompute the pseudo checksum here.
			 * Hopefully this isn't much of a burden on
			 * modern CPUs.  Reset the IP checksum and
			 * recompute the TCP pseudo checksum as the NDIS
			 * specification says.
			 */
			ip->ip_sum = 0;
			if (poff + (tcp->th_off << 2) == m->m_pkthdr.len)
				tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr,
				    htons((tcp->th_off << 2) + IPPROTO_TCP));
			else
				tcp->th_sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
		}
		*m_head = m;
	}

	si = prod = sc->age_cdata.age_tx_prod;
	txd = &sc->age_cdata.age_txdesc[prod];
	txd_last = txd;
	map = txd->tx_dmamap;

	error = bus_dmamap_load_mbuf_sg(sc->age_cdata.age_tx_tag, map,
	    *m_head, txsegs, &nsegs, 0);
	if (error == EFBIG) {
		m = m_collapse(*m_head, M_DONTWAIT, AGE_MAXTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(sc->age_cdata.age_tx_tag, map,
		    *m_head, txsegs, &nsegs, 0);
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
			return (error);
		}
	} else if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	/* Check descriptor overrun. */
	if (sc->age_cdata.age_tx_cnt + nsegs >= AGE_TX_RING_CNT - 2) {
		bus_dmamap_unload(sc->age_cdata.age_tx_tag, map);
		return (ENOBUFS);
	}

	m = *m_head;
	/* Configure Tx IP/TCP/UDP checksum offload. */
	if ((m->m_pkthdr.csum_flags & AGE_CSUM_FEATURES) != 0) {
		cflags |= AGE_TD_CSUM;
		if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0)
			cflags |= AGE_TD_TCPCSUM;
		if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0)
			cflags |= AGE_TD_UDPCSUM;
		/* Set checksum start offset. */
		cflags |= (poff << AGE_TD_CSUM_PLOADOFFSET_SHIFT);
		/* Set checksum insertion position of TCP/UDP. */
		cflags |= ((poff + m->m_pkthdr.csum_data) <<
		    AGE_TD_CSUM_XSUMOFFSET_SHIFT);
	}

	/* Configure TSO. */
	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		if (poff + (tcp->th_off << 2) == m->m_pkthdr.len) {
			/* Not TSO but IP/TCP checksum offload. */
			cflags |= AGE_TD_IPCSUM | AGE_TD_TCPCSUM;
			/* Clear TSO in order not to set AGE_TD_TSO_HDR. */
			m->m_pkthdr.csum_flags &= ~CSUM_TSO;
		} else {
			/* Request TSO and set MSS. */
			cflags |= AGE_TD_TSO_IPV4;
			cflags |= AGE_TD_IPCSUM | AGE_TD_TCPCSUM;
			cflags |= ((uint32_t)m->m_pkthdr.tso_segsz <<
			    AGE_TD_TSO_MSS_SHIFT);
		}
		/* Set IP/TCP header size. */
		cflags |= ip->ip_hl << AGE_TD_IPHDR_LEN_SHIFT;
		cflags |= tcp->th_off << AGE_TD_TSO_TCPHDR_LEN_SHIFT;
	}

	/* Configure VLAN hardware tag insertion. */
	if ((m->m_flags & M_VLANTAG) != 0) {
		vtag = AGE_TX_VLAN_TAG(m->m_pkthdr.ether_vtag);
		vtag = ((vtag << AGE_TD_VLAN_SHIFT) & AGE_TD_VLAN_MASK);
		cflags |= AGE_TD_INSERT_VLAN_TAG;
	}

	desc = NULL;
	for (i = 0; i < nsegs; i++) {
		desc = &sc->age_rdata.age_tx_ring[prod];
		desc->addr = htole64(txsegs[i].ds_addr);
		desc->len = htole32(AGE_TX_BYTES(txsegs[i].ds_len) | vtag);
		desc->flags = htole32(cflags);
		sc->age_cdata.age_tx_cnt++;
		AGE_DESC_INC(prod, AGE_TX_RING_CNT);
	}
	/* Update producer index. */
	sc->age_cdata.age_tx_prod = prod;

	/* Set EOP on the last descriptor. */
	prod = (prod + AGE_TX_RING_CNT - 1) % AGE_TX_RING_CNT;
	desc = &sc->age_rdata.age_tx_ring[prod];
	desc->flags |= htole32(AGE_TD_EOP);

	/* Lastly set TSO header and modify IP/TCP header for TSO operation. */
	if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) {
		desc = &sc->age_rdata.age_tx_ring[si];
		desc->flags |= htole32(AGE_TD_TSO_HDR);
	}

	/* Swap dmamap of the first and the last. */
	txd = &sc->age_cdata.age_txdesc[prod];
	map = txd_last->tx_dmamap;
	txd_last->tx_dmamap = txd->tx_dmamap;
	txd->tx_dmamap = map;
	txd->tx_m = m;

	/* Sync descriptors. */
	bus_dmamap_sync(sc->age_cdata.age_tx_tag, map, BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag,
	    sc->age_cdata.age_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

static void
age_tx_task(void *arg, int pending)
{
	struct ifnet *ifp;

	ifp = (struct ifnet *)arg;
	age_start(ifp);
}

static void
age_start(struct ifnet *ifp)
{
	struct age_softc *sc;
	struct mbuf *m_head;
	int enq;

	sc = ifp->if_softc;

	AGE_LOCK(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->age_flags & AGE_FLAG_LINK) == 0) {
		AGE_UNLOCK(sc);
		return;
	}

	for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Pack the data into the transmit ring.  If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (age_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		enq++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		ETHER_BPF_MTAP(ifp, m_head);
	}

	if (enq > 0) {
		/* Update mbox. */
		AGE_COMMIT_MBOX(sc);
		/* Set a timeout in case the chip goes out to lunch. */
		sc->age_watchdog_timer = AGE_TX_TIMEOUT;
	}

	AGE_UNLOCK(sc);
}
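
/*
 * The watchdog countdown is armed by age_start().  If it expires while
 * no Tx descriptors are outstanding, the Tx completion interrupt was
 * merely missed and the queue is simply restarted; otherwise the
 * controller is reinitialized.
 */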
static void
age_watchdog(struct age_softc *sc)
{
	struct ifnet *ifp;

	AGE_LOCK_ASSERT(sc);

	if (sc->age_watchdog_timer == 0 || --sc->age_watchdog_timer)
		return;

	ifp = sc->age_ifp;
	if ((sc->age_flags & AGE_FLAG_LINK) == 0) {
		if_printf(sc->age_ifp, "watchdog timeout (missed link)\n");
		ifp->if_oerrors++;
		age_init_locked(sc);
		return;
	}
	if (sc->age_cdata.age_tx_cnt == 0) {
		if_printf(sc->age_ifp,
		    "watchdog timeout (missed Tx interrupts) -- recovering\n");
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			taskqueue_enqueue(sc->age_tq, &sc->age_tx_task);
		return;
	}
	if_printf(sc->age_ifp, "watchdog timeout\n");
	ifp->if_oerrors++;
	age_init_locked(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		taskqueue_enqueue(sc->age_tq, &sc->age_tx_task);
}

static int
age_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct age_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	uint32_t reg;
	int error, mask;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;
	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > AGE_JUMBO_MTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			AGE_LOCK(sc);
			ifp->if_mtu = ifr->ifr_mtu;
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
				age_init_locked(sc);
			AGE_UNLOCK(sc);
		}
		break;
	case SIOCSIFFLAGS:
		AGE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				if (((ifp->if_flags ^ sc->age_if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
					age_rxfilter(sc);
			} else {
				if ((sc->age_flags & AGE_FLAG_DETACH) == 0)
					age_init_locked(sc);
			}
		} else {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
				age_stop(sc);
		}
		sc->age_if_flags = ifp->if_flags;
		AGE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		AGE_LOCK(sc);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			age_rxfilter(sc);
		AGE_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->age_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		AGE_LOCK(sc);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
				ifp->if_hwassist |= AGE_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~AGE_CSUM_FEATURES;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reg = CSR_READ_4(sc, AGE_MAC_CFG);
			reg &= ~MAC_CFG_RXCSUM_ENB;
			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
				reg |= MAC_CFG_RXCSUM_ENB;
			CSR_WRITE_4(sc, AGE_MAC_CFG, reg);
		}
		if ((mask & IFCAP_TSO4) != 0 &&
		    (ifp->if_capabilities & IFCAP_TSO4) != 0) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if ((ifp->if_capenable & IFCAP_TSO4) != 0)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}

		if ((mask & IFCAP_WOL_MCAST) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL_MCAST) != 0)
			ifp->if_capenable ^= IFCAP_WOL_MCAST;
IFCAP_WOL_MCAST; 1934 if ((mask & IFCAP_WOL_MAGIC) != 0 && 1935 (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0) 1936 ifp->if_capenable ^= IFCAP_WOL_MAGIC; 1937 1938 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && 1939 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) { 1940 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 1941 age_rxvlan(sc); 1942 } 1943 if ((mask & IFCAP_VLAN_HWCSUM) != 0 && 1944 (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0) 1945 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; 1946 if ((mask & IFCAP_VLAN_HWTSO) != 0 && 1947 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0) 1948 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 1949 /* 1950 * VLAN hardware tagging is required to do checksum 1951 * offload or TSO on VLAN interface. Checksum offload 1952 * on VLAN interface also requires hardware assistance 1953 * of parent interface. 1954 */ 1955 if ((ifp->if_capenable & IFCAP_TXCSUM) == 0) 1956 ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM; 1957 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) 1958 ifp->if_capenable &= 1959 ~(IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM); 1960 AGE_UNLOCK(sc); 1961 VLAN_CAPABILITIES(ifp); 1962 break; 1963 default: 1964 error = ether_ioctl(ifp, cmd, data); 1965 break; 1966 } 1967 1968 return (error); 1969 } 1970 1971 static void 1972 age_mac_config(struct age_softc *sc) 1973 { 1974 struct mii_data *mii; 1975 uint32_t reg; 1976 1977 AGE_LOCK_ASSERT(sc); 1978 1979 mii = device_get_softc(sc->age_miibus); 1980 reg = CSR_READ_4(sc, AGE_MAC_CFG); 1981 reg &= ~MAC_CFG_FULL_DUPLEX; 1982 reg &= ~(MAC_CFG_TX_FC | MAC_CFG_RX_FC); 1983 reg &= ~MAC_CFG_SPEED_MASK; 1984 /* Reprogram MAC with resolved speed/duplex. */ 1985 switch (IFM_SUBTYPE(mii->mii_media_active)) { 1986 case IFM_10_T: 1987 case IFM_100_TX: 1988 reg |= MAC_CFG_SPEED_10_100; 1989 break; 1990 case IFM_1000_T: 1991 reg |= MAC_CFG_SPEED_1000; 1992 break; 1993 } 1994 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 1995 reg |= MAC_CFG_FULL_DUPLEX; 1996 #ifdef notyet 1997 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) 1998 reg |= MAC_CFG_TX_FC; 1999 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) 2000 reg |= MAC_CFG_RX_FC; 2001 #endif 2002 } 2003 2004 CSR_WRITE_4(sc, AGE_MAC_CFG, reg); 2005 } 2006 2007 static void 2008 age_link_task(void *arg, int pending) 2009 { 2010 struct age_softc *sc; 2011 struct mii_data *mii; 2012 struct ifnet *ifp; 2013 uint32_t reg; 2014 2015 sc = (struct age_softc *)arg; 2016 2017 AGE_LOCK(sc); 2018 mii = device_get_softc(sc->age_miibus); 2019 ifp = sc->age_ifp; 2020 if (mii == NULL || ifp == NULL || 2021 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { 2022 AGE_UNLOCK(sc); 2023 return; 2024 } 2025 2026 sc->age_flags &= ~AGE_FLAG_LINK; 2027 if ((mii->mii_media_status & IFM_AVALID) != 0) { 2028 switch (IFM_SUBTYPE(mii->mii_media_active)) { 2029 case IFM_10_T: 2030 case IFM_100_TX: 2031 case IFM_1000_T: 2032 sc->age_flags |= AGE_FLAG_LINK; 2033 break; 2034 default: 2035 break; 2036 } 2037 } 2038 2039 /* Stop Rx/Tx MACs. */ 2040 age_stop_rxmac(sc); 2041 age_stop_txmac(sc); 2042 2043 /* Program MACs with resolved speed/duplex/flow-control. */ 2044 if ((sc->age_flags & AGE_FLAG_LINK) != 0) { 2045 age_mac_config(sc); 2046 reg = CSR_READ_4(sc, AGE_MAC_CFG); 2047 /* Restart DMA engine and Tx/Rx MAC. 
*/ 2048 CSR_WRITE_4(sc, AGE_DMA_CFG, CSR_READ_4(sc, AGE_DMA_CFG) | 2049 DMA_CFG_RD_ENB | DMA_CFG_WR_ENB); 2050 reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB; 2051 CSR_WRITE_4(sc, AGE_MAC_CFG, reg); 2052 } 2053 2054 AGE_UNLOCK(sc); 2055 } 2056 2057 static void 2058 age_stats_update(struct age_softc *sc) 2059 { 2060 struct age_stats *stat; 2061 struct smb *smb; 2062 struct ifnet *ifp; 2063 2064 AGE_LOCK_ASSERT(sc); 2065 2066 stat = &sc->age_stat; 2067 2068 bus_dmamap_sync(sc->age_cdata.age_smb_block_tag, 2069 sc->age_cdata.age_smb_block_map, 2070 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2071 2072 smb = sc->age_rdata.age_smb_block; 2073 if (smb->updated == 0) 2074 return; 2075 2076 ifp = sc->age_ifp; 2077 /* Rx stats. */ 2078 stat->rx_frames += smb->rx_frames; 2079 stat->rx_bcast_frames += smb->rx_bcast_frames; 2080 stat->rx_mcast_frames += smb->rx_mcast_frames; 2081 stat->rx_pause_frames += smb->rx_pause_frames; 2082 stat->rx_control_frames += smb->rx_control_frames; 2083 stat->rx_crcerrs += smb->rx_crcerrs; 2084 stat->rx_lenerrs += smb->rx_lenerrs; 2085 stat->rx_bytes += smb->rx_bytes; 2086 stat->rx_runts += smb->rx_runts; 2087 stat->rx_fragments += smb->rx_fragments; 2088 stat->rx_pkts_64 += smb->rx_pkts_64; 2089 stat->rx_pkts_65_127 += smb->rx_pkts_65_127; 2090 stat->rx_pkts_128_255 += smb->rx_pkts_128_255; 2091 stat->rx_pkts_256_511 += smb->rx_pkts_256_511; 2092 stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023; 2093 stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518; 2094 stat->rx_pkts_1519_max += smb->rx_pkts_1519_max; 2095 stat->rx_pkts_truncated += smb->rx_pkts_truncated; 2096 stat->rx_fifo_oflows += smb->rx_fifo_oflows; 2097 stat->rx_desc_oflows += smb->rx_desc_oflows; 2098 stat->rx_alignerrs += smb->rx_alignerrs; 2099 stat->rx_bcast_bytes += smb->rx_bcast_bytes; 2100 stat->rx_mcast_bytes += smb->rx_mcast_bytes; 2101 stat->rx_pkts_filtered += smb->rx_pkts_filtered; 2102 2103 /* Tx stats. */ 2104 stat->tx_frames += smb->tx_frames; 2105 stat->tx_bcast_frames += smb->tx_bcast_frames; 2106 stat->tx_mcast_frames += smb->tx_mcast_frames; 2107 stat->tx_pause_frames += smb->tx_pause_frames; 2108 stat->tx_excess_defer += smb->tx_excess_defer; 2109 stat->tx_control_frames += smb->tx_control_frames; 2110 stat->tx_deferred += smb->tx_deferred; 2111 stat->tx_bytes += smb->tx_bytes; 2112 stat->tx_pkts_64 += smb->tx_pkts_64; 2113 stat->tx_pkts_65_127 += smb->tx_pkts_65_127; 2114 stat->tx_pkts_128_255 += smb->tx_pkts_128_255; 2115 stat->tx_pkts_256_511 += smb->tx_pkts_256_511; 2116 stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023; 2117 stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518; 2118 stat->tx_pkts_1519_max += smb->tx_pkts_1519_max; 2119 stat->tx_single_colls += smb->tx_single_colls; 2120 stat->tx_multi_colls += smb->tx_multi_colls; 2121 stat->tx_late_colls += smb->tx_late_colls; 2122 stat->tx_excess_colls += smb->tx_excess_colls; 2123 stat->tx_underrun += smb->tx_underrun; 2124 stat->tx_desc_underrun += smb->tx_desc_underrun; 2125 stat->tx_lenerrs += smb->tx_lenerrs; 2126 stat->tx_pkts_truncated += smb->tx_pkts_truncated; 2127 stat->tx_bcast_bytes += smb->tx_bcast_bytes; 2128 stat->tx_mcast_bytes += smb->tx_mcast_bytes; 2129 2130 /* Update counters in ifnet. 
*/ 2131 ifp->if_opackets += smb->tx_frames; 2132 2133 ifp->if_collisions += smb->tx_single_colls + 2134 smb->tx_multi_colls + smb->tx_late_colls + 2135 smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT; 2136 2137 ifp->if_oerrors += smb->tx_excess_colls + 2138 smb->tx_late_colls + smb->tx_underrun + 2139 smb->tx_pkts_truncated; 2140 2141 ifp->if_ipackets += smb->rx_frames; 2142 2143 ifp->if_ierrors += smb->rx_crcerrs + smb->rx_lenerrs + 2144 smb->rx_runts + smb->rx_pkts_truncated + 2145 smb->rx_fifo_oflows + smb->rx_desc_oflows + 2146 smb->rx_alignerrs; 2147 2148 /* Update done, clear. */ 2149 smb->updated = 0; 2150 2151 bus_dmamap_sync(sc->age_cdata.age_smb_block_tag, 2152 sc->age_cdata.age_smb_block_map, 2153 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2154 } 2155 2156 static int 2157 age_intr(void *arg) 2158 { 2159 struct age_softc *sc; 2160 uint32_t status; 2161 2162 sc = (struct age_softc *)arg; 2163 2164 status = CSR_READ_4(sc, AGE_INTR_STATUS); 2165 if (status == 0 || (status & AGE_INTRS) == 0) 2166 return (FILTER_STRAY); 2167 /* Disable interrupts. */ 2168 CSR_WRITE_4(sc, AGE_INTR_STATUS, status | INTR_DIS_INT); 2169 taskqueue_enqueue(sc->age_tq, &sc->age_int_task); 2170 2171 return (FILTER_HANDLED); 2172 } 2173 2174 static void 2175 age_int_task(void *arg, int pending) 2176 { 2177 struct age_softc *sc; 2178 struct ifnet *ifp; 2179 struct cmb *cmb; 2180 uint32_t status; 2181 2182 sc = (struct age_softc *)arg; 2183 2184 AGE_LOCK(sc); 2185 2186 bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag, 2187 sc->age_cdata.age_cmb_block_map, 2188 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2189 cmb = sc->age_rdata.age_cmb_block; 2190 status = le32toh(cmb->intr_status); 2191 if (sc->age_morework != 0) 2192 status |= INTR_CMB_RX; 2193 if ((status & AGE_INTRS) == 0) 2194 goto done; 2195 2196 sc->age_tpd_cons = (le32toh(cmb->tpd_cons) & TPD_CONS_MASK) >> 2197 TPD_CONS_SHIFT; 2198 sc->age_rr_prod = (le32toh(cmb->rprod_cons) & RRD_PROD_MASK) >> 2199 RRD_PROD_SHIFT; 2200 /* Let hardware know CMB was served. */ 2201 cmb->intr_status = 0; 2202 bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag, 2203 sc->age_cdata.age_cmb_block_map, 2204 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2205 2206 #if 0 2207 printf("INTR: 0x%08x\n", status); 2208 status &= ~INTR_DIS_DMA; 2209 CSR_WRITE_4(sc, AGE_INTR_STATUS, status | INTR_DIS_INT); 2210 #endif 2211 ifp = sc->age_ifp; 2212 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 2213 if ((status & INTR_CMB_RX) != 0) 2214 sc->age_morework = age_rxintr(sc, sc->age_rr_prod, 2215 sc->age_process_limit); 2216 if ((status & INTR_CMB_TX) != 0) 2217 age_txintr(sc, sc->age_tpd_cons); 2218 if ((status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST)) != 0) { 2219 if ((status & INTR_DMA_RD_TO_RST) != 0) 2220 device_printf(sc->age_dev, 2221 "DMA read error! -- resetting\n"); 2222 if ((status & INTR_DMA_WR_TO_RST) != 0) 2223 device_printf(sc->age_dev, 2224 "DMA write error! -- resetting\n"); 2225 age_init_locked(sc); 2226 } 2227 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2228 taskqueue_enqueue(sc->age_tq, &sc->age_tx_task); 2229 if ((status & INTR_SMB) != 0) 2230 age_stats_update(sc); 2231 } 2232 2233 /* Check whether CMB was updated while serving Tx/Rx/SMB handler. 
*/ 2234 bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag, 2235 sc->age_cdata.age_cmb_block_map, 2236 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2237 status = le32toh(cmb->intr_status); 2238 if (sc->age_morework != 0 || (status & AGE_INTRS) != 0) { 2239 taskqueue_enqueue(sc->age_tq, &sc->age_int_task); 2240 AGE_UNLOCK(sc); 2241 return; 2242 } 2243 2244 done: 2245 /* Re-enable interrupts. */ 2246 CSR_WRITE_4(sc, AGE_INTR_STATUS, 0); 2247 AGE_UNLOCK(sc); 2248 } 2249 2250 static void 2251 age_txintr(struct age_softc *sc, int tpd_cons) 2252 { 2253 struct ifnet *ifp; 2254 struct age_txdesc *txd; 2255 int cons, prog; 2256 2257 AGE_LOCK_ASSERT(sc); 2258 2259 ifp = sc->age_ifp; 2260 2261 bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag, 2262 sc->age_cdata.age_tx_ring_map, 2263 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2264 2265 /* 2266 * Go through our Tx list and free mbufs for those 2267 * frames which have been transmitted. 2268 */ 2269 cons = sc->age_cdata.age_tx_cons; 2270 for (prog = 0; cons != tpd_cons; AGE_DESC_INC(cons, AGE_TX_RING_CNT)) { 2271 if (sc->age_cdata.age_tx_cnt <= 0) 2272 break; 2273 prog++; 2274 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2275 sc->age_cdata.age_tx_cnt--; 2276 txd = &sc->age_cdata.age_txdesc[cons]; 2277 /* 2278 * Clear Tx descriptors, it's not required but would 2279 * help debugging in case of Tx issues. 2280 */ 2281 txd->tx_desc->addr = 0; 2282 txd->tx_desc->len = 0; 2283 txd->tx_desc->flags = 0; 2284 2285 if (txd->tx_m == NULL) 2286 continue; 2287 /* Reclaim transmitted mbufs. */ 2288 bus_dmamap_sync(sc->age_cdata.age_tx_tag, txd->tx_dmamap, 2289 BUS_DMASYNC_POSTWRITE); 2290 bus_dmamap_unload(sc->age_cdata.age_tx_tag, txd->tx_dmamap); 2291 m_freem(txd->tx_m); 2292 txd->tx_m = NULL; 2293 } 2294 2295 if (prog > 0) { 2296 sc->age_cdata.age_tx_cons = cons; 2297 2298 /* 2299 * Unarm watchdog timer only when there are no pending 2300 * Tx descriptors in queue. 2301 */ 2302 if (sc->age_cdata.age_tx_cnt == 0) 2303 sc->age_watchdog_timer = 0; 2304 bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag, 2305 sc->age_cdata.age_tx_ring_map, 2306 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2307 } 2308 } 2309 2310 /* Receive a frame. */ 2311 static void 2312 age_rxeof(struct age_softc *sc, struct rx_rdesc *rxrd) 2313 { 2314 struct age_rxdesc *rxd; 2315 struct rx_desc *desc; 2316 struct ifnet *ifp; 2317 struct mbuf *mp, *m; 2318 uint32_t status, index, vtag; 2319 int count, nsegs, pktlen; 2320 int rx_cons; 2321 2322 AGE_LOCK_ASSERT(sc); 2323 2324 ifp = sc->age_ifp; 2325 status = le32toh(rxrd->flags); 2326 index = le32toh(rxrd->index); 2327 rx_cons = AGE_RX_CONS(index); 2328 nsegs = AGE_RX_NSEGS(index); 2329 2330 sc->age_cdata.age_rxlen = AGE_RX_BYTES(le32toh(rxrd->len)); 2331 if ((status & AGE_RRD_ERROR) != 0 && 2332 (status & (AGE_RRD_CRC | AGE_RRD_CODE | AGE_RRD_DRIBBLE | 2333 AGE_RRD_RUNT | AGE_RRD_OFLOW | AGE_RRD_TRUNC)) != 0) { 2334 /* 2335 * We want to pass the following frames to upper 2336 * layer regardless of error status of Rx return 2337 * ring. 2338 * 2339 * o IP/TCP/UDP checksum is bad. 2340 * o frame length and protocol specific length 2341 * does not match. 2342 */ 2343 sc->age_cdata.age_rx_cons += nsegs; 2344 sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT; 2345 return; 2346 } 2347 2348 pktlen = 0; 2349 for (count = 0; count < nsegs; count++, 2350 AGE_DESC_INC(rx_cons, AGE_RX_RING_CNT)) { 2351 rxd = &sc->age_cdata.age_rxdesc[rx_cons]; 2352 mp = rxd->rx_m; 2353 desc = rxd->rx_desc; 2354 /* Add a new receive buffer to the ring. 
*/ 2355 if (age_newbuf(sc, rxd) != 0) { 2356 ifp->if_iqdrops++; 2357 /* Reuse Rx buffers. */ 2358 if (sc->age_cdata.age_rxhead != NULL) { 2359 m_freem(sc->age_cdata.age_rxhead); 2360 AGE_RXCHAIN_RESET(sc); 2361 } 2362 break; 2363 } 2364 2365 /* The length of the first mbuf is computed last. */ 2366 if (count != 0) { 2367 mp->m_len = AGE_RX_BYTES(le32toh(desc->len)); 2368 pktlen += mp->m_len; 2369 } 2370 2371 /* Chain received mbufs. */ 2372 if (sc->age_cdata.age_rxhead == NULL) { 2373 sc->age_cdata.age_rxhead = mp; 2374 sc->age_cdata.age_rxtail = mp; 2375 } else { 2376 mp->m_flags &= ~M_PKTHDR; 2377 sc->age_cdata.age_rxprev_tail = 2378 sc->age_cdata.age_rxtail; 2379 sc->age_cdata.age_rxtail->m_next = mp; 2380 sc->age_cdata.age_rxtail = mp; 2381 } 2382 2383 if (count == nsegs - 1) { 2384 /* 2385 * It seems that L1 controller has no way 2386 * to tell hardware to strip CRC bytes. 2387 */ 2388 sc->age_cdata.age_rxlen -= ETHER_CRC_LEN; 2389 if (nsegs > 1) { 2390 /* Remove the CRC bytes in chained mbufs. */ 2391 pktlen -= ETHER_CRC_LEN; 2392 if (mp->m_len <= ETHER_CRC_LEN) { 2393 sc->age_cdata.age_rxtail = 2394 sc->age_cdata.age_rxprev_tail; 2395 sc->age_cdata.age_rxtail->m_len -= 2396 (ETHER_CRC_LEN - mp->m_len); 2397 sc->age_cdata.age_rxtail->m_next = NULL; 2398 m_freem(mp); 2399 } else { 2400 mp->m_len -= ETHER_CRC_LEN; 2401 } 2402 } 2403 2404 m = sc->age_cdata.age_rxhead; 2405 m->m_flags |= M_PKTHDR; 2406 m->m_pkthdr.rcvif = ifp; 2407 m->m_pkthdr.len = sc->age_cdata.age_rxlen; 2408 /* Set the first mbuf length. */ 2409 m->m_len = sc->age_cdata.age_rxlen - pktlen; 2410 2411 /* 2412 * Set checksum information. 2413 * It seems that L1 controller can compute partial 2414 * checksum. The partial checksum value can be used 2415 * to accelerate checksum computation for fragmented 2416 * TCP/UDP packets. Upper network stack already 2417 * takes advantage of the partial checksum value in 2418 * IP reassembly stage. But I'm not sure the 2419 * correctness of the partial hardware checksum 2420 * assistance due to lack of data sheet. If it is 2421 * proven to work on L1 I'll enable it. 2422 */ 2423 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 && 2424 (status & AGE_RRD_IPV4) != 0) { 2425 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 2426 if ((status & AGE_RRD_IPCSUM_NOK) == 0) 2427 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 2428 if ((status & (AGE_RRD_TCP | AGE_RRD_UDP)) && 2429 (status & AGE_RRD_TCP_UDPCSUM_NOK) == 0) { 2430 m->m_pkthdr.csum_flags |= 2431 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 2432 m->m_pkthdr.csum_data = 0xffff; 2433 } 2434 /* 2435 * Don't mark bad checksum for TCP/UDP frames 2436 * as fragmented frames may always have set 2437 * bad checksummed bit of descriptor status. 2438 */ 2439 } 2440 2441 /* Check for VLAN tagged frames. */ 2442 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && 2443 (status & AGE_RRD_VLAN) != 0) { 2444 vtag = AGE_RX_VLAN(le32toh(rxrd->vtags)); 2445 m->m_pkthdr.ether_vtag = AGE_RX_VLAN_TAG(vtag); 2446 m->m_flags |= M_VLANTAG; 2447 } 2448 2449 /* Pass it on. */ 2450 AGE_UNLOCK(sc); 2451 (*ifp->if_input)(ifp, m); 2452 AGE_LOCK(sc); 2453 2454 /* Reset mbuf chains. 
*/
2455 			AGE_RXCHAIN_RESET(sc);
2456 		}
2457 	}
2458 
2459 	if (count != nsegs) {
2460 		sc->age_cdata.age_rx_cons += nsegs;
2461 		sc->age_cdata.age_rx_cons %= AGE_RX_RING_CNT;
2462 	} else
2463 		sc->age_cdata.age_rx_cons = rx_cons;
2464 }
2465 
2466 static int
2467 age_rxintr(struct age_softc *sc, int rr_prod, int count)
2468 {
2469 	struct rx_rdesc *rxrd;
2470 	int rr_cons, nsegs, pktlen, prog;
2471 
2472 	AGE_LOCK_ASSERT(sc);
2473 
2474 	rr_cons = sc->age_cdata.age_rr_cons;
2475 	if (rr_cons == rr_prod)
2476 		return (0);
2477 
2478 	bus_dmamap_sync(sc->age_cdata.age_rr_ring_tag,
2479 	    sc->age_cdata.age_rr_ring_map,
2480 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2481 	bus_dmamap_sync(sc->age_cdata.age_rx_ring_tag,
2482 	    sc->age_cdata.age_rx_ring_map,
2483 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2484 
2485 	for (prog = 0; rr_cons != rr_prod; prog++) {
2486 		if (count-- <= 0)
2487 			break;
2488 		rxrd = &sc->age_rdata.age_rr_ring[rr_cons];
2489 		nsegs = AGE_RX_NSEGS(le32toh(rxrd->index));
2490 		if (nsegs == 0)
2491 			break;
2492 		/*
2493 		 * Check the number of segments against the received bytes.
2494 		 * A mismatch would indicate that the hardware is still
2495 		 * updating the Rx return descriptors.
2496 		 * I'm not sure whether this check is really needed.
2497 		 */
2498 		pktlen = AGE_RX_BYTES(le32toh(rxrd->len));
2499 		if (nsegs != ((pktlen + (MCLBYTES - ETHER_ALIGN - 1)) /
2500 		    (MCLBYTES - ETHER_ALIGN)))
2501 			break;
2502 
2503 
2504 		/* Received a frame. */
2505 		age_rxeof(sc, rxrd);
2506 		/* Clear return ring. */
2507 		rxrd->index = 0;
2508 		AGE_DESC_INC(rr_cons, AGE_RR_RING_CNT);
2509 	}
2510 
2511 	if (prog > 0) {
2512 		/* Update the consumer index. */
2513 		sc->age_cdata.age_rr_cons = rr_cons;
2514 
2515 		/* Sync descriptors. */
2516 		bus_dmamap_sync(sc->age_cdata.age_rx_ring_tag,
2517 		    sc->age_cdata.age_rx_ring_map,
2518 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2519 		bus_dmamap_sync(sc->age_cdata.age_rr_ring_tag,
2520 		    sc->age_cdata.age_rr_ring_map,
2521 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2522 
2523 		/* Notify hardware of newly available Rx buffers. */
2524 		AGE_COMMIT_MBOX(sc);
2525 	}
2526 
2527 	return (count > 0 ? 0 : EAGAIN);
2528 }
2529 
2530 static void
2531 age_tick(void *arg)
2532 {
2533 	struct age_softc *sc;
2534 	struct mii_data *mii;
2535 
2536 	sc = (struct age_softc *)arg;
2537 
2538 	AGE_LOCK_ASSERT(sc);
2539 
2540 	mii = device_get_softc(sc->age_miibus);
2541 	mii_tick(mii);
2542 	age_watchdog(sc);
2543 	callout_reset(&sc->age_tick_ch, hz, age_tick, sc);
2544 }
2545 
2546 static void
2547 age_reset(struct age_softc *sc)
2548 {
2549 	uint32_t reg;
2550 	int i;
2551 
2552 	CSR_WRITE_4(sc, AGE_MASTER_CFG, MASTER_RESET);
2553 	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
2554 		DELAY(1);
2555 		if ((CSR_READ_4(sc, AGE_MASTER_CFG) & MASTER_RESET) == 0)
2556 			break;
2557 	}
2558 	if (i == 0)
2559 		device_printf(sc->age_dev, "master reset timeout!\n");
2560 
2561 	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
2562 		if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0)
2563 			break;
2564 		DELAY(10);
2565 	}
2566 
2567 	if (i == 0)
2568 		device_printf(sc->age_dev, "reset timeout (0x%08x)!\n", reg);
2569 	/* Initialize the PCIe module. From Linux. */
2570 	CSR_WRITE_4(sc, 0x12FC, 0x6500);
2571 	CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
2572 }
2573 
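/*
 * age_reset() above shows the poll-until-idle pattern this driver uses
 * in several places: kick a self-clearing control bit, then spin on a
 * status register with small delays. A minimal sketch of that pattern
 * as a stand-alone helper (illustrative only; age_wait_idle() is not
 * part of this driver):
 */
#if 0
static int
age_wait_idle(struct age_softc *sc, uint32_t reg, uint32_t mask)
{
	int i;

	for (i = AGE_RESET_TIMEOUT; i > 0; i--) {
		if ((CSR_READ_4(sc, reg) & mask) == 0)
			return (0);
		DELAY(10);
	}
	return (ETIMEDOUT);	/* Caller decides how loudly to complain. */
}
#endif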
2574 static void
2575 age_init(void *xsc)
2576 {
2577 	struct age_softc *sc;
2578 
2579 	sc = (struct age_softc *)xsc;
2580 	AGE_LOCK(sc);
2581 	age_init_locked(sc);
2582 	AGE_UNLOCK(sc);
2583 }
2584 
2585 static void
2586 age_init_locked(struct age_softc *sc)
2587 {
2588 	struct ifnet *ifp;
2589 	struct mii_data *mii;
2590 	uint8_t eaddr[ETHER_ADDR_LEN];
2591 	bus_addr_t paddr;
2592 	uint32_t reg, fsize;
2593 	uint32_t rxf_hi, rxf_lo, rrd_hi, rrd_lo;
2594 	int error;
2595 
2596 	AGE_LOCK_ASSERT(sc);
2597 
2598 	ifp = sc->age_ifp;
2599 	mii = device_get_softc(sc->age_miibus);
2600 
2601 	/*
2602 	 * Cancel any pending I/O.
2603 	 */
2604 	age_stop(sc);
2605 
2606 	/*
2607 	 * Reset the chip to a known state.
2608 	 */
2609 	age_reset(sc);
2610 
2611 	/* Initialize descriptors. */
2612 	error = age_init_rx_ring(sc);
2613 	if (error != 0) {
2614 		device_printf(sc->age_dev, "no memory for Rx buffers.\n");
2615 		age_stop(sc);
2616 		return;
2617 	}
2618 	age_init_rr_ring(sc);
2619 	age_init_tx_ring(sc);
2620 	age_init_cmb_block(sc);
2621 	age_init_smb_block(sc);
2622 
2623 	/* Reprogram the station address. */
2624 	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
2625 	CSR_WRITE_4(sc, AGE_PAR0,
2626 	    eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]);
2627 	CSR_WRITE_4(sc, AGE_PAR1, eaddr[0] << 8 | eaddr[1]);
2628 
2629 	/* Set descriptor base addresses; all rings share the high 32 bits. */
2630 	paddr = sc->age_rdata.age_tx_ring_paddr;
2631 	CSR_WRITE_4(sc, AGE_DESC_ADDR_HI, AGE_ADDR_HI(paddr));
2632 	paddr = sc->age_rdata.age_rx_ring_paddr;
2633 	CSR_WRITE_4(sc, AGE_DESC_RD_ADDR_LO, AGE_ADDR_LO(paddr));
2634 	paddr = sc->age_rdata.age_rr_ring_paddr;
2635 	CSR_WRITE_4(sc, AGE_DESC_RRD_ADDR_LO, AGE_ADDR_LO(paddr));
2636 	paddr = sc->age_rdata.age_tx_ring_paddr;
2637 	CSR_WRITE_4(sc, AGE_DESC_TPD_ADDR_LO, AGE_ADDR_LO(paddr));
2638 	paddr = sc->age_rdata.age_cmb_block_paddr;
2639 	CSR_WRITE_4(sc, AGE_DESC_CMB_ADDR_LO, AGE_ADDR_LO(paddr));
2640 	paddr = sc->age_rdata.age_smb_block_paddr;
2641 	CSR_WRITE_4(sc, AGE_DESC_SMB_ADDR_LO, AGE_ADDR_LO(paddr));
2642 	/* Set Rx/Rx return descriptor counters. */
2643 	CSR_WRITE_4(sc, AGE_DESC_RRD_RD_CNT,
2644 	    ((AGE_RR_RING_CNT << DESC_RRD_CNT_SHIFT) &
2645 	    DESC_RRD_CNT_MASK) |
2646 	    ((AGE_RX_RING_CNT << DESC_RD_CNT_SHIFT) & DESC_RD_CNT_MASK));
2647 	/* Set Tx descriptor counter. */
2648 	CSR_WRITE_4(sc, AGE_DESC_TPD_CNT,
2649 	    (AGE_TX_RING_CNT << DESC_TPD_CNT_SHIFT) & DESC_TPD_CNT_MASK);
2650 
2651 	/* Tell hardware that we're ready to load descriptors. */
2652 	CSR_WRITE_4(sc, AGE_DMA_BLOCK, DMA_BLOCK_LOAD);
2653 
2654 	/*
2655 	 * Initialize the mailbox register.
2656 	 * Updated producer/consumer index information is exchanged
2657 	 * through this mailbox register. However, the Tx producer and
2658 	 * the Rx return consumer/Rx producer indices all share one
2659 	 * register, so it's hard to separate the Tx and Rx code paths
2660 	 * without locking. If the L1 hardware had separate mailbox
2661 	 * registers for Tx and Rx consumer/producer management, we
2662 	 * could run independent Tx/Rx handlers, and the Rx handler in
2663 	 * turn could run without any locking.
2664 	 */
2665 	AGE_COMMIT_MBOX(sc);
2666 
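/*
 * For illustration, a sketch of what AGE_COMMIT_MBOX() plausibly
 * expands to: a single write of the AGE_MBOX register carrying all
 * three ring indices at once, which is why the Tx and Rx paths cannot
 * update their indices independently. The MBOX_* shift/mask names
 * below are assumptions modeled on if_agereg.h, not a verbatim copy
 * of the macro.
 */
#if 0
	CSR_WRITE_4(sc, AGE_MBOX,
	    ((sc->age_cdata.age_rx_cons << MBOX_RD_PROD_IDX_SHIFT) &
	    MBOX_RD_PROD_IDX_MASK) |
	    ((sc->age_cdata.age_rr_cons << MBOX_RRD_CONS_IDX_SHIFT) &
	    MBOX_RRD_CONS_IDX_MASK) |
	    ((sc->age_cdata.age_tx_prod << MBOX_TD_PROD_IDX_SHIFT) &
	    MBOX_TD_PROD_IDX_MASK));
#endif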
2667 	/* Configure IPG/IFG parameters. */
2668 	CSR_WRITE_4(sc, AGE_IPG_IFG_CFG,
2669 	    ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & IPG_IFG_IPG2_MASK) |
2670 	    ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & IPG_IFG_IPG1_MASK) |
2671 	    ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & IPG_IFG_MIFG_MASK) |
2672 	    ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & IPG_IFG_IPGT_MASK));
2673 
2674 	/* Set parameters for half-duplex media. */
2675 	CSR_WRITE_4(sc, AGE_HDPX_CFG,
2676 	    ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) &
2677 	    HDPX_CFG_LCOL_MASK) |
2678 	    ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) &
2679 	    HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN |
2680 	    ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) &
2681 	    HDPX_CFG_ABEBT_MASK) |
2682 	    ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) &
2683 	    HDPX_CFG_JAMIPG_MASK));
2684 
2685 	/* Configure the interrupt moderation timer. */
2686 	CSR_WRITE_2(sc, AGE_IM_TIMER, AGE_USECS(sc->age_int_mod));
2687 	reg = CSR_READ_4(sc, AGE_MASTER_CFG);
2688 	reg &= ~MASTER_MTIMER_ENB;
2689 	if (AGE_USECS(sc->age_int_mod) == 0)
2690 		reg &= ~MASTER_ITIMER_ENB;
2691 	else
2692 		reg |= MASTER_ITIMER_ENB;
2693 	CSR_WRITE_4(sc, AGE_MASTER_CFG, reg);
2694 	if (bootverbose)
2695 		device_printf(sc->age_dev, "interrupt moderation is %d us.\n",
2696 		    sc->age_int_mod);
2697 	CSR_WRITE_2(sc, AGE_INTR_CLR_TIMER, AGE_USECS(1000));
2698 
2699 	/* Set the maximum frame size; don't let the MTU drop below ETHERMTU. */
2700 	if (ifp->if_mtu < ETHERMTU)
2701 		sc->age_max_frame_size = ETHERMTU;
2702 	else
2703 		sc->age_max_frame_size = ifp->if_mtu;
2704 	sc->age_max_frame_size += ETHER_HDR_LEN +
2705 	    sizeof(struct ether_vlan_header) + ETHER_CRC_LEN;
2706 	CSR_WRITE_4(sc, AGE_FRAME_SIZE, sc->age_max_frame_size);
2707 	/* Configure jumbo frame. */
2708 	fsize = roundup(sc->age_max_frame_size, sizeof(uint64_t));
2709 	CSR_WRITE_4(sc, AGE_RXQ_JUMBO_CFG,
2710 	    (((fsize / sizeof(uint64_t)) <<
2711 	    RXQ_JUMBO_CFG_SZ_THRESH_SHIFT) & RXQ_JUMBO_CFG_SZ_THRESH_MASK) |
2712 	    ((RXQ_JUMBO_CFG_LKAH_DEFAULT <<
2713 	    RXQ_JUMBO_CFG_LKAH_SHIFT) & RXQ_JUMBO_CFG_LKAH_MASK) |
2714 	    ((AGE_USECS(8) << RXQ_JUMBO_CFG_RRD_TIMER_SHIFT) &
2715 	    RXQ_JUMBO_CFG_RRD_TIMER_MASK));
2716 
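/*
 * Worked example for the two writes above, assuming the default MTU of
 * 1500 (ETHERMTU): age_max_frame_size = 1500 + ETHER_HDR_LEN (14) +
 * sizeof(struct ether_vlan_header) (18) + ETHER_CRC_LEN (4) = 1536
 * bytes. Then fsize = roundup(1536, 8) = 1536, so the jumbo size
 * threshold field is programmed with 1536 / 8 = 192 eight-byte units.
 */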
2717 	/* Configure flow-control parameters. From Linux. */
2718 	if ((sc->age_flags & AGE_FLAG_PCIE) != 0) {
2719 		/*
2720 		 * Magic workaround for old L1 parts. It's not known
2721 		 * which hardware revision requires this magic.
2722 		 */
2723 		CSR_WRITE_4(sc, 0x12FC, 0x6500);
2724 		/*
2725 		 * Another magic workaround for flow-control mode
2726 		 * change. From Linux.
2727 		 */
2728 		CSR_WRITE_4(sc, 0x1008, CSR_READ_4(sc, 0x1008) | 0x8000);
2729 	}
2730 	/*
2731 	 * TODO
2732 	 * Understand the pause parameter relationship between the FIFO
2733 	 * size and the number of Rx descriptors and Rx return descriptors.
2734 	 *
2735 	 * The magic parameters came from Linux.
2736 	 */
2737 	switch (sc->age_chip_rev) {
2738 	case 0x8001:
2739 	case 0x9001:
2740 	case 0x9002:
2741 	case 0x9003:
2742 		rxf_hi = AGE_RX_RING_CNT / 16;
2743 		rxf_lo = (AGE_RX_RING_CNT * 7) / 8;
2744 		rrd_hi = (AGE_RR_RING_CNT * 7) / 8;
2745 		rrd_lo = AGE_RR_RING_CNT / 16;
2746 		break;
2747 	default:
2748 		reg = CSR_READ_4(sc, AGE_SRAM_RX_FIFO_LEN);
2749 		rxf_lo = reg / 16;
2750 		if (rxf_lo < 192)
2751 			rxf_lo = 192;
2752 		rxf_hi = (reg * 7) / 8;
2753 		if (rxf_hi < rxf_lo)
2754 			rxf_hi = rxf_lo + 16;
2755 		reg = CSR_READ_4(sc, AGE_SRAM_RRD_LEN);
2756 		rrd_lo = reg / 8;
2757 		rrd_hi = (reg * 7) / 8;
2758 		if (rrd_lo < 2)
2759 			rrd_lo = 2;
2760 		if (rrd_hi < rrd_lo)
2761 			rrd_hi = rrd_lo + 3;
2762 		break;
2763 	}
2764 	CSR_WRITE_4(sc, AGE_RXQ_FIFO_PAUSE_THRESH,
2765 	    ((rxf_lo << RXQ_FIFO_PAUSE_THRESH_LO_SHIFT) &
2766 	    RXQ_FIFO_PAUSE_THRESH_LO_MASK) |
2767 	    ((rxf_hi << RXQ_FIFO_PAUSE_THRESH_HI_SHIFT) &
2768 	    RXQ_FIFO_PAUSE_THRESH_HI_MASK));
2769 	CSR_WRITE_4(sc, AGE_RXQ_RRD_PAUSE_THRESH,
2770 	    ((rrd_lo << RXQ_RRD_PAUSE_THRESH_LO_SHIFT) &
2771 	    RXQ_RRD_PAUSE_THRESH_LO_MASK) |
2772 	    ((rrd_hi << RXQ_RRD_PAUSE_THRESH_HI_SHIFT) &
2773 	    RXQ_RRD_PAUSE_THRESH_HI_MASK));
2774 
2775 	/* Configure RxQ. */
2776 	CSR_WRITE_4(sc, AGE_RXQ_CFG,
2777 	    ((RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) &
2778 	    RXQ_CFG_RD_BURST_MASK) |
2779 	    ((RXQ_CFG_RRD_BURST_THRESH_DEFAULT <<
2780 	    RXQ_CFG_RRD_BURST_THRESH_SHIFT) & RXQ_CFG_RRD_BURST_THRESH_MASK) |
2781 	    ((RXQ_CFG_RD_PREF_MIN_IPG_DEFAULT <<
2782 	    RXQ_CFG_RD_PREF_MIN_IPG_SHIFT) & RXQ_CFG_RD_PREF_MIN_IPG_MASK) |
2783 	    RXQ_CFG_CUT_THROUGH_ENB | RXQ_CFG_ENB);
2784 
2785 	/* Configure TxQ. */
2786 	CSR_WRITE_4(sc, AGE_TXQ_CFG,
2787 	    ((TXQ_CFG_TPD_BURST_DEFAULT << TXQ_CFG_TPD_BURST_SHIFT) &
2788 	    TXQ_CFG_TPD_BURST_MASK) |
2789 	    ((TXQ_CFG_TX_FIFO_BURST_DEFAULT << TXQ_CFG_TX_FIFO_BURST_SHIFT) &
2790 	    TXQ_CFG_TX_FIFO_BURST_MASK) |
2791 	    ((TXQ_CFG_TPD_FETCH_DEFAULT <<
2792 	    TXQ_CFG_TPD_FETCH_THRESH_SHIFT) & TXQ_CFG_TPD_FETCH_THRESH_MASK) |
2793 	    TXQ_CFG_ENB);
2794 
2795 	CSR_WRITE_4(sc, AGE_TX_JUMBO_TPD_TH_IPG,
2796 	    (((fsize / sizeof(uint64_t) << TX_JUMBO_TPD_TH_SHIFT)) &
2797 	    TX_JUMBO_TPD_TH_MASK) |
2798 	    ((TX_JUMBO_TPD_IPG_DEFAULT << TX_JUMBO_TPD_IPG_SHIFT) &
2799 	    TX_JUMBO_TPD_IPG_MASK));
2800 	/* Configure DMA parameters. */
2801 	CSR_WRITE_4(sc, AGE_DMA_CFG,
2802 	    DMA_CFG_ENH_ORDER | DMA_CFG_RCB_64 |
2803 	    sc->age_dma_rd_burst | DMA_CFG_RD_ENB |
2804 	    sc->age_dma_wr_burst | DMA_CFG_WR_ENB);
2805 
2806 	/* Configure CMB DMA write threshold. */
2807 	CSR_WRITE_4(sc, AGE_CMB_WR_THRESH,
2808 	    ((CMB_WR_THRESH_RRD_DEFAULT << CMB_WR_THRESH_RRD_SHIFT) &
2809 	    CMB_WR_THRESH_RRD_MASK) |
2810 	    ((CMB_WR_THRESH_TPD_DEFAULT << CMB_WR_THRESH_TPD_SHIFT) &
2811 	    CMB_WR_THRESH_TPD_MASK));
2812 
2813 	/* Set the CMB/SMB timers and enable them. */
2814 	CSR_WRITE_4(sc, AGE_CMB_WR_TIMER,
2815 	    ((AGE_USECS(2) << CMB_WR_TIMER_TX_SHIFT) & CMB_WR_TIMER_TX_MASK) |
2816 	    ((AGE_USECS(2) << CMB_WR_TIMER_RX_SHIFT) & CMB_WR_TIMER_RX_MASK));
2817 	/* Request SMB updates every second. */
2818 	CSR_WRITE_4(sc, AGE_SMB_TIMER, AGE_USECS(1000 * 1000));
2819 	CSR_WRITE_4(sc, AGE_CSMB_CTRL, CSMB_CTRL_SMB_ENB | CSMB_CTRL_CMB_ENB);
2820 
2821 	/*
2822 	 * Disable all WOL bits, as WOL can interfere with normal Rx
2823 	 * operation.
2824 	 */
2825 	CSR_WRITE_4(sc, AGE_WOL_CFG, 0);
2826 
2827 	/*
2828 	 * Configure Tx/Rx MACs.
2829 	 *  - Auto-padding for short frames.
2830 	 *  - Enable CRC generation.
2831 	 * Start with full-duplex/1000Mbps media. The MAC is reconfigured
2832 	 * once the link is established.
2833 */ 2834 CSR_WRITE_4(sc, AGE_MAC_CFG, 2835 MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | 2836 MAC_CFG_FULL_DUPLEX | MAC_CFG_SPEED_1000 | 2837 ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) & 2838 MAC_CFG_PREAMBLE_MASK)); 2839 /* Set up the receive filter. */ 2840 age_rxfilter(sc); 2841 age_rxvlan(sc); 2842 2843 reg = CSR_READ_4(sc, AGE_MAC_CFG); 2844 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 2845 reg |= MAC_CFG_RXCSUM_ENB; 2846 2847 /* Ack all pending interrupts and clear it. */ 2848 CSR_WRITE_4(sc, AGE_INTR_STATUS, 0); 2849 CSR_WRITE_4(sc, AGE_INTR_MASK, AGE_INTRS); 2850 2851 /* Finally enable Tx/Rx MAC. */ 2852 CSR_WRITE_4(sc, AGE_MAC_CFG, reg | MAC_CFG_TX_ENB | MAC_CFG_RX_ENB); 2853 2854 sc->age_flags &= ~AGE_FLAG_LINK; 2855 /* Switch to the current media. */ 2856 mii_mediachg(mii); 2857 2858 callout_reset(&sc->age_tick_ch, hz, age_tick, sc); 2859 2860 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2861 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2862 } 2863 2864 static void 2865 age_stop(struct age_softc *sc) 2866 { 2867 struct ifnet *ifp; 2868 struct age_txdesc *txd; 2869 struct age_rxdesc *rxd; 2870 uint32_t reg; 2871 int i; 2872 2873 AGE_LOCK_ASSERT(sc); 2874 /* 2875 * Mark the interface down and cancel the watchdog timer. 2876 */ 2877 ifp = sc->age_ifp; 2878 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 2879 sc->age_flags &= ~AGE_FLAG_LINK; 2880 callout_stop(&sc->age_tick_ch); 2881 sc->age_watchdog_timer = 0; 2882 2883 /* 2884 * Disable interrupts. 2885 */ 2886 CSR_WRITE_4(sc, AGE_INTR_MASK, 0); 2887 CSR_WRITE_4(sc, AGE_INTR_STATUS, 0xFFFFFFFF); 2888 /* Stop CMB/SMB updates. */ 2889 CSR_WRITE_4(sc, AGE_CSMB_CTRL, 0); 2890 /* Stop Rx/Tx MAC. */ 2891 age_stop_rxmac(sc); 2892 age_stop_txmac(sc); 2893 /* Stop DMA. */ 2894 CSR_WRITE_4(sc, AGE_DMA_CFG, 2895 CSR_READ_4(sc, AGE_DMA_CFG) & ~(DMA_CFG_RD_ENB | DMA_CFG_WR_ENB)); 2896 /* Stop TxQ/RxQ. */ 2897 CSR_WRITE_4(sc, AGE_TXQ_CFG, 2898 CSR_READ_4(sc, AGE_TXQ_CFG) & ~TXQ_CFG_ENB); 2899 CSR_WRITE_4(sc, AGE_RXQ_CFG, 2900 CSR_READ_4(sc, AGE_RXQ_CFG) & ~RXQ_CFG_ENB); 2901 for (i = AGE_RESET_TIMEOUT; i > 0; i--) { 2902 if ((reg = CSR_READ_4(sc, AGE_IDLE_STATUS)) == 0) 2903 break; 2904 DELAY(10); 2905 } 2906 if (i == 0) 2907 device_printf(sc->age_dev, 2908 "stopping Rx/Tx MACs timed out(0x%08x)!\n", reg); 2909 2910 /* Reclaim Rx buffers that have been processed. */ 2911 if (sc->age_cdata.age_rxhead != NULL) 2912 m_freem(sc->age_cdata.age_rxhead); 2913 AGE_RXCHAIN_RESET(sc); 2914 /* 2915 * Free RX and TX mbufs still in the queues. 2916 */ 2917 for (i = 0; i < AGE_RX_RING_CNT; i++) { 2918 rxd = &sc->age_cdata.age_rxdesc[i]; 2919 if (rxd->rx_m != NULL) { 2920 bus_dmamap_sync(sc->age_cdata.age_rx_tag, 2921 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 2922 bus_dmamap_unload(sc->age_cdata.age_rx_tag, 2923 rxd->rx_dmamap); 2924 m_freem(rxd->rx_m); 2925 rxd->rx_m = NULL; 2926 } 2927 } 2928 for (i = 0; i < AGE_TX_RING_CNT; i++) { 2929 txd = &sc->age_cdata.age_txdesc[i]; 2930 if (txd->tx_m != NULL) { 2931 bus_dmamap_sync(sc->age_cdata.age_tx_tag, 2932 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 2933 bus_dmamap_unload(sc->age_cdata.age_tx_tag, 2934 txd->tx_dmamap); 2935 m_freem(txd->tx_m); 2936 txd->tx_m = NULL; 2937 } 2938 } 2939 } 2940 2941 static void 2942 age_stop_txmac(struct age_softc *sc) 2943 { 2944 uint32_t reg; 2945 int i; 2946 2947 AGE_LOCK_ASSERT(sc); 2948 2949 reg = CSR_READ_4(sc, AGE_MAC_CFG); 2950 if ((reg & MAC_CFG_TX_ENB) != 0) { 2951 reg &= ~MAC_CFG_TX_ENB; 2952 CSR_WRITE_4(sc, AGE_MAC_CFG, reg); 2953 } 2954 /* Stop Tx DMA engine. 
*/ 2955 reg = CSR_READ_4(sc, AGE_DMA_CFG); 2956 if ((reg & DMA_CFG_RD_ENB) != 0) { 2957 reg &= ~DMA_CFG_RD_ENB; 2958 CSR_WRITE_4(sc, AGE_DMA_CFG, reg); 2959 } 2960 for (i = AGE_RESET_TIMEOUT; i > 0; i--) { 2961 if ((CSR_READ_4(sc, AGE_IDLE_STATUS) & 2962 (IDLE_STATUS_TXMAC | IDLE_STATUS_DMARD)) == 0) 2963 break; 2964 DELAY(10); 2965 } 2966 if (i == 0) 2967 device_printf(sc->age_dev, "stopping TxMAC timeout!\n"); 2968 } 2969 2970 static void 2971 age_stop_rxmac(struct age_softc *sc) 2972 { 2973 uint32_t reg; 2974 int i; 2975 2976 AGE_LOCK_ASSERT(sc); 2977 2978 reg = CSR_READ_4(sc, AGE_MAC_CFG); 2979 if ((reg & MAC_CFG_RX_ENB) != 0) { 2980 reg &= ~MAC_CFG_RX_ENB; 2981 CSR_WRITE_4(sc, AGE_MAC_CFG, reg); 2982 } 2983 /* Stop Rx DMA engine. */ 2984 reg = CSR_READ_4(sc, AGE_DMA_CFG); 2985 if ((reg & DMA_CFG_WR_ENB) != 0) { 2986 reg &= ~DMA_CFG_WR_ENB; 2987 CSR_WRITE_4(sc, AGE_DMA_CFG, reg); 2988 } 2989 for (i = AGE_RESET_TIMEOUT; i > 0; i--) { 2990 if ((CSR_READ_4(sc, AGE_IDLE_STATUS) & 2991 (IDLE_STATUS_RXMAC | IDLE_STATUS_DMAWR)) == 0) 2992 break; 2993 DELAY(10); 2994 } 2995 if (i == 0) 2996 device_printf(sc->age_dev, "stopping RxMAC timeout!\n"); 2997 } 2998 2999 static void 3000 age_init_tx_ring(struct age_softc *sc) 3001 { 3002 struct age_ring_data *rd; 3003 struct age_txdesc *txd; 3004 int i; 3005 3006 AGE_LOCK_ASSERT(sc); 3007 3008 sc->age_cdata.age_tx_prod = 0; 3009 sc->age_cdata.age_tx_cons = 0; 3010 sc->age_cdata.age_tx_cnt = 0; 3011 3012 rd = &sc->age_rdata; 3013 bzero(rd->age_tx_ring, AGE_TX_RING_SZ); 3014 for (i = 0; i < AGE_TX_RING_CNT; i++) { 3015 txd = &sc->age_cdata.age_txdesc[i]; 3016 txd->tx_desc = &rd->age_tx_ring[i]; 3017 txd->tx_m = NULL; 3018 } 3019 3020 bus_dmamap_sync(sc->age_cdata.age_tx_ring_tag, 3021 sc->age_cdata.age_tx_ring_map, 3022 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3023 } 3024 3025 static int 3026 age_init_rx_ring(struct age_softc *sc) 3027 { 3028 struct age_ring_data *rd; 3029 struct age_rxdesc *rxd; 3030 int i; 3031 3032 AGE_LOCK_ASSERT(sc); 3033 3034 sc->age_cdata.age_rx_cons = AGE_RX_RING_CNT - 1; 3035 sc->age_morework = 0; 3036 rd = &sc->age_rdata; 3037 bzero(rd->age_rx_ring, AGE_RX_RING_SZ); 3038 for (i = 0; i < AGE_RX_RING_CNT; i++) { 3039 rxd = &sc->age_cdata.age_rxdesc[i]; 3040 rxd->rx_m = NULL; 3041 rxd->rx_desc = &rd->age_rx_ring[i]; 3042 if (age_newbuf(sc, rxd) != 0) 3043 return (ENOBUFS); 3044 } 3045 3046 bus_dmamap_sync(sc->age_cdata.age_rx_ring_tag, 3047 sc->age_cdata.age_rx_ring_map, 3048 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3049 3050 return (0); 3051 } 3052 3053 static void 3054 age_init_rr_ring(struct age_softc *sc) 3055 { 3056 struct age_ring_data *rd; 3057 3058 AGE_LOCK_ASSERT(sc); 3059 3060 sc->age_cdata.age_rr_cons = 0; 3061 AGE_RXCHAIN_RESET(sc); 3062 3063 rd = &sc->age_rdata; 3064 bzero(rd->age_rr_ring, AGE_RR_RING_SZ); 3065 bus_dmamap_sync(sc->age_cdata.age_rr_ring_tag, 3066 sc->age_cdata.age_rr_ring_map, 3067 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3068 } 3069 3070 static void 3071 age_init_cmb_block(struct age_softc *sc) 3072 { 3073 struct age_ring_data *rd; 3074 3075 AGE_LOCK_ASSERT(sc); 3076 3077 rd = &sc->age_rdata; 3078 bzero(rd->age_cmb_block, AGE_CMB_BLOCK_SZ); 3079 bus_dmamap_sync(sc->age_cdata.age_cmb_block_tag, 3080 sc->age_cdata.age_cmb_block_map, 3081 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3082 } 3083 3084 static void 3085 age_init_smb_block(struct age_softc *sc) 3086 { 3087 struct age_ring_data *rd; 3088 3089 AGE_LOCK_ASSERT(sc); 3090 3091 rd = &sc->age_rdata; 3092 bzero(rd->age_smb_block, 
AGE_SMB_BLOCK_SZ); 3093 bus_dmamap_sync(sc->age_cdata.age_smb_block_tag, 3094 sc->age_cdata.age_smb_block_map, 3095 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3096 } 3097 3098 static int 3099 age_newbuf(struct age_softc *sc, struct age_rxdesc *rxd) 3100 { 3101 struct rx_desc *desc; 3102 struct mbuf *m; 3103 bus_dma_segment_t segs[1]; 3104 bus_dmamap_t map; 3105 int nsegs; 3106 3107 AGE_LOCK_ASSERT(sc); 3108 3109 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 3110 if (m == NULL) 3111 return (ENOBUFS); 3112 m->m_len = m->m_pkthdr.len = MCLBYTES; 3113 m_adj(m, ETHER_ALIGN); 3114 3115 if (bus_dmamap_load_mbuf_sg(sc->age_cdata.age_rx_tag, 3116 sc->age_cdata.age_rx_sparemap, m, segs, &nsegs, 0) != 0) { 3117 m_freem(m); 3118 return (ENOBUFS); 3119 } 3120 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 3121 3122 if (rxd->rx_m != NULL) { 3123 bus_dmamap_sync(sc->age_cdata.age_rx_tag, rxd->rx_dmamap, 3124 BUS_DMASYNC_POSTREAD); 3125 bus_dmamap_unload(sc->age_cdata.age_rx_tag, rxd->rx_dmamap); 3126 } 3127 map = rxd->rx_dmamap; 3128 rxd->rx_dmamap = sc->age_cdata.age_rx_sparemap; 3129 sc->age_cdata.age_rx_sparemap = map; 3130 bus_dmamap_sync(sc->age_cdata.age_rx_tag, rxd->rx_dmamap, 3131 BUS_DMASYNC_PREREAD); 3132 rxd->rx_m = m; 3133 3134 desc = rxd->rx_desc; 3135 desc->addr = htole64(segs[0].ds_addr); 3136 desc->len = htole32((segs[0].ds_len & AGE_RD_LEN_MASK) << 3137 AGE_RD_LEN_SHIFT); 3138 return (0); 3139 } 3140 3141 static void 3142 age_rxvlan(struct age_softc *sc) 3143 { 3144 struct ifnet *ifp; 3145 uint32_t reg; 3146 3147 AGE_LOCK_ASSERT(sc); 3148 3149 ifp = sc->age_ifp; 3150 reg = CSR_READ_4(sc, AGE_MAC_CFG); 3151 reg &= ~MAC_CFG_VLAN_TAG_STRIP; 3152 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 3153 reg |= MAC_CFG_VLAN_TAG_STRIP; 3154 CSR_WRITE_4(sc, AGE_MAC_CFG, reg); 3155 } 3156 3157 static void 3158 age_rxfilter(struct age_softc *sc) 3159 { 3160 struct ifnet *ifp; 3161 struct ifmultiaddr *ifma; 3162 uint32_t crc; 3163 uint32_t mchash[2]; 3164 uint32_t rxcfg; 3165 3166 AGE_LOCK_ASSERT(sc); 3167 3168 ifp = sc->age_ifp; 3169 3170 rxcfg = CSR_READ_4(sc, AGE_MAC_CFG); 3171 rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC); 3172 if ((ifp->if_flags & IFF_BROADCAST) != 0) 3173 rxcfg |= MAC_CFG_BCAST; 3174 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { 3175 if ((ifp->if_flags & IFF_PROMISC) != 0) 3176 rxcfg |= MAC_CFG_PROMISC; 3177 if ((ifp->if_flags & IFF_ALLMULTI) != 0) 3178 rxcfg |= MAC_CFG_ALLMULTI; 3179 CSR_WRITE_4(sc, AGE_MAR0, 0xFFFFFFFF); 3180 CSR_WRITE_4(sc, AGE_MAR1, 0xFFFFFFFF); 3181 CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg); 3182 return; 3183 } 3184 3185 /* Program new filter. 
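*/
/*
 * The loop below hashes each multicast address with ether_crc32_le();
 * bit 31 of the CRC selects AGE_MAR0 or AGE_MAR1 and bits 30:26 select
 * the bit within the chosen register. A self-contained sketch of the
 * same mapping (illustrative only; age_mchash_example() is not part of
 * this driver):
 */
#if 0
static void
age_mchash_example(void)
{
	/* The all-hosts group 01:00:5e:00:00:01 as a sample address. */
	static const uint8_t maddr[ETHER_ADDR_LEN] =
	    { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t crc, mchash[2] = { 0, 0 };

	crc = ether_crc32_le(maddr, ETHER_ADDR_LEN);
	mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
	/* mchash[0] would be written to AGE_MAR0, mchash[1] to AGE_MAR1. */
}
#endif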
3186 	bzero(mchash, sizeof(mchash));
3187 
3188 	IF_ADDR_LOCK(ifp);
3189 	TAILQ_FOREACH(ifma, &sc->age_ifp->if_multiaddrs, ifma_link) {
3190 		if (ifma->ifma_addr->sa_family != AF_LINK)
3191 			continue;
3192 		crc = ether_crc32_le(LLADDR((struct sockaddr_dl *)
3193 		    ifma->ifma_addr), ETHER_ADDR_LEN);
3194 		mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
3195 	}
3196 	IF_ADDR_UNLOCK(ifp);
3197 
3198 	CSR_WRITE_4(sc, AGE_MAR0, mchash[0]);
3199 	CSR_WRITE_4(sc, AGE_MAR1, mchash[1]);
3200 	CSR_WRITE_4(sc, AGE_MAC_CFG, rxcfg);
3201 }
3202 
3203 static int
3204 sysctl_age_stats(SYSCTL_HANDLER_ARGS)
3205 {
3206 	struct age_softc *sc;
3207 	struct age_stats *stats;
3208 	int error, result;
3209 
3210 	result = -1;
3211 	error = sysctl_handle_int(oidp, &result, 0, req);
3212 
3213 	if (error != 0 || req->newptr == NULL)
3214 		return (error);
3215 
3216 	if (result != 1)
3217 		return (error);
3218 
3219 	sc = (struct age_softc *)arg1;
3220 	stats = &sc->age_stat;
3221 	printf("%s statistics:\n", device_get_nameunit(sc->age_dev));
3222 	printf("Transmit good frames : %ju\n",
3223 	    (uintmax_t)stats->tx_frames);
3224 	printf("Transmit good broadcast frames : %ju\n",
3225 	    (uintmax_t)stats->tx_bcast_frames);
3226 	printf("Transmit good multicast frames : %ju\n",
3227 	    (uintmax_t)stats->tx_mcast_frames);
3228 	printf("Transmit pause control frames : %u\n",
3229 	    stats->tx_pause_frames);
3230 	printf("Transmit control frames : %u\n",
3231 	    stats->tx_control_frames);
3232 	printf("Transmit frames with excessive deferrals : %u\n",
3233 	    stats->tx_excess_defer);
3234 	printf("Transmit deferrals : %u\n",
3235 	    stats->tx_deferred);
3236 	printf("Transmit good octets : %ju\n",
3237 	    (uintmax_t)stats->tx_bytes);
3238 	printf("Transmit good broadcast octets : %ju\n",
3239 	    (uintmax_t)stats->tx_bcast_bytes);
3240 	printf("Transmit good multicast octets : %ju\n",
3241 	    (uintmax_t)stats->tx_mcast_bytes);
3242 	printf("Transmit frames 64 bytes : %ju\n",
3243 	    (uintmax_t)stats->tx_pkts_64);
3244 	printf("Transmit frames 65 to 127 bytes : %ju\n",
3245 	    (uintmax_t)stats->tx_pkts_65_127);
3246 	printf("Transmit frames 128 to 255 bytes : %ju\n",
3247 	    (uintmax_t)stats->tx_pkts_128_255);
3248 	printf("Transmit frames 256 to 511 bytes : %ju\n",
3249 	    (uintmax_t)stats->tx_pkts_256_511);
3250 	printf("Transmit frames 512 to 1023 bytes : %ju\n",
3251 	    (uintmax_t)stats->tx_pkts_512_1023);
3252 	printf("Transmit frames 1024 to 1518 bytes : %ju\n",
3253 	    (uintmax_t)stats->tx_pkts_1024_1518);
3254 	printf("Transmit frames 1519 to MTU bytes : %ju\n",
3255 	    (uintmax_t)stats->tx_pkts_1519_max);
3256 	printf("Transmit single collisions : %u\n",
3257 	    stats->tx_single_colls);
3258 	printf("Transmit multiple collisions : %u\n",
3259 	    stats->tx_multi_colls);
3260 	printf("Transmit late collisions : %u\n",
3261 	    stats->tx_late_colls);
3262 	printf("Transmit aborts due to excessive collisions : %u\n",
3263 	    stats->tx_excess_colls);
3264 	printf("Transmit underruns due to FIFO underruns : %u\n",
3265 	    stats->tx_underrun);
3266 	printf("Transmit descriptor write-back errors : %u\n",
3267 	    stats->tx_desc_underrun);
3268 	printf("Transmit frames with length mismatched frame size : %u\n",
3269 	    stats->tx_lenerrs);
3270 	printf("Transmit frames truncated due to MTU size : %u\n",
3271 	    stats->tx_pkts_truncated);
3272 
3273 	printf("Receive good frames : %ju\n",
3274 	    (uintmax_t)stats->rx_frames);
3275 	printf("Receive good broadcast frames : %ju\n",
3276 	    (uintmax_t)stats->rx_bcast_frames);
3277 	printf("Receive good multicast frames : %ju\n",
3278 	    (uintmax_t)stats->rx_mcast_frames);
3279 	printf("Receive pause control frames : %u\n",
3280 	    stats->rx_pause_frames);
3281 	printf("Receive control frames : %u\n",
3282 	    stats->rx_control_frames);
3283 	printf("Receive CRC errors : %u\n",
3284 	    stats->rx_crcerrs);
3285 	printf("Receive frames with length errors : %u\n",
3286 	    stats->rx_lenerrs);
3287 	printf("Receive good octets : %ju\n",
3288 	    (uintmax_t)stats->rx_bytes);
3289 	printf("Receive good broadcast octets : %ju\n",
3290 	    (uintmax_t)stats->rx_bcast_bytes);
3291 	printf("Receive good multicast octets : %ju\n",
3292 	    (uintmax_t)stats->rx_mcast_bytes);
3293 	printf("Receive frames too short : %u\n",
3294 	    stats->rx_runts);
3295 	printf("Receive fragmented frames : %ju\n",
3296 	    (uintmax_t)stats->rx_fragments);
3297 	printf("Receive frames 64 bytes : %ju\n",
3298 	    (uintmax_t)stats->rx_pkts_64);
3299 	printf("Receive frames 65 to 127 bytes : %ju\n",
3300 	    (uintmax_t)stats->rx_pkts_65_127);
3301 	printf("Receive frames 128 to 255 bytes : %ju\n",
3302 	    (uintmax_t)stats->rx_pkts_128_255);
3303 	printf("Receive frames 256 to 511 bytes : %ju\n",
3304 	    (uintmax_t)stats->rx_pkts_256_511);
3305 	printf("Receive frames 512 to 1023 bytes : %ju\n",
3306 	    (uintmax_t)stats->rx_pkts_512_1023);
3307 	printf("Receive frames 1024 to 1518 bytes : %ju\n",
3308 	    (uintmax_t)stats->rx_pkts_1024_1518);
3309 	printf("Receive frames 1519 to MTU bytes : %ju\n",
3310 	    (uintmax_t)stats->rx_pkts_1519_max);
3311 	printf("Receive frames too long : %ju\n",
3312 	    (uintmax_t)stats->rx_pkts_truncated);
3313 	printf("Receive frames with FIFO overflow : %u\n",
3314 	    stats->rx_fifo_oflows);
3315 	printf("Receive frames with return descriptor overflow : %u\n",
3316 	    stats->rx_desc_oflows);
3317 	printf("Receive frames with alignment errors : %u\n",
3318 	    stats->rx_alignerrs);
3319 	printf("Receive frames dropped due to address filtering : %ju\n",
3320 	    (uintmax_t)stats->rx_pkts_filtered);
3321 
3322 	return (error);
3323 }
3324 
3325 static int
3326 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
3327 {
3328 	int error, value;
3329 
3330 	if (arg1 == NULL)
3331 		return (EINVAL);
3332 	value = *(int *)arg1;
3333 	error = sysctl_handle_int(oidp, &value, 0, req);
3334 	if (error || req->newptr == NULL)
3335 		return (error);
3336 	if (value < low || value > high)
3337 		return (EINVAL);
3338 	*(int *)arg1 = value;
3339 
3340 	return (0);
3341 }
3342 
3343 static int
3344 sysctl_hw_age_proc_limit(SYSCTL_HANDLER_ARGS)
3345 {
3346 	return (sysctl_int_range(oidp, arg1, arg2, req,
3347 	    AGE_PROC_MIN, AGE_PROC_MAX));
3348 }
3349 
3350 static int
3351 sysctl_hw_age_int_mod(SYSCTL_HANDLER_ARGS)
3352 {
3353 
3354 	return (sysctl_int_range(oidp, arg1, arg2, req, AGE_IM_TIMER_MIN,
3355 	    AGE_IM_TIMER_MAX));
3356 }
3357 
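/*
 * Usage note: the three handlers above are wired into the sysctl tree
 * by age_sysctl_node() (declared near the top of this file). A minimal
 * sketch of that registration follows; the OID names and descriptions
 * are assumptions for illustration, not a verbatim copy of
 * age_sysctl_node(). With a "stats" OID registered this way, the
 * statistics block is dumped to the console with:
 *
 *	# sysctl dev.age.0.stats=1
 */
#if 0
static void
age_sysctl_node_sketch(struct age_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;

	ctx = device_get_sysctl_ctx(sc->age_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->age_dev));

	/* Writing 1 dumps the accumulated MAC statistics. */
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "stats",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_age_stats, "I",
	    "age statistics");
	/* Bounded by AGE_PROC_MIN/AGE_PROC_MAX via sysctl_int_range(). */
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "process_limit",
	    CTLTYPE_INT | CTLFLAG_RW, &sc->age_process_limit, 0,
	    sysctl_hw_age_proc_limit, "I",
	    "max number of Rx frames to process per interrupt");
}
#endif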