/*-
 * Copyright (c) 2008 Stanislav Sedov <stas@FreeBSD.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Driver for Attansic Technology Corp. L2 FastEthernet adapter.
 *
 * This driver is heavily based on age(4) Attansic L1 driver by Pyun YongHyeon.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>

#include "miibus_if.h"

#include "if_aereg.h"
#include "if_aevar.h"
/*
 * Devices supported by this driver.
 */
static struct ae_dev {
	uint16_t	vendorid;
	uint16_t	deviceid;
	const char	*name;
} ae_devs[] = {
	{ VENDORID_ATTANSIC, DEVICEID_ATTANSIC_L2,
		"Attansic Technology Corp, L2 FastEthernet" },
};
#define	AE_DEVS_COUNT (sizeof(ae_devs) / sizeof(*ae_devs))

static struct resource_spec ae_res_spec_mem[] = {
	{ SYS_RES_MEMORY,	PCIR_BAR(0),	RF_ACTIVE },
	{ -1,			0,		0 }
};
static struct resource_spec ae_res_spec_irq[] = {
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,		0 }
};
static struct resource_spec ae_res_spec_msi[] = {
	{ SYS_RES_IRQ,		1,		RF_ACTIVE },
	{ -1,			0,		0 }
};

static int	ae_probe(device_t dev);
static int	ae_attach(device_t dev);
static void	ae_pcie_init(ae_softc_t *sc);
static void	ae_phy_reset(ae_softc_t *sc);
static void	ae_phy_init(ae_softc_t *sc);
static int	ae_reset(ae_softc_t *sc);
static void	ae_init(void *arg);
static int	ae_init_locked(ae_softc_t *sc);
static int	ae_detach(device_t dev);
static int	ae_miibus_readreg(device_t dev, int phy, int reg);
static int	ae_miibus_writereg(device_t dev, int phy, int reg, int val);
static void	ae_miibus_statchg(device_t dev);
static void	ae_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr);
static int	ae_mediachange(struct ifnet *ifp);
static void	ae_retrieve_address(ae_softc_t *sc);
static void	ae_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs,
    int error);
static int	ae_alloc_rings(ae_softc_t *sc);
static void	ae_dma_free(ae_softc_t *sc);
static int	ae_shutdown(device_t dev);
static int	ae_suspend(device_t dev);
static void	ae_powersave_disable(ae_softc_t *sc);
static void	ae_powersave_enable(ae_softc_t *sc);
static int	ae_resume(device_t dev);
static unsigned int	ae_tx_avail_size(ae_softc_t *sc);
static int	ae_encap(ae_softc_t *sc, struct mbuf **m_head);
static void	ae_start(struct ifnet *ifp);
static void	ae_link_task(void *arg, int pending);
static void	ae_stop_rxmac(ae_softc_t *sc);
static void	ae_stop_txmac(ae_softc_t *sc);
static void	ae_tx_task(void *arg, int pending);
static void	ae_mac_config(ae_softc_t *sc);
static int	ae_intr(void *arg);
static void	ae_int_task(void *arg, int pending);
static void	ae_tx_intr(ae_softc_t *sc);
static int	ae_rxeof(ae_softc_t *sc, ae_rxd_t *rxd);
static void	ae_rx_intr(ae_softc_t *sc);
static void	ae_watchdog(ae_softc_t *sc);
static void	ae_tick(void *arg);
static void	ae_rxfilter(ae_softc_t *sc);
static void	ae_rxvlan(ae_softc_t *sc);
static int	ae_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static void	ae_stop(ae_softc_t *sc);
static int	ae_check_eeprom_present(ae_softc_t *sc, int *vpdc);
static int	ae_vpd_read_word(ae_softc_t *sc, int reg, uint32_t *word);
static int	ae_get_vpd_eaddr(ae_softc_t *sc, uint32_t *eaddr);
static int	ae_get_reg_eaddr(ae_softc_t *sc, uint32_t *eaddr);
static void	ae_update_stats_rx(uint16_t flags, ae_stats_t *stats);
static void	ae_update_stats_tx(uint16_t flags, ae_stats_t *stats);
static void	ae_init_tunables(ae_softc_t *sc);

static device_method_t ae_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		ae_probe),
	DEVMETHOD(device_attach,	ae_attach),
	DEVMETHOD(device_detach,	ae_detach),
	DEVMETHOD(device_shutdown,	ae_shutdown),
	DEVMETHOD(device_suspend,	ae_suspend),
	DEVMETHOD(device_resume,	ae_resume),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	ae_miibus_readreg),
	DEVMETHOD(miibus_writereg,	ae_miibus_writereg),
	DEVMETHOD(miibus_statchg,	ae_miibus_statchg),

	{ NULL, NULL }
};
static driver_t ae_driver = {
	"ae",
	ae_methods,
	sizeof(ae_softc_t)
};
static devclass_t ae_devclass;

DRIVER_MODULE(ae, pci, ae_driver, ae_devclass, 0, 0);
DRIVER_MODULE(miibus, ae, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(ae, pci, 1, 1, 1);
MODULE_DEPEND(ae, ether, 1, 1, 1);
MODULE_DEPEND(ae, miibus, 1, 1, 1);

/*
 * Tunables.
 */
static int msi_disable = 0;
TUNABLE_INT("hw.ae.msi_disable", &msi_disable);

#define	AE_READ_4(sc, reg) \
	bus_read_4((sc)->mem[0], (reg))
#define	AE_READ_2(sc, reg) \
	bus_read_2((sc)->mem[0], (reg))
#define	AE_READ_1(sc, reg) \
	bus_read_1((sc)->mem[0], (reg))
#define	AE_WRITE_4(sc, reg, val) \
	bus_write_4((sc)->mem[0], (reg), (val))
#define	AE_WRITE_2(sc, reg, val) \
	bus_write_2((sc)->mem[0], (reg), (val))
#define	AE_WRITE_1(sc, reg, val) \
	bus_write_1((sc)->mem[0], (reg), (val))
#define	AE_PHY_READ(sc, reg) \
	ae_miibus_readreg(sc->dev, 0, reg)
#define	AE_PHY_WRITE(sc, reg, val) \
	ae_miibus_writereg(sc->dev, 0, reg, val)
#define	AE_CHECK_EADDR_VALID(eaddr) \
	((eaddr[0] == 0 && eaddr[1] == 0) || \
	 (eaddr[0] == 0xffffffff && eaddr[1] == 0xffff))
#define	AE_RXD_VLAN(vtag) \
	(((vtag) >> 4) | (((vtag) & 0x07) << 13) | (((vtag) & 0x08) << 9))
#define	AE_TXD_VLAN(vtag) \
	(((vtag) << 4) | (((vtag) >> 13) & 0x07) | (((vtag) >> 9) & 0x08))
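
/*
 * Editorial note on the two macros above: the chip appears to keep the
 * 802.1Q TCI in its descriptors with the VID shifted up by four bits and
 * the priority (TCI bits 15-13) and CFI (TCI bit 12) fields relocated
 * into the low nibble.  The macros are exact inverses of each other:
 * AE_RXD_VLAN(AE_TXD_VLAN(x)) == x for any 16-bit tag.
 */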
/*
 * ae statistics.
 */
#define	STATS_ENTRY(node, desc, field) \
	{ node, desc, offsetof(struct ae_stats, field) }
struct {
	const char	*node;
	const char	*desc;
	intptr_t	offset;
} ae_stats_tx[] = {
	STATS_ENTRY("bcast", "broadcast frames", tx_bcast),
	STATS_ENTRY("mcast", "multicast frames", tx_mcast),
	STATS_ENTRY("pause", "PAUSE frames", tx_pause),
	STATS_ENTRY("control", "control frames", tx_ctrl),
	STATS_ENTRY("defers", "deferrals occurred", tx_defer),
	STATS_ENTRY("exc_defers", "excessive deferrals occurred", tx_excdefer),
	STATS_ENTRY("singlecols", "single collisions occurred", tx_singlecol),
	STATS_ENTRY("multicols", "multiple collisions occurred", tx_multicol),
	STATS_ENTRY("latecols", "late collisions occurred", tx_latecol),
	STATS_ENTRY("aborts", "transmit aborts due to collisions", tx_abortcol),
	STATS_ENTRY("underruns", "Tx FIFO underruns", tx_underrun)
}, ae_stats_rx[] = {
	STATS_ENTRY("bcast", "broadcast frames", rx_bcast),
	STATS_ENTRY("mcast", "multicast frames", rx_mcast),
	STATS_ENTRY("pause", "PAUSE frames", rx_pause),
	STATS_ENTRY("control", "control frames", rx_ctrl),
	STATS_ENTRY("crc_errors", "frames with CRC errors", rx_crcerr),
	STATS_ENTRY("code_errors", "frames with invalid opcode", rx_codeerr),
	STATS_ENTRY("runt", "runt frames", rx_runt),
	STATS_ENTRY("frag", "fragmented frames", rx_frag),
	STATS_ENTRY("align_errors", "frames with alignment errors", rx_align),
	STATS_ENTRY("truncated", "frames truncated due to Rx FIFO overflow",
	    rx_trunc)
};
#define	AE_STATS_RX_LEN (sizeof(ae_stats_rx) / sizeof(*ae_stats_rx))
#define	AE_STATS_TX_LEN (sizeof(ae_stats_tx) / sizeof(*ae_stats_tx))

static int
ae_probe(device_t dev)
{
	uint16_t deviceid, vendorid;
	int i;

	vendorid = pci_get_vendor(dev);
	deviceid = pci_get_device(dev);

	/*
	 * Search through the list of supported devs for matching one.
	 */
	for (i = 0; i < AE_DEVS_COUNT; i++) {
		if (vendorid == ae_devs[i].vendorid &&
		    deviceid == ae_devs[i].deviceid) {
			device_set_desc(dev, ae_devs[i].name);
			return (BUS_PROBE_DEFAULT);
		}
	}
	return (ENXIO);
}

static int
ae_attach(device_t dev)
{
	ae_softc_t *sc;
	struct ifnet *ifp;
	uint8_t chiprev;
	uint32_t pcirev;
	int nmsi, pmc;
	int error;

	/* Softc is automatically allocated and zeroed on attach. */
	sc = device_get_softc(dev);
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
	sc->dev = dev;

	/*
	 * Initialize mutexes and tasks.
	 */
	mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
	callout_init_mtx(&sc->tick_ch, &sc->mtx, 0);
	TASK_INIT(&sc->int_task, 0, ae_int_task, sc);
	TASK_INIT(&sc->link_task, 0, ae_link_task, sc);

	pci_enable_busmaster(dev);	/* Enable bus mastering. */

	sc->spec_mem = ae_res_spec_mem;

	/*
	 * Allocate memory-mapped registers.
	 */
	error = bus_alloc_resources(dev, sc->spec_mem, sc->mem);
	if (error != 0) {
		device_printf(dev, "could not allocate memory resources.\n");
		sc->spec_mem = NULL;
		goto fail;
	}

	/*
	 * Retrieve PCI and chip revisions.
	 */
	pcirev = pci_get_revid(dev);
	chiprev = (AE_READ_4(sc, AE_MASTER_REG) >> AE_MASTER_REVNUM_SHIFT) &
	    AE_MASTER_REVNUM_MASK;
	if (bootverbose) {
		device_printf(dev, "pci device revision: %#04x\n", pcirev);
		device_printf(dev, "chip id: %#02x\n", chiprev);
	}
	nmsi = pci_msi_count(dev);
	if (bootverbose)
		device_printf(dev, "MSI count: %d.\n", nmsi);

	/*
	 * Allocate interrupt resources.
	 */
	if (msi_disable == 0 && nmsi == 1) {
		error = pci_alloc_msi(dev, &nmsi);
		if (error == 0) {
			device_printf(dev, "Using MSI messages.\n");
			sc->spec_irq = ae_res_spec_msi;
			error = bus_alloc_resources(dev, sc->spec_irq, sc->irq);
			if (error != 0) {
				device_printf(dev, "MSI allocation failed.\n");
				sc->spec_irq = NULL;
				pci_release_msi(dev);
			} else {
				sc->flags |= AE_FLAG_MSI;
			}
		}
	}
	if (sc->spec_irq == NULL) {
		sc->spec_irq = ae_res_spec_irq;
		error = bus_alloc_resources(dev, sc->spec_irq, sc->irq);
		if (error != 0) {
			device_printf(dev, "could not allocate IRQ resources.\n");
			sc->spec_irq = NULL;
			goto fail;
		}
	}
	ae_init_tunables(sc);

	ae_phy_reset(sc);		/* Reset PHY. */
	error = ae_reset(sc);		/* Reset the controller itself. */
	if (error != 0)
		goto fail;

	ae_pcie_init(sc);

	ae_retrieve_address(sc);	/* Load MAC address. */

	error = ae_alloc_rings(sc);	/* Allocate ring buffers. */
	if (error != 0)
		goto fail;

	/* Set default PHY address. */
	sc->phyaddr = AE_PHYADDR_DEFAULT;

	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "could not allocate ifnet structure.\n");
		error = ENXIO;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ae_ioctl;
	ifp->if_start = ae_start;
	ifp->if_init = ae_init;
	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
	ifp->if_hwassist = 0;
	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);
	if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0) {
		ifp->if_capabilities |= IFCAP_WOL_MAGIC;
		sc->flags |= AE_FLAG_PMG;
	}
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Configure and attach MII bus.
	 */
	error = mii_phy_probe(dev, &sc->miibus, ae_mediachange,
	    ae_mediastatus);
	if (error != 0) {
		device_printf(dev, "no PHY found.\n");
		goto fail;
	}

	ether_ifattach(ifp, sc->eaddr);
	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

	/*
	 * Create and run all helper tasks.
	 */
	TASK_INIT(&sc->tx_task, 1, ae_tx_task, ifp);
	sc->tq = taskqueue_create_fast("ae_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->tq);
	if (sc->tq == NULL) {
		device_printf(dev, "could not create taskqueue.\n");
		ether_ifdetach(ifp);
		error = ENXIO;
		goto fail;
	}
	taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->dev));

	/*
	 * Configure interrupt handlers.
	 */
	error = bus_setup_intr(dev, sc->irq[0], INTR_TYPE_NET | INTR_MPSAFE,
	    ae_intr, NULL, sc, &sc->intrhand);
	if (error != 0) {
		device_printf(dev, "could not set up interrupt handler.\n");
		taskqueue_free(sc->tq);
		sc->tq = NULL;
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error != 0)
		ae_detach(dev);

	return (error);
}
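
/*
 * For reference (not part of the original sources): the counters
 * registered by ae_init_tunables() below land in the device's sysctl
 * tree and can be read from userland, e.g. for unit 0:
 *
 *	sysctl dev.ae.0.stats.rx.crc_errors
 *	sysctl dev.ae.0.stats.tx.underruns
 */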
static void
ae_init_tunables(ae_softc_t *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *root, *stats, *stats_rx, *stats_tx;
	struct ae_stats *ae_stats;
	unsigned int i;

	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
	ae_stats = &sc->stats;

	ctx = device_get_sysctl_ctx(sc->dev);
	root = device_get_sysctl_tree(sc->dev);
	stats = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(root), OID_AUTO, "stats",
	    CTLFLAG_RD, NULL, "ae statistics");

	/*
	 * Receiver statistics.
	 */
	stats_rx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx",
	    CTLFLAG_RD, NULL, "Rx MAC statistics");
	for (i = 0; i < AE_STATS_RX_LEN; i++)
		SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(stats_rx), OID_AUTO,
		    ae_stats_rx[i].node, CTLFLAG_RD, (char *)ae_stats +
		    ae_stats_rx[i].offset, 0, ae_stats_rx[i].desc);

	/*
	 * Transmitter statistics.
	 */
	stats_tx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "tx",
	    CTLFLAG_RD, NULL, "Tx MAC statistics");
	for (i = 0; i < AE_STATS_TX_LEN; i++)
		SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(stats_tx), OID_AUTO,
		    ae_stats_tx[i].node, CTLFLAG_RD, (char *)ae_stats +
		    ae_stats_tx[i].offset, 0, ae_stats_tx[i].desc);
}
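
/*
 * Editorial note: the helpers below make up the controller bring-up
 * path.  ae_phy_reset()/ae_reset()/ae_pcie_init() run once at attach
 * time, and ae_init_locked() replays the reset and PCIE setup on every
 * interface (re)initialization.
 */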
static void
ae_pcie_init(ae_softc_t *sc)
{

	AE_WRITE_4(sc, AE_PCIE_LTSSM_TESTMODE_REG, AE_PCIE_LTSSM_TESTMODE_DEFAULT);
	AE_WRITE_4(sc, AE_PCIE_DLL_TX_CTRL_REG, AE_PCIE_DLL_TX_CTRL_DEFAULT);
}

static void
ae_phy_reset(ae_softc_t *sc)
{

	AE_WRITE_4(sc, AE_PHY_ENABLE_REG, AE_PHY_ENABLE);
	DELAY(1000);	/* XXX: pause(9) ? */
}

static int
ae_reset(ae_softc_t *sc)
{
	int i;

	/*
	 * Issue a soft reset.
	 */
	AE_WRITE_4(sc, AE_MASTER_REG, AE_MASTER_SOFT_RESET);
	bus_barrier(sc->mem[0], AE_MASTER_REG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);

	/*
	 * Wait for reset to complete.
	 */
	for (i = 0; i < AE_RESET_TIMEOUT; i++) {
		if ((AE_READ_4(sc, AE_MASTER_REG) & AE_MASTER_SOFT_RESET) == 0)
			break;
		DELAY(10);
	}
	if (i == AE_RESET_TIMEOUT) {
		device_printf(sc->dev, "reset timeout.\n");
		return (ENXIO);
	}

	/*
	 * Wait for everything to enter idle state.
	 */
	for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
		if (AE_READ_4(sc, AE_IDLE_REG) == 0)
			break;
		DELAY(100);
	}
	if (i == AE_IDLE_TIMEOUT) {
		device_printf(sc->dev, "could not enter idle state.\n");
		return (ENXIO);
	}
	return (0);
}

static void
ae_init(void *arg)
{
	ae_softc_t *sc;

	sc = (ae_softc_t *)arg;
	AE_LOCK(sc);
	ae_init_locked(sc);
	AE_UNLOCK(sc);
}

static void
ae_phy_init(ae_softc_t *sc)
{

	/*
	 * Enable link status change interrupt.
	 * XXX magic numbers.
	 */
#ifdef notyet
	AE_PHY_WRITE(sc, 18, 0xc00);
#endif
}

static int
ae_init_locked(ae_softc_t *sc)
{
	struct ifnet *ifp;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	uint32_t val;
	bus_addr_t addr;

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	mii = device_get_softc(sc->miibus);

	ae_stop(sc);
	ae_reset(sc);
	ae_pcie_init(sc);	/* Initialize PCIE stuff. */
	ae_phy_init(sc);
	ae_powersave_disable(sc);

	/*
	 * Clear and disable interrupts.
	 */
	AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff);

	/*
	 * Set the MAC address.
	 */
	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
	val = eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5];
	AE_WRITE_4(sc, AE_EADDR0_REG, val);
	val = eaddr[0] << 8 | eaddr[1];
	AE_WRITE_4(sc, AE_EADDR1_REG, val);

	/*
	 * Set ring buffers base addresses.
	 */
	addr = sc->dma_rxd_busaddr;
	AE_WRITE_4(sc, AE_DESC_ADDR_HI_REG, BUS_ADDR_HI(addr));
	AE_WRITE_4(sc, AE_RXD_ADDR_LO_REG, BUS_ADDR_LO(addr));
	addr = sc->dma_txd_busaddr;
	AE_WRITE_4(sc, AE_TXD_ADDR_LO_REG, BUS_ADDR_LO(addr));
	addr = sc->dma_txs_busaddr;
	AE_WRITE_4(sc, AE_TXS_ADDR_LO_REG, BUS_ADDR_LO(addr));

	/*
	 * Configure ring buffers sizes.
	 */
	AE_WRITE_2(sc, AE_RXD_COUNT_REG, AE_RXD_COUNT_DEFAULT);
	AE_WRITE_2(sc, AE_TXD_BUFSIZE_REG, AE_TXD_BUFSIZE_DEFAULT / 4);
	AE_WRITE_2(sc, AE_TXS_COUNT_REG, AE_TXS_COUNT_DEFAULT);

	/*
	 * Configure interframe gap parameters.
	 */
	val = ((AE_IFG_TXIPG_DEFAULT << AE_IFG_TXIPG_SHIFT) &
	    AE_IFG_TXIPG_MASK) |
	    ((AE_IFG_RXIPG_DEFAULT << AE_IFG_RXIPG_SHIFT) &
	    AE_IFG_RXIPG_MASK) |
	    ((AE_IFG_IPGR1_DEFAULT << AE_IFG_IPGR1_SHIFT) &
	    AE_IFG_IPGR1_MASK) |
	    ((AE_IFG_IPGR2_DEFAULT << AE_IFG_IPGR2_SHIFT) &
	    AE_IFG_IPGR2_MASK);
	AE_WRITE_4(sc, AE_IFG_REG, val);

	/*
	 * Configure half-duplex operation.
	 */
	val = ((AE_HDPX_LCOL_DEFAULT << AE_HDPX_LCOL_SHIFT) &
	    AE_HDPX_LCOL_MASK) |
	    ((AE_HDPX_RETRY_DEFAULT << AE_HDPX_RETRY_SHIFT) &
	    AE_HDPX_RETRY_MASK) |
	    ((AE_HDPX_ABEBT_DEFAULT << AE_HDPX_ABEBT_SHIFT) &
	    AE_HDPX_ABEBT_MASK) |
	    ((AE_HDPX_JAMIPG_DEFAULT << AE_HDPX_JAMIPG_SHIFT) &
	    AE_HDPX_JAMIPG_MASK) | AE_HDPX_EXC_EN;
	AE_WRITE_4(sc, AE_HDPX_REG, val);

	/*
	 * Configure interrupt moderate timer.
	 */
	AE_WRITE_2(sc, AE_IMT_REG, AE_IMT_DEFAULT);
	val = AE_READ_4(sc, AE_MASTER_REG);
	val |= AE_MASTER_IMT_EN;
	AE_WRITE_4(sc, AE_MASTER_REG, val);

	/*
	 * Configure interrupt clearing timer.
	 */
	AE_WRITE_2(sc, AE_ICT_REG, AE_ICT_DEFAULT);

	/*
	 * Configure MTU.
	 */
	val = ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
	    ETHER_CRC_LEN;
	AE_WRITE_2(sc, AE_MTU_REG, val);

	/*
	 * Configure cut-through threshold.
	 */
	AE_WRITE_4(sc, AE_CUT_THRESH_REG, AE_CUT_THRESH_DEFAULT);

	/*
	 * Configure flow control.
	 */
	AE_WRITE_2(sc, AE_FLOW_THRESH_HI_REG, (AE_RXD_COUNT_DEFAULT / 8) * 7);
	AE_WRITE_2(sc, AE_FLOW_THRESH_LO_REG, (AE_RXD_COUNT_MIN / 8) >
	    (AE_RXD_COUNT_DEFAULT / 12) ? (AE_RXD_COUNT_MIN / 8) :
	    (AE_RXD_COUNT_DEFAULT / 12));

	/*
	 * Init mailboxes.
	 */
	sc->txd_cur = sc->rxd_cur = 0;
	sc->txs_ack = sc->txd_ack = 0;
	sc->rxd_cur = 0;
	AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur);
	AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur);

	sc->tx_inproc = 0;	/* Number of packets the chip processes now. */
	sc->flags |= AE_FLAG_TXAVAIL;	/* Free Tx's available. */

	/*
	 * Enable DMA.
	 */
	AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN);
	AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN);

	/*
	 * Check if everything is OK.
	 */
	val = AE_READ_4(sc, AE_ISR_REG);
	if ((val & AE_ISR_PHY_LINKDOWN) != 0) {
		device_printf(sc->dev, "Initialization failed.\n");
		return (ENXIO);
	}

	/*
	 * Clear interrupt status.
	 */
	AE_WRITE_4(sc, AE_ISR_REG, 0x3fffffff);
	AE_WRITE_4(sc, AE_ISR_REG, 0x0);

	/*
	 * Enable interrupts.
	 */
	val = AE_READ_4(sc, AE_MASTER_REG);
	AE_WRITE_4(sc, AE_MASTER_REG, val | AE_MASTER_MANUAL_INT);
	AE_WRITE_4(sc, AE_IMR_REG, AE_IMR_DEFAULT);

	/*
	 * Disable WOL.
	 */
	AE_WRITE_4(sc, AE_WOL_REG, 0);

	/*
	 * Configure MAC.
	 */
	val = AE_MAC_TX_CRC_EN | AE_MAC_TX_AUTOPAD |
	    AE_MAC_FULL_DUPLEX | AE_MAC_CLK_PHY |
	    AE_MAC_TX_FLOW_EN | AE_MAC_RX_FLOW_EN |
	    ((AE_HALFBUF_DEFAULT << AE_HALFBUF_SHIFT) & AE_HALFBUF_MASK) |
	    ((AE_MAC_PREAMBLE_DEFAULT << AE_MAC_PREAMBLE_SHIFT) &
	    AE_MAC_PREAMBLE_MASK);
	AE_WRITE_4(sc, AE_MAC_REG, val);

	/*
	 * Configure Rx MAC.
	 */
	ae_rxfilter(sc);
	ae_rxvlan(sc);

	/*
	 * Enable Tx/Rx.
	 */
	val = AE_READ_4(sc, AE_MAC_REG);
	AE_WRITE_4(sc, AE_MAC_REG, val | AE_MAC_TX_EN | AE_MAC_RX_EN);

	sc->flags &= ~AE_FLAG_LINK;
	mii_mediachg(mii);	/* Switch to the current media. */

	callout_reset(&sc->tick_ch, hz, ae_tick, sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

#ifdef AE_DEBUG
	device_printf(sc->dev, "Initialization complete.\n");
#endif

	return (0);
}
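
/*
 * Editorial note: ae_detach() below doubles as the error-unwind path
 * for ae_attach(), so every teardown step first checks whether the
 * corresponding resource was actually allocated.
 */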
static int
ae_detach(device_t dev)
{
	struct ae_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	KASSERT(sc != NULL, ("[ae: %d]: sc is NULL", __LINE__));
	ifp = sc->ifp;
	if (device_is_attached(dev)) {
		AE_LOCK(sc);
		sc->flags |= AE_FLAG_DETACH;
		ae_stop(sc);
		AE_UNLOCK(sc);
		callout_drain(&sc->tick_ch);
		taskqueue_drain(sc->tq, &sc->int_task);
		taskqueue_drain(sc->tq, &sc->tx_task);
		taskqueue_drain(taskqueue_swi, &sc->link_task);
		ether_ifdetach(ifp);
	}
	if (sc->tq != NULL) {
		taskqueue_drain(sc->tq, &sc->int_task);
		taskqueue_free(sc->tq);
		sc->tq = NULL;
	}
	if (sc->miibus != NULL) {
		device_delete_child(dev, sc->miibus);
		sc->miibus = NULL;
	}
	bus_generic_detach(sc->dev);
	ae_dma_free(sc);
	if (sc->intrhand != NULL) {
		bus_teardown_intr(dev, sc->irq[0], sc->intrhand);
		sc->intrhand = NULL;
	}
	if (ifp != NULL) {
		if_free(ifp);
		sc->ifp = NULL;
	}
	if (sc->spec_irq != NULL)
		bus_release_resources(dev, sc->spec_irq, sc->irq);
	if (sc->spec_mem != NULL)
		bus_release_resources(dev, sc->spec_mem, sc->mem);
	if ((sc->flags & AE_FLAG_MSI) != 0)
		pci_release_msi(dev);
	mtx_destroy(&sc->mtx);

	return (0);
}
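
/*
 * Editorial note: PHY register access goes through a single
 * command/status register (AE_MDIO_REG).  The helpers below encode the
 * register index, kick AE_MDIO_START and then poll (for up to
 * AE_MDIO_TIMEOUT iterations) until the chip clears the START/BUSY bits
 * to signal a finished MDIO transaction.
 */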
static int
ae_miibus_readreg(device_t dev, int phy, int reg)
{
	ae_softc_t *sc;
	uint32_t val;
	int i;

	sc = device_get_softc(dev);
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));

	/*
	 * Locking is done in upper layers.
	 */

	if (phy != sc->phyaddr)
		return (0);

	val = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) |
	    AE_MDIO_START | AE_MDIO_READ | AE_MDIO_SUP_PREAMBLE |
	    ((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK);
	AE_WRITE_4(sc, AE_MDIO_REG, val);

	/*
	 * Wait for operation to complete.
	 */
	for (i = 0; i < AE_MDIO_TIMEOUT; i++) {
		DELAY(2);
		val = AE_READ_4(sc, AE_MDIO_REG);
		if ((val & (AE_MDIO_START | AE_MDIO_BUSY)) == 0)
			break;
	}
	if (i == AE_MDIO_TIMEOUT) {
		device_printf(sc->dev, "phy read timeout: %d.\n", reg);
		return (0);
	}
	return ((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK);
}

static int
ae_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	ae_softc_t *sc;
	uint32_t aereg;
	int i;

	sc = device_get_softc(dev);
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));

	/*
	 * Locking is done in upper layers.
	 */

	if (phy != sc->phyaddr)
		return (0);

	aereg = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) |
	    AE_MDIO_START | AE_MDIO_SUP_PREAMBLE |
	    ((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK) |
	    ((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK);
	AE_WRITE_4(sc, AE_MDIO_REG, aereg);

	/*
	 * Wait for operation to complete.
	 */
	for (i = 0; i < AE_MDIO_TIMEOUT; i++) {
		DELAY(2);
		aereg = AE_READ_4(sc, AE_MDIO_REG);
		if ((aereg & (AE_MDIO_START | AE_MDIO_BUSY)) == 0)
			break;
	}
	if (i == AE_MDIO_TIMEOUT) {
		device_printf(sc->dev, "phy write timeout: %d.\n", reg);
	}
	return (0);
}

static void
ae_miibus_statchg(device_t dev)
{
	ae_softc_t *sc;

	sc = device_get_softc(dev);
	taskqueue_enqueue(taskqueue_swi, &sc->link_task);
}

static void
ae_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	ae_softc_t *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));

	AE_LOCK(sc);
	mii = device_get_softc(sc->miibus);
	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
	AE_UNLOCK(sc);
}

static int
ae_mediachange(struct ifnet *ifp)
{
	ae_softc_t *sc;
	struct mii_data *mii;
	struct mii_softc *mii_sc;
	int error;

	/* XXX: check IFF_UP ?? */
	sc = ifp->if_softc;
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
	AE_LOCK(sc);
	mii = device_get_softc(sc->miibus);
	if (mii->mii_instance != 0) {
		LIST_FOREACH(mii_sc, &mii->mii_phys, mii_list)
			mii_phy_reset(mii_sc);
	}
	error = mii_mediachg(mii);
	AE_UNLOCK(sc);

	return (error);
}

static int
ae_check_eeprom_present(ae_softc_t *sc, int *vpdc)
{
	int error;
	uint32_t val;

	KASSERT(vpdc != NULL, ("[ae, %d]: vpdc is NULL!\n", __LINE__));

	/*
	 * Not sure why, but Linux does this.
	 */
	val = AE_READ_4(sc, AE_SPICTL_REG);
	if ((val & AE_SPICTL_VPD_EN) != 0) {
		val &= ~AE_SPICTL_VPD_EN;
		AE_WRITE_4(sc, AE_SPICTL_REG, val);
	}
	error = pci_find_extcap(sc->dev, PCIY_VPD, vpdc);
	return (error);
}

static int
ae_vpd_read_word(ae_softc_t *sc, int reg, uint32_t *word)
{
	uint32_t val;
	int i;

	AE_WRITE_4(sc, AE_VPD_DATA_REG, 0);	/* Clear register value. */

	/*
	 * VPD registers start at offset 0x100. Read them.
	 */
	val = 0x100 + reg * 4;
	AE_WRITE_4(sc, AE_VPD_CAP_REG, (val << AE_VPD_CAP_ADDR_SHIFT) &
	    AE_VPD_CAP_ADDR_MASK);
	for (i = 0; i < AE_VPD_TIMEOUT; i++) {
		DELAY(2000);
		val = AE_READ_4(sc, AE_VPD_CAP_REG);
		if ((val & AE_VPD_CAP_DONE) != 0)
			break;
	}
	if (i == AE_VPD_TIMEOUT) {
		device_printf(sc->dev, "timeout reading VPD register %d.\n",
		    reg);
		return (ETIMEDOUT);
	}
	*word = AE_READ_4(sc, AE_VPD_DATA_REG);
	return (0);
}
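
/*
 * VPD layout note (inferred from the code below, not from a datasheet):
 * the factory data is a sequence of 32-bit word pairs.  The first word
 * of a pair carries a signature in its low bits and a register index in
 * its high bits; the second word carries the value for that register.
 * ae_get_vpd_eaddr() walks these pairs looking for the two Ethernet
 * address registers.
 */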
static int
ae_get_vpd_eaddr(ae_softc_t *sc, uint32_t *eaddr)
{
	uint32_t word, reg, val;
	int error;
	int found;
	int vpdc;
	int i;

	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
	KASSERT(eaddr != NULL, ("[ae, %d]: eaddr is NULL", __LINE__));

	/*
	 * Check for EEPROM.
	 */
	error = ae_check_eeprom_present(sc, &vpdc);
	if (error != 0)
		return (error);

	/*
	 * Read the VPD configuration space.
	 * Each register is prefixed with a signature,
	 * so we can check if it is valid.
	 */
	for (i = 0, found = 0; i < AE_VPD_NREGS; i++) {
		error = ae_vpd_read_word(sc, i, &word);
		if (error != 0)
			break;

		/*
		 * Check signature.
		 */
		if ((word & AE_VPD_SIG_MASK) != AE_VPD_SIG)
			break;
		reg = word >> AE_VPD_REG_SHIFT;
		i++;	/* Move to the next word. */

		if (reg != AE_EADDR0_REG && reg != AE_EADDR1_REG)
			continue;

		error = ae_vpd_read_word(sc, i, &val);
		if (error != 0)
			break;
		if (reg == AE_EADDR0_REG)
			eaddr[0] = val;
		else
			eaddr[1] = val;
		found++;
	}

	if (found < 2)
		return (ENOENT);

	eaddr[1] &= 0xffff;	/* Only the last 2 bytes are used. */
	if (AE_CHECK_EADDR_VALID(eaddr) != 0) {
		if (bootverbose)
			device_printf(sc->dev,
			    "VPD ethernet address registers are invalid.\n");
		return (EINVAL);
	}
	return (0);
}

static int
ae_get_reg_eaddr(ae_softc_t *sc, uint32_t *eaddr)
{

	/*
	 * BIOS is supposed to set this.
	 */
	eaddr[0] = AE_READ_4(sc, AE_EADDR0_REG);
	eaddr[1] = AE_READ_4(sc, AE_EADDR1_REG);
	eaddr[1] &= 0xffff;	/* Only the last 2 bytes are used. */

	if (AE_CHECK_EADDR_VALID(eaddr) != 0) {
		if (bootverbose)
			device_printf(sc->dev,
			    "Ethernet address registers are invalid.\n");
		return (EINVAL);
	}
	return (0);
}

static void
ae_retrieve_address(ae_softc_t *sc)
{
	uint32_t eaddr[2] = {0, 0};
	int error;

	/*
	 * Try the EEPROM/VPD data first, then fall back to the
	 * address registers.
	 */
	error = ae_get_vpd_eaddr(sc, eaddr);
	if (error != 0)
		error = ae_get_reg_eaddr(sc, eaddr);
	if (error != 0) {
		if (bootverbose)
			device_printf(sc->dev,
			    "Generating random ethernet address.\n");
		eaddr[0] = arc4random();

		/*
		 * Set OUI to ASUSTek COMPUTER INC.
		 */
		sc->eaddr[0] = 0x02;	/* U/L bit set. */
		sc->eaddr[1] = 0x1f;
		sc->eaddr[2] = 0xc6;
		sc->eaddr[3] = (eaddr[0] >> 16) & 0xff;
		sc->eaddr[4] = (eaddr[0] >> 8) & 0xff;
		sc->eaddr[5] = (eaddr[0] >> 0) & 0xff;
	} else {
		sc->eaddr[0] = (eaddr[1] >> 8) & 0xff;
		sc->eaddr[1] = (eaddr[1] >> 0) & 0xff;
		sc->eaddr[2] = (eaddr[0] >> 24) & 0xff;
		sc->eaddr[3] = (eaddr[0] >> 16) & 0xff;
		sc->eaddr[4] = (eaddr[0] >> 8) & 0xff;
		sc->eaddr[5] = (eaddr[0] >> 0) & 0xff;
	}
}

static void
ae_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *addr = arg;

	if (error != 0)
		return;
	KASSERT(nsegs == 1, ("[ae, %d]: %d segments instead of 1!", __LINE__,
	    nsegs));
	*addr = segs[0].ds_addr;
}

static int
ae_alloc_rings(ae_softc_t *sc)
{
	bus_addr_t busaddr;
	int error;

	/*
	 * Create parent DMA tag.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
	    1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 0,
	    BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
	    &sc->dma_parent_tag);
	if (error != 0) {
		device_printf(sc->dev, "could not create parent DMA tag.\n");
		return (error);
	}

	/*
	 * Create DMA tag for TxD.
	 */
	error = bus_dma_tag_create(sc->dma_parent_tag,
	    4, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL, AE_TXD_BUFSIZE_DEFAULT, 1,
	    AE_TXD_BUFSIZE_DEFAULT, 0, NULL, NULL,
	    &sc->dma_txd_tag);
	if (error != 0) {
		device_printf(sc->dev, "could not create TxD DMA tag.\n");
		return (error);
	}
	/*
	 * Create DMA tag for TxS.
	 */
	error = bus_dma_tag_create(sc->dma_parent_tag,
	    4, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL, AE_TXS_COUNT_DEFAULT * 4, 1,
	    AE_TXS_COUNT_DEFAULT * 4, 0, NULL, NULL,
	    &sc->dma_txs_tag);
	if (error != 0) {
		device_printf(sc->dev, "could not create TxS DMA tag.\n");
		return (error);
	}

	/*
	 * Create DMA tag for RxD.
	 */
	error = bus_dma_tag_create(sc->dma_parent_tag,
	    128, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL, AE_RXD_COUNT_DEFAULT * 1536 + 120, 1,
	    AE_RXD_COUNT_DEFAULT * 1536 + 120, 0, NULL, NULL,
	    &sc->dma_rxd_tag);
	if (error != 0) {
		device_printf(sc->dev, "could not create RxD DMA tag.\n");
		return (error);
	}

	/*
	 * Allocate TxD DMA memory.
	 */
	error = bus_dmamem_alloc(sc->dma_txd_tag, (void **)&sc->txd_base,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->dma_txd_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate DMA memory for TxD ring.\n");
		return (error);
	}
	error = bus_dmamap_load(sc->dma_txd_tag, sc->dma_txd_map, sc->txd_base,
	    AE_TXD_BUFSIZE_DEFAULT, ae_dmamap_cb, &busaddr, BUS_DMA_NOWAIT);
	if (error != 0 || busaddr == 0) {
		device_printf(sc->dev,
		    "could not load DMA map for TxD ring.\n");
		return (error);
	}
	sc->dma_txd_busaddr = busaddr;

	/*
	 * Allocate TxS DMA memory.
	 */
	error = bus_dmamem_alloc(sc->dma_txs_tag, (void **)&sc->txs_base,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->dma_txs_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate DMA memory for TxS ring.\n");
		return (error);
	}
	error = bus_dmamap_load(sc->dma_txs_tag, sc->dma_txs_map, sc->txs_base,
	    AE_TXS_COUNT_DEFAULT * 4, ae_dmamap_cb, &busaddr, BUS_DMA_NOWAIT);
	if (error != 0 || busaddr == 0) {
		device_printf(sc->dev,
		    "could not load DMA map for TxS ring.\n");
		return (error);
	}
	sc->dma_txs_busaddr = busaddr;
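
	/*
	 * Editorial note on the 120/128 magic below: each RxD entry is
	 * 1536 bytes (an 8-byte status header followed by 1528 bytes of
	 * frame data, assuming the ae_rxd_t layout from if_aevar.h).  The
	 * ring is allocated with 128-byte alignment and its base is then
	 * advanced by 120 bytes, which presumably puts every data[] area
	 * on a 128-byte boundary (120 + 8 == 128, and 1536 is a multiple
	 * of 128).
	 */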
	/*
	 * Allocate RxD DMA memory.
	 */
	error = bus_dmamem_alloc(sc->dma_rxd_tag, (void **)&sc->rxd_base_dma,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->dma_rxd_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate DMA memory for RxD ring.\n");
		return (error);
	}
	error = bus_dmamap_load(sc->dma_rxd_tag, sc->dma_rxd_map,
	    sc->rxd_base_dma, AE_RXD_COUNT_DEFAULT * 1536 + 120, ae_dmamap_cb,
	    &busaddr, BUS_DMA_NOWAIT);
	if (error != 0 || busaddr == 0) {
		device_printf(sc->dev,
		    "could not load DMA map for RxD ring.\n");
		return (error);
	}
	sc->dma_rxd_busaddr = busaddr + 120;
	sc->rxd_base = (ae_rxd_t *)(sc->rxd_base_dma + 120);

	return (0);
}

static void
ae_dma_free(ae_softc_t *sc)
{

	if (sc->dma_txd_tag != NULL) {
		if (sc->dma_txd_map != NULL) {
			bus_dmamap_unload(sc->dma_txd_tag, sc->dma_txd_map);
			if (sc->txd_base != NULL)
				bus_dmamem_free(sc->dma_txd_tag, sc->txd_base,
				    sc->dma_txd_map);
		}
		bus_dma_tag_destroy(sc->dma_txd_tag);
		sc->dma_txd_map = NULL;
		sc->dma_txd_tag = NULL;
		sc->txd_base = NULL;
	}
	if (sc->dma_txs_tag != NULL) {
		if (sc->dma_txs_map != NULL) {
			bus_dmamap_unload(sc->dma_txs_tag, sc->dma_txs_map);
			if (sc->txs_base != NULL)
				bus_dmamem_free(sc->dma_txs_tag, sc->txs_base,
				    sc->dma_txs_map);
		}
		bus_dma_tag_destroy(sc->dma_txs_tag);
		sc->dma_txs_map = NULL;
		sc->dma_txs_tag = NULL;
		sc->txs_base = NULL;
	}
	if (sc->dma_rxd_tag != NULL) {
		if (sc->dma_rxd_map != NULL) {
			bus_dmamap_unload(sc->dma_rxd_tag, sc->dma_rxd_map);
			if (sc->rxd_base_dma != NULL)
				bus_dmamem_free(sc->dma_rxd_tag,
				    sc->rxd_base_dma, sc->dma_rxd_map);
		}
		bus_dma_tag_destroy(sc->dma_rxd_tag);
		sc->dma_rxd_map = NULL;
		sc->dma_rxd_tag = NULL;
		sc->rxd_base_dma = NULL;
	}
	if (sc->dma_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->dma_parent_tag);
		sc->dma_parent_tag = NULL;
	}
}

static int
ae_shutdown(device_t dev)
{
	ae_softc_t *sc;
	int error;

	sc = device_get_softc(dev);
	KASSERT(sc != NULL, ("[ae: %d]: sc is NULL", __LINE__));

	error = ae_suspend(dev);
	AE_LOCK(sc);
	ae_powersave_enable(sc);
	AE_UNLOCK(sc);
	return (error);
}

static void
ae_powersave_disable(ae_softc_t *sc)
{
	uint32_t val;

	AE_LOCK_ASSERT(sc);

	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0);
	val = AE_PHY_READ(sc, AE_PHY_DBG_DATA);
	if (val & AE_PHY_DBG_POWERSAVE) {
		val &= ~AE_PHY_DBG_POWERSAVE;
		AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, val);
		DELAY(1000);
	}
}
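
/*
 * Editorial note: the power-save helpers go through the PHY's indirect
 * debug port: a write to AE_PHY_DBG_ADDR selects an internal register
 * and AE_PHY_DBG_DATA accesses it.  The exact values below are
 * undocumented magic (see the XXX), presumably carried over from the
 * vendor driver.
 */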
static void
ae_powersave_enable(ae_softc_t *sc)
{
	uint32_t val;

	AE_LOCK_ASSERT(sc);

	/*
	 * XXX magic numbers.
	 */
	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0);
	val = AE_PHY_READ(sc, AE_PHY_DBG_DATA);
	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, val | 0x1000);
	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 2);
	AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0x3000);
	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 3);
	AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0);
}

static void
ae_pm_init(ae_softc_t *sc)
{
	struct ifnet *ifp;
	uint32_t val;
	uint16_t pmstat;
	struct mii_data *mii;
	int pmc;

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	if ((sc->flags & AE_FLAG_PMG) == 0) {
		/* Disable WOL entirely. */
		AE_WRITE_4(sc, AE_WOL_REG, 0);
		return;
	}

	/*
	 * Configure WOL if enabled.
	 */
	if ((ifp->if_capenable & IFCAP_WOL) != 0) {
		mii = device_get_softc(sc->miibus);
		mii_pollstat(mii);
		if ((mii->mii_media_status & IFM_AVALID) != 0 &&
		    (mii->mii_media_status & IFM_ACTIVE) != 0) {
			AE_WRITE_4(sc, AE_WOL_REG, AE_WOL_MAGIC |
			    AE_WOL_MAGIC_PME);

			/*
			 * Configure MAC.
			 */
			val = AE_MAC_RX_EN | AE_MAC_CLK_PHY |
			    AE_MAC_TX_CRC_EN | AE_MAC_TX_AUTOPAD |
			    ((AE_HALFBUF_DEFAULT << AE_HALFBUF_SHIFT) &
			    AE_HALFBUF_MASK) |
			    ((AE_MAC_PREAMBLE_DEFAULT <<
			    AE_MAC_PREAMBLE_SHIFT) & AE_MAC_PREAMBLE_MASK) |
			    AE_MAC_BCAST_EN | AE_MAC_MCAST_EN;
			if ((IFM_OPTIONS(mii->mii_media_active) &
			    IFM_FDX) != 0)
				val |= AE_MAC_FULL_DUPLEX;
			AE_WRITE_4(sc, AE_MAC_REG, val);

		} else {	/* No link. */
			AE_WRITE_4(sc, AE_WOL_REG, AE_WOL_LNKCHG |
			    AE_WOL_LNKCHG_PME);
			AE_WRITE_4(sc, AE_MAC_REG, 0);
		}
	} else {
		ae_powersave_enable(sc);
	}

	/*
	 * PCIE hacks. Magic numbers.
	 */
	val = AE_READ_4(sc, AE_PCIE_PHYMISC_REG);
	val |= AE_PCIE_PHYMISC_FORCE_RCV_DET;
	AE_WRITE_4(sc, AE_PCIE_PHYMISC_REG, val);
	val = AE_READ_4(sc, AE_PCIE_DLL_TX_CTRL_REG);
	val |= AE_PCIE_DLL_TX_CTRL_SEL_NOR_CLK;
	AE_WRITE_4(sc, AE_PCIE_DLL_TX_CTRL_REG, val);

	/*
	 * Configure PME.
	 */
	pci_find_extcap(sc->dev, PCIY_PMG, &pmc);
	pmstat = pci_read_config(sc->dev, pmc + PCIR_POWER_STATUS, 2);
	pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
	if ((ifp->if_capenable & IFCAP_WOL) != 0)
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(sc->dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
}

static int
ae_suspend(device_t dev)
{
	ae_softc_t *sc;

	sc = device_get_softc(dev);

	AE_LOCK(sc);
	ae_stop(sc);
	ae_pm_init(sc);
	AE_UNLOCK(sc);

	return (0);
}

static int
ae_resume(device_t dev)
{
	ae_softc_t *sc;

	sc = device_get_softc(dev);
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));

	AE_LOCK(sc);
	AE_READ_4(sc, AE_WOL_REG);	/* Clear WOL status. */
	if ((sc->ifp->if_flags & IFF_UP) != 0)
		ae_init_locked(sc);
	AE_UNLOCK(sc);

	return (0);
}

static unsigned int
ae_tx_avail_size(ae_softc_t *sc)
{
	unsigned int avail;

	if (sc->txd_cur >= sc->txd_ack)
		avail = AE_TXD_BUFSIZE_DEFAULT - (sc->txd_cur - sc->txd_ack);
	else
		avail = sc->txd_ack - sc->txd_cur;

	return (avail - 4);	/* 4-byte header. */
}
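
/*
 * Editorial note on the Tx ring layout used by ae_encap() below: the
 * TxD ring is not an array of fixed-size descriptors but one contiguous
 * AE_TXD_BUFSIZE_DEFAULT byte buffer.  Each packet occupies a 4-byte
 * header (length and VLAN tag) followed by the frame data, padded to a
 * 4-byte boundary, and may wrap around the end of the buffer.  txd_cur
 * and txd_ack are byte offsets, which is why ae_tx_avail_size() reports
 * bytes rather than descriptor counts.
 */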
static int
ae_encap(ae_softc_t *sc, struct mbuf **m_head)
{
	struct mbuf *m0;
	ae_txd_t *hdr;
	unsigned int to_end;
	uint16_t len;

	AE_LOCK_ASSERT(sc);

	m0 = *m_head;
	len = m0->m_pkthdr.len;

	if ((sc->flags & AE_FLAG_TXAVAIL) == 0 ||
	    ae_tx_avail_size(sc) < len) {
#ifdef AE_DEBUG
		if_printf(sc->ifp, "No free Tx available.\n");
#endif
		return (ENOBUFS);
	}

	hdr = (ae_txd_t *)(sc->txd_base + sc->txd_cur);
	bzero(hdr, sizeof(*hdr));
	/* Skip the header. */
	sc->txd_cur = (sc->txd_cur + 4) % AE_TXD_BUFSIZE_DEFAULT;
	/* Space available to the end of the ring. */
	to_end = AE_TXD_BUFSIZE_DEFAULT - sc->txd_cur;
	if (to_end >= len) {
		m_copydata(m0, 0, len, (caddr_t)(sc->txd_base + sc->txd_cur));
	} else {
		m_copydata(m0, 0, to_end, (caddr_t)(sc->txd_base +
		    sc->txd_cur));
		m_copydata(m0, to_end, len - to_end, (caddr_t)sc->txd_base);
	}

	/*
	 * Set TxD flags and parameters.
	 */
	if ((m0->m_flags & M_VLANTAG) != 0) {
		hdr->vlan = htole16(AE_TXD_VLAN(m0->m_pkthdr.ether_vtag));
		hdr->len = htole16(len | AE_TXD_INSERT_VTAG);
	} else {
		hdr->len = htole16(len);
	}

	/*
	 * Set current TxD position and round up to a 4-byte boundary.
	 */
	sc->txd_cur = ((sc->txd_cur + len + 3) & ~3) % AE_TXD_BUFSIZE_DEFAULT;
	if (sc->txd_cur == sc->txd_ack)
		sc->flags &= ~AE_FLAG_TXAVAIL;
#ifdef AE_DEBUG
	if_printf(sc->ifp, "New txd_cur = %d.\n", sc->txd_cur);
#endif

	/*
	 * Update TxS position and check if there are empty TxS available.
	 */
	sc->txs_base[sc->txs_cur].flags &= ~htole16(AE_TXS_UPDATE);
	sc->txs_cur = (sc->txs_cur + 1) % AE_TXS_COUNT_DEFAULT;
	if (sc->txs_cur == sc->txs_ack)
		sc->flags &= ~AE_FLAG_TXAVAIL;

	/*
	 * Synchronize DMA memory.
	 */
	bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}
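
/*
 * Editorial note: if ae_encap() fails (ring full), ae_start() below puts
 * the mbuf back at the head of the interface queue and raises
 * IFF_DRV_OACTIVE; ae_tx_intr() clears the flag once TxS entries are
 * reclaimed, which restarts transmission.
 */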
static void
ae_start(struct ifnet *ifp)
{
	ae_softc_t *sc;
	unsigned int count;
	struct mbuf *m0;
	int error;

	sc = ifp->if_softc;
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
	AE_LOCK(sc);

#ifdef AE_DEBUG
	if_printf(ifp, "Start called.\n");
#endif

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->flags & AE_FLAG_LINK) == 0) {
		AE_UNLOCK(sc);
		return;
	}

	count = 0;
	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;	/* Nothing to do. */

		error = ae_encap(sc, &m0);
		if (error != 0) {
			if (m0 != NULL) {
				IFQ_DRV_PREPEND(&ifp->if_snd, m0);
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
#ifdef AE_DEBUG
				if_printf(ifp, "Setting OACTIVE.\n");
#endif
			}
			break;
		}
		count++;
		sc->tx_inproc++;

		/* Bounce a copy of the frame to BPF. */
		ETHER_BPF_MTAP(ifp, m0);

		m_freem(m0);
	}

	if (count > 0) {	/* Something was dequeued. */
		AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur / 4);
		sc->wd_timer = AE_TX_TIMEOUT;	/* Load watchdog. */
#ifdef AE_DEBUG
		if_printf(ifp, "%d packets dequeued.\n", count);
		if_printf(ifp, "Tx pos now is %d.\n", sc->txd_cur);
#endif
	}
	AE_UNLOCK(sc);
}

static void
ae_link_task(void *arg, int pending)
{
	ae_softc_t *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t val;

	sc = (ae_softc_t *)arg;
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
	AE_LOCK(sc);

	ifp = sc->ifp;
	mii = device_get_softc(sc->miibus);
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		AE_UNLOCK(sc);	/* XXX: could happen? */
		return;
	}

	sc->flags &= ~AE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
	    (IFM_AVALID | IFM_ACTIVE)) {
		switch(IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->flags |= AE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/*
	 * Stop Rx/Tx MACs.
	 */
	ae_stop_rxmac(sc);
	ae_stop_txmac(sc);

	if ((sc->flags & AE_FLAG_LINK) != 0) {
		ae_mac_config(sc);

		/*
		 * Restart DMA engines.
		 */
		AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN);
		AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN);

		/*
		 * Enable Rx and Tx MACs.
		 */
		val = AE_READ_4(sc, AE_MAC_REG);
		val |= AE_MAC_TX_EN | AE_MAC_RX_EN;
		AE_WRITE_4(sc, AE_MAC_REG, val);
	}
	AE_UNLOCK(sc);
}

static void
ae_stop_rxmac(ae_softc_t *sc)
{
	uint32_t val;
	int i;

	AE_LOCK_ASSERT(sc);

	/*
	 * Stop Rx MAC engine.
	 */
	val = AE_READ_4(sc, AE_MAC_REG);
	if ((val & AE_MAC_RX_EN) != 0) {
		val &= ~AE_MAC_RX_EN;
		AE_WRITE_4(sc, AE_MAC_REG, val);
	}

	/*
	 * Stop Rx DMA engine.
	 */
	if (AE_READ_1(sc, AE_DMAWRITE_REG) == AE_DMAWRITE_EN)
		AE_WRITE_1(sc, AE_DMAWRITE_REG, 0);

	/*
	 * Wait for IDLE state.
	 */
	for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
		val = AE_READ_4(sc, AE_IDLE_REG);
		if ((val & (AE_IDLE_RXMAC | AE_IDLE_DMAWRITE)) == 0)
			break;
		DELAY(100);
	}
	if (i == AE_IDLE_TIMEOUT)
		device_printf(sc->dev, "timed out while stopping Rx MAC.\n");
}

static void
ae_stop_txmac(ae_softc_t *sc)
{
	uint32_t val;
	int i;

	AE_LOCK_ASSERT(sc);

	/*
	 * Stop Tx MAC engine.
	 */
	val = AE_READ_4(sc, AE_MAC_REG);
	if ((val & AE_MAC_TX_EN) != 0) {
		val &= ~AE_MAC_TX_EN;
		AE_WRITE_4(sc, AE_MAC_REG, val);
	}

	/*
	 * Stop Tx DMA engine.
	 */
	if (AE_READ_1(sc, AE_DMAREAD_REG) == AE_DMAREAD_EN)
		AE_WRITE_1(sc, AE_DMAREAD_REG, 0);

	/*
	 * Wait for IDLE state.
	 */
	for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
		val = AE_READ_4(sc, AE_IDLE_REG);
		if ((val & (AE_IDLE_TXMAC | AE_IDLE_DMAREAD)) == 0)
			break;
		DELAY(100);
	}
	if (i == AE_IDLE_TIMEOUT)
		device_printf(sc->dev, "timed out while stopping Tx MAC.\n");
}

static void
ae_tx_task(void *arg, int pending)
{
	struct ifnet *ifp;

	ifp = (struct ifnet *)arg;
	ae_start(ifp);
}

static void
ae_mac_config(ae_softc_t *sc)
{
	struct mii_data *mii;
	uint32_t val;

	AE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->miibus);
	val = AE_READ_4(sc, AE_MAC_REG);
	val &= ~AE_MAC_FULL_DUPLEX;
	/* XXX disable AE_MAC_TX_FLOW_EN? */

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
		val |= AE_MAC_FULL_DUPLEX;

	AE_WRITE_4(sc, AE_MAC_REG, val);
}
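
/*
 * Editorial note: interrupt handling is split in two.  ae_intr() runs as
 * a fast interrupt filter that merely masks the hardware interrupt and
 * defers the real work to ae_int_task() on the private taskqueue, where
 * taking the softc mutex is safe.
 */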
static int
ae_intr(void *arg)
{
	ae_softc_t *sc;
	uint32_t val;

	sc = (ae_softc_t *)arg;
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));

	val = AE_READ_4(sc, AE_ISR_REG);
	if (val == 0 || (val & AE_IMR_DEFAULT) == 0)
		return (FILTER_STRAY);

	/* Disable interrupts. */
	AE_WRITE_4(sc, AE_ISR_REG, AE_ISR_DISABLE);

	/* Schedule interrupt processing. */
	taskqueue_enqueue(sc->tq, &sc->int_task);

	return (FILTER_HANDLED);
}

static void
ae_int_task(void *arg, int pending)
{
	ae_softc_t *sc;
	struct ifnet *ifp;
	uint32_t val;

	sc = (ae_softc_t *)arg;

	AE_LOCK(sc);

	ifp = sc->ifp;

	val = AE_READ_4(sc, AE_ISR_REG);	/* Read interrupt status. */

	/*
	 * Clear interrupts and disable them.
	 */
	AE_WRITE_4(sc, AE_ISR_REG, val | AE_ISR_DISABLE);

#ifdef AE_DEBUG
	if_printf(ifp, "Interrupt received: 0x%08x\n", val);
#endif

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		if ((val & (AE_ISR_DMAR_TIMEOUT | AE_ISR_DMAW_TIMEOUT |
		    AE_ISR_PHY_LINKDOWN)) != 0) {
			ae_init_locked(sc);
		}
		if ((val & AE_ISR_TX_EVENT) != 0)
			ae_tx_intr(sc);
		if ((val & AE_ISR_RX_EVENT) != 0)
			ae_rx_intr(sc);
	}

	/*
	 * Re-enable interrupts.
	 */
	AE_WRITE_4(sc, AE_ISR_REG, 0);

	AE_UNLOCK(sc);
}
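
/*
 * Editorial note on the Tx completion protocol: the chip reports
 * finished packets through the separate TxS status ring, setting
 * AE_TXS_UPDATE in each entry.  The handler below acknowledges entries
 * in order, advances txd_ack past the corresponding bytes in the TxD
 * buffer (4-byte header plus data, rounded up to a 4-byte boundary) and
 * cross-checks the lengths recorded in both rings.
 */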
static void
ae_tx_intr(ae_softc_t *sc)
{
	struct ifnet *ifp;
	ae_txd_t *txd;
	ae_txs_t *txs;
	uint16_t flags;

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;

#ifdef AE_DEBUG
	if_printf(ifp, "Tx interrupt occurred.\n");
#endif

	/*
	 * Synchronize DMA buffers.
	 */
	bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (;;) {
		txs = sc->txs_base + sc->txs_ack;
		flags = le16toh(txs->flags);
		if ((flags & AE_TXS_UPDATE) == 0)
			break;
		txs->flags = htole16(flags & ~AE_TXS_UPDATE);
		/* Update stats. */
		ae_update_stats_tx(flags, &sc->stats);

		/*
		 * Update TxS position.
		 */
		sc->txs_ack = (sc->txs_ack + 1) % AE_TXS_COUNT_DEFAULT;
		sc->flags |= AE_FLAG_TXAVAIL;

		txd = (ae_txd_t *)(sc->txd_base + sc->txd_ack);
		if (txs->len != txd->len)
			device_printf(sc->dev, "Size mismatch: TxS:%d TxD:%d\n",
			    le16toh(txs->len), le16toh(txd->len));

		/*
		 * Move txd ack and align on a 4-byte boundary.
		 */
		sc->txd_ack = ((sc->txd_ack + le16toh(txd->len) + 4 + 3) & ~3) %
		    AE_TXD_BUFSIZE_DEFAULT;

		if ((flags & AE_TXS_SUCCESS) != 0)
			ifp->if_opackets++;
		else
			ifp->if_oerrors++;

		sc->tx_inproc--;

		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}

	if (sc->tx_inproc < 0) {
		if_printf(ifp, "Received stray Tx interrupt(s).\n");
		sc->tx_inproc = 0;
	}

	if (sc->tx_inproc == 0)
		sc->wd_timer = 0;	/* Unarm watchdog. */

	if ((sc->flags & AE_FLAG_TXAVAIL) != 0) {
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			taskqueue_enqueue(sc->tq, &sc->tx_task);
	}

	/*
	 * Synchronize DMA buffers.
	 */
	bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static int
ae_rxeof(ae_softc_t *sc, ae_rxd_t *rxd)
{
	struct ifnet *ifp;
	struct mbuf *m;
	unsigned int size;
	uint16_t flags;

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	flags = le16toh(rxd->flags);

#ifdef AE_DEBUG
	if_printf(ifp, "Rx interrupt occurred.\n");
#endif
	size = le16toh(rxd->len) - ETHER_CRC_LEN;
	if (size < (ETHER_MIN_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN)) {
		if_printf(ifp, "Runt frame received.\n");
		return (EIO);
	}

	m = m_devget(&rxd->data[0], size, ETHER_ALIGN, ifp, NULL);
	if (m == NULL)
		return (ENOBUFS);

	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
	    (flags & AE_RXD_HAS_VLAN) != 0) {
		m->m_pkthdr.ether_vtag = AE_RXD_VLAN(le16toh(rxd->vlan));
		m->m_flags |= M_VLANTAG;
	}

	/*
	 * Pass it through.
	 */
	AE_UNLOCK(sc);
	(*ifp->if_input)(ifp, m);
	AE_LOCK(sc);

	return (0);
}

static void
ae_rx_intr(ae_softc_t *sc)
{
	ae_rxd_t *rxd;
	struct ifnet *ifp;
	uint16_t flags;
	int error;

	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;

	/*
	 * Synchronize DMA buffers.
	 */
	bus_dmamap_sync(sc->dma_rxd_tag, sc->dma_rxd_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (;;) {
		rxd = (ae_rxd_t *)(sc->rxd_base + sc->rxd_cur);
		flags = le16toh(rxd->flags);
		if ((flags & AE_RXD_UPDATE) == 0)
			break;
		rxd->flags = htole16(flags & ~AE_RXD_UPDATE);
		/* Update stats. */
		ae_update_stats_rx(flags, &sc->stats);

		/*
		 * Update position index.
		 */
		sc->rxd_cur = (sc->rxd_cur + 1) % AE_RXD_COUNT_DEFAULT;

		if ((flags & AE_RXD_SUCCESS) == 0) {
			ifp->if_ierrors++;
			continue;
		}
		error = ae_rxeof(sc, rxd);
		if (error != 0) {
			ifp->if_ierrors++;
			continue;
		} else {
			ifp->if_ipackets++;
		}
	}

	/*
	 * Update Rx index.
	 */
	AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur);
}
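
/*
 * Editorial note: the watchdog is driven by ae_tick() below.  wd_timer
 * is armed in ae_start() and cleared by ae_tx_intr() once the chip has
 * drained its queue, so reaching zero here means a Tx request got stuck
 * in hardware.
 */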
static void
ae_watchdog(ae_softc_t *sc)
{
	struct ifnet *ifp;

	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));
	AE_LOCK_ASSERT(sc);
	ifp = sc->ifp;

	if (sc->wd_timer == 0 || --sc->wd_timer != 0)
		return;	/* Nothing to do. */

	if ((sc->flags & AE_FLAG_LINK) == 0)
		if_printf(ifp, "watchdog timeout (missed link).\n");
	else
		if_printf(ifp, "watchdog timeout - resetting.\n");

	ifp->if_oerrors++;
	ae_init_locked(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		taskqueue_enqueue(sc->tq, &sc->tx_task);
}

static void
ae_tick(void *arg)
{
	ae_softc_t *sc;
	struct mii_data *mii;

	sc = (ae_softc_t *)arg;
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));
	AE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->miibus);
	mii_tick(mii);
	ae_watchdog(sc);	/* Watchdog check. */
	callout_reset(&sc->tick_ch, hz, ae_tick, sc);
}

static void
ae_rxvlan(ae_softc_t *sc)
{
	struct ifnet *ifp;
	uint32_t val;

	AE_LOCK_ASSERT(sc);
	ifp = sc->ifp;
	val = AE_READ_4(sc, AE_MAC_REG);
	val &= ~AE_MAC_RMVLAN_EN;
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
		val |= AE_MAC_RMVLAN_EN;
	AE_WRITE_4(sc, AE_MAC_REG, val);
}

static void
ae_rxfilter(ae_softc_t *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	uint32_t crc;
	uint32_t mchash[2];
	uint32_t rxcfg;

	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;

	rxcfg = AE_READ_4(sc, AE_MAC_REG);
	rxcfg &= ~(AE_MAC_MCAST_EN | AE_MAC_BCAST_EN | AE_MAC_PROMISC_EN);

	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		rxcfg |= AE_MAC_BCAST_EN;
	if ((ifp->if_flags & IFF_PROMISC) != 0)
		rxcfg |= AE_MAC_PROMISC_EN;
	if ((ifp->if_flags & IFF_ALLMULTI) != 0)
		rxcfg |= AE_MAC_MCAST_EN;

	/*
	 * Wipe old settings.
	 */
	AE_WRITE_4(sc, AE_REG_MHT0, 0);
	AE_WRITE_4(sc, AE_REG_MHT1, 0);
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		AE_WRITE_4(sc, AE_REG_MHT0, 0xffffffff);
		AE_WRITE_4(sc, AE_REG_MHT1, 0xffffffff);
		AE_WRITE_4(sc, AE_MAC_REG, rxcfg);
		return;
	}
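
	/*
	 * Editorial note: the hash below uses the top six bits of the
	 * big-endian CRC of each multicast address; the MSB picks one of
	 * the two 32-bit MHT registers and the next five bits pick the
	 * bit within it.
	 */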
	/*
	 * Load multicast tables.
	 */
	bzero(mchash, sizeof(mchash));
	if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		crc = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN);
		mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);
	}
	if_maddr_runlock(ifp);
	AE_WRITE_4(sc, AE_REG_MHT0, mchash[0]);
	AE_WRITE_4(sc, AE_REG_MHT1, mchash[1]);
	AE_WRITE_4(sc, AE_MAC_REG, rxcfg);
}

static int
ae_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ae_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			AE_LOCK(sc);
			ifp->if_mtu = ifr->ifr_mtu;
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
				ae_init_locked(sc);
			AE_UNLOCK(sc);
		}
		break;
	case SIOCSIFFLAGS:
		AE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				if (((ifp->if_flags ^ sc->if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
					ae_rxfilter(sc);
			} else {
				if ((sc->flags & AE_FLAG_DETACH) == 0)
					ae_init_locked(sc);
			}
		} else {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
				ae_stop(sc);
		}
		sc->if_flags = ifp->if_flags;
		AE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		AE_LOCK(sc);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			ae_rxfilter(sc);
		AE_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		AE_LOCK(sc);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			ae_rxvlan(sc);
		}
		VLAN_CAPABILITIES(ifp);
		AE_UNLOCK(sc);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

static void
ae_stop(ae_softc_t *sc)
{
	struct ifnet *ifp;
	int i;

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->flags &= ~AE_FLAG_LINK;
	sc->wd_timer = 0;	/* Cancel watchdog. */
	callout_stop(&sc->tick_ch);

	/*
	 * Clear and disable interrupts.
	 */
	AE_WRITE_4(sc, AE_IMR_REG, 0);
	AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff);

	/*
	 * Stop Rx/Tx MACs.
	 */
	ae_stop_txmac(sc);
	ae_stop_rxmac(sc);

	/*
	 * Stop DMA engines.
	 */
	AE_WRITE_1(sc, AE_DMAREAD_REG, ~AE_DMAREAD_EN);
	AE_WRITE_1(sc, AE_DMAWRITE_REG, ~AE_DMAWRITE_EN);

	/*
	 * Wait for everything to enter idle state.
	 */
	for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
		if (AE_READ_4(sc, AE_IDLE_REG) == 0)
			break;
		DELAY(100);
	}
	if (i == AE_IDLE_TIMEOUT)
		device_printf(sc->dev, "could not enter idle state in stop.\n");
}

static void
ae_update_stats_tx(uint16_t flags, ae_stats_t *stats)
{

	if ((flags & AE_TXS_BCAST) != 0)
		stats->tx_bcast++;
	if ((flags & AE_TXS_MCAST) != 0)
		stats->tx_mcast++;
	if ((flags & AE_TXS_PAUSE) != 0)
		stats->tx_pause++;
	if ((flags & AE_TXS_CTRL) != 0)
		stats->tx_ctrl++;
	if ((flags & AE_TXS_DEFER) != 0)
		stats->tx_defer++;
	if ((flags & AE_TXS_EXCDEFER) != 0)
		stats->tx_excdefer++;
	if ((flags & AE_TXS_SINGLECOL) != 0)
		stats->tx_singlecol++;
	if ((flags & AE_TXS_MULTICOL) != 0)
		stats->tx_multicol++;
	if ((flags & AE_TXS_LATECOL) != 0)
		stats->tx_latecol++;
	if ((flags & AE_TXS_ABORTCOL) != 0)
		stats->tx_abortcol++;
	if ((flags & AE_TXS_UNDERRUN) != 0)
		stats->tx_underrun++;
}

static void
ae_update_stats_rx(uint16_t flags, ae_stats_t *stats)
{

	if ((flags & AE_RXD_BCAST) != 0)
		stats->rx_bcast++;
	if ((flags & AE_RXD_MCAST) != 0)
		stats->rx_mcast++;
	if ((flags & AE_RXD_PAUSE) != 0)
		stats->rx_pause++;
	if ((flags & AE_RXD_CTRL) != 0)
		stats->rx_ctrl++;
	if ((flags & AE_RXD_CRCERR) != 0)
		stats->rx_crcerr++;
	if ((flags & AE_RXD_CODEERR) != 0)
		stats->rx_codeerr++;
	if ((flags & AE_RXD_RUNT) != 0)
		stats->rx_runt++;
	if ((flags & AE_RXD_FRAG) != 0)
		stats->rx_frag++;
	if ((flags & AE_RXD_TRUNC) != 0)
		stats->rx_trunc++;
	if ((flags & AE_RXD_ALIGN) != 0)
		stats->rx_align++;
}