/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2008 Stanislav Sedov <stas@FreeBSD.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Driver for Attansic Technology Corp. L2 FastEthernet adapter.
 *
 * This driver is heavily based on age(4) Attansic L1 driver by Pyun YongHyeon.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>

#include "miibus_if.h"

#include "if_aereg.h"
#include "if_aevar.h"

/*
 * Devices supported by this driver.
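 * Currently the table lists only the Attansic L2 FastEthernet controller.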
 */
static struct ae_dev {
	uint16_t	vendorid;
	uint16_t	deviceid;
	const char	*name;
} ae_devs[] = {
	{ VENDORID_ATTANSIC, DEVICEID_ATTANSIC_L2,
		"Attansic Technology Corp, L2 FastEthernet" },
};
#define	AE_DEVS_COUNT	nitems(ae_devs)

static struct resource_spec ae_res_spec_mem[] = {
	{ SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE },
	{ -1, 0, 0 }
};
static struct resource_spec ae_res_spec_irq[] = {
	{ SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0, 0 }
};
static struct resource_spec ae_res_spec_msi[] = {
	{ SYS_RES_IRQ, 1, RF_ACTIVE },
	{ -1, 0, 0 }
};

static int	ae_probe(device_t dev);
static int	ae_attach(device_t dev);
static void	ae_pcie_init(ae_softc_t *sc);
static void	ae_phy_reset(ae_softc_t *sc);
static void	ae_phy_init(ae_softc_t *sc);
static int	ae_reset(ae_softc_t *sc);
static void	ae_init(void *arg);
static int	ae_init_locked(ae_softc_t *sc);
static int	ae_detach(device_t dev);
static int	ae_miibus_readreg(device_t dev, int phy, int reg);
static int	ae_miibus_writereg(device_t dev, int phy, int reg, int val);
static void	ae_miibus_statchg(device_t dev);
static void	ae_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr);
static int	ae_mediachange(struct ifnet *ifp);
static void	ae_retrieve_address(ae_softc_t *sc);
static void	ae_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs,
    int error);
static int	ae_alloc_rings(ae_softc_t *sc);
static void	ae_dma_free(ae_softc_t *sc);
static int	ae_shutdown(device_t dev);
static int	ae_suspend(device_t dev);
static void	ae_powersave_disable(ae_softc_t *sc);
static void	ae_powersave_enable(ae_softc_t *sc);
static int	ae_resume(device_t dev);
static unsigned int	ae_tx_avail_size(ae_softc_t *sc);
static int	ae_encap(ae_softc_t *sc, struct mbuf **m_head);
static void	ae_start(struct ifnet *ifp);
static void	ae_start_locked(struct ifnet *ifp);
static void	ae_link_task(void *arg, int pending);
static void	ae_stop_rxmac(ae_softc_t *sc);
static void	ae_stop_txmac(ae_softc_t *sc);
static void	ae_mac_config(ae_softc_t *sc);
static int	ae_intr(void *arg);
static void	ae_int_task(void *arg, int pending);
static void	ae_tx_intr(ae_softc_t *sc);
static void	ae_rxeof(ae_softc_t *sc, ae_rxd_t *rxd);
static void	ae_rx_intr(ae_softc_t *sc);
static void	ae_watchdog(ae_softc_t *sc);
static void	ae_tick(void *arg);
static void	ae_rxfilter(ae_softc_t *sc);
static void	ae_rxvlan(ae_softc_t *sc);
static int	ae_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static void	ae_stop(ae_softc_t *sc);
static int	ae_check_eeprom_present(ae_softc_t *sc, int *vpdc);
static int	ae_vpd_read_word(ae_softc_t *sc, int reg, uint32_t *word);
static int	ae_get_vpd_eaddr(ae_softc_t *sc, uint32_t *eaddr);
static int	ae_get_reg_eaddr(ae_softc_t *sc, uint32_t *eaddr);
static void	ae_update_stats_rx(uint16_t flags, ae_stats_t *stats);
static void	ae_update_stats_tx(uint16_t flags, ae_stats_t *stats);
static void	ae_init_tunables(ae_softc_t *sc);

static device_method_t ae_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		ae_probe),
	DEVMETHOD(device_attach,	ae_attach),
	DEVMETHOD(device_detach,	ae_detach),
	DEVMETHOD(device_shutdown,	ae_shutdown),
	DEVMETHOD(device_suspend,	ae_suspend),
	DEVMETHOD(device_resume,	ae_resume),

	/* MII interface. */
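	/* These methods let miibus(4) reach the PHY through the MAC's MDIO registers. */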
	DEVMETHOD(miibus_readreg,	ae_miibus_readreg),
	DEVMETHOD(miibus_writereg,	ae_miibus_writereg),
	DEVMETHOD(miibus_statchg,	ae_miibus_statchg),
	{ NULL, NULL }
};
static driver_t ae_driver = {
	"ae",
	ae_methods,
	sizeof(ae_softc_t)
};
static devclass_t ae_devclass;

DRIVER_MODULE(ae, pci, ae_driver, ae_devclass, 0, 0);
MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, ae, ae_devs,
    nitems(ae_devs));
DRIVER_MODULE(miibus, ae, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(ae, pci, 1, 1, 1);
MODULE_DEPEND(ae, ether, 1, 1, 1);
MODULE_DEPEND(ae, miibus, 1, 1, 1);

/*
 * Tunables.
 */
static int msi_disable = 0;
TUNABLE_INT("hw.ae.msi_disable", &msi_disable);

#define	AE_READ_4(sc, reg) \
	bus_read_4((sc)->mem[0], (reg))
#define	AE_READ_2(sc, reg) \
	bus_read_2((sc)->mem[0], (reg))
#define	AE_READ_1(sc, reg) \
	bus_read_1((sc)->mem[0], (reg))
#define	AE_WRITE_4(sc, reg, val) \
	bus_write_4((sc)->mem[0], (reg), (val))
#define	AE_WRITE_2(sc, reg, val) \
	bus_write_2((sc)->mem[0], (reg), (val))
#define	AE_WRITE_1(sc, reg, val) \
	bus_write_1((sc)->mem[0], (reg), (val))
#define	AE_PHY_READ(sc, reg) \
	ae_miibus_readreg(sc->dev, 0, reg)
#define	AE_PHY_WRITE(sc, reg, val) \
	ae_miibus_writereg(sc->dev, 0, reg, val)
#define	AE_CHECK_EADDR_VALID(eaddr) \
	((eaddr[0] == 0 && eaddr[1] == 0) || \
	(eaddr[0] == 0xffffffff && eaddr[1] == 0xffff))
#define	AE_RXD_VLAN(vtag) \
	(((vtag) >> 4) | (((vtag) & 0x07) << 13) | (((vtag) & 0x08) << 9))
#define	AE_TXD_VLAN(vtag) \
	(((vtag) << 4) | (((vtag) >> 13) & 0x07) | (((vtag) >> 9) & 0x08))

static int
ae_probe(device_t dev)
{
	uint16_t deviceid, vendorid;
	int i;

	vendorid = pci_get_vendor(dev);
	deviceid = pci_get_device(dev);

	/*
	 * Search through the list of supported devs for a matching one.
	 */
	for (i = 0; i < AE_DEVS_COUNT; i++) {
		if (vendorid == ae_devs[i].vendorid &&
		    deviceid == ae_devs[i].deviceid) {
			device_set_desc(dev, ae_devs[i].name);
			return (BUS_PROBE_DEFAULT);
		}
	}
	return (ENXIO);
}

static int
ae_attach(device_t dev)
{
	ae_softc_t *sc;
	struct ifnet *ifp;
	uint8_t chiprev;
	uint32_t pcirev;
	int nmsi, pmc;
	int error;

	sc = device_get_softc(dev); /* Automatically allocated and zeroed
				       on attach. */
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
	sc->dev = dev;

	/*
	 * Initialize mutexes and tasks.
	 */
	mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
	callout_init_mtx(&sc->tick_ch, &sc->mtx, 0);
	TASK_INIT(&sc->int_task, 0, ae_int_task, sc);
	TASK_INIT(&sc->link_task, 0, ae_link_task, sc);

	pci_enable_busmaster(dev);	/* Enable bus mastering. */

	sc->spec_mem = ae_res_spec_mem;

	/*
	 * Allocate memory-mapped registers.
	 */
	error = bus_alloc_resources(dev, sc->spec_mem, sc->mem);
	if (error != 0) {
		device_printf(dev, "could not allocate memory resources.\n");
		sc->spec_mem = NULL;
		goto fail;
	}

	/*
	 * Retrieve PCI and chip revisions.
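	 * The chip revision is kept in the upper bits of the master register.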
	 */
	pcirev = pci_get_revid(dev);
	chiprev = (AE_READ_4(sc, AE_MASTER_REG) >> AE_MASTER_REVNUM_SHIFT) &
	    AE_MASTER_REVNUM_MASK;
	if (bootverbose) {
		device_printf(dev, "pci device revision: %#04x\n", pcirev);
		device_printf(dev, "chip id: %#02x\n", chiprev);
	}
	nmsi = pci_msi_count(dev);
	if (bootverbose)
		device_printf(dev, "MSI count: %d.\n", nmsi);

	/*
	 * Allocate interrupt resources.
	 */
	if (msi_disable == 0 && nmsi == 1) {
		error = pci_alloc_msi(dev, &nmsi);
		if (error == 0) {
			device_printf(dev, "Using MSI messages.\n");
			sc->spec_irq = ae_res_spec_msi;
			error = bus_alloc_resources(dev, sc->spec_irq, sc->irq);
			if (error != 0) {
				device_printf(dev, "MSI allocation failed.\n");
				sc->spec_irq = NULL;
				pci_release_msi(dev);
			} else {
				sc->flags |= AE_FLAG_MSI;
			}
		}
	}
	if (sc->spec_irq == NULL) {
		sc->spec_irq = ae_res_spec_irq;
		error = bus_alloc_resources(dev, sc->spec_irq, sc->irq);
		if (error != 0) {
			device_printf(dev, "could not allocate IRQ resources.\n");
			sc->spec_irq = NULL;
			goto fail;
		}
	}

	ae_init_tunables(sc);

	ae_phy_reset(sc);		/* Reset PHY. */
	error = ae_reset(sc);		/* Reset the controller itself. */
	if (error != 0)
		goto fail;

	ae_pcie_init(sc);

	ae_retrieve_address(sc);	/* Load MAC address. */

	error = ae_alloc_rings(sc);	/* Allocate ring buffers. */
	if (error != 0)
		goto fail;

	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "could not allocate ifnet structure.\n");
		error = ENXIO;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ae_ioctl;
	ifp->if_start = ae_start;
	ifp->if_init = ae_init;
	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
	ifp->if_hwassist = 0;
	ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);
	if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0) {
		ifp->if_capabilities |= IFCAP_WOL_MAGIC;
		sc->flags |= AE_FLAG_PMG;
	}
	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Configure and attach MII bus.
	 */
	error = mii_attach(dev, &sc->miibus, ifp, ae_mediachange,
	    ae_mediastatus, BMSR_DEFCAPMASK, AE_PHYADDR_DEFAULT,
	    MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	ether_ifattach(ifp, sc->eaddr);
	/* Tell the upper layer(s) we support long frames. */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/*
	 * Create and run all helper tasks.
	 */
	sc->tq = taskqueue_create_fast("ae_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->tq);
	if (sc->tq == NULL) {
		device_printf(dev, "could not create taskqueue.\n");
		ether_ifdetach(ifp);
		error = ENXIO;
		goto fail;
	}
	taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->dev));

	/*
	 * Configure interrupt handlers.
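	 * ae_intr() is registered as a filter: it only acknowledges the
	 * hardware and defers the real processing to ae_int_task() on the
	 * taskqueue.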
	 */
	error = bus_setup_intr(dev, sc->irq[0], INTR_TYPE_NET | INTR_MPSAFE,
	    ae_intr, NULL, sc, &sc->intrhand);
	if (error != 0) {
		device_printf(dev, "could not set up interrupt handler.\n");
		taskqueue_free(sc->tq);
		sc->tq = NULL;
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error != 0)
		ae_detach(dev);

	return (error);
}

#define	AE_SYSCTL(stx, parent, name, desc, ptr) \
	SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, name, CTLFLAG_RD, ptr, 0, desc)

static void
ae_init_tunables(ae_softc_t *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *root, *stats, *stats_rx, *stats_tx;
	struct ae_stats *ae_stats;

	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
	ae_stats = &sc->stats;

	ctx = device_get_sysctl_ctx(sc->dev);
	root = device_get_sysctl_tree(sc->dev);
	stats = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(root), OID_AUTO, "stats",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "ae statistics");

	/*
	 * Receiver statistics.
	 */
	stats_rx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Rx MAC statistics");
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "bcast",
	    "broadcast frames", &ae_stats->rx_bcast);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "mcast",
	    "multicast frames", &ae_stats->rx_mcast);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "pause",
	    "PAUSE frames", &ae_stats->rx_pause);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "control",
	    "control frames", &ae_stats->rx_ctrl);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "crc_errors",
	    "frames with CRC errors", &ae_stats->rx_crcerr);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "code_errors",
	    "frames with invalid opcode", &ae_stats->rx_codeerr);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "runt",
	    "runt frames", &ae_stats->rx_runt);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "frag",
	    "fragmented frames", &ae_stats->rx_frag);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "align_errors",
	    "frames with alignment errors", &ae_stats->rx_align);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_rx), "truncated",
	    "frames truncated due to Rx FIFO underrun", &ae_stats->rx_trunc);

	/*
	 * Transmitter statistics.
	 */
	stats_tx = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "tx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Tx MAC statistics");
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "bcast",
	    "broadcast frames", &ae_stats->tx_bcast);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "mcast",
	    "multicast frames", &ae_stats->tx_mcast);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "pause",
	    "PAUSE frames", &ae_stats->tx_pause);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "control",
	    "control frames", &ae_stats->tx_ctrl);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "defers",
	    "deferrals occurred", &ae_stats->tx_defer);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "exc_defers",
	    "excessive deferrals occurred", &ae_stats->tx_excdefer);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "singlecols",
	    "single collisions occurred", &ae_stats->tx_singlecol);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "multicols",
	    "multiple collisions occurred", &ae_stats->tx_multicol);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "latecols",
	    "late collisions occurred", &ae_stats->tx_latecol);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "aborts",
	    "transmit aborts due to collisions", &ae_stats->tx_abortcol);
	AE_SYSCTL(ctx, SYSCTL_CHILDREN(stats_tx), "underruns",
	    "Tx FIFO underruns", &ae_stats->tx_underrun);
}

static void
ae_pcie_init(ae_softc_t *sc)
{

	AE_WRITE_4(sc, AE_PCIE_LTSSM_TESTMODE_REG,
	    AE_PCIE_LTSSM_TESTMODE_DEFAULT);
	AE_WRITE_4(sc, AE_PCIE_DLL_TX_CTRL_REG, AE_PCIE_DLL_TX_CTRL_DEFAULT);
}

static void
ae_phy_reset(ae_softc_t *sc)
{

	AE_WRITE_4(sc, AE_PHY_ENABLE_REG, AE_PHY_ENABLE);
	DELAY(1000);	/* XXX: pause(9) ? */
}

static int
ae_reset(ae_softc_t *sc)
{
	int i;

	/*
	 * Issue a soft reset.
	 */
	AE_WRITE_4(sc, AE_MASTER_REG, AE_MASTER_SOFT_RESET);
	bus_barrier(sc->mem[0], AE_MASTER_REG, 4,
	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);

	/*
	 * Wait for reset to complete.
	 */
	for (i = 0; i < AE_RESET_TIMEOUT; i++) {
		if ((AE_READ_4(sc, AE_MASTER_REG) & AE_MASTER_SOFT_RESET) == 0)
			break;
		DELAY(10);
	}
	if (i == AE_RESET_TIMEOUT) {
		device_printf(sc->dev, "reset timeout.\n");
		return (ENXIO);
	}

	/*
	 * Wait for everything to enter idle state.
	 */
	for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
		if (AE_READ_4(sc, AE_IDLE_REG) == 0)
			break;
		DELAY(100);
	}
	if (i == AE_IDLE_TIMEOUT) {
		device_printf(sc->dev, "could not enter idle state.\n");
		return (ENXIO);
	}
	return (0);
}

static void
ae_init(void *arg)
{
	ae_softc_t *sc;

	sc = (ae_softc_t *)arg;
	AE_LOCK(sc);
	ae_init_locked(sc);
	AE_UNLOCK(sc);
}

static void
ae_phy_init(ae_softc_t *sc)
{

	/*
	 * Enable link status change interrupt.
	 * XXX magic numbers.
	 */
#ifdef notyet
	AE_PHY_WRITE(sc, 18, 0xc00);
#endif
}

static int
ae_init_locked(ae_softc_t *sc)
{
	struct ifnet *ifp;
	struct mii_data *mii;
	uint8_t eaddr[ETHER_ADDR_LEN];
	uint32_t val;
	bus_addr_t addr;

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return (0);
	mii = device_get_softc(sc->miibus);

	ae_stop(sc);
	ae_reset(sc);
	ae_pcie_init(sc);		/* Initialize PCIE stuff. */
	ae_phy_init(sc);
	ae_powersave_disable(sc);

	/*
	 * Clear and disable interrupts.
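	 * Writing all ones acknowledges every pending event and keeps
	 * interrupts masked until they are re-enabled further below.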
	 */
	AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff);

	/*
	 * Set the MAC address.
	 */
	bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN);
	val = eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5];
	AE_WRITE_4(sc, AE_EADDR0_REG, val);
	val = eaddr[0] << 8 | eaddr[1];
	AE_WRITE_4(sc, AE_EADDR1_REG, val);

	bzero(sc->rxd_base_dma, AE_RXD_COUNT_DEFAULT * 1536 + AE_RXD_PADDING);
	bzero(sc->txd_base, AE_TXD_BUFSIZE_DEFAULT);
	bzero(sc->txs_base, AE_TXS_COUNT_DEFAULT * 4);
	/*
	 * Set ring buffers base addresses.
	 */
	addr = sc->dma_rxd_busaddr;
	AE_WRITE_4(sc, AE_DESC_ADDR_HI_REG, BUS_ADDR_HI(addr));
	AE_WRITE_4(sc, AE_RXD_ADDR_LO_REG, BUS_ADDR_LO(addr));
	addr = sc->dma_txd_busaddr;
	AE_WRITE_4(sc, AE_TXD_ADDR_LO_REG, BUS_ADDR_LO(addr));
	addr = sc->dma_txs_busaddr;
	AE_WRITE_4(sc, AE_TXS_ADDR_LO_REG, BUS_ADDR_LO(addr));

	/*
	 * Configure ring buffers sizes.
	 */
	AE_WRITE_2(sc, AE_RXD_COUNT_REG, AE_RXD_COUNT_DEFAULT);
	AE_WRITE_2(sc, AE_TXD_BUFSIZE_REG, AE_TXD_BUFSIZE_DEFAULT / 4);
	AE_WRITE_2(sc, AE_TXS_COUNT_REG, AE_TXS_COUNT_DEFAULT);

	/*
	 * Configure interframe gap parameters.
	 */
	val = ((AE_IFG_TXIPG_DEFAULT << AE_IFG_TXIPG_SHIFT) &
	    AE_IFG_TXIPG_MASK) |
	    ((AE_IFG_RXIPG_DEFAULT << AE_IFG_RXIPG_SHIFT) &
	    AE_IFG_RXIPG_MASK) |
	    ((AE_IFG_IPGR1_DEFAULT << AE_IFG_IPGR1_SHIFT) &
	    AE_IFG_IPGR1_MASK) |
	    ((AE_IFG_IPGR2_DEFAULT << AE_IFG_IPGR2_SHIFT) &
	    AE_IFG_IPGR2_MASK);
	AE_WRITE_4(sc, AE_IFG_REG, val);

	/*
	 * Configure half-duplex operation.
	 */
	val = ((AE_HDPX_LCOL_DEFAULT << AE_HDPX_LCOL_SHIFT) &
	    AE_HDPX_LCOL_MASK) |
	    ((AE_HDPX_RETRY_DEFAULT << AE_HDPX_RETRY_SHIFT) &
	    AE_HDPX_RETRY_MASK) |
	    ((AE_HDPX_ABEBT_DEFAULT << AE_HDPX_ABEBT_SHIFT) &
	    AE_HDPX_ABEBT_MASK) |
	    ((AE_HDPX_JAMIPG_DEFAULT << AE_HDPX_JAMIPG_SHIFT) &
	    AE_HDPX_JAMIPG_MASK) | AE_HDPX_EXC_EN;
	AE_WRITE_4(sc, AE_HDPX_REG, val);

	/*
	 * Configure interrupt moderate timer.
	 */
	AE_WRITE_2(sc, AE_IMT_REG, AE_IMT_DEFAULT);
	val = AE_READ_4(sc, AE_MASTER_REG);
	val |= AE_MASTER_IMT_EN;
	AE_WRITE_4(sc, AE_MASTER_REG, val);

	/*
	 * Configure interrupt clearing timer.
	 */
	AE_WRITE_2(sc, AE_ICT_REG, AE_ICT_DEFAULT);

	/*
	 * Configure MTU.
	 */
	val = ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
	    ETHER_CRC_LEN;
	AE_WRITE_2(sc, AE_MTU_REG, val);

	/*
	 * Configure cut-through threshold.
	 */
	AE_WRITE_4(sc, AE_CUT_THRESH_REG, AE_CUT_THRESH_DEFAULT);

	/*
	 * Configure flow control.
	 */
	AE_WRITE_2(sc, AE_FLOW_THRESH_HI_REG, (AE_RXD_COUNT_DEFAULT / 8) * 7);
	AE_WRITE_2(sc, AE_FLOW_THRESH_LO_REG, (AE_RXD_COUNT_MIN / 8) >
	    (AE_RXD_COUNT_DEFAULT / 12) ? (AE_RXD_COUNT_MIN / 8) :
	    (AE_RXD_COUNT_DEFAULT / 12));

	/*
	 * Init mailboxes.
	 */
	sc->txd_cur = sc->rxd_cur = 0;
	sc->txs_ack = sc->txd_ack = 0;
	sc->rxd_cur = 0;
	AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur);
	AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur);

	sc->tx_inproc = 0;	/* Number of packets the chip processes now. */
	sc->flags |= AE_FLAG_TXAVAIL;	/* Free Tx's available. */

	/*
	 * Enable DMA.
	 */
	AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN);
	AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN);

	/*
	 * Check if everything is OK.
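	 * A pending PHY_LINKDOWN event at this point indicates that the
	 * controller failed to come up.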
	 */
	val = AE_READ_4(sc, AE_ISR_REG);
	if ((val & AE_ISR_PHY_LINKDOWN) != 0) {
		device_printf(sc->dev, "Initialization failed.\n");
		return (ENXIO);
	}

	/*
	 * Clear interrupt status.
	 */
	AE_WRITE_4(sc, AE_ISR_REG, 0x3fffffff);
	AE_WRITE_4(sc, AE_ISR_REG, 0x0);

	/*
	 * Enable interrupts.
	 */
	val = AE_READ_4(sc, AE_MASTER_REG);
	AE_WRITE_4(sc, AE_MASTER_REG, val | AE_MASTER_MANUAL_INT);
	AE_WRITE_4(sc, AE_IMR_REG, AE_IMR_DEFAULT);

	/*
	 * Disable WOL.
	 */
	AE_WRITE_4(sc, AE_WOL_REG, 0);

	/*
	 * Configure MAC.
	 */
	val = AE_MAC_TX_CRC_EN | AE_MAC_TX_AUTOPAD |
	    AE_MAC_FULL_DUPLEX | AE_MAC_CLK_PHY |
	    AE_MAC_TX_FLOW_EN | AE_MAC_RX_FLOW_EN |
	    ((AE_HALFBUF_DEFAULT << AE_HALFBUF_SHIFT) & AE_HALFBUF_MASK) |
	    ((AE_MAC_PREAMBLE_DEFAULT << AE_MAC_PREAMBLE_SHIFT) &
	    AE_MAC_PREAMBLE_MASK);
	AE_WRITE_4(sc, AE_MAC_REG, val);

	/*
	 * Configure Rx MAC.
	 */
	ae_rxfilter(sc);
	ae_rxvlan(sc);

	/*
	 * Enable Tx/Rx.
	 */
	val = AE_READ_4(sc, AE_MAC_REG);
	AE_WRITE_4(sc, AE_MAC_REG, val | AE_MAC_TX_EN | AE_MAC_RX_EN);

	sc->flags &= ~AE_FLAG_LINK;
	mii_mediachg(mii);	/* Switch to the current media. */

	callout_reset(&sc->tick_ch, hz, ae_tick, sc);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

#ifdef AE_DEBUG
	device_printf(sc->dev, "Initialization complete.\n");
#endif

	return (0);
}

static int
ae_detach(device_t dev)
{
	struct ae_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	KASSERT(sc != NULL, ("[ae: %d]: sc is NULL", __LINE__));
	ifp = sc->ifp;
	if (device_is_attached(dev)) {
		AE_LOCK(sc);
		sc->flags |= AE_FLAG_DETACH;
		ae_stop(sc);
		AE_UNLOCK(sc);
		callout_drain(&sc->tick_ch);
		taskqueue_drain(sc->tq, &sc->int_task);
		taskqueue_drain(taskqueue_swi, &sc->link_task);
		ether_ifdetach(ifp);
	}
	if (sc->tq != NULL) {
		taskqueue_drain(sc->tq, &sc->int_task);
		taskqueue_free(sc->tq);
		sc->tq = NULL;
	}
	if (sc->miibus != NULL) {
		device_delete_child(dev, sc->miibus);
		sc->miibus = NULL;
	}
	bus_generic_detach(sc->dev);
	ae_dma_free(sc);
	if (sc->intrhand != NULL) {
		bus_teardown_intr(dev, sc->irq[0], sc->intrhand);
		sc->intrhand = NULL;
	}
	if (ifp != NULL) {
		if_free(ifp);
		sc->ifp = NULL;
	}
	if (sc->spec_irq != NULL)
		bus_release_resources(dev, sc->spec_irq, sc->irq);
	if (sc->spec_mem != NULL)
		bus_release_resources(dev, sc->spec_mem, sc->mem);
	if ((sc->flags & AE_FLAG_MSI) != 0)
		pci_release_msi(dev);
	mtx_destroy(&sc->mtx);

	return (0);
}

static int
ae_miibus_readreg(device_t dev, int phy, int reg)
{
	ae_softc_t *sc;
	uint32_t val;
	int i;

	sc = device_get_softc(dev);
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));

	/*
	 * Locking is done in upper layers.
	 */

	val = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) |
	    AE_MDIO_START | AE_MDIO_READ | AE_MDIO_SUP_PREAMBLE |
	    ((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK);
	AE_WRITE_4(sc, AE_MDIO_REG, val);

	/*
	 * Wait for operation to complete.
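	 * The chip clears AE_MDIO_START and AE_MDIO_BUSY once the MDIO
	 * cycle has finished.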
	 */
	for (i = 0; i < AE_MDIO_TIMEOUT; i++) {
		DELAY(2);
		val = AE_READ_4(sc, AE_MDIO_REG);
		if ((val & (AE_MDIO_START | AE_MDIO_BUSY)) == 0)
			break;
	}
	if (i == AE_MDIO_TIMEOUT) {
		device_printf(sc->dev, "phy read timeout: %d.\n", reg);
		return (0);
	}
	return ((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK);
}

static int
ae_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	ae_softc_t *sc;
	uint32_t aereg;
	int i;

	sc = device_get_softc(dev);
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));

	/*
	 * Locking is done in upper layers.
	 */

	aereg = ((reg << AE_MDIO_REGADDR_SHIFT) & AE_MDIO_REGADDR_MASK) |
	    AE_MDIO_START | AE_MDIO_SUP_PREAMBLE |
	    ((AE_MDIO_CLK_25_4 << AE_MDIO_CLK_SHIFT) & AE_MDIO_CLK_MASK) |
	    ((val << AE_MDIO_DATA_SHIFT) & AE_MDIO_DATA_MASK);
	AE_WRITE_4(sc, AE_MDIO_REG, aereg);

	/*
	 * Wait for operation to complete.
	 */
	for (i = 0; i < AE_MDIO_TIMEOUT; i++) {
		DELAY(2);
		aereg = AE_READ_4(sc, AE_MDIO_REG);
		if ((aereg & (AE_MDIO_START | AE_MDIO_BUSY)) == 0)
			break;
	}
	if (i == AE_MDIO_TIMEOUT) {
		device_printf(sc->dev, "phy write timeout: %d.\n", reg);
	}
	return (0);
}

static void
ae_miibus_statchg(device_t dev)
{
	ae_softc_t *sc;

	sc = device_get_softc(dev);
	taskqueue_enqueue(taskqueue_swi, &sc->link_task);
}

static void
ae_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	ae_softc_t *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));

	AE_LOCK(sc);
	mii = device_get_softc(sc->miibus);
	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
	AE_UNLOCK(sc);
}

static int
ae_mediachange(struct ifnet *ifp)
{
	ae_softc_t *sc;
	struct mii_data *mii;
	struct mii_softc *mii_sc;
	int error;

	/* XXX: check IFF_UP ?? */
	sc = ifp->if_softc;
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
	AE_LOCK(sc);
	mii = device_get_softc(sc->miibus);
	LIST_FOREACH(mii_sc, &mii->mii_phys, mii_list)
		PHY_RESET(mii_sc);
	error = mii_mediachg(mii);
	AE_UNLOCK(sc);

	return (error);
}

static int
ae_check_eeprom_present(ae_softc_t *sc, int *vpdc)
{
	int error;
	uint32_t val;

	KASSERT(vpdc != NULL, ("[ae, %d]: vpdc is NULL!\n", __LINE__));

	/*
	 * Not sure why, but Linux does this.
	 */
	val = AE_READ_4(sc, AE_SPICTL_REG);
	if ((val & AE_SPICTL_VPD_EN) != 0) {
		val &= ~AE_SPICTL_VPD_EN;
		AE_WRITE_4(sc, AE_SPICTL_REG, val);
	}
	error = pci_find_cap(sc->dev, PCIY_VPD, vpdc);
	return (error);
}

static int
ae_vpd_read_word(ae_softc_t *sc, int reg, uint32_t *word)
{
	uint32_t val;
	int i;

	AE_WRITE_4(sc, AE_VPD_DATA_REG, 0);	/* Clear register value. */

	/*
	 * VPD registers start at offset 0x100. Read them.
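	 * Post the word address through the VPD capability register, then
	 * poll for AE_VPD_CAP_DONE.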
	 */
	val = 0x100 + reg * 4;
	AE_WRITE_4(sc, AE_VPD_CAP_REG, (val << AE_VPD_CAP_ADDR_SHIFT) &
	    AE_VPD_CAP_ADDR_MASK);
	for (i = 0; i < AE_VPD_TIMEOUT; i++) {
		DELAY(2000);
		val = AE_READ_4(sc, AE_VPD_CAP_REG);
		if ((val & AE_VPD_CAP_DONE) != 0)
			break;
	}
	if (i == AE_VPD_TIMEOUT) {
		device_printf(sc->dev, "timeout reading VPD register %d.\n",
		    reg);
		return (ETIMEDOUT);
	}
	*word = AE_READ_4(sc, AE_VPD_DATA_REG);
	return (0);
}

static int
ae_get_vpd_eaddr(ae_softc_t *sc, uint32_t *eaddr)
{
	uint32_t word, reg, val;
	int error;
	int found;
	int vpdc;
	int i;

	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
	KASSERT(eaddr != NULL, ("[ae, %d]: eaddr is NULL", __LINE__));

	/*
	 * Check for EEPROM.
	 */
	error = ae_check_eeprom_present(sc, &vpdc);
	if (error != 0)
		return (error);

	/*
	 * Read the VPD configuration space.
	 * Each register is prefixed with signature,
	 * so we can check if it is valid.
	 */
	for (i = 0, found = 0; i < AE_VPD_NREGS; i++) {
		error = ae_vpd_read_word(sc, i, &word);
		if (error != 0)
			break;

		/*
		 * Check signature.
		 */
		if ((word & AE_VPD_SIG_MASK) != AE_VPD_SIG)
			break;
		reg = word >> AE_VPD_REG_SHIFT;
		i++;	/* Move to the next word. */

		if (reg != AE_EADDR0_REG && reg != AE_EADDR1_REG)
			continue;

		error = ae_vpd_read_word(sc, i, &val);
		if (error != 0)
			break;
		if (reg == AE_EADDR0_REG)
			eaddr[0] = val;
		else
			eaddr[1] = val;
		found++;
	}

	if (found < 2)
		return (ENOENT);

	eaddr[1] &= 0xffff;	/* Only last 2 bytes are used. */
	if (AE_CHECK_EADDR_VALID(eaddr) != 0) {
		if (bootverbose)
			device_printf(sc->dev,
			    "VPD ethernet address registers are invalid.\n");
		return (EINVAL);
	}
	return (0);
}

static int
ae_get_reg_eaddr(ae_softc_t *sc, uint32_t *eaddr)
{

	/*
	 * BIOS is supposed to set this.
	 */
	eaddr[0] = AE_READ_4(sc, AE_EADDR0_REG);
	eaddr[1] = AE_READ_4(sc, AE_EADDR1_REG);
	eaddr[1] &= 0xffff;	/* Only last 2 bytes are used. */

	if (AE_CHECK_EADDR_VALID(eaddr) != 0) {
		if (bootverbose)
			device_printf(sc->dev,
			    "Ethernet address registers are invalid.\n");
		return (EINVAL);
	}
	return (0);
}

static void
ae_retrieve_address(ae_softc_t *sc)
{
	uint32_t eaddr[2] = {0, 0};
	int error;

	/*
	 * Check for EEPROM.
	 */
	error = ae_get_vpd_eaddr(sc, eaddr);
	if (error != 0)
		error = ae_get_reg_eaddr(sc, eaddr);
	if (error != 0) {
		if (bootverbose)
			device_printf(sc->dev,
			    "Generating random ethernet address.\n");
		eaddr[0] = arc4random();

		/*
		 * Set OUI to ASUSTek COMPUTER INC.
		 */
		sc->eaddr[0] = 0x02;	/* U/L bit set. */
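		/* A set U/L bit marks the address as locally administered. */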
		sc->eaddr[1] = 0x1f;
		sc->eaddr[2] = 0xc6;
		sc->eaddr[3] = (eaddr[0] >> 16) & 0xff;
		sc->eaddr[4] = (eaddr[0] >> 8) & 0xff;
		sc->eaddr[5] = (eaddr[0] >> 0) & 0xff;
	} else {
		sc->eaddr[0] = (eaddr[1] >> 8) & 0xff;
		sc->eaddr[1] = (eaddr[1] >> 0) & 0xff;
		sc->eaddr[2] = (eaddr[0] >> 24) & 0xff;
		sc->eaddr[3] = (eaddr[0] >> 16) & 0xff;
		sc->eaddr[4] = (eaddr[0] >> 8) & 0xff;
		sc->eaddr[5] = (eaddr[0] >> 0) & 0xff;
	}
}

static void
ae_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *addr = arg;

	if (error != 0)
		return;
	KASSERT(nsegs == 1, ("[ae, %d]: %d segments instead of 1!", __LINE__,
	    nsegs));
	*addr = segs[0].ds_addr;
}

static int
ae_alloc_rings(ae_softc_t *sc)
{
	bus_addr_t busaddr;
	int error;

	/*
	 * Create parent DMA tag.
	 */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
	    1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 0,
	    BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL,
	    &sc->dma_parent_tag);
	if (error != 0) {
		device_printf(sc->dev, "could not create parent DMA tag.\n");
		return (error);
	}

	/*
	 * Create DMA tag for TxD.
	 */
	error = bus_dma_tag_create(sc->dma_parent_tag,
	    8, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL, AE_TXD_BUFSIZE_DEFAULT, 1,
	    AE_TXD_BUFSIZE_DEFAULT, 0, NULL, NULL,
	    &sc->dma_txd_tag);
	if (error != 0) {
		device_printf(sc->dev, "could not create TxD DMA tag.\n");
		return (error);
	}

	/*
	 * Create DMA tag for TxS.
	 */
	error = bus_dma_tag_create(sc->dma_parent_tag,
	    8, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL, AE_TXS_COUNT_DEFAULT * 4, 1,
	    AE_TXS_COUNT_DEFAULT * 4, 0, NULL, NULL,
	    &sc->dma_txs_tag);
	if (error != 0) {
		device_printf(sc->dev, "could not create TxS DMA tag.\n");
		return (error);
	}

	/*
	 * Create DMA tag for RxD.
	 */
	error = bus_dma_tag_create(sc->dma_parent_tag,
	    128, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL, AE_RXD_COUNT_DEFAULT * 1536 + AE_RXD_PADDING, 1,
	    AE_RXD_COUNT_DEFAULT * 1536 + AE_RXD_PADDING, 0, NULL, NULL,
	    &sc->dma_rxd_tag);
	if (error != 0) {
		device_printf(sc->dev, "could not create RxD DMA tag.\n");
		return (error);
	}

	/*
	 * Allocate TxD DMA memory.
	 */
	error = bus_dmamem_alloc(sc->dma_txd_tag, (void **)&sc->txd_base,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &sc->dma_txd_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate DMA memory for TxD ring.\n");
		return (error);
	}
	error = bus_dmamap_load(sc->dma_txd_tag, sc->dma_txd_map, sc->txd_base,
	    AE_TXD_BUFSIZE_DEFAULT, ae_dmamap_cb, &busaddr, BUS_DMA_NOWAIT);
	if (error != 0 || busaddr == 0) {
		device_printf(sc->dev,
		    "could not load DMA map for TxD ring.\n");
		return (error);
	}
	sc->dma_txd_busaddr = busaddr;

	/*
	 * Allocate TxS DMA memory.
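	 * Each TxS (transmit status) entry is 4 bytes, hence the count * 4
	 * sizing used throughout.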
1184 */ 1185 error = bus_dmamem_alloc(sc->dma_txs_tag, (void **)&sc->txs_base, 1186 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 1187 &sc->dma_txs_map); 1188 if (error != 0) { 1189 device_printf(sc->dev, 1190 "could not allocate DMA memory for TxS ring.\n"); 1191 return (error); 1192 } 1193 error = bus_dmamap_load(sc->dma_txs_tag, sc->dma_txs_map, sc->txs_base, 1194 AE_TXS_COUNT_DEFAULT * 4, ae_dmamap_cb, &busaddr, BUS_DMA_NOWAIT); 1195 if (error != 0 || busaddr == 0) { 1196 device_printf(sc->dev, 1197 "could not load DMA map for TxS ring.\n"); 1198 return (error); 1199 } 1200 sc->dma_txs_busaddr = busaddr; 1201 1202 /* 1203 * Allocate RxD DMA memory. 1204 */ 1205 error = bus_dmamem_alloc(sc->dma_rxd_tag, (void **)&sc->rxd_base_dma, 1206 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 1207 &sc->dma_rxd_map); 1208 if (error != 0) { 1209 device_printf(sc->dev, 1210 "could not allocate DMA memory for RxD ring.\n"); 1211 return (error); 1212 } 1213 error = bus_dmamap_load(sc->dma_rxd_tag, sc->dma_rxd_map, 1214 sc->rxd_base_dma, AE_RXD_COUNT_DEFAULT * 1536 + AE_RXD_PADDING, 1215 ae_dmamap_cb, &busaddr, BUS_DMA_NOWAIT); 1216 if (error != 0 || busaddr == 0) { 1217 device_printf(sc->dev, 1218 "could not load DMA map for RxD ring.\n"); 1219 return (error); 1220 } 1221 sc->dma_rxd_busaddr = busaddr + AE_RXD_PADDING; 1222 sc->rxd_base = (ae_rxd_t *)(sc->rxd_base_dma + AE_RXD_PADDING); 1223 1224 return (0); 1225 } 1226 1227 static void 1228 ae_dma_free(ae_softc_t *sc) 1229 { 1230 1231 if (sc->dma_txd_tag != NULL) { 1232 if (sc->dma_txd_busaddr != 0) 1233 bus_dmamap_unload(sc->dma_txd_tag, sc->dma_txd_map); 1234 if (sc->txd_base != NULL) 1235 bus_dmamem_free(sc->dma_txd_tag, sc->txd_base, 1236 sc->dma_txd_map); 1237 bus_dma_tag_destroy(sc->dma_txd_tag); 1238 sc->dma_txd_tag = NULL; 1239 sc->txd_base = NULL; 1240 sc->dma_txd_busaddr = 0; 1241 } 1242 if (sc->dma_txs_tag != NULL) { 1243 if (sc->dma_txs_busaddr != 0) 1244 bus_dmamap_unload(sc->dma_txs_tag, sc->dma_txs_map); 1245 if (sc->txs_base != NULL) 1246 bus_dmamem_free(sc->dma_txs_tag, sc->txs_base, 1247 sc->dma_txs_map); 1248 bus_dma_tag_destroy(sc->dma_txs_tag); 1249 sc->dma_txs_tag = NULL; 1250 sc->txs_base = NULL; 1251 sc->dma_txs_busaddr = 0; 1252 } 1253 if (sc->dma_rxd_tag != NULL) { 1254 if (sc->dma_rxd_busaddr != 0) 1255 bus_dmamap_unload(sc->dma_rxd_tag, sc->dma_rxd_map); 1256 if (sc->rxd_base_dma != NULL) 1257 bus_dmamem_free(sc->dma_rxd_tag, sc->rxd_base_dma, 1258 sc->dma_rxd_map); 1259 bus_dma_tag_destroy(sc->dma_rxd_tag); 1260 sc->dma_rxd_tag = NULL; 1261 sc->rxd_base_dma = NULL; 1262 sc->dma_rxd_busaddr = 0; 1263 } 1264 if (sc->dma_parent_tag != NULL) { 1265 bus_dma_tag_destroy(sc->dma_parent_tag); 1266 sc->dma_parent_tag = NULL; 1267 } 1268 } 1269 1270 static int 1271 ae_shutdown(device_t dev) 1272 { 1273 ae_softc_t *sc; 1274 int error; 1275 1276 sc = device_get_softc(dev); 1277 KASSERT(sc != NULL, ("[ae: %d]: sc is NULL", __LINE__)); 1278 1279 error = ae_suspend(dev); 1280 AE_LOCK(sc); 1281 ae_powersave_enable(sc); 1282 AE_UNLOCK(sc); 1283 return (error); 1284 } 1285 1286 static void 1287 ae_powersave_disable(ae_softc_t *sc) 1288 { 1289 uint32_t val; 1290 1291 AE_LOCK_ASSERT(sc); 1292 1293 AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0); 1294 val = AE_PHY_READ(sc, AE_PHY_DBG_DATA); 1295 if (val & AE_PHY_DBG_POWERSAVE) { 1296 val &= ~AE_PHY_DBG_POWERSAVE; 1297 AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, val); 1298 DELAY(1000); 1299 } 1300 } 1301 1302 static void 1303 ae_powersave_enable(ae_softc_t *sc) 1304 { 1305 uint32_t val; 1306 1307 
	AE_LOCK_ASSERT(sc);

	/*
	 * XXX magic numbers.
	 */
	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 0);
	val = AE_PHY_READ(sc, AE_PHY_DBG_DATA);
	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, val | 0x1000);
	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 2);
	AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0x3000);
	AE_PHY_WRITE(sc, AE_PHY_DBG_ADDR, 3);
	AE_PHY_WRITE(sc, AE_PHY_DBG_DATA, 0);
}

static void
ae_pm_init(ae_softc_t *sc)
{
	struct ifnet *ifp;
	uint32_t val;
	uint16_t pmstat;
	struct mii_data *mii;
	int pmc;

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	if ((sc->flags & AE_FLAG_PMG) == 0) {
		/* Disable WOL entirely. */
		AE_WRITE_4(sc, AE_WOL_REG, 0);
		return;
	}

	/*
	 * Configure WOL if enabled.
	 */
	if ((ifp->if_capenable & IFCAP_WOL) != 0) {
		mii = device_get_softc(sc->miibus);
		mii_pollstat(mii);
		if ((mii->mii_media_status & IFM_AVALID) != 0 &&
		    (mii->mii_media_status & IFM_ACTIVE) != 0) {
			AE_WRITE_4(sc, AE_WOL_REG, AE_WOL_MAGIC |
			    AE_WOL_MAGIC_PME);

			/*
			 * Configure MAC.
			 */
			val = AE_MAC_RX_EN | AE_MAC_CLK_PHY |
			    AE_MAC_TX_CRC_EN | AE_MAC_TX_AUTOPAD |
			    ((AE_HALFBUF_DEFAULT << AE_HALFBUF_SHIFT) &
			    AE_HALFBUF_MASK) |
			    ((AE_MAC_PREAMBLE_DEFAULT <<
			    AE_MAC_PREAMBLE_SHIFT) & AE_MAC_PREAMBLE_MASK) |
			    AE_MAC_BCAST_EN | AE_MAC_MCAST_EN;
			if ((IFM_OPTIONS(mii->mii_media_active) &
			    IFM_FDX) != 0)
				val |= AE_MAC_FULL_DUPLEX;
			AE_WRITE_4(sc, AE_MAC_REG, val);

		} else {	/* No link. */
			AE_WRITE_4(sc, AE_WOL_REG, AE_WOL_LNKCHG |
			    AE_WOL_LNKCHG_PME);
			AE_WRITE_4(sc, AE_MAC_REG, 0);
		}
	} else {
		ae_powersave_enable(sc);
	}

	/*
	 * PCIE hacks. Magic numbers.
	 */
	val = AE_READ_4(sc, AE_PCIE_PHYMISC_REG);
	val |= AE_PCIE_PHYMISC_FORCE_RCV_DET;
	AE_WRITE_4(sc, AE_PCIE_PHYMISC_REG, val);
	val = AE_READ_4(sc, AE_PCIE_DLL_TX_CTRL_REG);
	val |= AE_PCIE_DLL_TX_CTRL_SEL_NOR_CLK;
	AE_WRITE_4(sc, AE_PCIE_DLL_TX_CTRL_REG, val);

	/*
	 * Configure PME.
	 */
	if (pci_find_cap(sc->dev, PCIY_PMG, &pmc) == 0) {
		pmstat = pci_read_config(sc->dev, pmc + PCIR_POWER_STATUS, 2);
		pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
		if ((ifp->if_capenable & IFCAP_WOL) != 0)
			pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
		pci_write_config(sc->dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	}
}

static int
ae_suspend(device_t dev)
{
	ae_softc_t *sc;

	sc = device_get_softc(dev);

	AE_LOCK(sc);
	ae_stop(sc);
	ae_pm_init(sc);
	AE_UNLOCK(sc);

	return (0);
}

static int
ae_resume(device_t dev)
{
	ae_softc_t *sc;

	sc = device_get_softc(dev);
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));

	AE_LOCK(sc);
	AE_READ_4(sc, AE_WOL_REG);	/* Clear WOL status. */
	if ((sc->ifp->if_flags & IFF_UP) != 0)
		ae_init_locked(sc);
	AE_UNLOCK(sc);

	return (0);
}

static unsigned int
ae_tx_avail_size(ae_softc_t *sc)
{
	unsigned int avail;

	if (sc->txd_cur >= sc->txd_ack)
		avail = AE_TXD_BUFSIZE_DEFAULT - (sc->txd_cur - sc->txd_ack);
	else
		avail = sc->txd_ack - sc->txd_cur;

	return (avail);
}

static int
ae_encap(ae_softc_t *sc, struct mbuf **m_head)
{
	struct mbuf *m0;
	ae_txd_t *hdr;
	unsigned int to_end;
	uint16_t len;

	AE_LOCK_ASSERT(sc);

	m0 = *m_head;
	len = m0->m_pkthdr.len;

	if ((sc->flags & AE_FLAG_TXAVAIL) == 0 ||
	    len + sizeof(ae_txd_t) + 3 > ae_tx_avail_size(sc)) {
#ifdef AE_DEBUG
		if_printf(sc->ifp, "No free Tx available.\n");
#endif
		return (ENOBUFS);
	}

	hdr = (ae_txd_t *)(sc->txd_base + sc->txd_cur);
	bzero(hdr, sizeof(*hdr));
	/* Skip header size. */
	sc->txd_cur = (sc->txd_cur + sizeof(ae_txd_t)) % AE_TXD_BUFSIZE_DEFAULT;
	/* Space available to the end of the ring. */
	to_end = AE_TXD_BUFSIZE_DEFAULT - sc->txd_cur;
	if (to_end >= len) {
		m_copydata(m0, 0, len, (caddr_t)(sc->txd_base + sc->txd_cur));
	} else {
		m_copydata(m0, 0, to_end, (caddr_t)(sc->txd_base +
		    sc->txd_cur));
		m_copydata(m0, to_end, len - to_end, (caddr_t)sc->txd_base);
	}

	/*
	 * Set TxD flags and parameters.
	 */
	if ((m0->m_flags & M_VLANTAG) != 0) {
		hdr->vlan = htole16(AE_TXD_VLAN(m0->m_pkthdr.ether_vtag));
		hdr->len = htole16(len | AE_TXD_INSERT_VTAG);
	} else {
		hdr->len = htole16(len);
	}

	/*
	 * Set current TxD position and round up to a 4-byte boundary.
	 */
	sc->txd_cur = ((sc->txd_cur + len + 3) & ~3) % AE_TXD_BUFSIZE_DEFAULT;
	if (sc->txd_cur == sc->txd_ack)
		sc->flags &= ~AE_FLAG_TXAVAIL;
#ifdef AE_DEBUG
	if_printf(sc->ifp, "New txd_cur = %d.\n", sc->txd_cur);
#endif

	/*
	 * Update TxS position and check if there are empty TxS available.
	 */
	sc->txs_base[sc->txs_cur].flags &= ~htole16(AE_TXS_UPDATE);
	sc->txs_cur = (sc->txs_cur + 1) % AE_TXS_COUNT_DEFAULT;
	if (sc->txs_cur == sc->txs_ack)
		sc->flags &= ~AE_FLAG_TXAVAIL;

	/*
	 * Synchronize DMA memory.
	 */
	bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map, BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

static void
ae_start(struct ifnet *ifp)
{
	ae_softc_t *sc;

	sc = ifp->if_softc;
	AE_LOCK(sc);
	ae_start_locked(ifp);
	AE_UNLOCK(sc);
}

static void
ae_start_locked(struct ifnet *ifp)
{
	ae_softc_t *sc;
	unsigned int count;
	struct mbuf *m0;
	int error;

	sc = ifp->if_softc;
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
	AE_LOCK_ASSERT(sc);

#ifdef AE_DEBUG
	if_printf(ifp, "Start called.\n");
#endif

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->flags & AE_FLAG_LINK) == 0)
		return;

	count = 0;
	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;	/* Nothing to do. */

		error = ae_encap(sc, &m0);
		if (error != 0) {
			if (m0 != NULL) {
				IFQ_DRV_PREPEND(&ifp->if_snd, m0);
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
#ifdef AE_DEBUG
				if_printf(ifp, "Setting OACTIVE.\n");
#endif
			}
			break;
		}
		count++;
		sc->tx_inproc++;

		/* Bounce a copy of the frame to BPF. */
		ETHER_BPF_MTAP(ifp, m0);

		m_freem(m0);
	}

	if (count > 0) {	/* Something was dequeued. */
		AE_WRITE_2(sc, AE_MB_TXD_IDX_REG, sc->txd_cur / 4);
		sc->wd_timer = AE_TX_TIMEOUT;	/* Load watchdog. */
#ifdef AE_DEBUG
		if_printf(ifp, "%d packets dequeued.\n", count);
		if_printf(ifp, "Tx pos now is %d.\n", sc->txd_cur);
#endif
	}
}

static void
ae_link_task(void *arg, int pending)
{
	ae_softc_t *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t val;

	sc = (ae_softc_t *)arg;
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));
	AE_LOCK(sc);

	ifp = sc->ifp;
	mii = device_get_softc(sc->miibus);
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		AE_UNLOCK(sc);	/* XXX: could happen? */
		return;
	}

	sc->flags &= ~AE_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) ==
	    (IFM_AVALID | IFM_ACTIVE)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->flags |= AE_FLAG_LINK;
			break;
		default:
			break;
		}
	}

	/*
	 * Stop Rx/Tx MACs.
	 */
	ae_stop_rxmac(sc);
	ae_stop_txmac(sc);

	if ((sc->flags & AE_FLAG_LINK) != 0) {
		ae_mac_config(sc);

		/*
		 * Restart DMA engines.
		 */
		AE_WRITE_1(sc, AE_DMAREAD_REG, AE_DMAREAD_EN);
		AE_WRITE_1(sc, AE_DMAWRITE_REG, AE_DMAWRITE_EN);

		/*
		 * Enable Rx and Tx MACs.
		 */
		val = AE_READ_4(sc, AE_MAC_REG);
		val |= AE_MAC_TX_EN | AE_MAC_RX_EN;
		AE_WRITE_4(sc, AE_MAC_REG, val);
	}
	AE_UNLOCK(sc);
}

static void
ae_stop_rxmac(ae_softc_t *sc)
{
	uint32_t val;
	int i;

	AE_LOCK_ASSERT(sc);

	/*
	 * Stop Rx MAC engine.
	 */
	val = AE_READ_4(sc, AE_MAC_REG);
	if ((val & AE_MAC_RX_EN) != 0) {
		val &= ~AE_MAC_RX_EN;
		AE_WRITE_4(sc, AE_MAC_REG, val);
	}

	/*
	 * Stop Rx DMA engine.
	 */
	if (AE_READ_1(sc, AE_DMAWRITE_REG) == AE_DMAWRITE_EN)
		AE_WRITE_1(sc, AE_DMAWRITE_REG, 0);

	/*
	 * Wait for IDLE state.
	 */
	for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
		val = AE_READ_4(sc, AE_IDLE_REG);
		if ((val & (AE_IDLE_RXMAC | AE_IDLE_DMAWRITE)) == 0)
			break;
		DELAY(100);
	}
	if (i == AE_IDLE_TIMEOUT)
		device_printf(sc->dev, "timed out while stopping Rx MAC.\n");
}

static void
ae_stop_txmac(ae_softc_t *sc)
{
	uint32_t val;
	int i;

	AE_LOCK_ASSERT(sc);

	/*
	 * Stop Tx MAC engine.
	 */
	val = AE_READ_4(sc, AE_MAC_REG);
	if ((val & AE_MAC_TX_EN) != 0) {
		val &= ~AE_MAC_TX_EN;
		AE_WRITE_4(sc, AE_MAC_REG, val);
	}

	/*
	 * Stop Tx DMA engine.
	 */
	if (AE_READ_1(sc, AE_DMAREAD_REG) == AE_DMAREAD_EN)
		AE_WRITE_1(sc, AE_DMAREAD_REG, 0);

	/*
	 * Wait for IDLE state.
	 */
	for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
		val = AE_READ_4(sc, AE_IDLE_REG);
		if ((val & (AE_IDLE_TXMAC | AE_IDLE_DMAREAD)) == 0)
			break;
		DELAY(100);
	}
	if (i == AE_IDLE_TIMEOUT)
		device_printf(sc->dev, "timed out while stopping Tx MAC.\n");
}

static void
ae_mac_config(ae_softc_t *sc)
{
	struct mii_data *mii;
	uint32_t val;

	AE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->miibus);
	val = AE_READ_4(sc, AE_MAC_REG);
	val &= ~AE_MAC_FULL_DUPLEX;
	/* XXX disable AE_MAC_TX_FLOW_EN? */

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
		val |= AE_MAC_FULL_DUPLEX;

	AE_WRITE_4(sc, AE_MAC_REG, val);
}

static int
ae_intr(void *arg)
{
	ae_softc_t *sc;
	uint32_t val;

	sc = (ae_softc_t *)arg;
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL", __LINE__));

	val = AE_READ_4(sc, AE_ISR_REG);
	if (val == 0 || (val & AE_IMR_DEFAULT) == 0)
		return (FILTER_STRAY);

	/* Disable interrupts. */
	AE_WRITE_4(sc, AE_ISR_REG, AE_ISR_DISABLE);

	/* Schedule interrupt processing. */
	taskqueue_enqueue(sc->tq, &sc->int_task);

	return (FILTER_HANDLED);
}

static void
ae_int_task(void *arg, int pending)
{
	ae_softc_t *sc;
	struct ifnet *ifp;
	uint32_t val;

	sc = (ae_softc_t *)arg;

	AE_LOCK(sc);

	ifp = sc->ifp;

	val = AE_READ_4(sc, AE_ISR_REG);	/* Read interrupt status. */
	if (val == 0) {
		AE_UNLOCK(sc);
		return;
	}

	/*
	 * Clear interrupts and disable them.
	 */
	AE_WRITE_4(sc, AE_ISR_REG, val | AE_ISR_DISABLE);

#ifdef AE_DEBUG
	if_printf(ifp, "Interrupt received: 0x%08x\n", val);
#endif

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
		if ((val & (AE_ISR_DMAR_TIMEOUT | AE_ISR_DMAW_TIMEOUT |
		    AE_ISR_PHY_LINKDOWN)) != 0) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			ae_init_locked(sc);
			AE_UNLOCK(sc);
			return;
		}
		if ((val & AE_ISR_TX_EVENT) != 0)
			ae_tx_intr(sc);
		if ((val & AE_ISR_RX_EVENT) != 0)
			ae_rx_intr(sc);
		/*
		 * Re-enable interrupts.
		 */
		AE_WRITE_4(sc, AE_ISR_REG, 0);

		if ((sc->flags & AE_FLAG_TXAVAIL) != 0) {
			if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
				ae_start_locked(ifp);
		}
	}

	AE_UNLOCK(sc);
}

static void
ae_tx_intr(ae_softc_t *sc)
{
	struct ifnet *ifp;
	ae_txd_t *txd;
	ae_txs_t *txs;
	uint16_t flags;

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;

#ifdef AE_DEBUG
	if_printf(ifp, "Tx interrupt occurred.\n");
#endif

	/*
	 * Synchronize DMA buffers.
	 */
	bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (;;) {
		txs = sc->txs_base + sc->txs_ack;
		flags = le16toh(txs->flags);
		if ((flags & AE_TXS_UPDATE) == 0)
			break;
		txs->flags = htole16(flags & ~AE_TXS_UPDATE);
		/* Update stats. */
		ae_update_stats_tx(flags, &sc->stats);

		/*
		 * Update TxS position.
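		 * Acknowledging a TxS entry frees Tx ring space, so
		 * transmit is marked available again.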
		 */
		sc->txs_ack = (sc->txs_ack + 1) % AE_TXS_COUNT_DEFAULT;
		sc->flags |= AE_FLAG_TXAVAIL;

		txd = (ae_txd_t *)(sc->txd_base + sc->txd_ack);
		if (txs->len != txd->len)
			device_printf(sc->dev, "Size mismatch: TxS:%d TxD:%d\n",
			    le16toh(txs->len), le16toh(txd->len));

		/*
		 * Move txd ack and align on 4-byte boundary.
		 */
		sc->txd_ack = ((sc->txd_ack + le16toh(txd->len) +
		    sizeof(ae_txs_t) + 3) & ~3) % AE_TXD_BUFSIZE_DEFAULT;

		if ((flags & AE_TXS_SUCCESS) != 0)
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		else
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

		sc->tx_inproc--;
	}

	if ((sc->flags & AE_FLAG_TXAVAIL) != 0)
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	if (sc->tx_inproc < 0) {
		if_printf(ifp, "Received stray Tx interrupt(s).\n");
		sc->tx_inproc = 0;
	}

	if (sc->tx_inproc == 0)
		sc->wd_timer = 0;	/* Unarm watchdog. */

	/*
	 * Synchronize DMA buffers.
	 */
	bus_dmamap_sync(sc->dma_txd_tag, sc->dma_txd_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->dma_txs_tag, sc->dma_txs_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static void
ae_rxeof(ae_softc_t *sc, ae_rxd_t *rxd)
{
	struct ifnet *ifp;
	struct mbuf *m;
	unsigned int size;
	uint16_t flags;

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	flags = le16toh(rxd->flags);

#ifdef AE_DEBUG
	if_printf(ifp, "Rx interrupt occurred.\n");
#endif
	size = le16toh(rxd->len) - ETHER_CRC_LEN;
	if (size < (ETHER_MIN_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN)) {
		if_printf(ifp, "Runt frame received.\n");
		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
		return;
	}

	m = m_devget(&rxd->data[0], size, ETHER_ALIGN, ifp, NULL);
	if (m == NULL) {
		if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
		return;
	}

	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
	    (flags & AE_RXD_HAS_VLAN) != 0) {
		m->m_pkthdr.ether_vtag = AE_RXD_VLAN(le16toh(rxd->vlan));
		m->m_flags |= M_VLANTAG;
	}

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
	/*
	 * Pass it through.
	 */
	AE_UNLOCK(sc);
	(*ifp->if_input)(ifp, m);
	AE_LOCK(sc);
}

static void
ae_rx_intr(ae_softc_t *sc)
{
	ae_rxd_t *rxd;
	struct ifnet *ifp;
	uint16_t flags;
	int count;

	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;

	/*
	 * Synchronize DMA buffers.
	 */
	bus_dmamap_sync(sc->dma_rxd_tag, sc->dma_rxd_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (count = 0;; count++) {
		rxd = (ae_rxd_t *)(sc->rxd_base + sc->rxd_cur);
		flags = le16toh(rxd->flags);
		if ((flags & AE_RXD_UPDATE) == 0)
			break;
		rxd->flags = htole16(flags & ~AE_RXD_UPDATE);
		/* Update stats. */
		ae_update_stats_rx(flags, &sc->stats);

		/*
		 * Update position index.
		 */
		sc->rxd_cur = (sc->rxd_cur + 1) % AE_RXD_COUNT_DEFAULT;

		if ((flags & AE_RXD_SUCCESS) != 0)
			ae_rxeof(sc, rxd);
		else
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
	}

	if (count > 0) {
		bus_dmamap_sync(sc->dma_rxd_tag, sc->dma_rxd_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		/*
		 * Update Rx index.
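		 * Hand the consumed descriptors back to the chip through
		 * the Rx mailbox register.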
		 */
		AE_WRITE_2(sc, AE_MB_RXD_IDX_REG, sc->rxd_cur);
	}
}

static void
ae_watchdog(ae_softc_t *sc)
{
	struct ifnet *ifp;

	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));
	AE_LOCK_ASSERT(sc);
	ifp = sc->ifp;

	if (sc->wd_timer == 0 || --sc->wd_timer != 0)
		return;	/* Nothing to do. */

	if ((sc->flags & AE_FLAG_LINK) == 0)
		if_printf(ifp, "watchdog timeout (missed link).\n");
	else
		if_printf(ifp, "watchdog timeout - resetting.\n");

	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	ae_init_locked(sc);
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		ae_start_locked(ifp);
}

static void
ae_tick(void *arg)
{
	ae_softc_t *sc;
	struct mii_data *mii;

	sc = (ae_softc_t *)arg;
	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));
	AE_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->miibus);
	mii_tick(mii);
	ae_watchdog(sc);	/* Watchdog check. */
	callout_reset(&sc->tick_ch, hz, ae_tick, sc);
}

static void
ae_rxvlan(ae_softc_t *sc)
{
	struct ifnet *ifp;
	uint32_t val;

	AE_LOCK_ASSERT(sc);
	ifp = sc->ifp;
	val = AE_READ_4(sc, AE_MAC_REG);
	val &= ~AE_MAC_RMVLAN_EN;
	if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0)
		val |= AE_MAC_RMVLAN_EN;
	AE_WRITE_4(sc, AE_MAC_REG, val);
}

static u_int
ae_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t crc, *mchash = arg;

	crc = ether_crc32_be(LLADDR(sdl), ETHER_ADDR_LEN);
	mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f);

	return (1);
}

static void
ae_rxfilter(ae_softc_t *sc)
{
	struct ifnet *ifp;
	uint32_t mchash[2];
	uint32_t rxcfg;

	KASSERT(sc != NULL, ("[ae, %d]: sc is NULL!", __LINE__));

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;

	rxcfg = AE_READ_4(sc, AE_MAC_REG);
	rxcfg &= ~(AE_MAC_MCAST_EN | AE_MAC_BCAST_EN | AE_MAC_PROMISC_EN);

	if ((ifp->if_flags & IFF_BROADCAST) != 0)
		rxcfg |= AE_MAC_BCAST_EN;
	if ((ifp->if_flags & IFF_PROMISC) != 0)
		rxcfg |= AE_MAC_PROMISC_EN;
	if ((ifp->if_flags & IFF_ALLMULTI) != 0)
		rxcfg |= AE_MAC_MCAST_EN;

	/*
	 * Wipe old settings.
	 */
	AE_WRITE_4(sc, AE_REG_MHT0, 0);
	AE_WRITE_4(sc, AE_REG_MHT1, 0);
	if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
		AE_WRITE_4(sc, AE_REG_MHT0, 0xffffffff);
		AE_WRITE_4(sc, AE_REG_MHT1, 0xffffffff);
		AE_WRITE_4(sc, AE_MAC_REG, rxcfg);
		return;
	}

	/*
	 * Load multicast tables.
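	 * ae_hash_maddr() above folds each address with ether_crc32_be():
	 * bit 31 of the CRC selects the MHT register, bits 26-30 the bit
	 * within it.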
	 */
	bzero(mchash, sizeof(mchash));
	if_foreach_llmaddr(ifp, ae_hash_maddr, &mchash);
	AE_WRITE_4(sc, AE_REG_MHT0, mchash[0]);
	AE_WRITE_4(sc, AE_REG_MHT1, mchash[1]);
	AE_WRITE_4(sc, AE_MAC_REG, rxcfg);
}

static int
ae_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ae_softc *sc;
	struct ifreq *ifr;
	struct mii_data *mii;
	int error, mask;

	sc = ifp->if_softc;
	ifr = (struct ifreq *)data;
	error = 0;

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			AE_LOCK(sc);
			ifp->if_mtu = ifr->ifr_mtu;
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				ae_init_locked(sc);
			}
			AE_UNLOCK(sc);
		}
		break;
	case SIOCSIFFLAGS:
		AE_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				if (((ifp->if_flags ^ sc->if_flags)
				    & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
					ae_rxfilter(sc);
			} else {
				if ((sc->flags & AE_FLAG_DETACH) == 0)
					ae_init_locked(sc);
			}
		} else {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
				ae_stop(sc);
		}
		sc->if_flags = ifp->if_flags;
		AE_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		AE_LOCK(sc);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			ae_rxfilter(sc);
		AE_UNLOCK(sc);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		AE_LOCK(sc);
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			ae_rxvlan(sc);
		}
		VLAN_CAPABILITIES(ifp);
		AE_UNLOCK(sc);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

static void
ae_stop(ae_softc_t *sc)
{
	struct ifnet *ifp;
	int i;

	AE_LOCK_ASSERT(sc);

	ifp = sc->ifp;
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->flags &= ~AE_FLAG_LINK;
	sc->wd_timer = 0;	/* Cancel watchdog. */
	callout_stop(&sc->tick_ch);

	/*
	 * Clear and disable interrupts.
	 */
	AE_WRITE_4(sc, AE_IMR_REG, 0);
	AE_WRITE_4(sc, AE_ISR_REG, 0xffffffff);

	/*
	 * Stop Rx/Tx MACs.
	 */
	ae_stop_txmac(sc);
	ae_stop_rxmac(sc);

	/*
	 * Stop DMA engines.
	 */
	AE_WRITE_1(sc, AE_DMAREAD_REG, ~AE_DMAREAD_EN);
	AE_WRITE_1(sc, AE_DMAWRITE_REG, ~AE_DMAWRITE_EN);

	/*
	 * Wait for everything to enter idle state.
	 */
	for (i = 0; i < AE_IDLE_TIMEOUT; i++) {
		if (AE_READ_4(sc, AE_IDLE_REG) == 0)
			break;
		DELAY(100);
	}
	if (i == AE_IDLE_TIMEOUT)
		device_printf(sc->dev, "could not enter idle state in stop.\n");
}

static void
ae_update_stats_tx(uint16_t flags, ae_stats_t *stats)
{

	if ((flags & AE_TXS_BCAST) != 0)
		stats->tx_bcast++;
	if ((flags & AE_TXS_MCAST) != 0)
		stats->tx_mcast++;
	if ((flags & AE_TXS_PAUSE) != 0)
		stats->tx_pause++;
	if ((flags & AE_TXS_CTRL) != 0)
		stats->tx_ctrl++;
	if ((flags & AE_TXS_DEFER) != 0)
		stats->tx_defer++;
	if ((flags & AE_TXS_EXCDEFER) != 0)
		stats->tx_excdefer++;
	if ((flags & AE_TXS_SINGLECOL) != 0)
		stats->tx_singlecol++;
	if ((flags & AE_TXS_MULTICOL) != 0)
		stats->tx_multicol++;
	if ((flags & AE_TXS_LATECOL) != 0)
		stats->tx_latecol++;
	if ((flags & AE_TXS_ABORTCOL) != 0)
		stats->tx_abortcol++;
	if ((flags & AE_TXS_UNDERRUN) != 0)
		stats->tx_underrun++;
}

static void
ae_update_stats_rx(uint16_t flags, ae_stats_t *stats)
{

	if ((flags & AE_RXD_BCAST) != 0)
		stats->rx_bcast++;
	if ((flags & AE_RXD_MCAST) != 0)
		stats->rx_mcast++;
	if ((flags & AE_RXD_PAUSE) != 0)
		stats->rx_pause++;
	if ((flags & AE_RXD_CTRL) != 0)
		stats->rx_ctrl++;
	if ((flags & AE_RXD_CRCERR) != 0)
		stats->rx_crcerr++;
	if ((flags & AE_RXD_CODEERR) != 0)
		stats->rx_codeerr++;
	if ((flags & AE_RXD_RUNT) != 0)
		stats->rx_runt++;
	if ((flags & AE_RXD_FRAG) != 0)
		stats->rx_frag++;
	if ((flags & AE_RXD_TRUNC) != 0)
		stats->rx_trunc++;
	if ((flags & AE_RXD_ALIGN) != 0)
		stats->rx_align++;
}