/*-
 * Copyright (c) 2009, Pyun YongHyeon <yongari@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Driver for Atheros AR813x/AR815x/AR816x PCIe Ethernet. */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_llc.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <machine/bus.h>
#include <machine/in_cksum.h>

#include <dev/alc/if_alcreg.h>
#include <dev/alc/if_alcvar.h>

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"
#undef ALC_USE_CUSTOM_CSUM

#ifdef ALC_USE_CUSTOM_CSUM
#define	ALC_CSUM_FEATURES	(CSUM_TCP | CSUM_UDP)
#else
#define	ALC_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP)
#endif

MODULE_DEPEND(alc, pci, 1, 1, 1);
MODULE_DEPEND(alc, ether, 1, 1, 1);
MODULE_DEPEND(alc, miibus, 1, 1, 1);

/* Tunables. */
static int msi_disable = 0;
static int msix_disable = 0;
TUNABLE_INT("hw.alc.msi_disable", &msi_disable);
TUNABLE_INT("hw.alc.msix_disable", &msix_disable);

/*
 * Devices supported by this driver.
 */
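/*
 * Each entry gives the PCI vendor and device ID, the maximum
 * supported frame length for that chip, and a description string.
 */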
static struct alc_ident alc_ident_table[] = {
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8131, 9 * 1024,
		"Atheros AR8131 PCIe Gigabit Ethernet" },
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8132, 9 * 1024,
		"Atheros AR8132 PCIe Fast Ethernet" },
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8151, 6 * 1024,
		"Atheros AR8151 v1.0 PCIe Gigabit Ethernet" },
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8151_V2, 6 * 1024,
		"Atheros AR8151 v2.0 PCIe Gigabit Ethernet" },
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8152_B, 6 * 1024,
		"Atheros AR8152 v1.1 PCIe Fast Ethernet" },
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8152_B2, 6 * 1024,
		"Atheros AR8152 v2.0 PCIe Fast Ethernet" },
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8161, 9 * 1024,
		"Atheros AR8161 PCIe Gigabit Ethernet" },
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8162, 9 * 1024,
		"Atheros AR8162 PCIe Fast Ethernet" },
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8171, 9 * 1024,
		"Atheros AR8171 PCIe Gigabit Ethernet" },
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_AR8172, 9 * 1024,
		"Atheros AR8172 PCIe Fast Ethernet" },
	{ VENDORID_ATHEROS, DEVICEID_ATHEROS_E2200, 9 * 1024,
		"Killer E2200 Gigabit Ethernet" },
	{ 0, 0, 0, NULL}
};

static void	alc_aspm(struct alc_softc *, int, int);
static void	alc_aspm_813x(struct alc_softc *, int);
static void	alc_aspm_816x(struct alc_softc *, int);
static int	alc_attach(device_t);
static int	alc_check_boundary(struct alc_softc *);
static void	alc_config_msi(struct alc_softc *);
static int	alc_detach(device_t);
static void	alc_disable_l0s_l1(struct alc_softc *);
static int	alc_dma_alloc(struct alc_softc *);
static void	alc_dma_free(struct alc_softc *);
static void	alc_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static void	alc_dsp_fixup(struct alc_softc *, int);
static int	alc_encap(struct alc_softc *, struct mbuf **);
static struct alc_ident *
		alc_find_ident(device_t);
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf *
		alc_fixup_rx(struct ifnet *, struct mbuf *);
#endif
static void	alc_get_macaddr(struct alc_softc *);
static void	alc_get_macaddr_813x(struct alc_softc *);
static void	alc_get_macaddr_816x(struct alc_softc *);
static void	alc_get_macaddr_par(struct alc_softc *);
static void	alc_init(void *);
static void	alc_init_cmb(struct alc_softc *);
static void	alc_init_locked(struct alc_softc *);
static void	alc_init_rr_ring(struct alc_softc *);
static int	alc_init_rx_ring(struct alc_softc *);
static void	alc_init_smb(struct alc_softc *);
static void	alc_init_tx_ring(struct alc_softc *);
static void	alc_int_task(void *, int);
static int	alc_intr(void *);
static int	alc_ioctl(struct ifnet *, u_long, caddr_t);
static void	alc_mac_config(struct alc_softc *);
static uint32_t	alc_mii_readreg_813x(struct alc_softc *, int, int);
static uint32_t	alc_mii_readreg_816x(struct alc_softc *, int, int);
static uint32_t	alc_mii_writereg_813x(struct alc_softc *, int, int, int);
static uint32_t	alc_mii_writereg_816x(struct alc_softc *, int, int, int);
static int	alc_miibus_readreg(device_t, int, int);
static void	alc_miibus_statchg(device_t);
static int	alc_miibus_writereg(device_t, int, int, int);
static uint32_t	alc_miidbg_readreg(struct alc_softc *, int);
static uint32_t	alc_miidbg_writereg(struct alc_softc *, int, int);
static uint32_t	alc_miiext_readreg(struct alc_softc *, int, int);
static uint32_t	alc_miiext_writereg(struct alc_softc *, int, int, int);
static int	alc_mediachange(struct ifnet *);
static int	alc_mediachange_locked(struct alc_softc *);
static void	alc_mediastatus(struct ifnet *, struct ifmediareq *);
static int	alc_newbuf(struct alc_softc *, struct alc_rxdesc *);
static void	alc_osc_reset(struct alc_softc *);
static void	alc_phy_down(struct alc_softc *);
static void	alc_phy_reset(struct alc_softc *);
static void	alc_phy_reset_813x(struct alc_softc *);
static void	alc_phy_reset_816x(struct alc_softc *);
static int	alc_probe(device_t);
static void	alc_reset(struct alc_softc *);
static int	alc_resume(device_t);
static void	alc_rxeof(struct alc_softc *, struct rx_rdesc *);
static int	alc_rxintr(struct alc_softc *, int);
static void	alc_rxfilter(struct alc_softc *);
static void	alc_rxvlan(struct alc_softc *);
static void	alc_setlinkspeed(struct alc_softc *);
static void	alc_setwol(struct alc_softc *);
static void	alc_setwol_813x(struct alc_softc *);
static void	alc_setwol_816x(struct alc_softc *);
static int	alc_shutdown(device_t);
static void	alc_start(struct ifnet *);
static void	alc_start_locked(struct ifnet *);
static void	alc_start_queue(struct alc_softc *);
static void	alc_stats_clear(struct alc_softc *);
static void	alc_stats_update(struct alc_softc *);
static void	alc_stop(struct alc_softc *);
static void	alc_stop_mac(struct alc_softc *);
static void	alc_stop_queue(struct alc_softc *);
static int	alc_suspend(device_t);
static void	alc_sysctl_node(struct alc_softc *);
static void	alc_tick(void *);
static void	alc_txeof(struct alc_softc *);
static void	alc_watchdog(struct alc_softc *);
static int	sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int);
static int	sysctl_hw_alc_proc_limit(SYSCTL_HANDLER_ARGS);
static int	sysctl_hw_alc_int_mod(SYSCTL_HANDLER_ARGS);

static device_method_t alc_methods[] = {
	/* Device interface. */
	DEVMETHOD(device_probe,		alc_probe),
	DEVMETHOD(device_attach,	alc_attach),
	DEVMETHOD(device_detach,	alc_detach),
	DEVMETHOD(device_shutdown,	alc_shutdown),
	DEVMETHOD(device_suspend,	alc_suspend),
	DEVMETHOD(device_resume,	alc_resume),

	/* MII interface. */
	DEVMETHOD(miibus_readreg,	alc_miibus_readreg),
	DEVMETHOD(miibus_writereg,	alc_miibus_writereg),
	DEVMETHOD(miibus_statchg,	alc_miibus_statchg),

	{ NULL, NULL }
};

static driver_t alc_driver = {
	"alc",
	alc_methods,
	sizeof(struct alc_softc)
};

static devclass_t alc_devclass;

DRIVER_MODULE(alc, pci, alc_driver, alc_devclass, 0, 0);
DRIVER_MODULE(miibus, alc, miibus_driver, miibus_devclass, 0, 0);

static struct resource_spec alc_res_spec_mem[] = {
	{ SYS_RES_MEMORY,	PCIR_BAR(0),	RF_ACTIVE },
	{ -1,			0,		0 }
};

static struct resource_spec alc_irq_spec_legacy[] = {
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
	{ -1,			0,		0 }
};

static struct resource_spec alc_irq_spec_msi[] = {
	{ SYS_RES_IRQ,		1,		RF_ACTIVE },
	{ -1,			0,		0 }
};

static struct resource_spec alc_irq_spec_msix[] = {
	{ SYS_RES_IRQ,		1,		RF_ACTIVE },
	{ -1,			0,		0 }
};

static uint32_t alc_dma_burst[] = { 128, 256, 512, 1024, 2048, 4096, 0 };

static int
alc_miibus_readreg(device_t dev, int phy, int reg)
{
	struct alc_softc *sc;
	int v;

	sc = device_get_softc(dev);
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
		v = alc_mii_readreg_816x(sc, phy, reg);
	else
		v = alc_mii_readreg_813x(sc, phy, reg);
	return (v);
}

static uint32_t
alc_mii_readreg_813x(struct alc_softc *sc, int phy, int reg)
{
	uint32_t v;
	int i;

	/*
	 * For AR8132 fast ethernet controller, do not report 1000baseT
	 * capability to mii(4). Even though AR8132 uses the same
	 * model/revision number of F1 gigabit PHY, the PHY has no
	 * ability to establish 1000baseT link.
	 */
	if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0 &&
	    reg == MII_EXTSR)
		return (0);

	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->alc_dev, "phy read timeout : %d\n", reg);
		return (0);
	}

	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}

static uint32_t
alc_mii_readreg_816x(struct alc_softc *sc, int phy, int reg)
{
	uint32_t clk, v;
	int i;

	if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
		clk = MDIO_CLK_25_128;
	else
		clk = MDIO_CLK_25_4;
	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | clk | MDIO_REG_ADDR(reg));
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & MDIO_OP_BUSY) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->alc_dev, "phy read timeout : %d\n", reg);
		return (0);
	}

	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}

static int
alc_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct alc_softc *sc;
	int v;

	sc = device_get_softc(dev);
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
		v = alc_mii_writereg_816x(sc, phy, reg, val);
	else
		v = alc_mii_writereg_813x(sc, phy, reg, val);
	return (v);
}

static uint32_t
alc_mii_writereg_813x(struct alc_softc *sc, int phy, int reg, int val)
{
	uint32_t v;
	int i;

	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    (val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT |
	    MDIO_SUP_PREAMBLE | MDIO_CLK_25_4 | MDIO_REG_ADDR(reg));
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & (MDIO_OP_EXECUTE | MDIO_OP_BUSY)) == 0)
			break;
	}

	if (i == 0)
		device_printf(sc->alc_dev, "phy write timeout : %d\n", reg);

	return (0);
}

static uint32_t
alc_mii_writereg_816x(struct alc_softc *sc, int phy, int reg, int val)
{
	uint32_t clk, v;
	int i;

	if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
		clk = MDIO_CLK_25_128;
	else
		clk = MDIO_CLK_25_4;
	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    ((val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT) | MDIO_REG_ADDR(reg) |
	    MDIO_SUP_PREAMBLE | clk);
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & MDIO_OP_BUSY) == 0)
			break;
	}

	if (i == 0)
		device_printf(sc->alc_dev, "phy write timeout : %d\n", reg);

	return (0);
}

static void
alc_miibus_statchg(device_t dev)
{
	struct alc_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	uint32_t reg;

	sc = device_get_softc(dev);

	mii = device_get_softc(sc->alc_miibus);
	ifp = sc->alc_ifp;
	if (mii == NULL || ifp == NULL ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	sc->alc_flags &= ~ALC_FLAG_LINK;
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_10_T:
		case IFM_100_TX:
			sc->alc_flags |= ALC_FLAG_LINK;
			break;
		case IFM_1000_T:
			if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0)
				sc->alc_flags |= ALC_FLAG_LINK;
			break;
		default:
			break;
		}
	}
	/* Stop Rx/Tx MACs. */
	alc_stop_mac(sc);

	/* Program MACs with resolved speed/duplex/flow-control. */
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
		alc_start_queue(sc);
		alc_mac_config(sc);
		/* Re-enable Tx/Rx MACs. */
		reg = CSR_READ_4(sc, ALC_MAC_CFG);
		reg |= MAC_CFG_TX_ENB | MAC_CFG_RX_ENB;
		CSR_WRITE_4(sc, ALC_MAC_CFG, reg);
	}
	alc_aspm(sc, 0, IFM_SUBTYPE(mii->mii_media_active));
	alc_dsp_fixup(sc, IFM_SUBTYPE(mii->mii_media_active));
}
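
/*
 * PHY debug registers are accessed indirectly: write the register
 * index to ALC_MII_DBG_ADDR, then transfer data through
 * ALC_MII_DBG_DATA.
 */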
static uint32_t
alc_miidbg_readreg(struct alc_softc *sc, int reg)
{

	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
	    reg);
	return (alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA));
}

static uint32_t
alc_miidbg_writereg(struct alc_softc *sc, int reg, int val)
{

	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
	    reg);
	return (alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, val));
}

static uint32_t
alc_miiext_readreg(struct alc_softc *sc, int devaddr, int reg)
{
	uint32_t clk, v;
	int i;

	CSR_WRITE_4(sc, ALC_EXT_MDIO, EXT_MDIO_REG(reg) |
	    EXT_MDIO_DEVADDR(devaddr));
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
		clk = MDIO_CLK_25_128;
	else
		clk = MDIO_CLK_25_4;
	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_READ |
	    MDIO_SUP_PREAMBLE | clk | MDIO_MODE_EXT);
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & MDIO_OP_BUSY) == 0)
			break;
	}

	if (i == 0) {
		device_printf(sc->alc_dev, "phy ext read timeout : %d, %d\n",
		    devaddr, reg);
		return (0);
	}

	return ((v & MDIO_DATA_MASK) >> MDIO_DATA_SHIFT);
}

static uint32_t
alc_miiext_writereg(struct alc_softc *sc, int devaddr, int reg, int val)
{
	uint32_t clk, v;
	int i;

	CSR_WRITE_4(sc, ALC_EXT_MDIO, EXT_MDIO_REG(reg) |
	    EXT_MDIO_DEVADDR(devaddr));
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0)
		clk = MDIO_CLK_25_128;
	else
		clk = MDIO_CLK_25_4;
	CSR_WRITE_4(sc, ALC_MDIO, MDIO_OP_EXECUTE | MDIO_OP_WRITE |
	    ((val & MDIO_DATA_MASK) << MDIO_DATA_SHIFT) |
	    MDIO_SUP_PREAMBLE | clk | MDIO_MODE_EXT);
	for (i = ALC_PHY_TIMEOUT; i > 0; i--) {
		DELAY(5);
		v = CSR_READ_4(sc, ALC_MDIO);
		if ((v & MDIO_OP_BUSY) == 0)
			break;
	}

	if (i == 0)
		device_printf(sc->alc_dev, "phy ext write timeout : %d, %d\n",
		    devaddr, reg);

	return (0);
}

static void
alc_dsp_fixup(struct alc_softc *sc, int media)
{
	uint16_t agc, len, val;

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0)
		return;
	if (AR816X_REV(sc->alc_rev) >= AR816X_REV_C0)
		return;

	/*
	 * Vendor PHY magic.
	 * 1000BT/AZ, wrong cable length.
	 */
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
		len = alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL6);
		len = (len >> EXT_CLDCTL6_CAB_LEN_SHIFT) &
		    EXT_CLDCTL6_CAB_LEN_MASK;
		agc = alc_miidbg_readreg(sc, MII_DBG_AGC);
		agc = (agc >> DBG_AGC_2_VGA_SHIFT) & DBG_AGC_2_VGA_MASK;
		if ((media == IFM_1000_T && len > EXT_CLDCTL6_CAB_LEN_SHORT1G &&
		    agc > DBG_AGC_LONG1G_LIMT) ||
		    (media == IFM_100_TX && len > DBG_AGC_LONG100M_LIMT &&
		    agc > DBG_AGC_LONG1G_LIMT)) {
			alc_miidbg_writereg(sc, MII_DBG_AZ_ANADECT,
			    DBG_AZ_ANADECT_LONG);
			val = alc_miiext_readreg(sc, MII_EXT_ANEG,
			    MII_EXT_ANEG_AFE);
			val |= ANEG_AFEE_10BT_100M_TH;
			alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE,
			    val);
		} else {
			alc_miidbg_writereg(sc, MII_DBG_AZ_ANADECT,
			    DBG_AZ_ANADECT_DEFAULT);
			val = alc_miiext_readreg(sc, MII_EXT_ANEG,
			    MII_EXT_ANEG_AFE);
			val &= ~ANEG_AFEE_10BT_100M_TH;
			alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE,
			    val);
		}
		if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0 &&
		    AR816X_REV(sc->alc_rev) == AR816X_REV_B0) {
			if (media == IFM_1000_T) {
				/*
				 * Giga link threshold, raise the tolerance of
				 * noise 50%.
				 */
				val = alc_miidbg_readreg(sc, MII_DBG_MSE20DB);
				val &= ~DBG_MSE20DB_TH_MASK;
				val |= (DBG_MSE20DB_TH_HI <<
				    DBG_MSE20DB_TH_SHIFT);
				alc_miidbg_writereg(sc, MII_DBG_MSE20DB, val);
			} else if (media == IFM_100_TX)
				alc_miidbg_writereg(sc, MII_DBG_MSE16DB,
				    DBG_MSE16DB_UP);
		}
	} else {
		val = alc_miiext_readreg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE);
		val &= ~ANEG_AFEE_10BT_100M_TH;
		alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_AFE, val);
		if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0 &&
		    AR816X_REV(sc->alc_rev) == AR816X_REV_B0) {
			alc_miidbg_writereg(sc, MII_DBG_MSE16DB,
			    DBG_MSE16DB_DOWN);
			val = alc_miidbg_readreg(sc, MII_DBG_MSE20DB);
			val &= ~DBG_MSE20DB_TH_MASK;
			val |= (DBG_MSE20DB_TH_DEFAULT << DBG_MSE20DB_TH_SHIFT);
			alc_miidbg_writereg(sc, MII_DBG_MSE20DB, val);
		}
	}
}

static void
alc_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct alc_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	ALC_LOCK(sc);
	if ((ifp->if_flags & IFF_UP) == 0) {
		ALC_UNLOCK(sc);
		return;
	}
	mii = device_get_softc(sc->alc_miibus);

	mii_pollstat(mii);
	ifmr->ifm_status = mii->mii_media_status;
	ifmr->ifm_active = mii->mii_media_active;
	ALC_UNLOCK(sc);
}

static int
alc_mediachange(struct ifnet *ifp)
{
	struct alc_softc *sc;
	int error;

	sc = ifp->if_softc;
	ALC_LOCK(sc);
	error = alc_mediachange_locked(sc);
	ALC_UNLOCK(sc);

	return (error);
}

static int
alc_mediachange_locked(struct alc_softc *sc)
{
	struct mii_data *mii;
	struct mii_softc *miisc;
	int error;

	ALC_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->alc_miibus);
	LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
		PHY_RESET(miisc);
	error = mii_mediachg(mii);

	return (error);
}

static struct alc_ident *
alc_find_ident(device_t dev)
{
	struct alc_ident *ident;
	uint16_t vendor, devid;

	vendor = pci_get_vendor(dev);
	devid = pci_get_device(dev);
	for (ident = alc_ident_table; ident->name != NULL; ident++) {
		if (vendor == ident->vendorid && devid == ident->deviceid)
			return (ident);
	}

	return (NULL);
}

static int
alc_probe(device_t dev)
{
	struct alc_ident *ident;

	ident = alc_find_ident(dev);
	if (ident != NULL) {
		device_set_desc(dev, ident->name);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

static void
alc_get_macaddr(struct alc_softc *sc)
{

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
		alc_get_macaddr_816x(sc);
	else
		alc_get_macaddr_813x(sc);
}

static void
alc_get_macaddr_813x(struct alc_softc *sc)
{
	uint32_t opt;
	uint16_t val;
	int eeprom, i;

	eeprom = 0;
	opt = CSR_READ_4(sc, ALC_OPT_CFG);
	if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_OTP_SEL) != 0 &&
	    (CSR_READ_4(sc, ALC_TWSI_DEBUG) & TWSI_DEBUG_DEV_EXIST) != 0) {
		/*
		 * EEPROM found, let TWSI reload EEPROM configuration.
		 * This will set ethernet address of controller.
		 */
		eeprom++;
		switch (sc->alc_ident->deviceid) {
		case DEVICEID_ATHEROS_AR8131:
		case DEVICEID_ATHEROS_AR8132:
			if ((opt & OPT_CFG_CLK_ENB) == 0) {
				opt |= OPT_CFG_CLK_ENB;
				CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
				CSR_READ_4(sc, ALC_OPT_CFG);
				DELAY(1000);
			}
			break;
		case DEVICEID_ATHEROS_AR8151:
		case DEVICEID_ATHEROS_AR8151_V2:
		case DEVICEID_ATHEROS_AR8152_B:
		case DEVICEID_ATHEROS_AR8152_B2:
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x00);
			val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val & 0xFF7F);
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x3B);
			val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val | 0x0008);
			DELAY(20);
			break;
		}

		CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
		    CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
		CSR_WRITE_4(sc, ALC_WOL_CFG, 0);
		CSR_READ_4(sc, ALC_WOL_CFG);

		CSR_WRITE_4(sc, ALC_TWSI_CFG, CSR_READ_4(sc, ALC_TWSI_CFG) |
		    TWSI_CFG_SW_LD_START);
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			if ((CSR_READ_4(sc, ALC_TWSI_CFG) &
			    TWSI_CFG_SW_LD_START) == 0)
				break;
		}
		if (i == 0)
			device_printf(sc->alc_dev,
			    "reloading EEPROM timeout!\n");
	} else {
		if (bootverbose)
			device_printf(sc->alc_dev, "EEPROM not found!\n");
	}
	if (eeprom != 0) {
		switch (sc->alc_ident->deviceid) {
		case DEVICEID_ATHEROS_AR8131:
		case DEVICEID_ATHEROS_AR8132:
			if ((opt & OPT_CFG_CLK_ENB) != 0) {
				opt &= ~OPT_CFG_CLK_ENB;
				CSR_WRITE_4(sc, ALC_OPT_CFG, opt);
				CSR_READ_4(sc, ALC_OPT_CFG);
				DELAY(1000);
			}
			break;
		case DEVICEID_ATHEROS_AR8151:
		case DEVICEID_ATHEROS_AR8151_V2:
		case DEVICEID_ATHEROS_AR8152_B:
		case DEVICEID_ATHEROS_AR8152_B2:
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x00);
			val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val | 0x0080);
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_ADDR, 0x3B);
			val = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA);
			alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
			    ALC_MII_DBG_DATA, val & 0xFFF7);
			DELAY(20);
			break;
		}
	}

	alc_get_macaddr_par(sc);
}
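
/*
 * AR816x family: try to reload the station address via TWSI first,
 * fall back to EEPROM/FLASH, and finally read the result from the
 * PAR registers.
 */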
static void
alc_get_macaddr_816x(struct alc_softc *sc)
{
	uint32_t reg;
	int i, reloaded;

	reloaded = 0;
	/* Try to reload station address via TWSI. */
	for (i = 100; i > 0; i--) {
		reg = CSR_READ_4(sc, ALC_SLD);
		if ((reg & (SLD_PROGRESS | SLD_START)) == 0)
			break;
		DELAY(1000);
	}
	if (i != 0) {
		CSR_WRITE_4(sc, ALC_SLD, reg | SLD_START);
		for (i = 100; i > 0; i--) {
			DELAY(1000);
			reg = CSR_READ_4(sc, ALC_SLD);
			if ((reg & SLD_START) == 0)
				break;
		}
		if (i != 0)
			reloaded++;
		else if (bootverbose)
			device_printf(sc->alc_dev,
			    "reloading station address via TWSI timed out!\n");
	}

	/* Try to reload station address from EEPROM or FLASH. */
	if (reloaded == 0) {
		reg = CSR_READ_4(sc, ALC_EEPROM_LD);
		if ((reg & (EEPROM_LD_EEPROM_EXIST |
		    EEPROM_LD_FLASH_EXIST)) != 0) {
			for (i = 100; i > 0; i--) {
				reg = CSR_READ_4(sc, ALC_EEPROM_LD);
				if ((reg & (EEPROM_LD_PROGRESS |
				    EEPROM_LD_START)) == 0)
					break;
				DELAY(1000);
			}
			if (i != 0) {
				CSR_WRITE_4(sc, ALC_EEPROM_LD, reg |
				    EEPROM_LD_START);
				for (i = 100; i > 0; i--) {
					DELAY(1000);
					reg = CSR_READ_4(sc, ALC_EEPROM_LD);
					if ((reg & EEPROM_LD_START) == 0)
						break;
				}
			} else if (bootverbose)
				device_printf(sc->alc_dev,
				    "reloading EEPROM/FLASH timed out!\n");
		}
	}

	alc_get_macaddr_par(sc);
}

static void
alc_get_macaddr_par(struct alc_softc *sc)
{
	uint32_t ea[2];

	ea[0] = CSR_READ_4(sc, ALC_PAR0);
	ea[1] = CSR_READ_4(sc, ALC_PAR1);
	sc->alc_eaddr[0] = (ea[1] >> 8) & 0xFF;
	sc->alc_eaddr[1] = (ea[1] >> 0) & 0xFF;
	sc->alc_eaddr[2] = (ea[0] >> 24) & 0xFF;
	sc->alc_eaddr[3] = (ea[0] >> 16) & 0xFF;
	sc->alc_eaddr[4] = (ea[0] >> 8) & 0xFF;
	sc->alc_eaddr[5] = (ea[0] >> 0) & 0xFF;
}

static void
alc_disable_l0s_l1(struct alc_softc *sc)
{
	uint32_t pmcfg;

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		/* Another magic from vendor. */
		pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
		pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_CLK_SWH_L1 |
		    PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB |
		    PM_CFG_MAC_ASPM_CHK | PM_CFG_SERDES_PD_EX_L1);
		pmcfg |= PM_CFG_SERDES_BUDS_RX_L1_ENB |
		    PM_CFG_SERDES_PLL_L1_ENB | PM_CFG_SERDES_L1_ENB;
		CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
	}
}

static void
alc_phy_reset(struct alc_softc *sc)
{

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
		alc_phy_reset_816x(sc);
	else
		alc_phy_reset_813x(sc);
}

static void
alc_phy_reset_813x(struct alc_softc *sc)
{
	uint16_t data;

	/* Reset magic from Linux. */
	CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_SEL_ANA_RESET);
	CSR_READ_2(sc, ALC_GPHY_CFG);
	DELAY(10 * 1000);

	CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_EXT_RESET |
	    GPHY_CFG_SEL_ANA_RESET);
	CSR_READ_2(sc, ALC_GPHY_CFG);
	DELAY(10 * 1000);

	/* DSP fixup, Vendor magic. */
	if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B) {
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x000A);
		data = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA);
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, data & 0xDFFF);
	}
	if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) {
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x003B);
		data = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA);
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, data & 0xFFF7);
		DELAY(20 * 1000);
	}
	if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151) {
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x0029);
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, 0x929D);
	}
	if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8131 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8132 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 ||
	    sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) {
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_ADDR, 0x0029);
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    ALC_MII_DBG_DATA, 0xB6DD);
	}

	/* Load DSP codes, vendor magic. */
	data = ANA_LOOP_SEL_10BT | ANA_EN_MASK_TB | ANA_EN_10BT_IDLE |
	    ((1 << ANA_INTERVAL_SEL_TIMER_SHIFT) & ANA_INTERVAL_SEL_TIMER_MASK);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG18);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((2 << ANA_SERDES_CDR_BW_SHIFT) & ANA_SERDES_CDR_BW_MASK) |
	    ANA_SERDES_EN_DEEM | ANA_SERDES_SEL_HSP | ANA_SERDES_EN_PLL |
	    ANA_SERDES_EN_LCKDT;
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG5);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((44 << ANA_LONG_CABLE_TH_100_SHIFT) &
	    ANA_LONG_CABLE_TH_100_MASK) |
	    ((33 << ANA_SHORT_CABLE_TH_100_SHIFT) &
	    ANA_SHORT_CABLE_TH_100_MASK) |
	    ANA_BP_BAD_LINK_ACCUM | ANA_BP_SMALL_BW;
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG54);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((11 << ANA_IECHO_ADJ_3_SHIFT) & ANA_IECHO_ADJ_3_MASK) |
	    ((11 << ANA_IECHO_ADJ_2_SHIFT) & ANA_IECHO_ADJ_2_MASK) |
	    ((8 << ANA_IECHO_ADJ_1_SHIFT) & ANA_IECHO_ADJ_1_MASK) |
	    ((8 << ANA_IECHO_ADJ_0_SHIFT) & ANA_IECHO_ADJ_0_MASK);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG4);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);

	data = ((7 << ANA_MANUL_SWICH_ON_SHIFT) & ANA_MANUL_SWICH_ON_MASK) |
	    ANA_RESTART_CAL | ANA_MAN_ENABLE | ANA_SEL_HSP | ANA_EN_HB |
	    ANA_OEN_125M;
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_ADDR, MII_ANA_CFG0);
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA, data);
	DELAY(1000);

	/* Disable hibernation. */
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
	    0x0029);
	data = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA);
	data &= ~0x8000;
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA,
	    data);

	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_ADDR,
	    0x000B);
	data = alc_miibus_readreg(sc->alc_dev, sc->alc_phyaddr,
	    ALC_MII_DBG_DATA);
	data &= ~0x8000;
	alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, ALC_MII_DBG_DATA,
	    data);
}

static void
alc_phy_reset_816x(struct alc_softc *sc)
{
	uint32_t val;

	val = CSR_READ_4(sc, ALC_GPHY_CFG);
	val &= ~(GPHY_CFG_EXT_RESET | GPHY_CFG_LED_MODE |
	    GPHY_CFG_GATE_25M_ENB | GPHY_CFG_PHY_IDDQ | GPHY_CFG_PHY_PLL_ON |
	    GPHY_CFG_PWDOWN_HW | GPHY_CFG_100AB_ENB);
	val |= GPHY_CFG_SEL_ANA_RESET;
#ifdef notyet
	val |= GPHY_CFG_HIB_PULSE | GPHY_CFG_HIB_EN | GPHY_CFG_SEL_ANA_RESET;
#else
	/* Disable PHY hibernation. */
	val &= ~(GPHY_CFG_HIB_PULSE | GPHY_CFG_HIB_EN);
#endif
	CSR_WRITE_4(sc, ALC_GPHY_CFG, val);
	DELAY(10);
	CSR_WRITE_4(sc, ALC_GPHY_CFG, val | GPHY_CFG_EXT_RESET);
	DELAY(800);

	/* Vendor PHY magic. */
#ifdef notyet
	alc_miidbg_writereg(sc, MII_DBG_LEGCYPS, DBG_LEGCYPS_DEFAULT);
	alc_miidbg_writereg(sc, MII_DBG_SYSMODCTL, DBG_SYSMODCTL_DEFAULT);
	alc_miiext_writereg(sc, MII_EXT_PCS, MII_EXT_VDRVBIAS,
	    EXT_VDRVBIAS_DEFAULT);
#else
	/* Disable PHY hibernation. */
	alc_miidbg_writereg(sc, MII_DBG_LEGCYPS,
	    DBG_LEGCYPS_DEFAULT & ~DBG_LEGCYPS_ENB);
	alc_miidbg_writereg(sc, MII_DBG_HIBNEG,
	    DBG_HIBNEG_DEFAULT & ~(DBG_HIBNEG_PSHIB_EN | DBG_HIBNEG_HIB_PULSE));
	alc_miidbg_writereg(sc, MII_DBG_GREENCFG, DBG_GREENCFG_DEFAULT);
#endif

	/* XXX Disable EEE. */
	val = CSR_READ_4(sc, ALC_LPI_CTL);
	val &= ~LPI_CTL_ENB;
	CSR_WRITE_4(sc, ALC_LPI_CTL, val);
	alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_LOCAL_EEEADV, 0);

	/* PHY power saving. */
	alc_miidbg_writereg(sc, MII_DBG_TST10BTCFG, DBG_TST10BTCFG_DEFAULT);
	alc_miidbg_writereg(sc, MII_DBG_SRDSYSMOD, DBG_SRDSYSMOD_DEFAULT);
	alc_miidbg_writereg(sc, MII_DBG_TST100BTCFG, DBG_TST100BTCFG_DEFAULT);
	alc_miidbg_writereg(sc, MII_DBG_ANACTL, DBG_ANACTL_DEFAULT);
	val = alc_miidbg_readreg(sc, MII_DBG_GREENCFG2);
	val &= ~DBG_GREENCFG2_GATE_DFSE_EN;
	alc_miidbg_writereg(sc, MII_DBG_GREENCFG2, val);

	/* RTL8139C, 120m issue. */
	alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_NLP78,
	    ANEG_NLP78_120M_DEFAULT);
	alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_S3DIG10,
	    ANEG_S3DIG10_DEFAULT);

	if ((sc->alc_flags & ALC_FLAG_LINK_WAR) != 0) {
		/* Turn off half amplitude. */
		val = alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL3);
		val |= EXT_CLDCTL3_BP_CABLE1TH_DET_GT;
		alc_miiext_writereg(sc, MII_EXT_PCS, MII_EXT_CLDCTL3, val);
		/* Turn off Green feature. */
		val = alc_miidbg_readreg(sc, MII_DBG_GREENCFG2);
		val |= DBG_GREENCFG2_BP_GREEN;
		alc_miidbg_writereg(sc, MII_DBG_GREENCFG2, val);
		/* Turn off half bias. */
		val = alc_miiext_readreg(sc, MII_EXT_PCS, MII_EXT_CLDCTL5);
		val |= EXT_CLDCTL5_BP_VD_HLFBIAS;
		alc_miiext_writereg(sc, MII_EXT_PCS, MII_EXT_CLDCTL5, val);
	}
}

static void
alc_phy_down(struct alc_softc *sc)
{
	uint32_t gphy;

	switch (sc->alc_ident->deviceid) {
	case DEVICEID_ATHEROS_AR8161:
	case DEVICEID_ATHEROS_E2200:
	case DEVICEID_ATHEROS_AR8162:
	case DEVICEID_ATHEROS_AR8171:
	case DEVICEID_ATHEROS_AR8172:
		gphy = CSR_READ_4(sc, ALC_GPHY_CFG);
		gphy &= ~(GPHY_CFG_EXT_RESET | GPHY_CFG_LED_MODE |
		    GPHY_CFG_100AB_ENB | GPHY_CFG_PHY_PLL_ON);
		gphy |= GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE |
		    GPHY_CFG_SEL_ANA_RESET;
		gphy |= GPHY_CFG_PHY_IDDQ | GPHY_CFG_PWDOWN_HW;
		CSR_WRITE_4(sc, ALC_GPHY_CFG, gphy);
		break;
	case DEVICEID_ATHEROS_AR8151:
	case DEVICEID_ATHEROS_AR8151_V2:
	case DEVICEID_ATHEROS_AR8152_B:
	case DEVICEID_ATHEROS_AR8152_B2:
		/*
		 * GPHY power down caused more problems on AR8151 v2.0.
		 * When driver is reloaded after GPHY power down,
		 * accesses to PHY/MAC registers hung the system. Only
		 * cold boot recovered from it. I'm not sure whether
		 * AR8151 v1.0 also requires this one though. I don't
		 * have AR8151 v1.0 controller in hand.
		 * The only option left is to isolate the PHY and
		 * initiate power down of the PHY, which in turn saves
		 * more power when the driver is unloaded.
		 */
		alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr,
		    MII_BMCR, BMCR_ISO | BMCR_PDOWN);
		break;
	default:
		/* Force PHY down. */
		CSR_WRITE_2(sc, ALC_GPHY_CFG, GPHY_CFG_EXT_RESET |
		    GPHY_CFG_SEL_ANA_RESET | GPHY_CFG_PHY_IDDQ |
		    GPHY_CFG_PWDOWN_HW);
		DELAY(1000);
		break;
	}
}

static void
alc_aspm(struct alc_softc *sc, int init, int media)
{

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0)
		alc_aspm_816x(sc, init);
	else
		alc_aspm_813x(sc, media);
}

static void
alc_aspm_813x(struct alc_softc *sc, int media)
{
	uint32_t pmcfg;
	uint16_t linkcfg;

	if ((sc->alc_flags & ALC_FLAG_LINK) == 0)
		return;

	pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
	if ((sc->alc_flags & (ALC_FLAG_APS | ALC_FLAG_PCIE)) ==
	    (ALC_FLAG_APS | ALC_FLAG_PCIE))
		linkcfg = CSR_READ_2(sc, sc->alc_expcap +
		    PCIER_LINK_CTL);
	else
		linkcfg = 0;
	pmcfg &= ~PM_CFG_SERDES_PD_EX_L1;
	pmcfg &= ~(PM_CFG_L1_ENTRY_TIMER_MASK | PM_CFG_LCKDET_TIMER_MASK);
	pmcfg |= PM_CFG_MAC_ASPM_CHK;
	pmcfg |= (PM_CFG_LCKDET_TIMER_DEFAULT << PM_CFG_LCKDET_TIMER_SHIFT);
	pmcfg &= ~(PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB);

	if ((sc->alc_flags & ALC_FLAG_APS) != 0) {
		/* Disable extended sync except AR8152 B v1.0 */
		linkcfg &= ~PCIEM_LINK_CTL_EXTENDED_SYNC;
		if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B &&
		    sc->alc_rev == ATHEROS_AR8152_B_V10)
			linkcfg |= PCIEM_LINK_CTL_EXTENDED_SYNC;
		CSR_WRITE_2(sc, sc->alc_expcap + PCIER_LINK_CTL,
		    linkcfg);
		pmcfg &= ~(PM_CFG_EN_BUFS_RX_L0S | PM_CFG_SA_DLY_ENB |
		    PM_CFG_HOTRST);
		pmcfg |= (PM_CFG_L1_ENTRY_TIMER_DEFAULT <<
		    PM_CFG_L1_ENTRY_TIMER_SHIFT);
		pmcfg &= ~PM_CFG_PM_REQ_TIMER_MASK;
		pmcfg |= (PM_CFG_PM_REQ_TIMER_DEFAULT <<
		    PM_CFG_PM_REQ_TIMER_SHIFT);
		pmcfg |= PM_CFG_SERDES_PD_EX_L1 | PM_CFG_PCIE_RECV;
	}

	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
		if ((sc->alc_flags & ALC_FLAG_L0S) != 0)
			pmcfg |= PM_CFG_ASPM_L0S_ENB;
		if ((sc->alc_flags & ALC_FLAG_L1S) != 0)
			pmcfg |= PM_CFG_ASPM_L1_ENB;
		if ((sc->alc_flags & ALC_FLAG_APS) != 0) {
			if (sc->alc_ident->deviceid ==
			    DEVICEID_ATHEROS_AR8152_B)
				pmcfg &= ~PM_CFG_ASPM_L0S_ENB;
			pmcfg &= ~(PM_CFG_SERDES_L1_ENB |
			    PM_CFG_SERDES_PLL_L1_ENB |
			    PM_CFG_SERDES_BUDS_RX_L1_ENB);
			pmcfg |= PM_CFG_CLK_SWH_L1;
			if (media == IFM_100_TX || media == IFM_1000_T) {
				pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_MASK;
				switch (sc->alc_ident->deviceid) {
				case DEVICEID_ATHEROS_AR8152_B:
					pmcfg |= (7 <<
					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
					break;
				case DEVICEID_ATHEROS_AR8152_B2:
				case DEVICEID_ATHEROS_AR8151_V2:
					pmcfg |= (4 <<
					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
					break;
				default:
					pmcfg |= (15 <<
					    PM_CFG_L1_ENTRY_TIMER_SHIFT);
					break;
				}
			}
		} else {
			pmcfg |= PM_CFG_SERDES_L1_ENB |
			    PM_CFG_SERDES_PLL_L1_ENB |
			    PM_CFG_SERDES_BUDS_RX_L1_ENB;
			pmcfg &= ~(PM_CFG_CLK_SWH_L1 |
			    PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB);
		}
	} else {
		pmcfg &= ~(PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SERDES_L1_ENB |
		    PM_CFG_SERDES_PLL_L1_ENB);
		pmcfg |= PM_CFG_CLK_SWH_L1;
		if ((sc->alc_flags & ALC_FLAG_L1S) != 0)
			pmcfg |= PM_CFG_ASPM_L1_ENB;
	}
	CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
}

static void
alc_aspm_816x(struct alc_softc *sc, int init)
{
	uint32_t pmcfg;

	pmcfg = CSR_READ_4(sc, ALC_PM_CFG);
	pmcfg &= ~PM_CFG_L1_ENTRY_TIMER_816X_MASK;
	pmcfg |= PM_CFG_L1_ENTRY_TIMER_816X_DEFAULT;
	pmcfg &= ~PM_CFG_PM_REQ_TIMER_MASK;
	pmcfg |= PM_CFG_PM_REQ_TIMER_816X_DEFAULT;
	pmcfg &= ~PM_CFG_LCKDET_TIMER_MASK;
	pmcfg |= PM_CFG_LCKDET_TIMER_DEFAULT;
	pmcfg |= PM_CFG_SERDES_PD_EX_L1 | PM_CFG_CLK_SWH_L1 | PM_CFG_PCIE_RECV;
	pmcfg &= ~(PM_CFG_RX_L1_AFTER_L0S | PM_CFG_TX_L1_AFTER_L0S |
	    PM_CFG_ASPM_L1_ENB | PM_CFG_ASPM_L0S_ENB |
	    PM_CFG_SERDES_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB |
	    PM_CFG_SERDES_BUDS_RX_L1_ENB | PM_CFG_SA_DLY_ENB |
	    PM_CFG_MAC_ASPM_CHK | PM_CFG_HOTRST);
	if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 &&
	    (sc->alc_rev & 0x01) != 0)
		pmcfg |= PM_CFG_SERDES_L1_ENB | PM_CFG_SERDES_PLL_L1_ENB;
	if ((sc->alc_flags & ALC_FLAG_LINK) != 0) {
		/* Link up, enable both L0s, L1s. */
		pmcfg |= PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB |
		    PM_CFG_MAC_ASPM_CHK;
	} else {
		if (init != 0)
			pmcfg |= PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB |
			    PM_CFG_MAC_ASPM_CHK;
		else if ((sc->alc_ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			pmcfg |= PM_CFG_ASPM_L1_ENB | PM_CFG_MAC_ASPM_CHK;
	}
	CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg);
}
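
/*
 * Set up PCIe link parameters.  ASPM handling differs between the
 * AR813x/AR815x and AR816x families.
 */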
static void
alc_init_pcie(struct alc_softc *sc)
{
	const char *aspm_state[] = { "L0s/L1", "L0s", "L1", "L0s/L1" };
	uint32_t cap, ctl, val;
	int state;

	/* Clear data link and flow-control protocol error. */
	val = CSR_READ_4(sc, ALC_PEX_UNC_ERR_SEV);
	val &= ~(PEX_UNC_ERR_SEV_DLP | PEX_UNC_ERR_SEV_FCP);
	CSR_WRITE_4(sc, ALC_PEX_UNC_ERR_SEV, val);

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		CSR_WRITE_4(sc, ALC_LTSSM_ID_CFG,
		    CSR_READ_4(sc, ALC_LTSSM_ID_CFG) & ~LTSSM_ID_WRO_ENB);
		CSR_WRITE_4(sc, ALC_PCIE_PHYMISC,
		    CSR_READ_4(sc, ALC_PCIE_PHYMISC) |
		    PCIE_PHYMISC_FORCE_RCV_DET);
		if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B &&
		    sc->alc_rev == ATHEROS_AR8152_B_V10) {
			val = CSR_READ_4(sc, ALC_PCIE_PHYMISC2);
			val &= ~(PCIE_PHYMISC2_SERDES_CDR_MASK |
			    PCIE_PHYMISC2_SERDES_TH_MASK);
			val |= 3 << PCIE_PHYMISC2_SERDES_CDR_SHIFT;
			val |= 3 << PCIE_PHYMISC2_SERDES_TH_SHIFT;
			CSR_WRITE_4(sc, ALC_PCIE_PHYMISC2, val);
		}
		/* Disable ASPM L0S and L1. */
		cap = CSR_READ_2(sc, sc->alc_expcap + PCIER_LINK_CAP);
		if ((cap & PCIEM_LINK_CAP_ASPM) != 0) {
			ctl = CSR_READ_2(sc, sc->alc_expcap + PCIER_LINK_CTL);
			if ((ctl & PCIEM_LINK_CTL_RCB) != 0)
				sc->alc_rcb = DMA_CFG_RCB_128;
			if (bootverbose)
				device_printf(sc->alc_dev, "RCB %u bytes\n",
				    sc->alc_rcb == DMA_CFG_RCB_64 ? 64 : 128);
			state = ctl & PCIEM_LINK_CTL_ASPMC;
			if (state & PCIEM_LINK_CTL_ASPMC_L0S)
				sc->alc_flags |= ALC_FLAG_L0S;
			if (state & PCIEM_LINK_CTL_ASPMC_L1)
				sc->alc_flags |= ALC_FLAG_L1S;
			if (bootverbose)
				device_printf(sc->alc_dev, "ASPM %s %s\n",
				    aspm_state[state],
				    state == 0 ? "disabled" : "enabled");
			alc_disable_l0s_l1(sc);
		} else {
			if (bootverbose)
				device_printf(sc->alc_dev,
				    "no ASPM support\n");
		}
	} else {
		val = CSR_READ_4(sc, ALC_PDLL_TRNS1);
		val &= ~PDLL_TRNS1_D3PLLOFF_ENB;
		CSR_WRITE_4(sc, ALC_PDLL_TRNS1, val);
		val = CSR_READ_4(sc, ALC_MASTER_CFG);
		if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 &&
		    (sc->alc_rev & 0x01) != 0) {
			if ((val & MASTER_WAKEN_25M) == 0 ||
			    (val & MASTER_CLK_SEL_DIS) == 0) {
				val |= MASTER_WAKEN_25M | MASTER_CLK_SEL_DIS;
				CSR_WRITE_4(sc, ALC_MASTER_CFG, val);
			}
		} else {
			if ((val & MASTER_WAKEN_25M) == 0 ||
			    (val & MASTER_CLK_SEL_DIS) != 0) {
				val |= MASTER_WAKEN_25M;
				val &= ~MASTER_CLK_SEL_DIS;
				CSR_WRITE_4(sc, ALC_MASTER_CFG, val);
			}
		}
	}
	alc_aspm(sc, 1, IFM_UNKNOWN);
}

static void
alc_config_msi(struct alc_softc *sc)
{
	uint32_t ctl, mod;

	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) {
		/*
		 * It seems interrupt moderation is controlled by
		 * ALC_MSI_RETRANS_TIMER register if MSI/MSIX is active.
		 * Driver uses RX interrupt moderation parameter to
		 * program ALC_MSI_RETRANS_TIMER register.
		 */
		ctl = CSR_READ_4(sc, ALC_MSI_RETRANS_TIMER);
		ctl &= ~MSI_RETRANS_TIMER_MASK;
		ctl &= ~MSI_RETRANS_MASK_SEL_LINE;
		mod = ALC_USECS(sc->alc_int_rx_mod);
		if (mod == 0)
			mod = 1;
		ctl |= mod;
		if ((sc->alc_flags & ALC_FLAG_MSIX) != 0)
			CSR_WRITE_4(sc, ALC_MSI_RETRANS_TIMER, ctl |
			    MSI_RETRANS_MASK_SEL_STD);
		else if ((sc->alc_flags & ALC_FLAG_MSI) != 0)
			CSR_WRITE_4(sc, ALC_MSI_RETRANS_TIMER, ctl |
			    MSI_RETRANS_MASK_SEL_LINE);
		else
			CSR_WRITE_4(sc, ALC_MSI_RETRANS_TIMER, 0);
	}
}

static int
alc_attach(device_t dev)
{
	struct alc_softc *sc;
	struct ifnet *ifp;
	int base, error, i, msic, msixc;
	uint16_t burst;

	error = 0;
	sc = device_get_softc(dev);
	sc->alc_dev = dev;
	sc->alc_rev = pci_get_revid(dev);

	mtx_init(&sc->alc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->alc_tick_ch, &sc->alc_mtx, 0);
	TASK_INIT(&sc->alc_int_task, 0, alc_int_task, sc);
	sc->alc_ident = alc_find_ident(dev);

	/* Map the device. */
	pci_enable_busmaster(dev);
	sc->alc_res_spec = alc_res_spec_mem;
	sc->alc_irq_spec = alc_irq_spec_legacy;
	error = bus_alloc_resources(dev, sc->alc_res_spec, sc->alc_res);
	if (error != 0) {
		device_printf(dev, "cannot allocate memory resources.\n");
		goto fail;
	}

	/* Set PHY address. */
	sc->alc_phyaddr = ALC_PHY_ADDR;

	/*
	 * One odd thing is AR8132 uses the same PHY hardware (F1
	 * gigabit PHY) as AR8131. So atphy(4) of AR8132 reports
	 * that the PHY supports 1000Mbps, but that's not true. The
	 * PHY used in AR8132 can't establish a gigabit link even if
	 * it shows the same PHY model/revision number as AR8131.
	 */
	switch (sc->alc_ident->deviceid) {
	case DEVICEID_ATHEROS_AR8161:
		if (pci_get_subvendor(dev) == VENDORID_ATHEROS &&
		    pci_get_subdevice(dev) == 0x0091 && sc->alc_rev == 0)
			sc->alc_flags |= ALC_FLAG_LINK_WAR;
		/* FALLTHROUGH */
	case DEVICEID_ATHEROS_E2200:
	case DEVICEID_ATHEROS_AR8171:
		sc->alc_flags |= ALC_FLAG_AR816X_FAMILY;
		break;
	case DEVICEID_ATHEROS_AR8162:
	case DEVICEID_ATHEROS_AR8172:
		sc->alc_flags |= ALC_FLAG_FASTETHER | ALC_FLAG_AR816X_FAMILY;
		break;
	case DEVICEID_ATHEROS_AR8152_B:
	case DEVICEID_ATHEROS_AR8152_B2:
		sc->alc_flags |= ALC_FLAG_APS;
		/* FALLTHROUGH */
	case DEVICEID_ATHEROS_AR8132:
		sc->alc_flags |= ALC_FLAG_FASTETHER;
		break;
	case DEVICEID_ATHEROS_AR8151:
	case DEVICEID_ATHEROS_AR8151_V2:
		sc->alc_flags |= ALC_FLAG_APS;
		/* FALLTHROUGH */
	default:
		break;
	}
	sc->alc_flags |= ALC_FLAG_JUMBO;

	/*
	 * It seems that AR813x/AR815x has silicon bug for SMB. In
	 * addition, Atheros said that enabling SMB wouldn't improve
	 * performance. However I think it's bad to access lots of
	 * registers to extract MAC statistics.
	 */
	sc->alc_flags |= ALC_FLAG_SMB_BUG;
	/*
	 * Don't use Tx CMB. It is known to have silicon bug.
	 */
1438 */ 1439 sc->alc_flags |= ALC_FLAG_CMB_BUG; 1440 sc->alc_chip_rev = CSR_READ_4(sc, ALC_MASTER_CFG) >> 1441 MASTER_CHIP_REV_SHIFT; 1442 if (bootverbose) { 1443 device_printf(dev, "PCI device revision : 0x%04x\n", 1444 sc->alc_rev); 1445 device_printf(dev, "Chip id/revision : 0x%04x\n", 1446 sc->alc_chip_rev); 1447 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) 1448 device_printf(dev, "AR816x revision : 0x%x\n", 1449 AR816X_REV(sc->alc_rev)); 1450 } 1451 device_printf(dev, "%u Tx FIFO, %u Rx FIFO\n", 1452 CSR_READ_4(sc, ALC_SRAM_TX_FIFO_LEN) * 8, 1453 CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN) * 8); 1454 1455 /* Initialize DMA parameters. */ 1456 sc->alc_dma_rd_burst = 0; 1457 sc->alc_dma_wr_burst = 0; 1458 sc->alc_rcb = DMA_CFG_RCB_64; 1459 if (pci_find_cap(dev, PCIY_EXPRESS, &base) == 0) { 1460 sc->alc_flags |= ALC_FLAG_PCIE; 1461 sc->alc_expcap = base; 1462 burst = CSR_READ_2(sc, base + PCIER_DEVICE_CTL); 1463 sc->alc_dma_rd_burst = 1464 (burst & PCIEM_CTL_MAX_READ_REQUEST) >> 12; 1465 sc->alc_dma_wr_burst = (burst & PCIEM_CTL_MAX_PAYLOAD) >> 5; 1466 if (bootverbose) { 1467 device_printf(dev, "Read request size : %u bytes.\n", 1468 alc_dma_burst[sc->alc_dma_rd_burst]); 1469 device_printf(dev, "TLP payload size : %u bytes.\n", 1470 alc_dma_burst[sc->alc_dma_wr_burst]); 1471 } 1472 if (alc_dma_burst[sc->alc_dma_rd_burst] > 1024) 1473 sc->alc_dma_rd_burst = 3; 1474 if (alc_dma_burst[sc->alc_dma_wr_burst] > 1024) 1475 sc->alc_dma_wr_burst = 3; 1476 alc_init_pcie(sc); 1477 } 1478 1479 /* Reset PHY. */ 1480 alc_phy_reset(sc); 1481 1482 /* Reset the ethernet controller. */ 1483 alc_stop_mac(sc); 1484 alc_reset(sc); 1485 1486 /* Allocate IRQ resources. */ 1487 msixc = pci_msix_count(dev); 1488 msic = pci_msi_count(dev); 1489 if (bootverbose) { 1490 device_printf(dev, "MSIX count : %d\n", msixc); 1491 device_printf(dev, "MSI count : %d\n", msic); 1492 } 1493 if (msixc > 1) 1494 msixc = 1; 1495 if (msic > 1) 1496 msic = 1; 1497 /* 1498 * Prefer MSIX over MSI. 1499 * AR816x controller has a silicon bug that MSI interrupt 1500 * does not assert if PCIM_CMD_INTxDIS bit of command 1501 * register is set. pci(4) was taught to handle that case. 1502 */ 1503 if (msix_disable == 0 || msi_disable == 0) { 1504 if (msix_disable == 0 && msixc > 0 && 1505 pci_alloc_msix(dev, &msixc) == 0) { 1506 if (msic == 1) { 1507 device_printf(dev, 1508 "Using %d MSIX message(s).\n", msixc); 1509 sc->alc_flags |= ALC_FLAG_MSIX; 1510 sc->alc_irq_spec = alc_irq_spec_msix; 1511 } else 1512 pci_release_msi(dev); 1513 } 1514 if (msi_disable == 0 && (sc->alc_flags & ALC_FLAG_MSIX) == 0 && 1515 msic > 0 && pci_alloc_msi(dev, &msic) == 0) { 1516 if (msic == 1) { 1517 device_printf(dev, 1518 "Using %d MSI message(s).\n", msic); 1519 sc->alc_flags |= ALC_FLAG_MSI; 1520 sc->alc_irq_spec = alc_irq_spec_msi; 1521 } else 1522 pci_release_msi(dev); 1523 } 1524 } 1525 1526 error = bus_alloc_resources(dev, sc->alc_irq_spec, sc->alc_irq); 1527 if (error != 0) { 1528 device_printf(dev, "cannot allocate IRQ resources.\n"); 1529 goto fail; 1530 } 1531 1532 /* Create device sysctl node. */ 1533 alc_sysctl_node(sc); 1534 1535 if ((error = alc_dma_alloc(sc) != 0)) 1536 goto fail; 1537 1538 /* Load station address. 
	alc_get_macaddr(sc);

	ifp = sc->alc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure.\n");
		error = ENXIO;
		goto fail;
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = alc_ioctl;
	ifp->if_start = alc_start;
	ifp->if_init = alc_init;
	ifp->if_snd.ifq_drv_maxlen = ALC_TX_RING_CNT - 1;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_TSO4;
	ifp->if_hwassist = ALC_CSUM_FEATURES | CSUM_TSO;
	if (pci_find_cap(dev, PCIY_PMG, &base) == 0) {
		ifp->if_capabilities |= IFCAP_WOL_MAGIC | IFCAP_WOL_MCAST;
		sc->alc_flags |= ALC_FLAG_PM;
		sc->alc_pmcap = base;
	}
	ifp->if_capenable = ifp->if_capabilities;

	/* Set up MII bus. */
	error = mii_attach(dev, &sc->alc_miibus, ifp, alc_mediachange,
	    alc_mediastatus, BMSR_DEFCAPMASK, sc->alc_phyaddr, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);
	if (error != 0) {
		device_printf(dev, "attaching PHYs failed\n");
		goto fail;
	}

	ether_ifattach(ifp, sc->alc_eaddr);

	/* VLAN capability setup. */
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING |
	    IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO;
	ifp->if_capenable = ifp->if_capabilities;
	/*
	 * XXX
	 * It seems enabling Tx checksum offloading makes more trouble.
	 * Sometimes the controller does not receive any frames when
	 * Tx checksum offloading is enabled. I'm not sure whether this
	 * is a bug in Tx checksum offloading logic or I got broken
	 * sample boards. To be safe, don't enable Tx checksum offloading
	 * by default, but give users the chance to toggle it if they know
	 * their controllers work without problems.
	 * Fortunately, Tx checksum offloading for the AR816x family
	 * seems to work.
	 */
	if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) {
		ifp->if_capenable &= ~IFCAP_TXCSUM;
		ifp->if_hwassist &= ~ALC_CSUM_FEATURES;
	}

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* Create local taskq. */
	sc->alc_tq = taskqueue_create_fast("alc_taskq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->alc_tq);
	if (sc->alc_tq == NULL) {
		device_printf(dev, "could not create taskqueue.\n");
		ether_ifdetach(ifp);
		error = ENXIO;
		goto fail;
	}
	taskqueue_start_threads(&sc->alc_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(sc->alc_dev));

	alc_config_msi(sc);
	if ((sc->alc_flags & ALC_FLAG_MSIX) != 0)
		msic = ALC_MSIX_MESSAGES;
	else if ((sc->alc_flags & ALC_FLAG_MSI) != 0)
		msic = ALC_MSI_MESSAGES;
	else
		msic = 1;
	for (i = 0; i < msic; i++) {
		error = bus_setup_intr(dev, sc->alc_irq[i],
		    INTR_TYPE_NET | INTR_MPSAFE, alc_intr, NULL, sc,
		    &sc->alc_intrhand[i]);
		if (error != 0)
			break;
	}
	if (error != 0) {
		device_printf(dev, "could not set up interrupt handler.\n");
		taskqueue_free(sc->alc_tq);
		sc->alc_tq = NULL;
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error != 0)
		alc_detach(dev);

	return (error);
}

static int
alc_detach(device_t dev)
{
	struct alc_softc *sc;
	struct ifnet *ifp;
	int i, msic;

	sc = device_get_softc(dev);

	ifp = sc->alc_ifp;
	if (device_is_attached(dev)) {
		ether_ifdetach(ifp);
		ALC_LOCK(sc);
		alc_stop(sc);
		ALC_UNLOCK(sc);
		callout_drain(&sc->alc_tick_ch);
		taskqueue_drain(sc->alc_tq, &sc->alc_int_task);
	}

	if (sc->alc_tq != NULL) {
		taskqueue_drain(sc->alc_tq, &sc->alc_int_task);
		taskqueue_free(sc->alc_tq);
		sc->alc_tq = NULL;
	}

	if (sc->alc_miibus != NULL) {
		device_delete_child(dev, sc->alc_miibus);
		sc->alc_miibus = NULL;
	}
	bus_generic_detach(dev);
	alc_dma_free(sc);

	if (ifp != NULL) {
		if_free(ifp);
		sc->alc_ifp = NULL;
	}

	if ((sc->alc_flags & ALC_FLAG_MSIX) != 0)
		msic = ALC_MSIX_MESSAGES;
	else if ((sc->alc_flags & ALC_FLAG_MSI) != 0)
		msic = ALC_MSI_MESSAGES;
	else
		msic = 1;
	for (i = 0; i < msic; i++) {
		if (sc->alc_intrhand[i] != NULL) {
			bus_teardown_intr(dev, sc->alc_irq[i],
			    sc->alc_intrhand[i]);
			sc->alc_intrhand[i] = NULL;
		}
	}
	if (sc->alc_res[0] != NULL)
		alc_phy_down(sc);
	bus_release_resources(dev, sc->alc_irq_spec, sc->alc_irq);
	if ((sc->alc_flags & (ALC_FLAG_MSI | ALC_FLAG_MSIX)) != 0)
		pci_release_msi(dev);
	bus_release_resources(dev, sc->alc_res_spec, sc->alc_res);
	mtx_destroy(&sc->alc_mtx);

	return (0);
}
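
/* Helper macros for registering read-only statistics sysctl nodes. */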
"alc Tx interrupt moderation"); 1727 /* Pull in device tunables. */ 1728 sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT; 1729 error = resource_int_value(device_get_name(sc->alc_dev), 1730 device_get_unit(sc->alc_dev), "int_rx_mod", &sc->alc_int_rx_mod); 1731 if (error == 0) { 1732 if (sc->alc_int_rx_mod < ALC_IM_TIMER_MIN || 1733 sc->alc_int_rx_mod > ALC_IM_TIMER_MAX) { 1734 device_printf(sc->alc_dev, "int_rx_mod value out of " 1735 "range; using default: %d\n", 1736 ALC_IM_RX_TIMER_DEFAULT); 1737 sc->alc_int_rx_mod = ALC_IM_RX_TIMER_DEFAULT; 1738 } 1739 } 1740 sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT; 1741 error = resource_int_value(device_get_name(sc->alc_dev), 1742 device_get_unit(sc->alc_dev), "int_tx_mod", &sc->alc_int_tx_mod); 1743 if (error == 0) { 1744 if (sc->alc_int_tx_mod < ALC_IM_TIMER_MIN || 1745 sc->alc_int_tx_mod > ALC_IM_TIMER_MAX) { 1746 device_printf(sc->alc_dev, "int_tx_mod value out of " 1747 "range; using default: %d\n", 1748 ALC_IM_TX_TIMER_DEFAULT); 1749 sc->alc_int_tx_mod = ALC_IM_TX_TIMER_DEFAULT; 1750 } 1751 } 1752 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit", 1753 CTLTYPE_INT | CTLFLAG_RW, &sc->alc_process_limit, 0, 1754 sysctl_hw_alc_proc_limit, "I", 1755 "max number of Rx events to process"); 1756 /* Pull in device tunables. */ 1757 sc->alc_process_limit = ALC_PROC_DEFAULT; 1758 error = resource_int_value(device_get_name(sc->alc_dev), 1759 device_get_unit(sc->alc_dev), "process_limit", 1760 &sc->alc_process_limit); 1761 if (error == 0) { 1762 if (sc->alc_process_limit < ALC_PROC_MIN || 1763 sc->alc_process_limit > ALC_PROC_MAX) { 1764 device_printf(sc->alc_dev, 1765 "process_limit value out of range; " 1766 "using default: %d\n", ALC_PROC_DEFAULT); 1767 sc->alc_process_limit = ALC_PROC_DEFAULT; 1768 } 1769 } 1770 1771 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, 1772 NULL, "ALC statistics"); 1773 parent = SYSCTL_CHILDREN(tree); 1774 1775 /* Rx statistics. 
*/ 1776 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD, 1777 NULL, "Rx MAC statistics"); 1778 child = SYSCTL_CHILDREN(tree); 1779 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_frames", 1780 &stats->rx_frames, "Good frames"); 1781 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames", 1782 &stats->rx_bcast_frames, "Good broadcast frames"); 1783 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames", 1784 &stats->rx_mcast_frames, "Good multicast frames"); 1785 ALC_SYSCTL_STAT_ADD32(ctx, child, "pause_frames", 1786 &stats->rx_pause_frames, "Pause control frames"); 1787 ALC_SYSCTL_STAT_ADD32(ctx, child, "control_frames", 1788 &stats->rx_control_frames, "Control frames"); 1789 ALC_SYSCTL_STAT_ADD32(ctx, child, "crc_errs", 1790 &stats->rx_crcerrs, "CRC errors"); 1791 ALC_SYSCTL_STAT_ADD32(ctx, child, "len_errs", 1792 &stats->rx_lenerrs, "Frames with mismatched length"); 1793 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_octets", 1794 &stats->rx_bytes, "Good octets"); 1795 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets", 1796 &stats->rx_bcast_bytes, "Good broadcast octets"); 1797 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets", 1798 &stats->rx_mcast_bytes, "Good multicast octets"); 1799 ALC_SYSCTL_STAT_ADD32(ctx, child, "runts", 1800 &stats->rx_runts, "Too short frames"); 1801 ALC_SYSCTL_STAT_ADD32(ctx, child, "fragments", 1802 &stats->rx_fragments, "Fragmented frames"); 1803 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_64", 1804 &stats->rx_pkts_64, "64 bytes frames"); 1805 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127", 1806 &stats->rx_pkts_65_127, "65 to 127 bytes frames"); 1807 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255", 1808 &stats->rx_pkts_128_255, "128 to 255 bytes frames"); 1809 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511", 1810 &stats->rx_pkts_256_511, "256 to 511 bytes frames"); 1811 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023", 1812 &stats->rx_pkts_512_1023, "512 to 1023 bytes frames"); 1813 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518", 1814 &stats->rx_pkts_1024_1518, "1024 to 1518 bytes frames"); 1815 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max", 1816 &stats->rx_pkts_1519_max, "1519 to max frames"); 1817 ALC_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs", 1818 &stats->rx_pkts_truncated, "Truncated frames due to MTU size"); 1819 ALC_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows", 1820 &stats->rx_fifo_oflows, "FIFO overflows"); 1821 ALC_SYSCTL_STAT_ADD32(ctx, child, "rrs_errs", 1822 &stats->rx_rrs_errs, "Return status write-back errors"); 1823 ALC_SYSCTL_STAT_ADD32(ctx, child, "align_errs", 1824 &stats->rx_alignerrs, "Alignment errors"); 1825 ALC_SYSCTL_STAT_ADD32(ctx, child, "filtered", 1826 &stats->rx_pkts_filtered, 1827 "Frames dropped due to address filtering"); 1828 1829 /* Tx statistics.
*/ 1830 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD, 1831 NULL, "Tx MAC statistics"); 1832 child = SYSCTL_CHILDREN(tree); 1833 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_frames", 1834 &stats->tx_frames, "Good frames"); 1835 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_bcast_frames", 1836 &stats->tx_bcast_frames, "Good broadcast frames"); 1837 ALC_SYSCTL_STAT_ADD32(ctx, child, "good_mcast_frames", 1838 &stats->tx_mcast_frames, "Good multicast frames"); 1839 ALC_SYSCTL_STAT_ADD32(ctx, child, "pause_frames", 1840 &stats->tx_pause_frames, "Pause control frames"); 1841 ALC_SYSCTL_STAT_ADD32(ctx, child, "control_frames", 1842 &stats->tx_control_frames, "Control frames"); 1843 ALC_SYSCTL_STAT_ADD32(ctx, child, "excess_defers", 1844 &stats->tx_excess_defer, "Frames with excessive deferrals"); 1845 ALC_SYSCTL_STAT_ADD32(ctx, child, "defers", 1846 &stats->tx_deferred, "Frames with deferrals"); 1847 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_octets", 1848 &stats->tx_bytes, "Good octets"); 1849 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_bcast_octets", 1850 &stats->tx_bcast_bytes, "Good broadcast octets"); 1851 ALC_SYSCTL_STAT_ADD64(ctx, child, "good_mcast_octets", 1852 &stats->tx_mcast_bytes, "Good multicast octets"); 1853 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_64", 1854 &stats->tx_pkts_64, "64 bytes frames"); 1855 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_65_127", 1856 &stats->tx_pkts_65_127, "65 to 127 bytes frames"); 1857 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_128_255", 1858 &stats->tx_pkts_128_255, "128 to 255 bytes frames"); 1859 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_256_511", 1860 &stats->tx_pkts_256_511, "256 to 511 bytes frames"); 1861 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_512_1023", 1862 &stats->tx_pkts_512_1023, "512 to 1023 bytes frames"); 1863 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1024_1518", 1864 &stats->tx_pkts_1024_1518, "1024 to 1518 bytes frames"); 1865 ALC_SYSCTL_STAT_ADD32(ctx, child, "frames_1519_max", 1866 &stats->tx_pkts_1519_max, "1519 to max frames"); 1867 ALC_SYSCTL_STAT_ADD32(ctx, child, "single_colls", 1868 &stats->tx_single_colls, "Single collisions"); 1869 ALC_SYSCTL_STAT_ADD32(ctx, child, "multi_colls", 1870 &stats->tx_multi_colls, "Multiple collisions"); 1871 ALC_SYSCTL_STAT_ADD32(ctx, child, "late_colls", 1872 &stats->tx_late_colls, "Late collisions"); 1873 ALC_SYSCTL_STAT_ADD32(ctx, child, "excess_colls", 1874 &stats->tx_excess_colls, "Excessive collisions"); 1875 ALC_SYSCTL_STAT_ADD32(ctx, child, "underruns", 1876 &stats->tx_underrun, "FIFO underruns"); 1877 ALC_SYSCTL_STAT_ADD32(ctx, child, "desc_underruns", 1878 &stats->tx_desc_underrun, "Descriptor write-back errors"); 1879 ALC_SYSCTL_STAT_ADD32(ctx, child, "len_errs", 1880 &stats->tx_lenerrs, "Frames with mismatched length"); 1881 ALC_SYSCTL_STAT_ADD32(ctx, child, "trunc_errs", 1882 &stats->tx_pkts_truncated, "Truncated frames due to MTU size"); 1883 } 1884 1885 #undef ALC_SYSCTL_STAT_ADD32 1886 #undef ALC_SYSCTL_STAT_ADD64 1887 1888 struct alc_dmamap_arg { 1889 bus_addr_t alc_busaddr; 1890 }; 1891 1892 static void 1893 alc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 1894 { 1895 struct alc_dmamap_arg *ctx; 1896 1897 if (error != 0) 1898 return; 1899 1900 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 1901 1902 ctx = (struct alc_dmamap_arg *)arg; 1903 ctx->alc_busaddr = segs[0].ds_addr; 1904 } 1905 1906 /* 1907 * Normal and high Tx descriptors share a single Tx high address.
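* ("High address" here means the upper 32 bits of the 64-bit bus address, ALC_ADDR_HI(); since the hardware holds only one such value per group, alc_check_boundary() below must reject any layout in which a ring or the CMB straddles a 4GB window -- e.g. a ring starting at 0xffff_f000 whose end crosses 0x1_0000_0000.)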
1908 * Four Rx descriptor/return rings and CMB share the same Rx 1909 * high address. 1910 */ 1911 static int 1912 alc_check_boundary(struct alc_softc *sc) 1913 { 1914 bus_addr_t cmb_end, rx_ring_end, rr_ring_end, tx_ring_end; 1915 1916 rx_ring_end = sc->alc_rdata.alc_rx_ring_paddr + ALC_RX_RING_SZ; 1917 rr_ring_end = sc->alc_rdata.alc_rr_ring_paddr + ALC_RR_RING_SZ; 1918 cmb_end = sc->alc_rdata.alc_cmb_paddr + ALC_CMB_SZ; 1919 tx_ring_end = sc->alc_rdata.alc_tx_ring_paddr + ALC_TX_RING_SZ; 1920 1921 /* 4GB boundary crossing is not allowed. */ 1922 if ((ALC_ADDR_HI(rx_ring_end) != 1923 ALC_ADDR_HI(sc->alc_rdata.alc_rx_ring_paddr)) || 1924 (ALC_ADDR_HI(rr_ring_end) != 1925 ALC_ADDR_HI(sc->alc_rdata.alc_rr_ring_paddr)) || 1926 (ALC_ADDR_HI(cmb_end) != 1927 ALC_ADDR_HI(sc->alc_rdata.alc_cmb_paddr)) || 1928 (ALC_ADDR_HI(tx_ring_end) != 1929 ALC_ADDR_HI(sc->alc_rdata.alc_tx_ring_paddr))) 1930 return (EFBIG); 1931 /* 1932 * Make sure Rx return descriptor/Rx descriptor/CMB use 1933 * the same high address. 1934 */ 1935 if ((ALC_ADDR_HI(rx_ring_end) != ALC_ADDR_HI(rr_ring_end)) || 1936 (ALC_ADDR_HI(rx_ring_end) != ALC_ADDR_HI(cmb_end))) 1937 return (EFBIG); 1938 1939 return (0); 1940 } 1941 1942 static int 1943 alc_dma_alloc(struct alc_softc *sc) 1944 { 1945 struct alc_txdesc *txd; 1946 struct alc_rxdesc *rxd; 1947 bus_addr_t lowaddr; 1948 struct alc_dmamap_arg ctx; 1949 int error, i; 1950 1951 lowaddr = BUS_SPACE_MAXADDR; 1952 again: 1953 /* Create parent DMA tag. */ 1954 error = bus_dma_tag_create( 1955 bus_get_dma_tag(sc->alc_dev), /* parent */ 1956 1, 0, /* alignment, boundary */ 1957 lowaddr, /* lowaddr */ 1958 BUS_SPACE_MAXADDR, /* highaddr */ 1959 NULL, NULL, /* filter, filterarg */ 1960 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 1961 0, /* nsegments */ 1962 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 1963 0, /* flags */ 1964 NULL, NULL, /* lockfunc, lockarg */ 1965 &sc->alc_cdata.alc_parent_tag); 1966 if (error != 0) { 1967 device_printf(sc->alc_dev, 1968 "could not create parent DMA tag.\n"); 1969 goto fail; 1970 } 1971 1972 /* Create DMA tag for Tx descriptor ring. */ 1973 error = bus_dma_tag_create( 1974 sc->alc_cdata.alc_parent_tag, /* parent */ 1975 ALC_TX_RING_ALIGN, 0, /* alignment, boundary */ 1976 BUS_SPACE_MAXADDR, /* lowaddr */ 1977 BUS_SPACE_MAXADDR, /* highaddr */ 1978 NULL, NULL, /* filter, filterarg */ 1979 ALC_TX_RING_SZ, /* maxsize */ 1980 1, /* nsegments */ 1981 ALC_TX_RING_SZ, /* maxsegsize */ 1982 0, /* flags */ 1983 NULL, NULL, /* lockfunc, lockarg */ 1984 &sc->alc_cdata.alc_tx_ring_tag); 1985 if (error != 0) { 1986 device_printf(sc->alc_dev, 1987 "could not create Tx ring DMA tag.\n"); 1988 goto fail; 1989 } 1990 1991 /* Create DMA tag for Rx free descriptor ring. */ 1992 error = bus_dma_tag_create( 1993 sc->alc_cdata.alc_parent_tag, /* parent */ 1994 ALC_RX_RING_ALIGN, 0, /* alignment, boundary */ 1995 BUS_SPACE_MAXADDR, /* lowaddr */ 1996 BUS_SPACE_MAXADDR, /* highaddr */ 1997 NULL, NULL, /* filter, filterarg */ 1998 ALC_RX_RING_SZ, /* maxsize */ 1999 1, /* nsegments */ 2000 ALC_RX_RING_SZ, /* maxsegsize */ 2001 0, /* flags */ 2002 NULL, NULL, /* lockfunc, lockarg */ 2003 &sc->alc_cdata.alc_rx_ring_tag); 2004 if (error != 0) { 2005 device_printf(sc->alc_dev, 2006 "could not create Rx ring DMA tag.\n"); 2007 goto fail; 2008 } 2009 /* Create DMA tag for Rx return descriptor ring.
*/ 2010 error = bus_dma_tag_create( 2011 sc->alc_cdata.alc_parent_tag, /* parent */ 2012 ALC_RR_RING_ALIGN, 0, /* alignment, boundary */ 2013 BUS_SPACE_MAXADDR, /* lowaddr */ 2014 BUS_SPACE_MAXADDR, /* highaddr */ 2015 NULL, NULL, /* filter, filterarg */ 2016 ALC_RR_RING_SZ, /* maxsize */ 2017 1, /* nsegments */ 2018 ALC_RR_RING_SZ, /* maxsegsize */ 2019 0, /* flags */ 2020 NULL, NULL, /* lockfunc, lockarg */ 2021 &sc->alc_cdata.alc_rr_ring_tag); 2022 if (error != 0) { 2023 device_printf(sc->alc_dev, 2024 "could not create Rx return ring DMA tag.\n"); 2025 goto fail; 2026 } 2027 2028 /* Create DMA tag for coalescing message block. */ 2029 error = bus_dma_tag_create( 2030 sc->alc_cdata.alc_parent_tag, /* parent */ 2031 ALC_CMB_ALIGN, 0, /* alignment, boundary */ 2032 BUS_SPACE_MAXADDR, /* lowaddr */ 2033 BUS_SPACE_MAXADDR, /* highaddr */ 2034 NULL, NULL, /* filter, filterarg */ 2035 ALC_CMB_SZ, /* maxsize */ 2036 1, /* nsegments */ 2037 ALC_CMB_SZ, /* maxsegsize */ 2038 0, /* flags */ 2039 NULL, NULL, /* lockfunc, lockarg */ 2040 &sc->alc_cdata.alc_cmb_tag); 2041 if (error != 0) { 2042 device_printf(sc->alc_dev, 2043 "could not create CMB DMA tag.\n"); 2044 goto fail; 2045 } 2046 /* Create DMA tag for status message block. */ 2047 error = bus_dma_tag_create( 2048 sc->alc_cdata.alc_parent_tag, /* parent */ 2049 ALC_SMB_ALIGN, 0, /* alignment, boundary */ 2050 BUS_SPACE_MAXADDR, /* lowaddr */ 2051 BUS_SPACE_MAXADDR, /* highaddr */ 2052 NULL, NULL, /* filter, filterarg */ 2053 ALC_SMB_SZ, /* maxsize */ 2054 1, /* nsegments */ 2055 ALC_SMB_SZ, /* maxsegsize */ 2056 0, /* flags */ 2057 NULL, NULL, /* lockfunc, lockarg */ 2058 &sc->alc_cdata.alc_smb_tag); 2059 if (error != 0) { 2060 device_printf(sc->alc_dev, 2061 "could not create SMB DMA tag.\n"); 2062 goto fail; 2063 } 2064 2065 /* Allocate DMA'able memory and load the DMA map for Tx ring. */ 2066 error = bus_dmamem_alloc(sc->alc_cdata.alc_tx_ring_tag, 2067 (void **)&sc->alc_rdata.alc_tx_ring, 2068 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 2069 &sc->alc_cdata.alc_tx_ring_map); 2070 if (error != 0) { 2071 device_printf(sc->alc_dev, 2072 "could not allocate DMA'able memory for Tx ring.\n"); 2073 goto fail; 2074 } 2075 ctx.alc_busaddr = 0; 2076 error = bus_dmamap_load(sc->alc_cdata.alc_tx_ring_tag, 2077 sc->alc_cdata.alc_tx_ring_map, sc->alc_rdata.alc_tx_ring, 2078 ALC_TX_RING_SZ, alc_dmamap_cb, &ctx, 0); 2079 if (error != 0 || ctx.alc_busaddr == 0) { 2080 device_printf(sc->alc_dev, 2081 "could not load DMA'able memory for Tx ring.\n"); 2082 goto fail; 2083 } 2084 sc->alc_rdata.alc_tx_ring_paddr = ctx.alc_busaddr; 2085 2086 /* Allocate DMA'able memory and load the DMA map for Rx ring. */ 2087 error = bus_dmamem_alloc(sc->alc_cdata.alc_rx_ring_tag, 2088 (void **)&sc->alc_rdata.alc_rx_ring, 2089 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 2090 &sc->alc_cdata.alc_rx_ring_map); 2091 if (error != 0) { 2092 device_printf(sc->alc_dev, 2093 "could not allocate DMA'able memory for Rx ring.\n"); 2094 goto fail; 2095 } 2096 ctx.alc_busaddr = 0; 2097 error = bus_dmamap_load(sc->alc_cdata.alc_rx_ring_tag, 2098 sc->alc_cdata.alc_rx_ring_map, sc->alc_rdata.alc_rx_ring, 2099 ALC_RX_RING_SZ, alc_dmamap_cb, &ctx, 0); 2100 if (error != 0 || ctx.alc_busaddr == 0) { 2101 device_printf(sc->alc_dev, 2102 "could not load DMA'able memory for Rx ring.\n"); 2103 goto fail; 2104 } 2105 sc->alc_rdata.alc_rx_ring_paddr = ctx.alc_busaddr; 2106 2107 /* Allocate DMA'able memory and load the DMA map for Rx return ring. 
*/ 2108 error = bus_dmamem_alloc(sc->alc_cdata.alc_rr_ring_tag, 2109 (void **)&sc->alc_rdata.alc_rr_ring, 2110 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 2111 &sc->alc_cdata.alc_rr_ring_map); 2112 if (error != 0) { 2113 device_printf(sc->alc_dev, 2114 "could not allocate DMA'able memory for Rx return ring.\n"); 2115 goto fail; 2116 } 2117 ctx.alc_busaddr = 0; 2118 error = bus_dmamap_load(sc->alc_cdata.alc_rr_ring_tag, 2119 sc->alc_cdata.alc_rr_ring_map, sc->alc_rdata.alc_rr_ring, 2120 ALC_RR_RING_SZ, alc_dmamap_cb, &ctx, 0); 2121 if (error != 0 || ctx.alc_busaddr == 0) { 2122 device_printf(sc->alc_dev, 2123 "could not load DMA'able memory for Rx return ring.\n"); 2124 goto fail; 2125 } 2126 sc->alc_rdata.alc_rr_ring_paddr = ctx.alc_busaddr; 2127 2128 /* Allocate DMA'able memory and load the DMA map for CMB. */ 2129 error = bus_dmamem_alloc(sc->alc_cdata.alc_cmb_tag, 2130 (void **)&sc->alc_rdata.alc_cmb, 2131 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 2132 &sc->alc_cdata.alc_cmb_map); 2133 if (error != 0) { 2134 device_printf(sc->alc_dev, 2135 "could not allocate DMA'able memory for CMB.\n"); 2136 goto fail; 2137 } 2138 ctx.alc_busaddr = 0; 2139 error = bus_dmamap_load(sc->alc_cdata.alc_cmb_tag, 2140 sc->alc_cdata.alc_cmb_map, sc->alc_rdata.alc_cmb, 2141 ALC_CMB_SZ, alc_dmamap_cb, &ctx, 0); 2142 if (error != 0 || ctx.alc_busaddr == 0) { 2143 device_printf(sc->alc_dev, 2144 "could not load DMA'able memory for CMB.\n"); 2145 goto fail; 2146 } 2147 sc->alc_rdata.alc_cmb_paddr = ctx.alc_busaddr; 2148 2149 /* Allocate DMA'able memory and load the DMA map for SMB. */ 2150 error = bus_dmamem_alloc(sc->alc_cdata.alc_smb_tag, 2151 (void **)&sc->alc_rdata.alc_smb, 2152 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, 2153 &sc->alc_cdata.alc_smb_map); 2154 if (error != 0) { 2155 device_printf(sc->alc_dev, 2156 "could not allocate DMA'able memory for SMB.\n"); 2157 goto fail; 2158 } 2159 ctx.alc_busaddr = 0; 2160 error = bus_dmamap_load(sc->alc_cdata.alc_smb_tag, 2161 sc->alc_cdata.alc_smb_map, sc->alc_rdata.alc_smb, 2162 ALC_SMB_SZ, alc_dmamap_cb, &ctx, 0); 2163 if (error != 0 || ctx.alc_busaddr == 0) { 2164 device_printf(sc->alc_dev, 2165 "could not load DMA'able memory for SMB.\n"); 2166 goto fail; 2167 } 2168 sc->alc_rdata.alc_smb_paddr = ctx.alc_busaddr; 2169 2170 /* Make sure we've not crossed the 4GB boundary. */ 2171 if (lowaddr != BUS_SPACE_MAXADDR_32BIT && 2172 (error = alc_check_boundary(sc)) != 0) { 2173 device_printf(sc->alc_dev, "4GB boundary crossed, " 2174 "switching to 32bit DMA addressing mode.\n"); 2175 alc_dma_free(sc); 2176 /* 2177 * Limit max allowable DMA address space to 32bit 2178 * and try again. 2179 */ 2180 lowaddr = BUS_SPACE_MAXADDR_32BIT; 2181 goto again; 2182 } 2183 2184 /* 2185 * Create Tx buffer parent tag. 2186 * AR81[3567]x allows 64bit DMA addressing of Tx/Rx buffers, 2187 * so it needs a separate parent DMA tag, as the ring parent's 2188 * DMA address space could have been restricted to the 32bit 2189 * address space by a 4GB boundary crossing above.
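* Concretely: if the ring allocations above hit the 4GB check and were retried with lowaddr = BUS_SPACE_MAXADDR_32BIT, a buffer tag derived from that parent would inherit the 32bit limit; rooting this tag at bus_get_dma_tag() instead keeps Tx/Rx mbuf buffers eligible for full 64bit addresses.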
2190 */ 2191 error = bus_dma_tag_create( 2192 bus_get_dma_tag(sc->alc_dev), /* parent */ 2193 1, 0, /* alignment, boundary */ 2194 BUS_SPACE_MAXADDR, /* lowaddr */ 2195 BUS_SPACE_MAXADDR, /* highaddr */ 2196 NULL, NULL, /* filter, filterarg */ 2197 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 2198 0, /* nsegments */ 2199 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 2200 0, /* flags */ 2201 NULL, NULL, /* lockfunc, lockarg */ 2202 &sc->alc_cdata.alc_buffer_tag); 2203 if (error != 0) { 2204 device_printf(sc->alc_dev, 2205 "could not create parent buffer DMA tag.\n"); 2206 goto fail; 2207 } 2208 2209 /* Create DMA tag for Tx buffers. */ 2210 error = bus_dma_tag_create( 2211 sc->alc_cdata.alc_buffer_tag, /* parent */ 2212 1, 0, /* alignment, boundary */ 2213 BUS_SPACE_MAXADDR, /* lowaddr */ 2214 BUS_SPACE_MAXADDR, /* highaddr */ 2215 NULL, NULL, /* filter, filterarg */ 2216 ALC_TSO_MAXSIZE, /* maxsize */ 2217 ALC_MAXTXSEGS, /* nsegments */ 2218 ALC_TSO_MAXSEGSIZE, /* maxsegsize */ 2219 0, /* flags */ 2220 NULL, NULL, /* lockfunc, lockarg */ 2221 &sc->alc_cdata.alc_tx_tag); 2222 if (error != 0) { 2223 device_printf(sc->alc_dev, "could not create Tx DMA tag.\n"); 2224 goto fail; 2225 } 2226 2227 /* Create DMA tag for Rx buffers. */ 2228 error = bus_dma_tag_create( 2229 sc->alc_cdata.alc_buffer_tag, /* parent */ 2230 ALC_RX_BUF_ALIGN, 0, /* alignment, boundary */ 2231 BUS_SPACE_MAXADDR, /* lowaddr */ 2232 BUS_SPACE_MAXADDR, /* highaddr */ 2233 NULL, NULL, /* filter, filterarg */ 2234 MCLBYTES, /* maxsize */ 2235 1, /* nsegments */ 2236 MCLBYTES, /* maxsegsize */ 2237 0, /* flags */ 2238 NULL, NULL, /* lockfunc, lockarg */ 2239 &sc->alc_cdata.alc_rx_tag); 2240 if (error != 0) { 2241 device_printf(sc->alc_dev, "could not create Rx DMA tag.\n"); 2242 goto fail; 2243 } 2244 /* Create DMA maps for Tx buffers. */ 2245 for (i = 0; i < ALC_TX_RING_CNT; i++) { 2246 txd = &sc->alc_cdata.alc_txdesc[i]; 2247 txd->tx_m = NULL; 2248 txd->tx_dmamap = NULL; 2249 error = bus_dmamap_create(sc->alc_cdata.alc_tx_tag, 0, 2250 &txd->tx_dmamap); 2251 if (error != 0) { 2252 device_printf(sc->alc_dev, 2253 "could not create Tx dmamap.\n"); 2254 goto fail; 2255 } 2256 } 2257 /* Create DMA maps for Rx buffers. */ 2258 if ((error = bus_dmamap_create(sc->alc_cdata.alc_rx_tag, 0, 2259 &sc->alc_cdata.alc_rx_sparemap)) != 0) { 2260 device_printf(sc->alc_dev, 2261 "could not create spare Rx dmamap.\n"); 2262 goto fail; 2263 } 2264 for (i = 0; i < ALC_RX_RING_CNT; i++) { 2265 rxd = &sc->alc_cdata.alc_rxdesc[i]; 2266 rxd->rx_m = NULL; 2267 rxd->rx_dmamap = NULL; 2268 error = bus_dmamap_create(sc->alc_cdata.alc_rx_tag, 0, 2269 &rxd->rx_dmamap); 2270 if (error != 0) { 2271 device_printf(sc->alc_dev, 2272 "could not create Rx dmamap.\n"); 2273 goto fail; 2274 } 2275 } 2276 2277 fail: 2278 return (error); 2279 } 2280 2281 static void 2282 alc_dma_free(struct alc_softc *sc) 2283 { 2284 struct alc_txdesc *txd; 2285 struct alc_rxdesc *rxd; 2286 int i; 2287 2288 /* Tx buffers. 
*/ 2289 if (sc->alc_cdata.alc_tx_tag != NULL) { 2290 for (i = 0; i < ALC_TX_RING_CNT; i++) { 2291 txd = &sc->alc_cdata.alc_txdesc[i]; 2292 if (txd->tx_dmamap != NULL) { 2293 bus_dmamap_destroy(sc->alc_cdata.alc_tx_tag, 2294 txd->tx_dmamap); 2295 txd->tx_dmamap = NULL; 2296 } 2297 } 2298 bus_dma_tag_destroy(sc->alc_cdata.alc_tx_tag); 2299 sc->alc_cdata.alc_tx_tag = NULL; 2300 } 2301 /* Rx buffers */ 2302 if (sc->alc_cdata.alc_rx_tag != NULL) { 2303 for (i = 0; i < ALC_RX_RING_CNT; i++) { 2304 rxd = &sc->alc_cdata.alc_rxdesc[i]; 2305 if (rxd->rx_dmamap != NULL) { 2306 bus_dmamap_destroy(sc->alc_cdata.alc_rx_tag, 2307 rxd->rx_dmamap); 2308 rxd->rx_dmamap = NULL; 2309 } 2310 } 2311 if (sc->alc_cdata.alc_rx_sparemap != NULL) { 2312 bus_dmamap_destroy(sc->alc_cdata.alc_rx_tag, 2313 sc->alc_cdata.alc_rx_sparemap); 2314 sc->alc_cdata.alc_rx_sparemap = NULL; 2315 } 2316 bus_dma_tag_destroy(sc->alc_cdata.alc_rx_tag); 2317 sc->alc_cdata.alc_rx_tag = NULL; 2318 } 2319 /* Tx descriptor ring. */ 2320 if (sc->alc_cdata.alc_tx_ring_tag != NULL) { 2321 if (sc->alc_rdata.alc_tx_ring_paddr != 0) 2322 bus_dmamap_unload(sc->alc_cdata.alc_tx_ring_tag, 2323 sc->alc_cdata.alc_tx_ring_map); 2324 if (sc->alc_rdata.alc_tx_ring != NULL) 2325 bus_dmamem_free(sc->alc_cdata.alc_tx_ring_tag, 2326 sc->alc_rdata.alc_tx_ring, 2327 sc->alc_cdata.alc_tx_ring_map); 2328 sc->alc_rdata.alc_tx_ring_paddr = 0; 2329 sc->alc_rdata.alc_tx_ring = NULL; 2330 bus_dma_tag_destroy(sc->alc_cdata.alc_tx_ring_tag); 2331 sc->alc_cdata.alc_tx_ring_tag = NULL; 2332 } 2333 /* Rx ring. */ 2334 if (sc->alc_cdata.alc_rx_ring_tag != NULL) { 2335 if (sc->alc_rdata.alc_rx_ring_paddr != 0) 2336 bus_dmamap_unload(sc->alc_cdata.alc_rx_ring_tag, 2337 sc->alc_cdata.alc_rx_ring_map); 2338 if (sc->alc_rdata.alc_rx_ring != NULL) 2339 bus_dmamem_free(sc->alc_cdata.alc_rx_ring_tag, 2340 sc->alc_rdata.alc_rx_ring, 2341 sc->alc_cdata.alc_rx_ring_map); 2342 sc->alc_rdata.alc_rx_ring_paddr = 0; 2343 sc->alc_rdata.alc_rx_ring = NULL; 2344 bus_dma_tag_destroy(sc->alc_cdata.alc_rx_ring_tag); 2345 sc->alc_cdata.alc_rx_ring_tag = NULL; 2346 } 2347 /* Rx return ring. 
*/ 2348 if (sc->alc_cdata.alc_rr_ring_tag != NULL) { 2349 if (sc->alc_rdata.alc_rr_ring_paddr != 0) 2350 bus_dmamap_unload(sc->alc_cdata.alc_rr_ring_tag, 2351 sc->alc_cdata.alc_rr_ring_map); 2352 if (sc->alc_rdata.alc_rr_ring != NULL) 2353 bus_dmamem_free(sc->alc_cdata.alc_rr_ring_tag, 2354 sc->alc_rdata.alc_rr_ring, 2355 sc->alc_cdata.alc_rr_ring_map); 2356 sc->alc_rdata.alc_rr_ring_paddr = 0; 2357 sc->alc_rdata.alc_rr_ring = NULL; 2358 bus_dma_tag_destroy(sc->alc_cdata.alc_rr_ring_tag); 2359 sc->alc_cdata.alc_rr_ring_tag = NULL; 2360 } 2361 /* CMB block */ 2362 if (sc->alc_cdata.alc_cmb_tag != NULL) { 2363 if (sc->alc_rdata.alc_cmb_paddr != 0) 2364 bus_dmamap_unload(sc->alc_cdata.alc_cmb_tag, 2365 sc->alc_cdata.alc_cmb_map); 2366 if (sc->alc_rdata.alc_cmb != NULL) 2367 bus_dmamem_free(sc->alc_cdata.alc_cmb_tag, 2368 sc->alc_rdata.alc_cmb, 2369 sc->alc_cdata.alc_cmb_map); 2370 sc->alc_rdata.alc_cmb_paddr = 0; 2371 sc->alc_rdata.alc_cmb = NULL; 2372 bus_dma_tag_destroy(sc->alc_cdata.alc_cmb_tag); 2373 sc->alc_cdata.alc_cmb_tag = NULL; 2374 } 2375 /* SMB block */ 2376 if (sc->alc_cdata.alc_smb_tag != NULL) { 2377 if (sc->alc_rdata.alc_smb_paddr != 0) 2378 bus_dmamap_unload(sc->alc_cdata.alc_smb_tag, 2379 sc->alc_cdata.alc_smb_map); 2380 if (sc->alc_rdata.alc_smb != NULL) 2381 bus_dmamem_free(sc->alc_cdata.alc_smb_tag, 2382 sc->alc_rdata.alc_smb, 2383 sc->alc_cdata.alc_smb_map); 2384 sc->alc_rdata.alc_smb_paddr = 0; 2385 sc->alc_rdata.alc_smb = NULL; 2386 bus_dma_tag_destroy(sc->alc_cdata.alc_smb_tag); 2387 sc->alc_cdata.alc_smb_tag = NULL; 2388 } 2389 if (sc->alc_cdata.alc_buffer_tag != NULL) { 2390 bus_dma_tag_destroy(sc->alc_cdata.alc_buffer_tag); 2391 sc->alc_cdata.alc_buffer_tag = NULL; 2392 } 2393 if (sc->alc_cdata.alc_parent_tag != NULL) { 2394 bus_dma_tag_destroy(sc->alc_cdata.alc_parent_tag); 2395 sc->alc_cdata.alc_parent_tag = NULL; 2396 } 2397 } 2398 2399 static int 2400 alc_shutdown(device_t dev) 2401 { 2402 2403 return (alc_suspend(dev)); 2404 } 2405 2406 /* 2407 * Note, this driver resets the link speed to 10/100Mbps by 2408 * restarting auto-negotiation in the suspend/shutdown phase, but we 2409 * don't know whether that auto-negotiation will succeed, as the 2410 * driver has no control after the power-off/suspend operation. 2411 * If the renegotiation fails, WOL may not work. Running at 1Gbps 2412 * would draw more power than the 375mA at 3.3V specified in the 2413 * PCI specification, and that could result in power to the 2414 * ethernet controller being shut down completely. 2415 * 2416 * TODO 2417 * Save the currently negotiated media speed/duplex/flow-control to the 2418 * softc and restore the same link again after resuming. PHY 2419 * handling such as powering down/resetting to 100Mbps may be better 2420 * handled in the suspend method of the phy driver.
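* One possible sketch of that TODO (untested, field name hypothetical): record mii->mii_media_active and mii->mii_media_status in a softc member such as alc_saved_media from alc_suspend(), then have alc_resume() reprogram the PHY to that media word before calling alc_init_locked() instead of unconditionally resetting it.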
*/ 2422 static void 2423 alc_setlinkspeed(struct alc_softc *sc) 2424 { 2425 struct mii_data *mii; 2426 int aneg, i; 2427 2428 mii = device_get_softc(sc->alc_miibus); 2429 mii_pollstat(mii); 2430 aneg = 0; 2431 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 2432 (IFM_ACTIVE | IFM_AVALID)) { 2433 switch (IFM_SUBTYPE(mii->mii_media_active)) { 2434 case IFM_10_T: 2435 case IFM_100_TX: 2436 return; 2437 case IFM_1000_T: 2438 aneg++; 2439 break; 2440 default: 2441 break; 2442 } 2443 } 2444 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, MII_100T2CR, 0); 2445 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 2446 MII_ANAR, ANAR_TX_FD | ANAR_TX | ANAR_10_FD | ANAR_10 | ANAR_CSMA); 2447 alc_miibus_writereg(sc->alc_dev, sc->alc_phyaddr, 2448 MII_BMCR, BMCR_RESET | BMCR_AUTOEN | BMCR_STARTNEG); 2449 DELAY(1000); 2450 if (aneg != 0) { 2451 /* 2452 * Poll link state until alc(4) gets a 10/100Mbps link. 2453 */ 2454 for (i = 0; i < MII_ANEGTICKS_GIGE; i++) { 2455 mii_pollstat(mii); 2456 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) 2457 == (IFM_ACTIVE | IFM_AVALID)) { 2458 switch (IFM_SUBTYPE( 2459 mii->mii_media_active)) { 2460 case IFM_10_T: 2461 case IFM_100_TX: 2462 alc_mac_config(sc); 2463 return; 2464 default: 2465 break; 2466 } 2467 } 2468 ALC_UNLOCK(sc); 2469 pause("alclnk", hz); 2470 ALC_LOCK(sc); 2471 } 2472 if (i == MII_ANEGTICKS_GIGE) 2473 device_printf(sc->alc_dev, 2474 "establishing a link failed, WOL may not work!\n"); 2475 } 2476 /* 2477 * No link, force MAC to have 100Mbps, full-duplex link. 2478 * This is the last resort and may or may not work. 2479 */ 2480 mii->mii_media_status = IFM_AVALID | IFM_ACTIVE; 2481 mii->mii_media_active = IFM_ETHER | IFM_100_TX | IFM_FDX; 2482 alc_mac_config(sc); 2483 } 2484 2485 static void 2486 alc_setwol(struct alc_softc *sc) 2487 { 2488 2489 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) 2490 alc_setwol_816x(sc); 2491 else 2492 alc_setwol_813x(sc); 2493 } 2494 2495 static void 2496 alc_setwol_813x(struct alc_softc *sc) 2497 { 2498 struct ifnet *ifp; 2499 uint32_t reg, pmcs; 2500 uint16_t pmstat; 2501 2502 ALC_LOCK_ASSERT(sc); 2503 2504 alc_disable_l0s_l1(sc); 2505 ifp = sc->alc_ifp; 2506 if ((sc->alc_flags & ALC_FLAG_PM) == 0) { 2507 /* Disable WOL. */ 2508 CSR_WRITE_4(sc, ALC_WOL_CFG, 0); 2509 reg = CSR_READ_4(sc, ALC_PCIE_PHYMISC); 2510 reg |= PCIE_PHYMISC_FORCE_RCV_DET; 2511 CSR_WRITE_4(sc, ALC_PCIE_PHYMISC, reg); 2512 /* Force PHY power down. */ 2513 alc_phy_down(sc); 2514 CSR_WRITE_4(sc, ALC_MASTER_CFG, 2515 CSR_READ_4(sc, ALC_MASTER_CFG) | MASTER_CLK_SEL_DIS); 2516 return; 2517 } 2518 2519 if ((ifp->if_capenable & IFCAP_WOL) != 0) { 2520 if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0) 2521 alc_setlinkspeed(sc); 2522 CSR_WRITE_4(sc, ALC_MASTER_CFG, 2523 CSR_READ_4(sc, ALC_MASTER_CFG) & ~MASTER_CLK_SEL_DIS); 2524 } 2525 2526 pmcs = 0; 2527 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) 2528 pmcs |= WOL_CFG_MAGIC | WOL_CFG_MAGIC_ENB; 2529 CSR_WRITE_4(sc, ALC_WOL_CFG, pmcs); 2530 reg = CSR_READ_4(sc, ALC_MAC_CFG); 2531 reg &= ~(MAC_CFG_DBG | MAC_CFG_PROMISC | MAC_CFG_ALLMULTI | 2532 MAC_CFG_BCAST); 2533 if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0) 2534 reg |= MAC_CFG_ALLMULTI | MAC_CFG_BCAST; 2535 if ((ifp->if_capenable & IFCAP_WOL) != 0) 2536 reg |= MAC_CFG_RX_ENB; 2537 CSR_WRITE_4(sc, ALC_MAC_CFG, reg); 2538 2539 reg = CSR_READ_4(sc, ALC_PCIE_PHYMISC); 2540 reg |= PCIE_PHYMISC_FORCE_RCV_DET; 2541 CSR_WRITE_4(sc, ALC_PCIE_PHYMISC, reg); 2542 if ((ifp->if_capenable & IFCAP_WOL) == 0) { 2543 /* WOL disabled, PHY power down.
*/ 2544 alc_phy_down(sc); 2545 CSR_WRITE_4(sc, ALC_MASTER_CFG, 2546 CSR_READ_4(sc, ALC_MASTER_CFG) | MASTER_CLK_SEL_DIS); 2547 } 2548 /* Request PME. */ 2549 pmstat = pci_read_config(sc->alc_dev, 2550 sc->alc_pmcap + PCIR_POWER_STATUS, 2); 2551 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 2552 if ((ifp->if_capenable & IFCAP_WOL) != 0) 2553 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 2554 pci_write_config(sc->alc_dev, 2555 sc->alc_pmcap + PCIR_POWER_STATUS, pmstat, 2); 2556 } 2557 2558 static void 2559 alc_setwol_816x(struct alc_softc *sc) 2560 { 2561 struct ifnet *ifp; 2562 uint32_t gphy, mac, master, pmcs, reg; 2563 uint16_t pmstat; 2564 2565 ALC_LOCK_ASSERT(sc); 2566 2567 ifp = sc->alc_ifp; 2568 master = CSR_READ_4(sc, ALC_MASTER_CFG); 2569 master &= ~MASTER_CLK_SEL_DIS; 2570 gphy = CSR_READ_4(sc, ALC_GPHY_CFG); 2571 gphy &= ~(GPHY_CFG_EXT_RESET | GPHY_CFG_LED_MODE | GPHY_CFG_100AB_ENB | 2572 GPHY_CFG_PHY_PLL_ON); 2573 gphy |= GPHY_CFG_HIB_EN | GPHY_CFG_HIB_PULSE | GPHY_CFG_SEL_ANA_RESET; 2574 if ((sc->alc_flags & ALC_FLAG_PM) == 0) { 2575 CSR_WRITE_4(sc, ALC_WOL_CFG, 0); 2576 gphy |= GPHY_CFG_PHY_IDDQ | GPHY_CFG_PWDOWN_HW; 2577 mac = CSR_READ_4(sc, ALC_MAC_CFG); 2578 } else { 2579 if ((ifp->if_capenable & IFCAP_WOL) != 0) { 2580 gphy |= GPHY_CFG_EXT_RESET; 2581 if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0) 2582 alc_setlinkspeed(sc); 2583 } 2584 pmcs = 0; 2585 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) 2586 pmcs |= WOL_CFG_MAGIC | WOL_CFG_MAGIC_ENB; 2587 CSR_WRITE_4(sc, ALC_WOL_CFG, pmcs); 2588 mac = CSR_READ_4(sc, ALC_MAC_CFG); 2589 mac &= ~(MAC_CFG_DBG | MAC_CFG_PROMISC | MAC_CFG_ALLMULTI | 2590 MAC_CFG_BCAST); 2591 if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0) 2592 mac |= MAC_CFG_ALLMULTI | MAC_CFG_BCAST; 2593 if ((ifp->if_capenable & IFCAP_WOL) != 0) 2594 mac |= MAC_CFG_RX_ENB; 2595 alc_miiext_writereg(sc, MII_EXT_ANEG, MII_EXT_ANEG_S3DIG10, 2596 ANEG_S3DIG10_SL); 2597 } 2598 2599 /* Enable OSC. */ 2600 reg = CSR_READ_4(sc, ALC_MISC); 2601 reg &= ~MISC_INTNLOSC_OPEN; 2602 CSR_WRITE_4(sc, ALC_MISC, reg); 2603 reg |= MISC_INTNLOSC_OPEN; 2604 CSR_WRITE_4(sc, ALC_MISC, reg); 2605 CSR_WRITE_4(sc, ALC_MASTER_CFG, master); 2606 CSR_WRITE_4(sc, ALC_MAC_CFG, mac); 2607 CSR_WRITE_4(sc, ALC_GPHY_CFG, gphy); 2608 reg = CSR_READ_4(sc, ALC_PDLL_TRNS1); 2609 reg |= PDLL_TRNS1_D3PLLOFF_ENB; 2610 CSR_WRITE_4(sc, ALC_PDLL_TRNS1, reg); 2611 2612 if ((sc->alc_flags & ALC_FLAG_PM) != 0) { 2613 /* Request PME. */ 2614 pmstat = pci_read_config(sc->alc_dev, 2615 sc->alc_pmcap + PCIR_POWER_STATUS, 2); 2616 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 2617 if ((ifp->if_capenable & IFCAP_WOL) != 0) 2618 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 2619 pci_write_config(sc->alc_dev, 2620 sc->alc_pmcap + PCIR_POWER_STATUS, pmstat, 2); 2621 } 2622 } 2623 2624 static int 2625 alc_suspend(device_t dev) 2626 { 2627 struct alc_softc *sc; 2628 2629 sc = device_get_softc(dev); 2630 2631 ALC_LOCK(sc); 2632 alc_stop(sc); 2633 alc_setwol(sc); 2634 ALC_UNLOCK(sc); 2635 2636 return (0); 2637 } 2638 2639 static int 2640 alc_resume(device_t dev) 2641 { 2642 struct alc_softc *sc; 2643 struct ifnet *ifp; 2644 uint16_t pmstat; 2645 2646 sc = device_get_softc(dev); 2647 2648 ALC_LOCK(sc); 2649 if ((sc->alc_flags & ALC_FLAG_PM) != 0) { 2650 /* Disable PME and clear PME status. 
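In the PCI power-management status register the PME_Status bit is write-one-to-clear, so writing back the value just read clears any pending PME event, while the cleared PCIM_PSTAT_PMEENABLE bit keeps new PME assertions disabled.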
*/ 2651 pmstat = pci_read_config(sc->alc_dev, 2652 sc->alc_pmcap + PCIR_POWER_STATUS, 2); 2653 if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) { 2654 pmstat &= ~PCIM_PSTAT_PMEENABLE; 2655 pci_write_config(sc->alc_dev, 2656 sc->alc_pmcap + PCIR_POWER_STATUS, pmstat, 2); 2657 } 2658 } 2659 /* Reset PHY. */ 2660 alc_phy_reset(sc); 2661 ifp = sc->alc_ifp; 2662 if ((ifp->if_flags & IFF_UP) != 0) { 2663 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2664 alc_init_locked(sc); 2665 } 2666 ALC_UNLOCK(sc); 2667 2668 return (0); 2669 } 2670 2671 static int 2672 alc_encap(struct alc_softc *sc, struct mbuf **m_head) 2673 { 2674 struct alc_txdesc *txd, *txd_last; 2675 struct tx_desc *desc; 2676 struct mbuf *m; 2677 struct ip *ip; 2678 struct tcphdr *tcp; 2679 bus_dma_segment_t txsegs[ALC_MAXTXSEGS]; 2680 bus_dmamap_t map; 2681 uint32_t cflags, hdrlen, ip_off, poff, vtag; 2682 int error, idx, nsegs, prod; 2683 2684 ALC_LOCK_ASSERT(sc); 2685 2686 M_ASSERTPKTHDR((*m_head)); 2687 2688 m = *m_head; 2689 ip = NULL; 2690 tcp = NULL; 2691 ip_off = poff = 0; 2692 if ((m->m_pkthdr.csum_flags & (ALC_CSUM_FEATURES | CSUM_TSO)) != 0) { 2693 /* 2694 * AR81[3567]x requires the offset of the TCP/UDP header in its 2695 * Tx descriptor to perform Tx checksum offloading. TSO 2696 * also requires the TCP header offset and modification of the 2697 * IP/TCP header. This kind of operation takes many CPU 2698 * cycles on FreeBSD so a fast host CPU is required to get 2699 * smooth TSO performance. 2700 */ 2701 struct ether_header *eh; 2702 2703 if (M_WRITABLE(m) == 0) { 2704 /* Get a writable copy. */ 2705 m = m_dup(*m_head, M_NOWAIT); 2706 /* Release original mbufs. */ 2707 m_freem(*m_head); 2708 if (m == NULL) { 2709 *m_head = NULL; 2710 return (ENOBUFS); 2711 } 2712 *m_head = m; 2713 } 2714 2715 ip_off = sizeof(struct ether_header); 2716 m = m_pullup(m, ip_off); 2717 if (m == NULL) { 2718 *m_head = NULL; 2719 return (ENOBUFS); 2720 } 2721 eh = mtod(m, struct ether_header *); 2722 /* 2723 * Check if hardware VLAN insertion is off. 2724 * Additional check for LLC/SNAP frame? 2725 */ 2726 if (eh->ether_type == htons(ETHERTYPE_VLAN)) { 2727 ip_off = sizeof(struct ether_vlan_header); 2728 m = m_pullup(m, ip_off); 2729 if (m == NULL) { 2730 *m_head = NULL; 2731 return (ENOBUFS); 2732 } 2733 } 2734 m = m_pullup(m, ip_off + sizeof(struct ip)); 2735 if (m == NULL) { 2736 *m_head = NULL; 2737 return (ENOBUFS); 2738 } 2739 ip = (struct ip *)(mtod(m, char *) + ip_off); 2740 poff = ip_off + (ip->ip_hl << 2); 2741 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 2742 m = m_pullup(m, poff + sizeof(struct tcphdr)); 2743 if (m == NULL) { 2744 *m_head = NULL; 2745 return (ENOBUFS); 2746 } 2747 tcp = (struct tcphdr *)(mtod(m, char *) + poff); 2748 m = m_pullup(m, poff + (tcp->th_off << 2)); 2749 if (m == NULL) { 2750 *m_head = NULL; 2751 return (ENOBUFS); 2752 } 2753 /* 2754 * Due to strict adherence to the Microsoft NDIS 2755 * Large Send specification, the hardware expects 2756 * a pseudo TCP checksum inserted by the upper 2757 * stack. Unfortunately the pseudo TCP 2758 * checksum that NDIS refers to does not include the 2759 * TCP payload length, so the driver must recompute 2760 * the pseudo checksum here. Hopefully this 2761 * isn't much of a burden on modern CPUs. 2762 * 2763 * Reset the IP checksum and recompute the TCP pseudo 2764 * checksum as the NDIS specification requires.
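* (A full pseudo-header checksum would normally be computed as in_pseudo(src, dst, htons(proto + len)); the in_pseudo() call below intentionally passes only htons(IPPROTO_TCP), leaving the length term to be added per segment by the hardware while it does the TSO split.)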
*/ 2766 ip = (struct ip *)(mtod(m, char *) + ip_off); 2767 tcp = (struct tcphdr *)(mtod(m, char *) + poff); 2768 ip->ip_sum = 0; 2769 tcp->th_sum = in_pseudo(ip->ip_src.s_addr, 2770 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 2771 } 2772 *m_head = m; 2773 } 2774 2775 prod = sc->alc_cdata.alc_tx_prod; 2776 txd = &sc->alc_cdata.alc_txdesc[prod]; 2777 txd_last = txd; 2778 map = txd->tx_dmamap; 2779 2780 error = bus_dmamap_load_mbuf_sg(sc->alc_cdata.alc_tx_tag, map, 2781 *m_head, txsegs, &nsegs, 0); 2782 if (error == EFBIG) { 2783 m = m_collapse(*m_head, M_NOWAIT, ALC_MAXTXSEGS); 2784 if (m == NULL) { 2785 m_freem(*m_head); 2786 *m_head = NULL; 2787 return (ENOMEM); 2788 } 2789 *m_head = m; 2790 error = bus_dmamap_load_mbuf_sg(sc->alc_cdata.alc_tx_tag, map, 2791 *m_head, txsegs, &nsegs, 0); 2792 if (error != 0) { 2793 m_freem(*m_head); 2794 *m_head = NULL; 2795 return (error); 2796 } 2797 } else if (error != 0) 2798 return (error); 2799 if (nsegs == 0) { 2800 m_freem(*m_head); 2801 *m_head = NULL; 2802 return (EIO); 2803 } 2804 2805 /* Check descriptor overrun. */ 2806 if (sc->alc_cdata.alc_tx_cnt + nsegs >= ALC_TX_RING_CNT - 3) { 2807 bus_dmamap_unload(sc->alc_cdata.alc_tx_tag, map); 2808 return (ENOBUFS); 2809 } 2810 bus_dmamap_sync(sc->alc_cdata.alc_tx_tag, map, BUS_DMASYNC_PREWRITE); 2811 2812 m = *m_head; 2813 cflags = TD_ETHERNET; 2814 vtag = 0; 2815 desc = NULL; 2816 idx = 0; 2817 /* Configure VLAN hardware tag insertion. */ 2818 if ((m->m_flags & M_VLANTAG) != 0) { 2819 vtag = htons(m->m_pkthdr.ether_vtag); 2820 vtag = (vtag << TD_VLAN_SHIFT) & TD_VLAN_MASK; 2821 cflags |= TD_INS_VLAN_TAG; 2822 } 2823 if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { 2824 /* Request TSO and set MSS. */ 2825 cflags |= TD_TSO | TD_TSO_DESCV1; 2826 cflags |= ((uint32_t)m->m_pkthdr.tso_segsz << TD_MSS_SHIFT) & 2827 TD_MSS_MASK; 2828 /* Set TCP header offset. */ 2829 cflags |= (poff << TD_TCPHDR_OFFSET_SHIFT) & 2830 TD_TCPHDR_OFFSET_MASK; 2831 /* 2832 * AR81[3567]x requires that the first buffer hold 2833 * only the IP/TCP header data. The payload must 2834 * be carried in the other descriptors. 2835 */ 2836 hdrlen = poff + (tcp->th_off << 2); 2837 desc = &sc->alc_rdata.alc_tx_ring[prod]; 2838 desc->len = htole32(TX_BYTES(hdrlen | vtag)); 2839 desc->flags = htole32(cflags); 2840 desc->addr = htole64(txsegs[0].ds_addr); 2841 sc->alc_cdata.alc_tx_cnt++; 2842 ALC_DESC_INC(prod, ALC_TX_RING_CNT); 2843 if (m->m_len - hdrlen > 0) { 2844 /* Handle remaining payload of the first fragment. */ 2845 desc = &sc->alc_rdata.alc_tx_ring[prod]; 2846 desc->len = htole32(TX_BYTES((m->m_len - hdrlen) | 2847 vtag)); 2848 desc->flags = htole32(cflags); 2849 desc->addr = htole64(txsegs[0].ds_addr + hdrlen); 2850 sc->alc_cdata.alc_tx_cnt++; 2851 ALC_DESC_INC(prod, ALC_TX_RING_CNT); 2852 } 2853 /* Handle remaining fragments. */ 2854 idx = 1; 2855 } else if ((m->m_pkthdr.csum_flags & ALC_CSUM_FEATURES) != 0) { 2856 /* Configure Tx checksum offload. */ 2857 #ifdef ALC_USE_CUSTOM_CSUM 2858 cflags |= TD_CUSTOM_CSUM; 2859 /* Set checksum start offset. */ 2860 cflags |= ((poff >> 1) << TD_PLOAD_OFFSET_SHIFT) & 2861 TD_PLOAD_OFFSET_MASK; 2862 /* Set checksum insertion position of TCP/UDP.
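The custom-checksum offsets are apparently encoded in units of 16-bit words, hence the >> 1 applied to both the payload start above and the insertion position below; a 34-byte offset, for instance, would be programmed as 17.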
*/ 2863 cflags |= (((poff + m->m_pkthdr.csum_data) >> 1) << 2864 TD_CUSTOM_CSUM_OFFSET_SHIFT) & TD_CUSTOM_CSUM_OFFSET_MASK; 2865 #else 2866 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) 2867 cflags |= TD_IPCSUM; 2868 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0) 2869 cflags |= TD_TCPCSUM; 2870 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) 2871 cflags |= TD_UDPCSUM; 2872 /* Set TCP/UDP header offset. */ 2873 cflags |= (poff << TD_L4HDR_OFFSET_SHIFT) & 2874 TD_L4HDR_OFFSET_MASK; 2875 #endif 2876 } 2877 for (; idx < nsegs; idx++) { 2878 desc = &sc->alc_rdata.alc_tx_ring[prod]; 2879 desc->len = htole32(TX_BYTES(txsegs[idx].ds_len) | vtag); 2880 desc->flags = htole32(cflags); 2881 desc->addr = htole64(txsegs[idx].ds_addr); 2882 sc->alc_cdata.alc_tx_cnt++; 2883 ALC_DESC_INC(prod, ALC_TX_RING_CNT); 2884 } 2885 /* Update producer index. */ 2886 sc->alc_cdata.alc_tx_prod = prod; 2887 2888 /* Finally set EOP on the last descriptor. */ 2889 prod = (prod + ALC_TX_RING_CNT - 1) % ALC_TX_RING_CNT; 2890 desc = &sc->alc_rdata.alc_tx_ring[prod]; 2891 desc->flags |= htole32(TD_EOP); 2892 2893 /* Swap dmamap of the first and the last. */ 2894 txd = &sc->alc_cdata.alc_txdesc[prod]; 2895 map = txd_last->tx_dmamap; 2896 txd_last->tx_dmamap = txd->tx_dmamap; 2897 txd->tx_dmamap = map; 2898 txd->tx_m = m; 2899 2900 return (0); 2901 } 2902 2903 static void 2904 alc_start(struct ifnet *ifp) 2905 { 2906 struct alc_softc *sc; 2907 2908 sc = ifp->if_softc; 2909 ALC_LOCK(sc); 2910 alc_start_locked(ifp); 2911 ALC_UNLOCK(sc); 2912 } 2913 2914 static void 2915 alc_start_locked(struct ifnet *ifp) 2916 { 2917 struct alc_softc *sc; 2918 struct mbuf *m_head; 2919 int enq; 2920 2921 sc = ifp->if_softc; 2922 2923 ALC_LOCK_ASSERT(sc); 2924 2925 /* Reclaim transmitted frames. */ 2926 if (sc->alc_cdata.alc_tx_cnt >= ALC_TX_DESC_HIWAT) 2927 alc_txeof(sc); 2928 2929 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 2930 IFF_DRV_RUNNING || (sc->alc_flags & ALC_FLAG_LINK) == 0) 2931 return; 2932 2933 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) { 2934 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); 2935 if (m_head == NULL) 2936 break; 2937 /* 2938 * Pack the data into the transmit ring. If we 2939 * don't have room, set the OACTIVE flag and wait 2940 * for the NIC to drain the ring. 2941 */ 2942 if (alc_encap(sc, &m_head)) { 2943 if (m_head == NULL) 2944 break; 2945 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); 2946 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2947 break; 2948 } 2949 2950 enq++; 2951 /* 2952 * If there's a BPF listener, bounce a copy of this frame 2953 * to him. 2954 */ 2955 ETHER_BPF_MTAP(ifp, m_head); 2956 } 2957 2958 if (enq > 0) { 2959 /* Sync descriptors. */ 2960 bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag, 2961 sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_PREWRITE); 2962 /* Kick. Assume we're using normal Tx priority queue. */ 2963 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) 2964 CSR_WRITE_2(sc, ALC_MBOX_TD_PRI0_PROD_IDX, 2965 (uint16_t)sc->alc_cdata.alc_tx_prod); 2966 else 2967 CSR_WRITE_4(sc, ALC_MBOX_TD_PROD_IDX, 2968 (sc->alc_cdata.alc_tx_prod << 2969 MBOX_TD_PROD_LO_IDX_SHIFT) & 2970 MBOX_TD_PROD_LO_IDX_MASK); 2971 /* Set a timeout in case the chip goes out to lunch. 
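alc_tick() calls alc_watchdog() once per second, which counts this timer down; alc_txeof() zeroes it whenever the Tx ring drains, so the watchdog fires only if transmission stays stuck for ALC_TX_TIMEOUT seconds.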
*/ 2972 sc->alc_watchdog_timer = ALC_TX_TIMEOUT; 2973 } 2974 } 2975 2976 static void 2977 alc_watchdog(struct alc_softc *sc) 2978 { 2979 struct ifnet *ifp; 2980 2981 ALC_LOCK_ASSERT(sc); 2982 2983 if (sc->alc_watchdog_timer == 0 || --sc->alc_watchdog_timer) 2984 return; 2985 2986 ifp = sc->alc_ifp; 2987 if ((sc->alc_flags & ALC_FLAG_LINK) == 0) { 2988 if_printf(sc->alc_ifp, "watchdog timeout (lost link)\n"); 2989 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2990 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2991 alc_init_locked(sc); 2992 return; 2993 } 2994 if_printf(sc->alc_ifp, "watchdog timeout -- resetting\n"); 2995 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 2996 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2997 alc_init_locked(sc); 2998 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 2999 alc_start_locked(ifp); 3000 } 3001 3002 static int 3003 alc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 3004 { 3005 struct alc_softc *sc; 3006 struct ifreq *ifr; 3007 struct mii_data *mii; 3008 int error, mask; 3009 3010 sc = ifp->if_softc; 3011 ifr = (struct ifreq *)data; 3012 error = 0; 3013 switch (cmd) { 3014 case SIOCSIFMTU: 3015 if (ifr->ifr_mtu < ETHERMIN || 3016 ifr->ifr_mtu > (sc->alc_ident->max_framelen - 3017 sizeof(struct ether_vlan_header) - ETHER_CRC_LEN) || 3018 ((sc->alc_flags & ALC_FLAG_JUMBO) == 0 && 3019 ifr->ifr_mtu > ETHERMTU)) 3020 error = EINVAL; 3021 else if (ifp->if_mtu != ifr->ifr_mtu) { 3022 ALC_LOCK(sc); 3023 ifp->if_mtu = ifr->ifr_mtu; 3024 /* AR81[3567]x has a 13-bit MSS field. */ 3025 if (ifp->if_mtu > ALC_TSO_MTU && 3026 (ifp->if_capenable & IFCAP_TSO4) != 0) { 3027 ifp->if_capenable &= ~IFCAP_TSO4; 3028 ifp->if_hwassist &= ~CSUM_TSO; 3029 VLAN_CAPABILITIES(ifp); 3030 } 3031 ALC_UNLOCK(sc); 3032 } 3033 break; 3034 case SIOCSIFFLAGS: 3035 ALC_LOCK(sc); 3036 if ((ifp->if_flags & IFF_UP) != 0) { 3037 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 && 3038 ((ifp->if_flags ^ sc->alc_if_flags) & 3039 (IFF_PROMISC | IFF_ALLMULTI)) != 0) 3040 alc_rxfilter(sc); 3041 else 3042 alc_init_locked(sc); 3043 } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 3044 alc_stop(sc); 3045 sc->alc_if_flags = ifp->if_flags; 3046 ALC_UNLOCK(sc); 3047 break; 3048 case SIOCADDMULTI: 3049 case SIOCDELMULTI: 3050 ALC_LOCK(sc); 3051 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 3052 alc_rxfilter(sc); 3053 ALC_UNLOCK(sc); 3054 break; 3055 case SIOCSIFMEDIA: 3056 case SIOCGIFMEDIA: 3057 mii = device_get_softc(sc->alc_miibus); 3058 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 3059 break; 3060 case SIOCSIFCAP: 3061 ALC_LOCK(sc); 3062 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 3063 if ((mask & IFCAP_TXCSUM) != 0 && 3064 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) { 3065 ifp->if_capenable ^= IFCAP_TXCSUM; 3066 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) 3067 ifp->if_hwassist |= ALC_CSUM_FEATURES; 3068 else 3069 ifp->if_hwassist &= ~ALC_CSUM_FEATURES; 3070 } 3071 if ((mask & IFCAP_TSO4) != 0 && 3072 (ifp->if_capabilities & IFCAP_TSO4) != 0) { 3073 ifp->if_capenable ^= IFCAP_TSO4; 3074 if ((ifp->if_capenable & IFCAP_TSO4) != 0) { 3075 /* AR81[3567]x has a 13-bit MSS field.
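That is, the largest MSS the descriptor can encode is 2^13 - 1 = 8191 bytes, so TSO has to stay disabled once the MTU exceeds ALC_TSO_MTU or tso_segsz could overflow TD_MSS_MASK.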
*/ 3076 if (ifp->if_mtu > ALC_TSO_MTU) { 3077 ifp->if_capenable &= ~IFCAP_TSO4; 3078 ifp->if_hwassist &= ~CSUM_TSO; 3079 } else 3080 ifp->if_hwassist |= CSUM_TSO; 3081 } else 3082 ifp->if_hwassist &= ~CSUM_TSO; 3083 } 3084 if ((mask & IFCAP_WOL_MCAST) != 0 && 3085 (ifp->if_capabilities & IFCAP_WOL_MCAST) != 0) 3086 ifp->if_capenable ^= IFCAP_WOL_MCAST; 3087 if ((mask & IFCAP_WOL_MAGIC) != 0 && 3088 (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0) 3089 ifp->if_capenable ^= IFCAP_WOL_MAGIC; 3090 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && 3091 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) { 3092 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 3093 alc_rxvlan(sc); 3094 } 3095 if ((mask & IFCAP_VLAN_HWCSUM) != 0 && 3096 (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0) 3097 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; 3098 if ((mask & IFCAP_VLAN_HWTSO) != 0 && 3099 (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0) 3100 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 3101 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) 3102 ifp->if_capenable &= 3103 ~(IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM); 3104 ALC_UNLOCK(sc); 3105 VLAN_CAPABILITIES(ifp); 3106 break; 3107 default: 3108 error = ether_ioctl(ifp, cmd, data); 3109 break; 3110 } 3111 3112 return (error); 3113 } 3114 3115 static void 3116 alc_mac_config(struct alc_softc *sc) 3117 { 3118 struct mii_data *mii; 3119 uint32_t reg; 3120 3121 ALC_LOCK_ASSERT(sc); 3122 3123 mii = device_get_softc(sc->alc_miibus); 3124 reg = CSR_READ_4(sc, ALC_MAC_CFG); 3125 reg &= ~(MAC_CFG_FULL_DUPLEX | MAC_CFG_TX_FC | MAC_CFG_RX_FC | 3126 MAC_CFG_SPEED_MASK); 3127 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0 || 3128 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151 || 3129 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 || 3130 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) 3131 reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW; 3132 /* Reprogram MAC with resolved speed/duplex. */ 3133 switch (IFM_SUBTYPE(mii->mii_media_active)) { 3134 case IFM_10_T: 3135 case IFM_100_TX: 3136 reg |= MAC_CFG_SPEED_10_100; 3137 break; 3138 case IFM_1000_T: 3139 reg |= MAC_CFG_SPEED_1000; 3140 break; 3141 } 3142 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 3143 reg |= MAC_CFG_FULL_DUPLEX; 3144 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) 3145 reg |= MAC_CFG_TX_FC; 3146 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) 3147 reg |= MAC_CFG_RX_FC; 3148 } 3149 CSR_WRITE_4(sc, ALC_MAC_CFG, reg); 3150 } 3151 3152 static void 3153 alc_stats_clear(struct alc_softc *sc) 3154 { 3155 struct smb sb, *smb; 3156 uint32_t *reg; 3157 int i; 3158 3159 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) { 3160 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag, 3161 sc->alc_cdata.alc_smb_map, 3162 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 3163 smb = sc->alc_rdata.alc_smb; 3164 /* Update done, clear. */ 3165 smb->updated = 0; 3166 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag, 3167 sc->alc_cdata.alc_smb_map, 3168 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3169 } else { 3170 for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered; 3171 reg++) { 3172 CSR_READ_4(sc, ALC_RX_MIB_BASE + i); 3173 i += sizeof(uint32_t); 3174 } 3175 /* Read Tx statistics. 
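The MIB counters appear to be clear-on-read, so simply reading every Rx/Tx register and discarding the result, as done here, is what resets the hardware statistics; alc_stats_update() performs the same walk but stores each value.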
*/ 3176 for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes; 3177 reg++) { 3178 CSR_READ_4(sc, ALC_TX_MIB_BASE + i); 3179 i += sizeof(uint32_t); 3180 } 3181 } 3182 } 3183 3184 static void 3185 alc_stats_update(struct alc_softc *sc) 3186 { 3187 struct alc_hw_stats *stat; 3188 struct smb sb, *smb; 3189 struct ifnet *ifp; 3190 uint32_t *reg; 3191 int i; 3192 3193 ALC_LOCK_ASSERT(sc); 3194 3195 ifp = sc->alc_ifp; 3196 stat = &sc->alc_stats; 3197 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) { 3198 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag, 3199 sc->alc_cdata.alc_smb_map, 3200 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 3201 smb = sc->alc_rdata.alc_smb; 3202 if (smb->updated == 0) 3203 return; 3204 } else { 3205 smb = &sb; 3206 /* Read Rx statistics. */ 3207 for (reg = &sb.rx_frames, i = 0; reg <= &sb.rx_pkts_filtered; 3208 reg++) { 3209 *reg = CSR_READ_4(sc, ALC_RX_MIB_BASE + i); 3210 i += sizeof(uint32_t); 3211 } 3212 /* Read Tx statistics. */ 3213 for (reg = &sb.tx_frames, i = 0; reg <= &sb.tx_mcast_bytes; 3214 reg++) { 3215 *reg = CSR_READ_4(sc, ALC_TX_MIB_BASE + i); 3216 i += sizeof(uint32_t); 3217 } 3218 } 3219 3220 /* Rx stats. */ 3221 stat->rx_frames += smb->rx_frames; 3222 stat->rx_bcast_frames += smb->rx_bcast_frames; 3223 stat->rx_mcast_frames += smb->rx_mcast_frames; 3224 stat->rx_pause_frames += smb->rx_pause_frames; 3225 stat->rx_control_frames += smb->rx_control_frames; 3226 stat->rx_crcerrs += smb->rx_crcerrs; 3227 stat->rx_lenerrs += smb->rx_lenerrs; 3228 stat->rx_bytes += smb->rx_bytes; 3229 stat->rx_runts += smb->rx_runts; 3230 stat->rx_fragments += smb->rx_fragments; 3231 stat->rx_pkts_64 += smb->rx_pkts_64; 3232 stat->rx_pkts_65_127 += smb->rx_pkts_65_127; 3233 stat->rx_pkts_128_255 += smb->rx_pkts_128_255; 3234 stat->rx_pkts_256_511 += smb->rx_pkts_256_511; 3235 stat->rx_pkts_512_1023 += smb->rx_pkts_512_1023; 3236 stat->rx_pkts_1024_1518 += smb->rx_pkts_1024_1518; 3237 stat->rx_pkts_1519_max += smb->rx_pkts_1519_max; 3238 stat->rx_pkts_truncated += smb->rx_pkts_truncated; 3239 stat->rx_fifo_oflows += smb->rx_fifo_oflows; 3240 stat->rx_rrs_errs += smb->rx_rrs_errs; 3241 stat->rx_alignerrs += smb->rx_alignerrs; 3242 stat->rx_bcast_bytes += smb->rx_bcast_bytes; 3243 stat->rx_mcast_bytes += smb->rx_mcast_bytes; 3244 stat->rx_pkts_filtered += smb->rx_pkts_filtered; 3245 3246 /* Tx stats. 
*/ 3247 stat->tx_frames += smb->tx_frames; 3248 stat->tx_bcast_frames += smb->tx_bcast_frames; 3249 stat->tx_mcast_frames += smb->tx_mcast_frames; 3250 stat->tx_pause_frames += smb->tx_pause_frames; 3251 stat->tx_excess_defer += smb->tx_excess_defer; 3252 stat->tx_control_frames += smb->tx_control_frames; 3253 stat->tx_deferred += smb->tx_deferred; 3254 stat->tx_bytes += smb->tx_bytes; 3255 stat->tx_pkts_64 += smb->tx_pkts_64; 3256 stat->tx_pkts_65_127 += smb->tx_pkts_65_127; 3257 stat->tx_pkts_128_255 += smb->tx_pkts_128_255; 3258 stat->tx_pkts_256_511 += smb->tx_pkts_256_511; 3259 stat->tx_pkts_512_1023 += smb->tx_pkts_512_1023; 3260 stat->tx_pkts_1024_1518 += smb->tx_pkts_1024_1518; 3261 stat->tx_pkts_1519_max += smb->tx_pkts_1519_max; 3262 stat->tx_single_colls += smb->tx_single_colls; 3263 stat->tx_multi_colls += smb->tx_multi_colls; 3264 stat->tx_late_colls += smb->tx_late_colls; 3265 stat->tx_excess_colls += smb->tx_excess_colls; 3266 stat->tx_underrun += smb->tx_underrun; 3267 stat->tx_desc_underrun += smb->tx_desc_underrun; 3268 stat->tx_lenerrs += smb->tx_lenerrs; 3269 stat->tx_pkts_truncated += smb->tx_pkts_truncated; 3270 stat->tx_bcast_bytes += smb->tx_bcast_bytes; 3271 stat->tx_mcast_bytes += smb->tx_mcast_bytes; 3272 3273 /* Update counters in ifnet. */ 3274 if_inc_counter(ifp, IFCOUNTER_OPACKETS, smb->tx_frames); 3275 3276 if_inc_counter(ifp, IFCOUNTER_COLLISIONS, smb->tx_single_colls + 3277 smb->tx_multi_colls * 2 + smb->tx_late_colls + 3278 smb->tx_excess_colls * HDPX_CFG_RETRY_DEFAULT); 3279 3280 if_inc_counter(ifp, IFCOUNTER_OERRORS, smb->tx_late_colls + 3281 smb->tx_excess_colls + smb->tx_underrun + smb->tx_pkts_truncated); 3282 3283 if_inc_counter(ifp, IFCOUNTER_IPACKETS, smb->rx_frames); 3284 3285 if_inc_counter(ifp, IFCOUNTER_IERRORS, 3286 smb->rx_crcerrs + smb->rx_lenerrs + 3287 smb->rx_runts + smb->rx_pkts_truncated + 3288 smb->rx_fifo_oflows + smb->rx_rrs_errs + 3289 smb->rx_alignerrs); 3290 3291 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) { 3292 /* Update done, clear. */ 3293 smb->updated = 0; 3294 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag, 3295 sc->alc_cdata.alc_smb_map, 3296 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3297 } 3298 } 3299 3300 static int 3301 alc_intr(void *arg) 3302 { 3303 struct alc_softc *sc; 3304 uint32_t status; 3305 3306 sc = (struct alc_softc *)arg; 3307 3308 status = CSR_READ_4(sc, ALC_INTR_STATUS); 3309 if ((status & ALC_INTRS) == 0) 3310 return (FILTER_STRAY); 3311 /* Disable interrupts. */ 3312 CSR_WRITE_4(sc, ALC_INTR_STATUS, INTR_DIS_INT); 3313 taskqueue_enqueue(sc->alc_tq, &sc->alc_int_task); 3314 3315 return (FILTER_HANDLED); 3316 } 3317 3318 static void 3319 alc_int_task(void *arg, int pending) 3320 { 3321 struct alc_softc *sc; 3322 struct ifnet *ifp; 3323 uint32_t status; 3324 int more; 3325 3326 sc = (struct alc_softc *)arg; 3327 ifp = sc->alc_ifp; 3328 3329 status = CSR_READ_4(sc, ALC_INTR_STATUS); 3330 ALC_LOCK(sc); 3331 if (sc->alc_morework != 0) { 3332 sc->alc_morework = 0; 3333 status |= INTR_RX_PKT; 3334 } 3335 if ((status & ALC_INTRS) == 0) 3336 goto done; 3337 3338 /* Acknowledge interrupts but still disable interrupts. 
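Writing the pending status bits back acknowledges them, while the INTR_DIS_INT bit keeps further interrupts masked until this task finishes; they are re-enabled at the done: label below by writing 0x7FFFFFFF.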
*/ 3339 CSR_WRITE_4(sc, ALC_INTR_STATUS, status | INTR_DIS_INT); 3340 3341 more = 0; 3342 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 3343 if ((status & INTR_RX_PKT) != 0) { 3344 more = alc_rxintr(sc, sc->alc_process_limit); 3345 if (more == EAGAIN) 3346 sc->alc_morework = 1; 3347 else if (more == EIO) { 3348 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 3349 alc_init_locked(sc); 3350 ALC_UNLOCK(sc); 3351 return; 3352 } 3353 } 3354 if ((status & (INTR_DMA_RD_TO_RST | INTR_DMA_WR_TO_RST | 3355 INTR_TXQ_TO_RST)) != 0) { 3356 if ((status & INTR_DMA_RD_TO_RST) != 0) 3357 device_printf(sc->alc_dev, 3358 "DMA read error! -- resetting\n"); 3359 if ((status & INTR_DMA_WR_TO_RST) != 0) 3360 device_printf(sc->alc_dev, 3361 "DMA write error! -- resetting\n"); 3362 if ((status & INTR_TXQ_TO_RST) != 0) 3363 device_printf(sc->alc_dev, 3364 "TxQ reset! -- resetting\n"); 3365 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 3366 alc_init_locked(sc); 3367 ALC_UNLOCK(sc); 3368 return; 3369 } 3370 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 && 3371 !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) 3372 alc_start_locked(ifp); 3373 } 3374 3375 if (more == EAGAIN || 3376 (CSR_READ_4(sc, ALC_INTR_STATUS) & ALC_INTRS) != 0) { 3377 ALC_UNLOCK(sc); 3378 taskqueue_enqueue(sc->alc_tq, &sc->alc_int_task); 3379 return; 3380 } 3381 3382 done: 3383 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 3384 /* Re-enable interrupts if we're running. */ 3385 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0x7FFFFFFF); 3386 } 3387 ALC_UNLOCK(sc); 3388 } 3389 3390 static void 3391 alc_txeof(struct alc_softc *sc) 3392 { 3393 struct ifnet *ifp; 3394 struct alc_txdesc *txd; 3395 uint32_t cons, prod; 3396 int prog; 3397 3398 ALC_LOCK_ASSERT(sc); 3399 3400 ifp = sc->alc_ifp; 3401 3402 if (sc->alc_cdata.alc_tx_cnt == 0) 3403 return; 3404 bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag, 3405 sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_POSTWRITE); 3406 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) { 3407 bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag, 3408 sc->alc_cdata.alc_cmb_map, BUS_DMASYNC_POSTREAD); 3409 prod = sc->alc_rdata.alc_cmb->cons; 3410 } else { 3411 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) 3412 prod = CSR_READ_2(sc, ALC_MBOX_TD_PRI0_CONS_IDX); 3413 else { 3414 prod = CSR_READ_4(sc, ALC_MBOX_TD_CONS_IDX); 3415 /* Assume we're using normal Tx priority queue. */ 3416 prod = (prod & MBOX_TD_CONS_LO_IDX_MASK) >> 3417 MBOX_TD_CONS_LO_IDX_SHIFT; 3418 } 3419 } 3420 cons = sc->alc_cdata.alc_tx_cons; 3421 /* 3422 * Go through our Tx list and free mbufs for those 3423 * frames which have been transmitted. 3424 */ 3425 for (prog = 0; cons != prod; prog++, 3426 ALC_DESC_INC(cons, ALC_TX_RING_CNT)) { 3427 if (sc->alc_cdata.alc_tx_cnt <= 0) 3428 break; 3429 prog++; 3430 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 3431 sc->alc_cdata.alc_tx_cnt--; 3432 txd = &sc->alc_cdata.alc_txdesc[cons]; 3433 if (txd->tx_m != NULL) { 3434 /* Reclaim transmitted mbufs. */ 3435 bus_dmamap_sync(sc->alc_cdata.alc_tx_tag, 3436 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 3437 bus_dmamap_unload(sc->alc_cdata.alc_tx_tag, 3438 txd->tx_dmamap); 3439 m_freem(txd->tx_m); 3440 txd->tx_m = NULL; 3441 } 3442 } 3443 3444 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) 3445 bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag, 3446 sc->alc_cdata.alc_cmb_map, BUS_DMASYNC_PREREAD); 3447 sc->alc_cdata.alc_tx_cons = cons; 3448 /* 3449 * Unarm the watchdog timer only when there are no pending 3450 * frames in the Tx queue.
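* The timer is armed in alc_start_locked() each time frames are queued, so with this reset the watchdog can only expire while descriptors remain outstanding.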
*/ 3452 if (sc->alc_cdata.alc_tx_cnt == 0) 3453 sc->alc_watchdog_timer = 0; 3454 } 3455 3456 static int 3457 alc_newbuf(struct alc_softc *sc, struct alc_rxdesc *rxd) 3458 { 3459 struct mbuf *m; 3460 bus_dma_segment_t segs[1]; 3461 bus_dmamap_t map; 3462 int nsegs; 3463 3464 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 3465 if (m == NULL) 3466 return (ENOBUFS); 3467 m->m_len = m->m_pkthdr.len = RX_BUF_SIZE_MAX; 3468 #ifndef __NO_STRICT_ALIGNMENT 3469 m_adj(m, sizeof(uint64_t)); 3470 #endif 3471 3472 if (bus_dmamap_load_mbuf_sg(sc->alc_cdata.alc_rx_tag, 3473 sc->alc_cdata.alc_rx_sparemap, m, segs, &nsegs, 0) != 0) { 3474 m_freem(m); 3475 return (ENOBUFS); 3476 } 3477 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); 3478 3479 if (rxd->rx_m != NULL) { 3480 bus_dmamap_sync(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap, 3481 BUS_DMASYNC_POSTREAD); 3482 bus_dmamap_unload(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap); 3483 } 3484 map = rxd->rx_dmamap; 3485 rxd->rx_dmamap = sc->alc_cdata.alc_rx_sparemap; 3486 sc->alc_cdata.alc_rx_sparemap = map; 3487 bus_dmamap_sync(sc->alc_cdata.alc_rx_tag, rxd->rx_dmamap, 3488 BUS_DMASYNC_PREREAD); 3489 rxd->rx_m = m; 3490 rxd->rx_desc->addr = htole64(segs[0].ds_addr); 3491 return (0); 3492 } 3493 3494 static int 3495 alc_rxintr(struct alc_softc *sc, int count) 3496 { 3497 struct ifnet *ifp; 3498 struct rx_rdesc *rrd; 3499 uint32_t nsegs, status; 3500 int rr_cons, prog; 3501 3502 bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag, 3503 sc->alc_cdata.alc_rr_ring_map, 3504 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 3505 bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag, 3506 sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_POSTWRITE); 3507 rr_cons = sc->alc_cdata.alc_rr_cons; 3508 ifp = sc->alc_ifp; 3509 for (prog = 0; (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0;) { 3510 if (count-- <= 0) 3511 break; 3512 rrd = &sc->alc_rdata.alc_rr_ring[rr_cons]; 3513 status = le32toh(rrd->status); 3514 if ((status & RRD_VALID) == 0) 3515 break; 3516 nsegs = RRD_RD_CNT(le32toh(rrd->rdinfo)); 3517 if (nsegs == 0) { 3518 /* This should not happen! */ 3519 device_printf(sc->alc_dev, 3520 "unexpected segment count -- resetting\n"); 3521 return (EIO); 3522 } 3523 alc_rxeof(sc, rrd); 3524 /* Clear Rx return status. */ 3525 rrd->status = 0; 3526 ALC_DESC_INC(rr_cons, ALC_RR_RING_CNT); 3527 sc->alc_cdata.alc_rx_cons += nsegs; 3528 sc->alc_cdata.alc_rx_cons %= ALC_RR_RING_CNT; 3529 prog += nsegs; 3530 } 3531 3532 if (prog > 0) { 3533 /* Update the consumer index. */ 3534 sc->alc_cdata.alc_rr_cons = rr_cons; 3535 /* Sync Rx return descriptors. */ 3536 bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag, 3537 sc->alc_cdata.alc_rr_ring_map, 3538 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3539 /* 3540 * Sync updated Rx descriptors so that the controller sees 3541 * the modified buffer addresses. 3542 */ 3543 bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag, 3544 sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_PREWRITE); 3545 /* 3546 * Let the controller know about the availability of new 3547 * Rx buffers. Since alc(4) uses RXQ_CFG_RD_BURST_DEFAULT 3548 * descriptors, it may be possible to update ALC_MBOX_RD0_PROD_IDX 3549 * only when Rx buffer pre-fetching is required. In 3550 * addition, we already set ALC_RX_RD_FREE_THRESH to 3551 * RX_RD_FREE_THRESH_LO_DEFAULT descriptors. However, 3552 * it still seems that pre-fetching needs more 3553 * experimentation.
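* Until then the producer index is simply published on every pass below, via a 16-bit write to ALC_MBOX_RD0_PROD_IDX on AR816x parts and a 32-bit write otherwise.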
3554 */ 3555 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) 3556 CSR_WRITE_2(sc, ALC_MBOX_RD0_PROD_IDX, 3557 (uint16_t)sc->alc_cdata.alc_rx_cons); 3558 else 3559 CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, 3560 sc->alc_cdata.alc_rx_cons); 3561 } 3562 3563 return (count > 0 ? 0 : EAGAIN); 3564 } 3565 3566 #ifndef __NO_STRICT_ALIGNMENT 3567 static struct mbuf * 3568 alc_fixup_rx(struct ifnet *ifp, struct mbuf *m) 3569 { 3570 struct mbuf *n; 3571 int i; 3572 uint16_t *src, *dst; 3573 3574 src = mtod(m, uint16_t *); 3575 dst = src - 3; 3576 3577 if (m->m_next == NULL) { 3578 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) 3579 *dst++ = *src++; 3580 m->m_data -= 6; 3581 return (m); 3582 } 3583 /* 3584 * Append a new mbuf to received mbuf chain and copy ethernet 3585 * header from the mbuf chain. This can save lots of CPU 3586 * cycles for jumbo frame. 3587 */ 3588 MGETHDR(n, M_NOWAIT, MT_DATA); 3589 if (n == NULL) { 3590 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 3591 m_freem(m); 3592 return (NULL); 3593 } 3594 bcopy(m->m_data, n->m_data, ETHER_HDR_LEN); 3595 m->m_data += ETHER_HDR_LEN; 3596 m->m_len -= ETHER_HDR_LEN; 3597 n->m_len = ETHER_HDR_LEN; 3598 M_MOVE_PKTHDR(n, m); 3599 n->m_next = m; 3600 return (n); 3601 } 3602 #endif 3603 3604 /* Receive a frame. */ 3605 static void 3606 alc_rxeof(struct alc_softc *sc, struct rx_rdesc *rrd) 3607 { 3608 struct alc_rxdesc *rxd; 3609 struct ifnet *ifp; 3610 struct mbuf *mp, *m; 3611 uint32_t rdinfo, status, vtag; 3612 int count, nsegs, rx_cons; 3613 3614 ifp = sc->alc_ifp; 3615 status = le32toh(rrd->status); 3616 rdinfo = le32toh(rrd->rdinfo); 3617 rx_cons = RRD_RD_IDX(rdinfo); 3618 nsegs = RRD_RD_CNT(rdinfo); 3619 3620 sc->alc_cdata.alc_rxlen = RRD_BYTES(status); 3621 if ((status & (RRD_ERR_SUM | RRD_ERR_LENGTH)) != 0) { 3622 /* 3623 * We want to pass the following frames to upper 3624 * layer regardless of error status of Rx return 3625 * ring. 3626 * 3627 * o IP/TCP/UDP checksum is bad. 3628 * o frame length and protocol specific length 3629 * does not match. 3630 * 3631 * Force network stack compute checksum for 3632 * errored frames. 3633 */ 3634 status |= RRD_TCP_UDPCSUM_NOK | RRD_IPCSUM_NOK; 3635 if ((status & (RRD_ERR_CRC | RRD_ERR_ALIGN | 3636 RRD_ERR_TRUNC | RRD_ERR_RUNT)) != 0) 3637 return; 3638 } 3639 3640 for (count = 0; count < nsegs; count++, 3641 ALC_DESC_INC(rx_cons, ALC_RX_RING_CNT)) { 3642 rxd = &sc->alc_cdata.alc_rxdesc[rx_cons]; 3643 mp = rxd->rx_m; 3644 /* Add a new receive buffer to the ring. */ 3645 if (alc_newbuf(sc, rxd) != 0) { 3646 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 3647 /* Reuse Rx buffers. */ 3648 if (sc->alc_cdata.alc_rxhead != NULL) 3649 m_freem(sc->alc_cdata.alc_rxhead); 3650 break; 3651 } 3652 3653 /* 3654 * Assume we've received a full sized frame. 3655 * Actual size is fixed when we encounter the end of 3656 * multi-segmented frame. 3657 */ 3658 mp->m_len = sc->alc_buf_size; 3659 3660 /* Chain received mbufs. */ 3661 if (sc->alc_cdata.alc_rxhead == NULL) { 3662 sc->alc_cdata.alc_rxhead = mp; 3663 sc->alc_cdata.alc_rxtail = mp; 3664 } else { 3665 mp->m_flags &= ~M_PKTHDR; 3666 sc->alc_cdata.alc_rxprev_tail = 3667 sc->alc_cdata.alc_rxtail; 3668 sc->alc_cdata.alc_rxtail->m_next = mp; 3669 sc->alc_cdata.alc_rxtail = mp; 3670 } 3671 3672 if (count == nsegs - 1) { 3673 /* Last desc. for this frame. */ 3674 m = sc->alc_cdata.alc_rxhead; 3675 m->m_flags |= M_PKTHDR; 3676 /* 3677 * It seems that L1C/L2C controller has no way 3678 * to tell hardware to strip CRC bytes. 
3679 */ 3680 m->m_pkthdr.len = 3681 sc->alc_cdata.alc_rxlen - ETHER_CRC_LEN; 3682 if (nsegs > 1) { 3683 /* Set last mbuf size. */ 3684 mp->m_len = sc->alc_cdata.alc_rxlen - 3685 (nsegs - 1) * sc->alc_buf_size; 3686 /* Remove the CRC bytes in chained mbufs. */ 3687 if (mp->m_len <= ETHER_CRC_LEN) { 3688 sc->alc_cdata.alc_rxtail = 3689 sc->alc_cdata.alc_rxprev_tail; 3690 sc->alc_cdata.alc_rxtail->m_len -= 3691 (ETHER_CRC_LEN - mp->m_len); 3692 sc->alc_cdata.alc_rxtail->m_next = NULL; 3693 m_freem(mp); 3694 } else { 3695 mp->m_len -= ETHER_CRC_LEN; 3696 } 3697 } else 3698 m->m_len = m->m_pkthdr.len; 3699 m->m_pkthdr.rcvif = ifp; 3700 /* 3701 * Due to hardware bugs, Rx checksum offloading 3702 * was intentionally disabled. 3703 */ 3704 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && 3705 (status & RRD_VLAN_TAG) != 0) { 3706 vtag = RRD_VLAN(le32toh(rrd->vtag)); 3707 m->m_pkthdr.ether_vtag = ntohs(vtag); 3708 m->m_flags |= M_VLANTAG; 3709 } 3710 #ifndef __NO_STRICT_ALIGNMENT 3711 m = alc_fixup_rx(ifp, m); 3712 if (m != NULL) 3713 #endif 3714 { 3715 /* Pass it on. */ 3716 ALC_UNLOCK(sc); 3717 (*ifp->if_input)(ifp, m); 3718 ALC_LOCK(sc); 3719 } 3720 } 3721 } 3722 /* Reset mbuf chains. */ 3723 ALC_RXCHAIN_RESET(sc); 3724 } 3725 3726 static void 3727 alc_tick(void *arg) 3728 { 3729 struct alc_softc *sc; 3730 struct mii_data *mii; 3731 3732 sc = (struct alc_softc *)arg; 3733 3734 ALC_LOCK_ASSERT(sc); 3735 3736 mii = device_get_softc(sc->alc_miibus); 3737 mii_tick(mii); 3738 alc_stats_update(sc); 3739 /* 3740 * alc(4) does not rely on Tx completion interrupts to reclaim 3741 * transferred buffers. Instead Tx completion interrupts are 3742 * used to hint for scheduling Tx task. So it's necessary to 3743 * release transmitted buffers by kicking Tx completion 3744 * handler. This limits the maximum reclamation delay to a hz. 3745 */ 3746 alc_txeof(sc); 3747 alc_watchdog(sc); 3748 callout_reset(&sc->alc_tick_ch, hz, alc_tick, sc); 3749 } 3750 3751 static void 3752 alc_osc_reset(struct alc_softc *sc) 3753 { 3754 uint32_t reg; 3755 3756 reg = CSR_READ_4(sc, ALC_MISC3); 3757 reg &= ~MISC3_25M_BY_SW; 3758 reg |= MISC3_25M_NOTO_INTNL; 3759 CSR_WRITE_4(sc, ALC_MISC3, reg); 3760 3761 reg = CSR_READ_4(sc, ALC_MISC); 3762 if (AR816X_REV(sc->alc_rev) >= AR816X_REV_B0) { 3763 /* 3764 * Restore over-current protection default value. 3765 * This value could be reset by MAC reset. 3766 */ 3767 reg &= ~MISC_PSW_OCP_MASK; 3768 reg |= (MISC_PSW_OCP_DEFAULT << MISC_PSW_OCP_SHIFT); 3769 reg &= ~MISC_INTNLOSC_OPEN; 3770 CSR_WRITE_4(sc, ALC_MISC, reg); 3771 CSR_WRITE_4(sc, ALC_MISC, reg | MISC_INTNLOSC_OPEN); 3772 reg = CSR_READ_4(sc, ALC_MISC2); 3773 reg &= ~MISC2_CALB_START; 3774 CSR_WRITE_4(sc, ALC_MISC2, reg); 3775 CSR_WRITE_4(sc, ALC_MISC2, reg | MISC2_CALB_START); 3776 3777 } else { 3778 reg &= ~MISC_INTNLOSC_OPEN; 3779 /* Disable isolate for revision A devices. */ 3780 if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1) 3781 reg &= ~MISC_ISO_ENB; 3782 CSR_WRITE_4(sc, ALC_MISC, reg | MISC_INTNLOSC_OPEN); 3783 CSR_WRITE_4(sc, ALC_MISC, reg); 3784 } 3785 3786 DELAY(20); 3787 } 3788 3789 static void 3790 alc_reset(struct alc_softc *sc) 3791 { 3792 uint32_t pmcfg, reg; 3793 int i; 3794 3795 pmcfg = 0; 3796 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { 3797 /* Reset workaround. */ 3798 CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, 1); 3799 if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 && 3800 (sc->alc_rev & 0x01) != 0) { 3801 /* Disable L0s/L1s before reset. 
*/ 3802 pmcfg = CSR_READ_4(sc, ALC_PM_CFG); 3803 if ((pmcfg & (PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB)) 3804 != 0) { 3805 pmcfg &= ~(PM_CFG_ASPM_L0S_ENB | 3806 PM_CFG_ASPM_L1_ENB); 3807 CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg); 3808 } 3809 } 3810 } 3811 reg = CSR_READ_4(sc, ALC_MASTER_CFG); 3812 reg |= MASTER_OOB_DIS_OFF | MASTER_RESET; 3813 CSR_WRITE_4(sc, ALC_MASTER_CFG, reg); 3814 3815 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { 3816 for (i = ALC_RESET_TIMEOUT; i > 0; i--) { 3817 DELAY(10); 3818 if (CSR_READ_4(sc, ALC_MBOX_RD0_PROD_IDX) == 0) 3819 break; 3820 } 3821 if (i == 0) 3822 device_printf(sc->alc_dev, "MAC reset timeout!\n"); 3823 } 3824 for (i = ALC_RESET_TIMEOUT; i > 0; i--) { 3825 DELAY(10); 3826 if ((CSR_READ_4(sc, ALC_MASTER_CFG) & MASTER_RESET) == 0) 3827 break; 3828 } 3829 if (i == 0) 3830 device_printf(sc->alc_dev, "master reset timeout!\n"); 3831 3832 for (i = ALC_RESET_TIMEOUT; i > 0; i--) { 3833 reg = CSR_READ_4(sc, ALC_IDLE_STATUS); 3834 if ((reg & (IDLE_STATUS_RXMAC | IDLE_STATUS_TXMAC | 3835 IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0) 3836 break; 3837 DELAY(10); 3838 } 3839 if (i == 0) 3840 device_printf(sc->alc_dev, "reset timeout(0x%08x)!\n", reg); 3841 3842 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { 3843 if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1 && 3844 (sc->alc_rev & 0x01) != 0) { 3845 reg = CSR_READ_4(sc, ALC_MASTER_CFG); 3846 reg |= MASTER_CLK_SEL_DIS; 3847 CSR_WRITE_4(sc, ALC_MASTER_CFG, reg); 3848 /* Restore L0s/L1s config. */ 3849 if ((pmcfg & (PM_CFG_ASPM_L0S_ENB | PM_CFG_ASPM_L1_ENB)) 3850 != 0) 3851 CSR_WRITE_4(sc, ALC_PM_CFG, pmcfg); 3852 } 3853 3854 alc_osc_reset(sc); 3855 reg = CSR_READ_4(sc, ALC_MISC3); 3856 reg &= ~MISC3_25M_BY_SW; 3857 reg |= MISC3_25M_NOTO_INTNL; 3858 CSR_WRITE_4(sc, ALC_MISC3, reg); 3859 reg = CSR_READ_4(sc, ALC_MISC); 3860 reg &= ~MISC_INTNLOSC_OPEN; 3861 if (AR816X_REV(sc->alc_rev) <= AR816X_REV_A1) 3862 reg &= ~MISC_ISO_ENB; 3863 CSR_WRITE_4(sc, ALC_MISC, reg); 3864 DELAY(20); 3865 } 3866 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0 || 3867 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B || 3868 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2) 3869 CSR_WRITE_4(sc, ALC_SERDES_LOCK, 3870 CSR_READ_4(sc, ALC_SERDES_LOCK) | SERDES_MAC_CLK_SLOWDOWN | 3871 SERDES_PHY_CLK_SLOWDOWN); 3872 } 3873 3874 static void 3875 alc_init(void *xsc) 3876 { 3877 struct alc_softc *sc; 3878 3879 sc = (struct alc_softc *)xsc; 3880 ALC_LOCK(sc); 3881 alc_init_locked(sc); 3882 ALC_UNLOCK(sc); 3883 } 3884 3885 static void 3886 alc_init_locked(struct alc_softc *sc) 3887 { 3888 struct ifnet *ifp; 3889 struct mii_data *mii; 3890 uint8_t eaddr[ETHER_ADDR_LEN]; 3891 bus_addr_t paddr; 3892 uint32_t reg, rxf_hi, rxf_lo; 3893 3894 ALC_LOCK_ASSERT(sc); 3895 3896 ifp = sc->alc_ifp; 3897 mii = device_get_softc(sc->alc_miibus); 3898 3899 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) 3900 return; 3901 /* 3902 * Cancel any pending I/O. 3903 */ 3904 alc_stop(sc); 3905 /* 3906 * Reset the chip to a known state. 3907 */ 3908 alc_reset(sc); 3909 3910 /* Initialize Rx descriptors. */ 3911 if (alc_init_rx_ring(sc) != 0) { 3912 device_printf(sc->alc_dev, "no memory for Rx buffers.\n"); 3913 alc_stop(sc); 3914 return; 3915 } 3916 alc_init_rr_ring(sc); 3917 alc_init_tx_ring(sc); 3918 alc_init_cmb(sc); 3919 alc_init_smb(sc); 3920 3921 /* Enable all clocks. 
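* On AR816x each engine (DMA read/write, TxQ, RxQ, Tx/Rx MAC) has
* its own gating control bit; older controllers simply get
* ALC_CLK_GATING_CFG cleared.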
*/ 3922 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { 3923 CSR_WRITE_4(sc, ALC_CLK_GATING_CFG, CLK_GATING_DMAW_ENB | 3924 CLK_GATING_DMAR_ENB | CLK_GATING_TXQ_ENB | 3925 CLK_GATING_RXQ_ENB | CLK_GATING_TXMAC_ENB | 3926 CLK_GATING_RXMAC_ENB); 3927 if (AR816X_REV(sc->alc_rev) >= AR816X_REV_B0) 3928 CSR_WRITE_4(sc, ALC_IDLE_DECISN_TIMER, 3929 IDLE_DECISN_TIMER_DEFAULT_1MS); 3930 } else 3931 CSR_WRITE_4(sc, ALC_CLK_GATING_CFG, 0); 3932 3933 /* Reprogram the station address. */ 3934 bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN); 3935 CSR_WRITE_4(sc, ALC_PAR0, 3936 eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]); 3937 CSR_WRITE_4(sc, ALC_PAR1, eaddr[0] << 8 | eaddr[1]); 3938 /* 3939 * Clear WOL status and disable all WOL features, as WOL 3940 * would interfere with Rx operation under normal environments. 3941 */ 3942 CSR_READ_4(sc, ALC_WOL_CFG); 3943 CSR_WRITE_4(sc, ALC_WOL_CFG, 0); 3944 /* Set Tx descriptor base addresses. */ 3945 paddr = sc->alc_rdata.alc_tx_ring_paddr; 3946 CSR_WRITE_4(sc, ALC_TX_BASE_ADDR_HI, ALC_ADDR_HI(paddr)); 3947 CSR_WRITE_4(sc, ALC_TDL_HEAD_ADDR_LO, ALC_ADDR_LO(paddr)); 3948 /* We don't use the high priority ring. */ 3949 CSR_WRITE_4(sc, ALC_TDH_HEAD_ADDR_LO, 0); 3950 /* Set Tx descriptor counter. */ 3951 CSR_WRITE_4(sc, ALC_TD_RING_CNT, 3952 (ALC_TX_RING_CNT << TD_RING_CNT_SHIFT) & TD_RING_CNT_MASK); 3953 /* Set Rx descriptor base addresses. */ 3954 paddr = sc->alc_rdata.alc_rx_ring_paddr; 3955 CSR_WRITE_4(sc, ALC_RX_BASE_ADDR_HI, ALC_ADDR_HI(paddr)); 3956 CSR_WRITE_4(sc, ALC_RD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr)); 3957 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) { 3958 /* We use one Rx ring. */ 3959 CSR_WRITE_4(sc, ALC_RD1_HEAD_ADDR_LO, 0); 3960 CSR_WRITE_4(sc, ALC_RD2_HEAD_ADDR_LO, 0); 3961 CSR_WRITE_4(sc, ALC_RD3_HEAD_ADDR_LO, 0); 3962 } 3963 /* Set Rx descriptor counter. */ 3964 CSR_WRITE_4(sc, ALC_RD_RING_CNT, 3965 (ALC_RX_RING_CNT << RD_RING_CNT_SHIFT) & RD_RING_CNT_MASK); 3966 3967 /* 3968 * Let hardware split jumbo frames into alc_buf_size-sized chunks 3969 * if they do not fit the buffer size. The Rx return descriptor holds 3970 * a counter that indicates how many fragments were made by the 3971 * hardware. The buffer size should be a multiple of 8 bytes. 3972 * Since the hardware limits the maximum buffer size, always 3973 * use the maximum value. 3974 * For strict-alignment architectures make sure to reduce the buffer 3975 * size by 8 bytes to make room for the alignment fixup. 3976 */ 3977 #ifndef __NO_STRICT_ALIGNMENT 3978 sc->alc_buf_size = RX_BUF_SIZE_MAX - sizeof(uint64_t); 3979 #else 3980 sc->alc_buf_size = RX_BUF_SIZE_MAX; 3981 #endif 3982 CSR_WRITE_4(sc, ALC_RX_BUF_SIZE, sc->alc_buf_size); 3983 3984 paddr = sc->alc_rdata.alc_rr_ring_paddr; 3985 /* Set Rx return descriptor base addresses. */ 3986 CSR_WRITE_4(sc, ALC_RRD0_HEAD_ADDR_LO, ALC_ADDR_LO(paddr)); 3987 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) { 3988 /* We use one Rx return ring. */ 3989 CSR_WRITE_4(sc, ALC_RRD1_HEAD_ADDR_LO, 0); 3990 CSR_WRITE_4(sc, ALC_RRD2_HEAD_ADDR_LO, 0); 3991 CSR_WRITE_4(sc, ALC_RRD3_HEAD_ADDR_LO, 0); 3992 } 3993 /* Set Rx return descriptor counter.
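* (encoded with the same shift-and-mask idiom used for the Tx and
* Rx descriptor counters above)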
*/ 3994 CSR_WRITE_4(sc, ALC_RRD_RING_CNT, 3995 (ALC_RR_RING_CNT << RRD_RING_CNT_SHIFT) & RRD_RING_CNT_MASK); 3996 paddr = sc->alc_rdata.alc_cmb_paddr; 3997 CSR_WRITE_4(sc, ALC_CMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr)); 3998 paddr = sc->alc_rdata.alc_smb_paddr; 3999 CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_HI, ALC_ADDR_HI(paddr)); 4000 CSR_WRITE_4(sc, ALC_SMB_BASE_ADDR_LO, ALC_ADDR_LO(paddr)); 4001 4002 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B) { 4003 /* Reconfigure SRAM - Vendor magic. */ 4004 CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_LEN, 0x000002A0); 4005 CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_LEN, 0x00000100); 4006 CSR_WRITE_4(sc, ALC_SRAM_RX_FIFO_ADDR, 0x029F0000); 4007 CSR_WRITE_4(sc, ALC_SRAM_RD0_ADDR, 0x02BF02A0); 4008 CSR_WRITE_4(sc, ALC_SRAM_TX_FIFO_ADDR, 0x03BF02C0); 4009 CSR_WRITE_4(sc, ALC_SRAM_TD_ADDR, 0x03DF03C0); 4010 CSR_WRITE_4(sc, ALC_TXF_WATER_MARK, 0x00000000); 4011 CSR_WRITE_4(sc, ALC_RD_DMA_CFG, 0x00000000); 4012 } 4013 4014 /* Tell hardware that we're ready to load DMA blocks. */ 4015 CSR_WRITE_4(sc, ALC_DMA_BLOCK, DMA_BLOCK_LOAD); 4016 4017 /* Configure interrupt moderation timer. */ 4018 reg = ALC_USECS(sc->alc_int_rx_mod) << IM_TIMER_RX_SHIFT; 4019 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) 4020 reg |= ALC_USECS(sc->alc_int_tx_mod) << IM_TIMER_TX_SHIFT; 4021 CSR_WRITE_4(sc, ALC_IM_TIMER, reg); 4022 /* 4023 * We don't want automatic interrupt clearing, as the taskqueue 4024 * handler for the interrupt should see the interrupt status. 4025 */ 4026 reg = CSR_READ_4(sc, ALC_MASTER_CFG); 4027 reg &= ~(MASTER_IM_RX_TIMER_ENB | MASTER_IM_TX_TIMER_ENB); 4028 reg |= MASTER_SA_TIMER_ENB; 4029 if (ALC_USECS(sc->alc_int_rx_mod) != 0) 4030 reg |= MASTER_IM_RX_TIMER_ENB; 4031 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0 && 4032 ALC_USECS(sc->alc_int_tx_mod) != 0) 4033 reg |= MASTER_IM_TX_TIMER_ENB; 4034 CSR_WRITE_4(sc, ALC_MASTER_CFG, reg); 4035 /* 4036 * Disable interrupt re-trigger timer. We don't want automatic 4037 * re-triggering of un-ACKed interrupts. 4038 */ 4039 CSR_WRITE_4(sc, ALC_INTR_RETRIG_TIMER, ALC_USECS(0)); 4040 /* Configure CMB. */ 4041 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { 4042 CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, ALC_TX_RING_CNT / 3); 4043 CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, 4044 ALC_USECS(sc->alc_int_tx_mod)); 4045 } else { 4046 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) { 4047 CSR_WRITE_4(sc, ALC_CMB_TD_THRESH, 4); 4048 CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(5000)); 4049 } else 4050 CSR_WRITE_4(sc, ALC_CMB_TX_TIMER, ALC_USECS(0)); 4051 } 4052 /* 4053 * Hardware can be configured to issue an SMB interrupt at a 4054 * programmed interval. Since the driver already runs a callout 4055 * once per second, we use that instead of 4056 * relying on the periodic SMB interrupt. 4057 */ 4058 CSR_WRITE_4(sc, ALC_SMB_STAT_TIMER, ALC_USECS(0)); 4059 /* Clear MAC statistics. */ 4060 alc_stats_clear(sc); 4061 4062 /* 4063 * Always use the maximum frame size the controller can support. 4064 * Otherwise, received frames with a larger frame length 4065 * than the alc(4) MTU would be silently dropped in hardware. This 4066 * would make path-MTU discovery hard, as the sender wouldn't get 4067 * any response from the receiver. alc(4) supports 4068 * multi-fragment frames on the Rx path, so it has no issue 4069 * assembling fragmented frames. Using the maximum frame size also 4070 * removes the need to reinitialize the hardware when the interface 4071 * MTU configuration is changed. 4072 * 4073 * Be conservative in what you do, be liberal in what you 4074 * accept from others - RFC 793. 4075
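* Concretely, ALC_FRAME_SIZE below is programmed with
* sc->alc_ident->max_framelen, the per-chip hardware limit, rather
* than a value derived from the configured MTU.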
4075 */ 4076 CSR_WRITE_4(sc, ALC_FRAME_SIZE, sc->alc_ident->max_framelen); 4077 4078 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) { 4079 /* Disable header split(?) */ 4080 CSR_WRITE_4(sc, ALC_HDS_CFG, 0); 4081 4082 /* Configure IPG/IFG parameters. */ 4083 CSR_WRITE_4(sc, ALC_IPG_IFG_CFG, 4084 ((IPG_IFG_IPGT_DEFAULT << IPG_IFG_IPGT_SHIFT) & 4085 IPG_IFG_IPGT_MASK) | 4086 ((IPG_IFG_MIFG_DEFAULT << IPG_IFG_MIFG_SHIFT) & 4087 IPG_IFG_MIFG_MASK) | 4088 ((IPG_IFG_IPG1_DEFAULT << IPG_IFG_IPG1_SHIFT) & 4089 IPG_IFG_IPG1_MASK) | 4090 ((IPG_IFG_IPG2_DEFAULT << IPG_IFG_IPG2_SHIFT) & 4091 IPG_IFG_IPG2_MASK)); 4092 /* Set parameters for half-duplex media. */ 4093 CSR_WRITE_4(sc, ALC_HDPX_CFG, 4094 ((HDPX_CFG_LCOL_DEFAULT << HDPX_CFG_LCOL_SHIFT) & 4095 HDPX_CFG_LCOL_MASK) | 4096 ((HDPX_CFG_RETRY_DEFAULT << HDPX_CFG_RETRY_SHIFT) & 4097 HDPX_CFG_RETRY_MASK) | HDPX_CFG_EXC_DEF_EN | 4098 ((HDPX_CFG_ABEBT_DEFAULT << HDPX_CFG_ABEBT_SHIFT) & 4099 HDPX_CFG_ABEBT_MASK) | 4100 ((HDPX_CFG_JAMIPG_DEFAULT << HDPX_CFG_JAMIPG_SHIFT) & 4101 HDPX_CFG_JAMIPG_MASK)); 4102 } 4103 4104 /* 4105 * Set TSO/checksum offload threshold. For frames that is 4106 * larger than this threshold, hardware wouldn't do 4107 * TSO/checksum offloading. 4108 */ 4109 reg = (sc->alc_ident->max_framelen >> TSO_OFFLOAD_THRESH_UNIT_SHIFT) & 4110 TSO_OFFLOAD_THRESH_MASK; 4111 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) 4112 reg |= TSO_OFFLOAD_ERRLGPKT_DROP_ENB; 4113 CSR_WRITE_4(sc, ALC_TSO_OFFLOAD_THRESH, reg); 4114 /* Configure TxQ. */ 4115 reg = (alc_dma_burst[sc->alc_dma_rd_burst] << 4116 TXQ_CFG_TX_FIFO_BURST_SHIFT) & TXQ_CFG_TX_FIFO_BURST_MASK; 4117 if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B || 4118 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) 4119 reg >>= 1; 4120 reg |= (TXQ_CFG_TD_BURST_DEFAULT << TXQ_CFG_TD_BURST_SHIFT) & 4121 TXQ_CFG_TD_BURST_MASK; 4122 reg |= TXQ_CFG_IP_OPTION_ENB | TXQ_CFG_8023_ENB; 4123 CSR_WRITE_4(sc, ALC_TXQ_CFG, reg | TXQ_CFG_ENHANCED_MODE); 4124 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { 4125 reg = (TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q1_BURST_SHIFT | 4126 TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q2_BURST_SHIFT | 4127 TXQ_CFG_TD_BURST_DEFAULT << HQTD_CFG_Q3_BURST_SHIFT | 4128 HQTD_CFG_BURST_ENB); 4129 CSR_WRITE_4(sc, ALC_HQTD_CFG, reg); 4130 reg = WRR_PRI_RESTRICT_NONE; 4131 reg |= (WRR_PRI_DEFAULT << WRR_PRI0_SHIFT | 4132 WRR_PRI_DEFAULT << WRR_PRI1_SHIFT | 4133 WRR_PRI_DEFAULT << WRR_PRI2_SHIFT | 4134 WRR_PRI_DEFAULT << WRR_PRI3_SHIFT); 4135 CSR_WRITE_4(sc, ALC_WRR, reg); 4136 } else { 4137 /* Configure Rx free descriptor pre-fetching. */ 4138 CSR_WRITE_4(sc, ALC_RX_RD_FREE_THRESH, 4139 ((RX_RD_FREE_THRESH_HI_DEFAULT << 4140 RX_RD_FREE_THRESH_HI_SHIFT) & RX_RD_FREE_THRESH_HI_MASK) | 4141 ((RX_RD_FREE_THRESH_LO_DEFAULT << 4142 RX_RD_FREE_THRESH_LO_SHIFT) & RX_RD_FREE_THRESH_LO_MASK)); 4143 } 4144 4145 /* 4146 * Configure flow control parameters. 
4147 * XON : 80% of Rx FIFO 4148 * XOFF : 30% of Rx FIFO 4149 */ 4150 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { 4151 reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN); 4152 reg &= SRAM_RX_FIFO_LEN_MASK; 4153 reg *= 8; 4154 if (reg > 8 * 1024) 4155 reg -= RX_FIFO_PAUSE_816X_RSVD; 4156 else 4157 reg -= RX_BUF_SIZE_MAX; 4158 reg /= 8; 4159 CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH, 4160 ((reg << RX_FIFO_PAUSE_THRESH_LO_SHIFT) & 4161 RX_FIFO_PAUSE_THRESH_LO_MASK) | 4162 (((RX_FIFO_PAUSE_816X_RSVD / 8) << 4163 RX_FIFO_PAUSE_THRESH_HI_SHIFT) & 4164 RX_FIFO_PAUSE_THRESH_HI_MASK)); 4165 } else if (sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8131 || 4166 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8132) { 4167 reg = CSR_READ_4(sc, ALC_SRAM_RX_FIFO_LEN); 4168 rxf_hi = (reg * 8) / 10; 4169 rxf_lo = (reg * 3) / 10; 4170 CSR_WRITE_4(sc, ALC_RX_FIFO_PAUSE_THRESH, 4171 ((rxf_lo << RX_FIFO_PAUSE_THRESH_LO_SHIFT) & 4172 RX_FIFO_PAUSE_THRESH_LO_MASK) | 4173 ((rxf_hi << RX_FIFO_PAUSE_THRESH_HI_SHIFT) & 4174 RX_FIFO_PAUSE_THRESH_HI_MASK)); 4175 } 4176 4177 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) { 4178 /* Disable RSS until I understand L1C/L2C's RSS logic. */ 4179 CSR_WRITE_4(sc, ALC_RSS_IDT_TABLE0, 0); 4180 CSR_WRITE_4(sc, ALC_RSS_CPU, 0); 4181 } 4182 4183 /* Configure RxQ. */ 4184 reg = (RXQ_CFG_RD_BURST_DEFAULT << RXQ_CFG_RD_BURST_SHIFT) & 4185 RXQ_CFG_RD_BURST_MASK; 4186 reg |= RXQ_CFG_RSS_MODE_DIS; 4187 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) 4188 reg |= (RXQ_CFG_816X_IDT_TBL_SIZE_DEFAULT << 4189 RXQ_CFG_816X_IDT_TBL_SIZE_SHIFT) & 4190 RXQ_CFG_816X_IDT_TBL_SIZE_MASK; 4191 if ((sc->alc_flags & ALC_FLAG_FASTETHER) == 0 && 4192 sc->alc_ident->deviceid != DEVICEID_ATHEROS_AR8151_V2) 4193 reg |= RXQ_CFG_ASPM_THROUGHPUT_LIMIT_1M; 4194 CSR_WRITE_4(sc, ALC_RXQ_CFG, reg); 4195 4196 /* Configure DMA parameters. */ 4197 reg = DMA_CFG_OUT_ORDER | DMA_CFG_RD_REQ_PRI; 4198 reg |= sc->alc_rcb; 4199 if ((sc->alc_flags & ALC_FLAG_CMB_BUG) == 0) 4200 reg |= DMA_CFG_CMB_ENB; 4201 if ((sc->alc_flags & ALC_FLAG_SMB_BUG) == 0) 4202 reg |= DMA_CFG_SMB_ENB; 4203 else 4204 reg |= DMA_CFG_SMB_DIS; 4205 reg |= (sc->alc_dma_rd_burst & DMA_CFG_RD_BURST_MASK) << 4206 DMA_CFG_RD_BURST_SHIFT; 4207 reg |= (sc->alc_dma_wr_burst & DMA_CFG_WR_BURST_MASK) << 4208 DMA_CFG_WR_BURST_SHIFT; 4209 reg |= (DMA_CFG_RD_DELAY_CNT_DEFAULT << DMA_CFG_RD_DELAY_CNT_SHIFT) & 4210 DMA_CFG_RD_DELAY_CNT_MASK; 4211 reg |= (DMA_CFG_WR_DELAY_CNT_DEFAULT << DMA_CFG_WR_DELAY_CNT_SHIFT) & 4212 DMA_CFG_WR_DELAY_CNT_MASK; 4213 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0) { 4214 switch (AR816X_REV(sc->alc_rev)) { 4215 case AR816X_REV_A0: 4216 case AR816X_REV_A1: 4217 reg |= DMA_CFG_RD_CHNL_SEL_1; 4218 break; 4219 case AR816X_REV_B0: 4220 /* FALLTHROUGH */ 4221 default: 4222 reg |= DMA_CFG_RD_CHNL_SEL_3; 4223 break; 4224 } 4225 } 4226 CSR_WRITE_4(sc, ALC_DMA_CFG, reg); 4227 4228 /* 4229 * Configure Tx/Rx MACs. 4230 * - Auto-padding for short frames. 4231 * - Enable CRC generation. 4232 * The actual reconfiguration of the MAC for the resolved speed/duplex 4233 * follows once link establishment has been detected. 4234 * AR813x/AR815x always does checksum computation, regardless 4235 * of the MAC_CFG_RXCSUM_ENB bit. The controller is also known to 4236 * have a bug in the protocol field of the Rx return structure, so 4237 * these controllers can't handle fragmented frames. Disable 4238 * Rx checksum offloading until there is a newer controller 4239 * that has a sane implementation. 4240
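* The speed/duplex-dependent MAC bits are rewritten later, from
* alc_miibus_statchg() by way of alc_mac_config(), once the PHY
* reports a resolved link.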
4240 */ 4241 reg = MAC_CFG_TX_CRC_ENB | MAC_CFG_TX_AUTO_PAD | MAC_CFG_FULL_DUPLEX | 4242 ((MAC_CFG_PREAMBLE_DEFAULT << MAC_CFG_PREAMBLE_SHIFT) & 4243 MAC_CFG_PREAMBLE_MASK); 4244 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) != 0 || 4245 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151 || 4246 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8151_V2 || 4247 sc->alc_ident->deviceid == DEVICEID_ATHEROS_AR8152_B2) 4248 reg |= MAC_CFG_HASH_ALG_CRC32 | MAC_CFG_SPEED_MODE_SW; 4249 if ((sc->alc_flags & ALC_FLAG_FASTETHER) != 0) 4250 reg |= MAC_CFG_SPEED_10_100; 4251 else 4252 reg |= MAC_CFG_SPEED_1000; 4253 CSR_WRITE_4(sc, ALC_MAC_CFG, reg); 4254 4255 /* Set up the receive filter. */ 4256 alc_rxfilter(sc); 4257 alc_rxvlan(sc); 4258 4259 /* Acknowledge all pending interrupts and clear it. */ 4260 CSR_WRITE_4(sc, ALC_INTR_MASK, ALC_INTRS); 4261 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF); 4262 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0); 4263 4264 ifp->if_drv_flags |= IFF_DRV_RUNNING; 4265 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 4266 4267 sc->alc_flags &= ~ALC_FLAG_LINK; 4268 /* Switch to the current media. */ 4269 alc_mediachange_locked(sc); 4270 4271 callout_reset(&sc->alc_tick_ch, hz, alc_tick, sc); 4272 } 4273 4274 static void 4275 alc_stop(struct alc_softc *sc) 4276 { 4277 struct ifnet *ifp; 4278 struct alc_txdesc *txd; 4279 struct alc_rxdesc *rxd; 4280 uint32_t reg; 4281 int i; 4282 4283 ALC_LOCK_ASSERT(sc); 4284 /* 4285 * Mark the interface down and cancel the watchdog timer. 4286 */ 4287 ifp = sc->alc_ifp; 4288 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 4289 sc->alc_flags &= ~ALC_FLAG_LINK; 4290 callout_stop(&sc->alc_tick_ch); 4291 sc->alc_watchdog_timer = 0; 4292 alc_stats_update(sc); 4293 /* Disable interrupts. */ 4294 CSR_WRITE_4(sc, ALC_INTR_MASK, 0); 4295 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF); 4296 /* Disable DMA. */ 4297 reg = CSR_READ_4(sc, ALC_DMA_CFG); 4298 reg &= ~(DMA_CFG_CMB_ENB | DMA_CFG_SMB_ENB); 4299 reg |= DMA_CFG_SMB_DIS; 4300 CSR_WRITE_4(sc, ALC_DMA_CFG, reg); 4301 DELAY(1000); 4302 /* Stop Rx/Tx MACs. */ 4303 alc_stop_mac(sc); 4304 /* Disable interrupts which might be touched in taskq handler. */ 4305 CSR_WRITE_4(sc, ALC_INTR_STATUS, 0xFFFFFFFF); 4306 /* Disable L0s/L1s */ 4307 alc_aspm(sc, 0, IFM_UNKNOWN); 4308 /* Reclaim Rx buffers that have been processed. */ 4309 if (sc->alc_cdata.alc_rxhead != NULL) 4310 m_freem(sc->alc_cdata.alc_rxhead); 4311 ALC_RXCHAIN_RESET(sc); 4312 /* 4313 * Free Tx/Rx mbufs still in the queues. 4314 */ 4315 for (i = 0; i < ALC_RX_RING_CNT; i++) { 4316 rxd = &sc->alc_cdata.alc_rxdesc[i]; 4317 if (rxd->rx_m != NULL) { 4318 bus_dmamap_sync(sc->alc_cdata.alc_rx_tag, 4319 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 4320 bus_dmamap_unload(sc->alc_cdata.alc_rx_tag, 4321 rxd->rx_dmamap); 4322 m_freem(rxd->rx_m); 4323 rxd->rx_m = NULL; 4324 } 4325 } 4326 for (i = 0; i < ALC_TX_RING_CNT; i++) { 4327 txd = &sc->alc_cdata.alc_txdesc[i]; 4328 if (txd->tx_m != NULL) { 4329 bus_dmamap_sync(sc->alc_cdata.alc_tx_tag, 4330 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 4331 bus_dmamap_unload(sc->alc_cdata.alc_tx_tag, 4332 txd->tx_dmamap); 4333 m_freem(txd->tx_m); 4334 txd->tx_m = NULL; 4335 } 4336 } 4337 } 4338 4339 static void 4340 alc_stop_mac(struct alc_softc *sc) 4341 { 4342 uint32_t reg; 4343 int i; 4344 4345 alc_stop_queue(sc); 4346 /* Disable Rx/Tx MAC. 
*/ 4347 reg = CSR_READ_4(sc, ALC_MAC_CFG); 4348 if ((reg & (MAC_CFG_TX_ENB | MAC_CFG_RX_ENB)) != 0) { 4349 reg &= ~(MAC_CFG_TX_ENB | MAC_CFG_RX_ENB); 4350 CSR_WRITE_4(sc, ALC_MAC_CFG, reg); 4351 } 4352 for (i = ALC_TIMEOUT; i > 0; i--) { 4353 reg = CSR_READ_4(sc, ALC_IDLE_STATUS); 4354 if ((reg & (IDLE_STATUS_RXMAC | IDLE_STATUS_TXMAC)) == 0) 4355 break; 4356 DELAY(10); 4357 } 4358 if (i == 0) 4359 device_printf(sc->alc_dev, 4360 "could not disable Rx/Tx MAC(0x%08x)!\n", reg); 4361 } 4362 4363 static void 4364 alc_start_queue(struct alc_softc *sc) 4365 { 4366 uint32_t qcfg[] = { 4367 0, 4368 RXQ_CFG_QUEUE0_ENB, 4369 RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB, 4370 RXQ_CFG_QUEUE0_ENB | RXQ_CFG_QUEUE1_ENB | RXQ_CFG_QUEUE2_ENB, 4371 RXQ_CFG_ENB 4372 }; 4373 uint32_t cfg; 4374 4375 ALC_LOCK_ASSERT(sc); 4376 4377 /* Enable RxQ. */ 4378 cfg = CSR_READ_4(sc, ALC_RXQ_CFG); 4379 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) { 4380 cfg &= ~RXQ_CFG_ENB; 4381 cfg |= qcfg[1]; 4382 } else 4383 cfg |= RXQ_CFG_QUEUE0_ENB; 4384 CSR_WRITE_4(sc, ALC_RXQ_CFG, cfg); 4385 /* Enable TxQ. */ 4386 cfg = CSR_READ_4(sc, ALC_TXQ_CFG); 4387 cfg |= TXQ_CFG_ENB; 4388 CSR_WRITE_4(sc, ALC_TXQ_CFG, cfg); 4389 } 4390 4391 static void 4392 alc_stop_queue(struct alc_softc *sc) 4393 { 4394 uint32_t reg; 4395 int i; 4396 4397 ALC_LOCK_ASSERT(sc); 4398 4399 /* Disable RxQ. */ 4400 reg = CSR_READ_4(sc, ALC_RXQ_CFG); 4401 if ((sc->alc_flags & ALC_FLAG_AR816X_FAMILY) == 0) { 4402 if ((reg & RXQ_CFG_ENB) != 0) { 4403 reg &= ~RXQ_CFG_ENB; 4404 CSR_WRITE_4(sc, ALC_RXQ_CFG, reg); 4405 } 4406 } else { 4407 if ((reg & RXQ_CFG_QUEUE0_ENB) != 0) { 4408 reg &= ~RXQ_CFG_QUEUE0_ENB; 4409 CSR_WRITE_4(sc, ALC_RXQ_CFG, reg); 4410 } 4411 } 4412 /* Disable TxQ. */ 4413 reg = CSR_READ_4(sc, ALC_TXQ_CFG); 4414 if ((reg & TXQ_CFG_ENB) != 0) { 4415 reg &= ~TXQ_CFG_ENB; 4416 CSR_WRITE_4(sc, ALC_TXQ_CFG, reg); 4417 } 4418 DELAY(40); 4419 for (i = ALC_TIMEOUT; i > 0; i--) { 4420 reg = CSR_READ_4(sc, ALC_IDLE_STATUS); 4421 if ((reg & (IDLE_STATUS_RXQ | IDLE_STATUS_TXQ)) == 0) 4422 break; 4423 DELAY(10); 4424 } 4425 if (i == 0) 4426 device_printf(sc->alc_dev, 4427 "could not disable RxQ/TxQ (0x%08x)!\n", reg); 4428 } 4429 4430 static void 4431 alc_init_tx_ring(struct alc_softc *sc) 4432 { 4433 struct alc_ring_data *rd; 4434 struct alc_txdesc *txd; 4435 int i; 4436 4437 ALC_LOCK_ASSERT(sc); 4438 4439 sc->alc_cdata.alc_tx_prod = 0; 4440 sc->alc_cdata.alc_tx_cons = 0; 4441 sc->alc_cdata.alc_tx_cnt = 0; 4442 4443 rd = &sc->alc_rdata; 4444 bzero(rd->alc_tx_ring, ALC_TX_RING_SZ); 4445 for (i = 0; i < ALC_TX_RING_CNT; i++) { 4446 txd = &sc->alc_cdata.alc_txdesc[i]; 4447 txd->tx_m = NULL; 4448 } 4449 4450 bus_dmamap_sync(sc->alc_cdata.alc_tx_ring_tag, 4451 sc->alc_cdata.alc_tx_ring_map, BUS_DMASYNC_PREWRITE); 4452 } 4453 4454 static int 4455 alc_init_rx_ring(struct alc_softc *sc) 4456 { 4457 struct alc_ring_data *rd; 4458 struct alc_rxdesc *rxd; 4459 int i; 4460 4461 ALC_LOCK_ASSERT(sc); 4462 4463 sc->alc_cdata.alc_rx_cons = ALC_RX_RING_CNT - 1; 4464 sc->alc_morework = 0; 4465 rd = &sc->alc_rdata; 4466 bzero(rd->alc_rx_ring, ALC_RX_RING_SZ); 4467 for (i = 0; i < ALC_RX_RING_CNT; i++) { 4468 rxd = &sc->alc_cdata.alc_rxdesc[i]; 4469 rxd->rx_m = NULL; 4470 rxd->rx_desc = &rd->alc_rx_ring[i]; 4471 if (alc_newbuf(sc, rxd) != 0) 4472 return (ENOBUFS); 4473 } 4474 4475 /* 4476 * Since the controller does not update Rx descriptors, the driver 4477 * does not have to read Rx descriptors back, so BUS_DMASYNC_PREWRITE 4478 * is enough to ensure coherence. 4479
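* The Rx return ring, by contrast, is written by the controller
* and read by the driver, so alc_init_rr_ring() below syncs it
* with both BUS_DMASYNC_PREREAD and BUS_DMASYNC_PREWRITE.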
4479 */ 4480 bus_dmamap_sync(sc->alc_cdata.alc_rx_ring_tag, 4481 sc->alc_cdata.alc_rx_ring_map, BUS_DMASYNC_PREWRITE); 4482 /* Let controller know availability of new Rx buffers. */ 4483 CSR_WRITE_4(sc, ALC_MBOX_RD0_PROD_IDX, sc->alc_cdata.alc_rx_cons); 4484 4485 return (0); 4486 } 4487 4488 static void 4489 alc_init_rr_ring(struct alc_softc *sc) 4490 { 4491 struct alc_ring_data *rd; 4492 4493 ALC_LOCK_ASSERT(sc); 4494 4495 sc->alc_cdata.alc_rr_cons = 0; 4496 ALC_RXCHAIN_RESET(sc); 4497 4498 rd = &sc->alc_rdata; 4499 bzero(rd->alc_rr_ring, ALC_RR_RING_SZ); 4500 bus_dmamap_sync(sc->alc_cdata.alc_rr_ring_tag, 4501 sc->alc_cdata.alc_rr_ring_map, 4502 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 4503 } 4504 4505 static void 4506 alc_init_cmb(struct alc_softc *sc) 4507 { 4508 struct alc_ring_data *rd; 4509 4510 ALC_LOCK_ASSERT(sc); 4511 4512 rd = &sc->alc_rdata; 4513 bzero(rd->alc_cmb, ALC_CMB_SZ); 4514 bus_dmamap_sync(sc->alc_cdata.alc_cmb_tag, sc->alc_cdata.alc_cmb_map, 4515 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 4516 } 4517 4518 static void 4519 alc_init_smb(struct alc_softc *sc) 4520 { 4521 struct alc_ring_data *rd; 4522 4523 ALC_LOCK_ASSERT(sc); 4524 4525 rd = &sc->alc_rdata; 4526 bzero(rd->alc_smb, ALC_SMB_SZ); 4527 bus_dmamap_sync(sc->alc_cdata.alc_smb_tag, sc->alc_cdata.alc_smb_map, 4528 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 4529 } 4530 4531 static void 4532 alc_rxvlan(struct alc_softc *sc) 4533 { 4534 struct ifnet *ifp; 4535 uint32_t reg; 4536 4537 ALC_LOCK_ASSERT(sc); 4538 4539 ifp = sc->alc_ifp; 4540 reg = CSR_READ_4(sc, ALC_MAC_CFG); 4541 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 4542 reg |= MAC_CFG_VLAN_TAG_STRIP; 4543 else 4544 reg &= ~MAC_CFG_VLAN_TAG_STRIP; 4545 CSR_WRITE_4(sc, ALC_MAC_CFG, reg); 4546 } 4547 4548 static void 4549 alc_rxfilter(struct alc_softc *sc) 4550 { 4551 struct ifnet *ifp; 4552 struct ifmultiaddr *ifma; 4553 uint32_t crc; 4554 uint32_t mchash[2]; 4555 uint32_t rxcfg; 4556 4557 ALC_LOCK_ASSERT(sc); 4558 4559 ifp = sc->alc_ifp; 4560 4561 bzero(mchash, sizeof(mchash)); 4562 rxcfg = CSR_READ_4(sc, ALC_MAC_CFG); 4563 rxcfg &= ~(MAC_CFG_ALLMULTI | MAC_CFG_BCAST | MAC_CFG_PROMISC); 4564 if ((ifp->if_flags & IFF_BROADCAST) != 0) 4565 rxcfg |= MAC_CFG_BCAST; 4566 if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { 4567 if ((ifp->if_flags & IFF_PROMISC) != 0) 4568 rxcfg |= MAC_CFG_PROMISC; 4569 if ((ifp->if_flags & IFF_ALLMULTI) != 0) 4570 rxcfg |= MAC_CFG_ALLMULTI; 4571 mchash[0] = 0xFFFFFFFF; 4572 mchash[1] = 0xFFFFFFFF; 4573 goto chipit; 4574 } 4575 4576 if_maddr_rlock(ifp); 4577 TAILQ_FOREACH(ifma, &sc->alc_ifp->if_multiaddrs, ifma_link) { 4578 if (ifma->ifma_addr->sa_family != AF_LINK) 4579 continue; 4580 crc = ether_crc32_be(LLADDR((struct sockaddr_dl *) 4581 ifma->ifma_addr), ETHER_ADDR_LEN); 4582 mchash[crc >> 31] |= 1 << ((crc >> 26) & 0x1f); 4583 } 4584 if_maddr_runlock(ifp); 4585 4586 chipit: 4587 CSR_WRITE_4(sc, ALC_MAR0, mchash[0]); 4588 CSR_WRITE_4(sc, ALC_MAR1, mchash[1]); 4589 CSR_WRITE_4(sc, ALC_MAC_CFG, rxcfg); 4590 } 4591 4592 static int 4593 sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) 4594 { 4595 int error, value; 4596 4597 if (arg1 == NULL) 4598 return (EINVAL); 4599 value = *(int *)arg1; 4600 error = sysctl_handle_int(oidp, &value, 0, req); 4601 if (error || req->newptr == NULL) 4602 return (error); 4603 if (value < low || value > high) 4604 return (EINVAL); 4605 *(int *)arg1 = value; 4606 4607 return (0); 4608 } 4609 4610 static int 4611 sysctl_hw_alc_proc_limit(SYSCTL_HANDLER_ARGS) 4612 { 4613 return 
(sysctl_int_range(oidp, arg1, arg2, req, 4614 ALC_PROC_MIN, ALC_PROC_MAX)); 4615 } 4616 4617 static int 4618 sysctl_hw_alc_int_mod(SYSCTL_HANDLER_ARGS) 4619 { 4620 4621 return (sysctl_int_range(oidp, arg1, arg2, req, 4622 ALC_IM_TIMER_MIN, ALC_IM_TIMER_MAX)); 4623 } 4624
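/*
 * Sketch (illustrative, assuming the usual SYSCTL_ADD_PROC(9) wiring):
 * the two handlers above are meant to be attached to the driver's
 * sysctl tree at attach time so the tunables can be adjusted at
 * runtime, roughly as follows; the node name here is illustrative.
 *
 *	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->alc_dev), children,
 *	    OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW,
 *	    &sc->alc_process_limit, 0, sysctl_hw_alc_proc_limit, "I",
 *	    "max number of Rx events to process");
 *
 * where children is the OID list of the device's sysctl node.
 */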