1 /*- 2 * Copyright (c) 2016 Jared McNeill <jmcneill@invisible.ca> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 15 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 16 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 17 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 18 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 19 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 20 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 21 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 22 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 
25 * 26 * $FreeBSD$ 27 */ 28 29 /* 30 * Allwinner Gigabit Ethernet MAC (EMAC) controller 31 */ 32 33 #include "opt_device_polling.h" 34 35 #include <sys/cdefs.h> 36 __FBSDID("$FreeBSD$"); 37 38 #include <sys/param.h> 39 #include <sys/systm.h> 40 #include <sys/bus.h> 41 #include <sys/rman.h> 42 #include <sys/kernel.h> 43 #include <sys/endian.h> 44 #include <sys/mbuf.h> 45 #include <sys/socket.h> 46 #include <sys/sockio.h> 47 #include <sys/module.h> 48 #include <sys/taskqueue.h> 49 #include <sys/gpio.h> 50 51 #include <net/bpf.h> 52 #include <net/if.h> 53 #include <net/ethernet.h> 54 #include <net/if_dl.h> 55 #include <net/if_media.h> 56 #include <net/if_types.h> 57 #include <net/if_var.h> 58 59 #include <machine/bus.h> 60 61 #include <dev/ofw/ofw_bus.h> 62 #include <dev/ofw/ofw_bus_subr.h> 63 64 #include <arm/allwinner/if_awgreg.h> 65 #include <arm/allwinner/aw_sid.h> 66 #include <dev/mii/mii.h> 67 #include <dev/mii/miivar.h> 68 69 #include <dev/extres/clk/clk.h> 70 #include <dev/extres/hwreset/hwreset.h> 71 #include <dev/extres/regulator/regulator.h> 72 #include <dev/extres/syscon/syscon.h> 73 74 #include "syscon_if.h" 75 #include "miibus_if.h" 76 #include "gpio_if.h" 77 78 #define RD4(sc, reg) bus_read_4((sc)->res[_RES_EMAC], (reg)) 79 #define WR4(sc, reg, val) bus_write_4((sc)->res[_RES_EMAC], (reg), (val)) 80 81 #define AWG_LOCK(sc) mtx_lock(&(sc)->mtx) 82 #define AWG_UNLOCK(sc) mtx_unlock(&(sc)->mtx); 83 #define AWG_ASSERT_LOCKED(sc) mtx_assert(&(sc)->mtx, MA_OWNED) 84 #define AWG_ASSERT_UNLOCKED(sc) mtx_assert(&(sc)->mtx, MA_NOTOWNED) 85 86 #define DESC_ALIGN 4 87 #define TX_DESC_COUNT 1024 88 #define TX_DESC_SIZE (sizeof(struct emac_desc) * TX_DESC_COUNT) 89 #define RX_DESC_COUNT 256 90 #define RX_DESC_SIZE (sizeof(struct emac_desc) * RX_DESC_COUNT) 91 92 #define DESC_OFF(n) ((n) * sizeof(struct emac_desc)) 93 #define TX_NEXT(n) (((n) + 1) & (TX_DESC_COUNT - 1)) 94 #define TX_SKIP(n, o) (((n) + (o)) & (TX_DESC_COUNT - 1)) 95 #define RX_NEXT(n) (((n) + 1) & 
(RX_DESC_COUNT - 1)) 96 97 #define TX_MAX_SEGS 20 98 99 #define SOFT_RST_RETRY 1000 100 #define MII_BUSY_RETRY 1000 101 #define MDIO_FREQ 2500000 102 103 #define BURST_LEN_DEFAULT 8 104 #define RX_TX_PRI_DEFAULT 0 105 #define PAUSE_TIME_DEFAULT 0x400 106 #define TX_INTERVAL_DEFAULT 64 107 #define RX_BATCH_DEFAULT 64 108 109 /* syscon EMAC clock register */ 110 #define EMAC_CLK_REG 0x30 111 #define EMAC_CLK_EPHY_ADDR (0x1f << 20) /* H3 */ 112 #define EMAC_CLK_EPHY_ADDR_SHIFT 20 113 #define EMAC_CLK_EPHY_LED_POL (1 << 17) /* H3 */ 114 #define EMAC_CLK_EPHY_SHUTDOWN (1 << 16) /* H3 */ 115 #define EMAC_CLK_EPHY_SELECT (1 << 15) /* H3 */ 116 #define EMAC_CLK_RMII_EN (1 << 13) 117 #define EMAC_CLK_ETXDC (0x7 << 10) 118 #define EMAC_CLK_ETXDC_SHIFT 10 119 #define EMAC_CLK_ERXDC (0x1f << 5) 120 #define EMAC_CLK_ERXDC_SHIFT 5 121 #define EMAC_CLK_PIT (0x1 << 2) 122 #define EMAC_CLK_PIT_MII (0 << 2) 123 #define EMAC_CLK_PIT_RGMII (1 << 2) 124 #define EMAC_CLK_SRC (0x3 << 0) 125 #define EMAC_CLK_SRC_MII (0 << 0) 126 #define EMAC_CLK_SRC_EXT_RGMII (1 << 0) 127 #define EMAC_CLK_SRC_RGMII (2 << 0) 128 129 /* Burst length of RX and TX DMA transfers */ 130 static int awg_burst_len = BURST_LEN_DEFAULT; 131 TUNABLE_INT("hw.awg.burst_len", &awg_burst_len); 132 133 /* RX / TX DMA priority. If 1, RX DMA has priority over TX DMA. 
*/ 134 static int awg_rx_tx_pri = RX_TX_PRI_DEFAULT; 135 TUNABLE_INT("hw.awg.rx_tx_pri", &awg_rx_tx_pri); 136 137 /* Pause time field in the transmitted control frame */ 138 static int awg_pause_time = PAUSE_TIME_DEFAULT; 139 TUNABLE_INT("hw.awg.pause_time", &awg_pause_time); 140 141 /* Request a TX interrupt every <n> descriptors */ 142 static int awg_tx_interval = TX_INTERVAL_DEFAULT; 143 TUNABLE_INT("hw.awg.tx_interval", &awg_tx_interval); 144 145 /* Maximum number of mbufs to send to if_input */ 146 static int awg_rx_batch = RX_BATCH_DEFAULT; 147 TUNABLE_INT("hw.awg.rx_batch", &awg_rx_batch); 148 149 enum awg_type { 150 EMAC_A83T = 1, 151 EMAC_H3, 152 EMAC_A64, 153 }; 154 155 static struct ofw_compat_data compat_data[] = { 156 { "allwinner,sun8i-a83t-emac", EMAC_A83T }, 157 { "allwinner,sun8i-h3-emac", EMAC_H3 }, 158 { "allwinner,sun50i-a64-emac", EMAC_A64 }, 159 { NULL, 0 } 160 }; 161 162 struct awg_bufmap { 163 bus_dmamap_t map; 164 struct mbuf *mbuf; 165 }; 166 167 struct awg_txring { 168 bus_dma_tag_t desc_tag; 169 bus_dmamap_t desc_map; 170 struct emac_desc *desc_ring; 171 bus_addr_t desc_ring_paddr; 172 bus_dma_tag_t buf_tag; 173 struct awg_bufmap buf_map[TX_DESC_COUNT]; 174 u_int cur, next, queued; 175 u_int segs; 176 }; 177 178 struct awg_rxring { 179 bus_dma_tag_t desc_tag; 180 bus_dmamap_t desc_map; 181 struct emac_desc *desc_ring; 182 bus_addr_t desc_ring_paddr; 183 bus_dma_tag_t buf_tag; 184 struct awg_bufmap buf_map[RX_DESC_COUNT]; 185 bus_dmamap_t buf_spare_map; 186 u_int cur; 187 }; 188 189 enum { 190 _RES_EMAC, 191 _RES_IRQ, 192 _RES_SYSCON, 193 _RES_NITEMS 194 }; 195 196 struct awg_softc { 197 struct resource *res[_RES_NITEMS]; 198 struct mtx mtx; 199 if_t ifp; 200 device_t dev; 201 device_t miibus; 202 struct callout stat_ch; 203 struct task link_task; 204 void *ih; 205 u_int mdc_div_ratio_m; 206 int link; 207 int if_flags; 208 enum awg_type type; 209 struct syscon *syscon; 210 211 struct awg_txring tx; 212 struct awg_rxring rx; 213 }; 214 215 
static struct resource_spec awg_spec[] = { 216 { SYS_RES_MEMORY, 0, RF_ACTIVE }, 217 { SYS_RES_IRQ, 0, RF_ACTIVE }, 218 { SYS_RES_MEMORY, 1, RF_ACTIVE | RF_OPTIONAL }, 219 { -1, 0 } 220 }; 221 222 static void awg_txeof(struct awg_softc *sc); 223 224 static uint32_t syscon_read_emac_clk_reg(device_t dev); 225 static void syscon_write_emac_clk_reg(device_t dev, uint32_t val); 226 static phandle_t awg_get_phy_node(device_t dev); 227 static bool awg_has_internal_phy(device_t dev); 228 229 static int 230 awg_miibus_readreg(device_t dev, int phy, int reg) 231 { 232 struct awg_softc *sc; 233 int retry, val; 234 235 sc = device_get_softc(dev); 236 val = 0; 237 238 WR4(sc, EMAC_MII_CMD, 239 (sc->mdc_div_ratio_m << MDC_DIV_RATIO_M_SHIFT) | 240 (phy << PHY_ADDR_SHIFT) | 241 (reg << PHY_REG_ADDR_SHIFT) | 242 MII_BUSY); 243 for (retry = MII_BUSY_RETRY; retry > 0; retry--) { 244 if ((RD4(sc, EMAC_MII_CMD) & MII_BUSY) == 0) { 245 val = RD4(sc, EMAC_MII_DATA); 246 break; 247 } 248 DELAY(10); 249 } 250 251 if (retry == 0) 252 device_printf(dev, "phy read timeout, phy=%d reg=%d\n", 253 phy, reg); 254 255 return (val); 256 } 257 258 static int 259 awg_miibus_writereg(device_t dev, int phy, int reg, int val) 260 { 261 struct awg_softc *sc; 262 int retry; 263 264 sc = device_get_softc(dev); 265 266 WR4(sc, EMAC_MII_DATA, val); 267 WR4(sc, EMAC_MII_CMD, 268 (sc->mdc_div_ratio_m << MDC_DIV_RATIO_M_SHIFT) | 269 (phy << PHY_ADDR_SHIFT) | 270 (reg << PHY_REG_ADDR_SHIFT) | 271 MII_WR | MII_BUSY); 272 for (retry = MII_BUSY_RETRY; retry > 0; retry--) { 273 if ((RD4(sc, EMAC_MII_CMD) & MII_BUSY) == 0) 274 break; 275 DELAY(10); 276 } 277 278 if (retry == 0) 279 device_printf(dev, "phy write timeout, phy=%d reg=%d\n", 280 phy, reg); 281 282 return (0); 283 } 284 285 static void 286 awg_update_link_locked(struct awg_softc *sc) 287 { 288 struct mii_data *mii; 289 uint32_t val; 290 291 AWG_ASSERT_LOCKED(sc); 292 293 if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0) 294 return; 295 mii = 
device_get_softc(sc->miibus); 296 297 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 298 (IFM_ACTIVE | IFM_AVALID)) { 299 switch (IFM_SUBTYPE(mii->mii_media_active)) { 300 case IFM_1000_T: 301 case IFM_1000_SX: 302 case IFM_100_TX: 303 case IFM_10_T: 304 sc->link = 1; 305 break; 306 default: 307 sc->link = 0; 308 break; 309 } 310 } else 311 sc->link = 0; 312 313 if (sc->link == 0) 314 return; 315 316 val = RD4(sc, EMAC_BASIC_CTL_0); 317 val &= ~(BASIC_CTL_SPEED | BASIC_CTL_DUPLEX); 318 319 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || 320 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) 321 val |= BASIC_CTL_SPEED_1000 << BASIC_CTL_SPEED_SHIFT; 322 else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) 323 val |= BASIC_CTL_SPEED_100 << BASIC_CTL_SPEED_SHIFT; 324 else 325 val |= BASIC_CTL_SPEED_10 << BASIC_CTL_SPEED_SHIFT; 326 327 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) 328 val |= BASIC_CTL_DUPLEX; 329 330 WR4(sc, EMAC_BASIC_CTL_0, val); 331 332 val = RD4(sc, EMAC_RX_CTL_0); 333 val &= ~RX_FLOW_CTL_EN; 334 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) 335 val |= RX_FLOW_CTL_EN; 336 WR4(sc, EMAC_RX_CTL_0, val); 337 338 val = RD4(sc, EMAC_TX_FLOW_CTL); 339 val &= ~(PAUSE_TIME|TX_FLOW_CTL_EN); 340 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) 341 val |= TX_FLOW_CTL_EN; 342 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) 343 val |= awg_pause_time << PAUSE_TIME_SHIFT; 344 WR4(sc, EMAC_TX_FLOW_CTL, val); 345 } 346 347 static void 348 awg_link_task(void *arg, int pending) 349 { 350 struct awg_softc *sc; 351 352 sc = arg; 353 354 AWG_LOCK(sc); 355 awg_update_link_locked(sc); 356 AWG_UNLOCK(sc); 357 } 358 359 static void 360 awg_miibus_statchg(device_t dev) 361 { 362 struct awg_softc *sc; 363 364 sc = device_get_softc(dev); 365 366 taskqueue_enqueue(taskqueue_swi, &sc->link_task); 367 } 368 369 static void 370 awg_media_status(if_t ifp, struct ifmediareq *ifmr) 371 { 372 struct 
awg_softc *sc; 373 struct mii_data *mii; 374 375 sc = if_getsoftc(ifp); 376 mii = device_get_softc(sc->miibus); 377 378 AWG_LOCK(sc); 379 mii_pollstat(mii); 380 ifmr->ifm_active = mii->mii_media_active; 381 ifmr->ifm_status = mii->mii_media_status; 382 AWG_UNLOCK(sc); 383 } 384 385 static int 386 awg_media_change(if_t ifp) 387 { 388 struct awg_softc *sc; 389 struct mii_data *mii; 390 int error; 391 392 sc = if_getsoftc(ifp); 393 mii = device_get_softc(sc->miibus); 394 395 AWG_LOCK(sc); 396 error = mii_mediachg(mii); 397 AWG_UNLOCK(sc); 398 399 return (error); 400 } 401 402 static int 403 awg_encap(struct awg_softc *sc, struct mbuf **mp) 404 { 405 bus_dmamap_t map; 406 bus_dma_segment_t segs[TX_MAX_SEGS]; 407 int error, nsegs, cur, first, last, i; 408 u_int csum_flags; 409 uint32_t flags, status; 410 struct mbuf *m; 411 412 cur = first = sc->tx.cur; 413 map = sc->tx.buf_map[first].map; 414 415 m = *mp; 416 error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag, map, m, segs, 417 &nsegs, BUS_DMA_NOWAIT); 418 if (error == EFBIG) { 419 m = m_collapse(m, M_NOWAIT, TX_MAX_SEGS); 420 if (m == NULL) { 421 device_printf(sc->dev, "awg_encap: m_collapse failed\n"); 422 m_freem(*mp); 423 *mp = NULL; 424 return (ENOMEM); 425 } 426 *mp = m; 427 error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag, map, m, 428 segs, &nsegs, BUS_DMA_NOWAIT); 429 if (error != 0) { 430 m_freem(*mp); 431 *mp = NULL; 432 } 433 } 434 if (error != 0) { 435 device_printf(sc->dev, "awg_encap: bus_dmamap_load_mbuf_sg failed\n"); 436 return (error); 437 } 438 if (nsegs == 0) { 439 m_freem(*mp); 440 *mp = NULL; 441 return (EIO); 442 } 443 444 if (sc->tx.queued + nsegs > TX_DESC_COUNT) { 445 bus_dmamap_unload(sc->tx.buf_tag, map); 446 return (ENOBUFS); 447 } 448 449 bus_dmamap_sync(sc->tx.buf_tag, map, BUS_DMASYNC_PREWRITE); 450 451 flags = TX_FIR_DESC; 452 status = 0; 453 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) { 454 if ((m->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) != 0) 455 csum_flags = TX_CHECKSUM_CTL_FULL; 456 
else 457 csum_flags = TX_CHECKSUM_CTL_IP; 458 flags |= (csum_flags << TX_CHECKSUM_CTL_SHIFT); 459 } 460 461 for (i = 0; i < nsegs; i++) { 462 sc->tx.segs++; 463 if (i == nsegs - 1) { 464 flags |= TX_LAST_DESC; 465 /* 466 * Can only request TX completion 467 * interrupt on last descriptor. 468 */ 469 if (sc->tx.segs >= awg_tx_interval) { 470 sc->tx.segs = 0; 471 flags |= TX_INT_CTL; 472 } 473 } 474 475 sc->tx.desc_ring[cur].addr = htole32((uint32_t)segs[i].ds_addr); 476 sc->tx.desc_ring[cur].size = htole32(flags | segs[i].ds_len); 477 sc->tx.desc_ring[cur].status = htole32(status); 478 479 flags &= ~TX_FIR_DESC; 480 /* 481 * Setting of the valid bit in the first descriptor is 482 * deferred until the whole chain is fully set up. 483 */ 484 status = TX_DESC_CTL; 485 486 ++sc->tx.queued; 487 cur = TX_NEXT(cur); 488 } 489 490 sc->tx.cur = cur; 491 492 /* Store mapping and mbuf in the last segment */ 493 last = TX_SKIP(cur, TX_DESC_COUNT - 1); 494 sc->tx.buf_map[first].map = sc->tx.buf_map[last].map; 495 sc->tx.buf_map[last].map = map; 496 sc->tx.buf_map[last].mbuf = m; 497 498 /* 499 * The whole mbuf chain has been DMA mapped, 500 * fix the first descriptor. 
501 */ 502 sc->tx.desc_ring[first].status = htole32(TX_DESC_CTL); 503 504 return (0); 505 } 506 507 static void 508 awg_clean_txbuf(struct awg_softc *sc, int index) 509 { 510 struct awg_bufmap *bmap; 511 512 --sc->tx.queued; 513 514 bmap = &sc->tx.buf_map[index]; 515 if (bmap->mbuf != NULL) { 516 bus_dmamap_sync(sc->tx.buf_tag, bmap->map, 517 BUS_DMASYNC_POSTWRITE); 518 bus_dmamap_unload(sc->tx.buf_tag, bmap->map); 519 m_freem(bmap->mbuf); 520 bmap->mbuf = NULL; 521 } 522 } 523 524 static void 525 awg_setup_rxdesc(struct awg_softc *sc, int index, bus_addr_t paddr) 526 { 527 uint32_t status, size; 528 529 status = RX_DESC_CTL; 530 size = MCLBYTES - 1; 531 532 sc->rx.desc_ring[index].addr = htole32((uint32_t)paddr); 533 sc->rx.desc_ring[index].size = htole32(size); 534 sc->rx.desc_ring[index].status = htole32(status); 535 } 536 537 static void 538 awg_reuse_rxdesc(struct awg_softc *sc, int index) 539 { 540 541 sc->rx.desc_ring[index].status = htole32(RX_DESC_CTL); 542 } 543 544 static int 545 awg_newbuf_rx(struct awg_softc *sc, int index) 546 { 547 struct mbuf *m; 548 bus_dma_segment_t seg; 549 bus_dmamap_t map; 550 int nsegs; 551 552 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 553 if (m == NULL) 554 return (ENOBUFS); 555 556 m->m_pkthdr.len = m->m_len = m->m_ext.ext_size; 557 m_adj(m, ETHER_ALIGN); 558 559 if (bus_dmamap_load_mbuf_sg(sc->rx.buf_tag, sc->rx.buf_spare_map, 560 m, &seg, &nsegs, BUS_DMA_NOWAIT) != 0) { 561 m_freem(m); 562 return (ENOBUFS); 563 } 564 565 if (sc->rx.buf_map[index].mbuf != NULL) { 566 bus_dmamap_sync(sc->rx.buf_tag, sc->rx.buf_map[index].map, 567 BUS_DMASYNC_POSTREAD); 568 bus_dmamap_unload(sc->rx.buf_tag, sc->rx.buf_map[index].map); 569 } 570 map = sc->rx.buf_map[index].map; 571 sc->rx.buf_map[index].map = sc->rx.buf_spare_map; 572 sc->rx.buf_spare_map = map; 573 bus_dmamap_sync(sc->rx.buf_tag, sc->rx.buf_map[index].map, 574 BUS_DMASYNC_PREREAD); 575 576 sc->rx.buf_map[index].mbuf = m; 577 awg_setup_rxdesc(sc, index, seg.ds_addr); 578 579 
return (0); 580 } 581 582 static void 583 awg_start_locked(struct awg_softc *sc) 584 { 585 struct mbuf *m; 586 uint32_t val; 587 if_t ifp; 588 int cnt, err; 589 590 AWG_ASSERT_LOCKED(sc); 591 592 if (!sc->link) 593 return; 594 595 ifp = sc->ifp; 596 597 if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) != 598 IFF_DRV_RUNNING) 599 return; 600 601 for (cnt = 0; ; cnt++) { 602 m = if_dequeue(ifp); 603 if (m == NULL) 604 break; 605 606 err = awg_encap(sc, &m); 607 if (err != 0) { 608 if (err == ENOBUFS) 609 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); 610 if (m != NULL) 611 if_sendq_prepend(ifp, m); 612 break; 613 } 614 if_bpfmtap(ifp, m); 615 } 616 617 if (cnt != 0) { 618 bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map, 619 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 620 621 /* Start and run TX DMA */ 622 val = RD4(sc, EMAC_TX_CTL_1); 623 WR4(sc, EMAC_TX_CTL_1, val | TX_DMA_START); 624 } 625 } 626 627 static void 628 awg_start(if_t ifp) 629 { 630 struct awg_softc *sc; 631 632 sc = if_getsoftc(ifp); 633 634 AWG_LOCK(sc); 635 awg_start_locked(sc); 636 AWG_UNLOCK(sc); 637 } 638 639 static void 640 awg_tick(void *softc) 641 { 642 struct awg_softc *sc; 643 struct mii_data *mii; 644 if_t ifp; 645 int link; 646 647 sc = softc; 648 ifp = sc->ifp; 649 mii = device_get_softc(sc->miibus); 650 651 AWG_ASSERT_LOCKED(sc); 652 653 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) 654 return; 655 656 link = sc->link; 657 mii_tick(mii); 658 if (sc->link && !link) 659 awg_start_locked(sc); 660 661 callout_reset(&sc->stat_ch, hz, awg_tick, sc); 662 } 663 664 /* Bit Reversal - http://aggregate.org/MAGIC/#Bit%20Reversal */ 665 static uint32_t 666 bitrev32(uint32_t x) 667 { 668 x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1)); 669 x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2)); 670 x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4)); 671 x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8)); 672 673 return (x >> 16) | (x << 16); 674 } 675 676 static void 
677 awg_setup_rxfilter(struct awg_softc *sc) 678 { 679 uint32_t val, crc, hashreg, hashbit, hash[2], machi, maclo; 680 int mc_count, mcnt, i; 681 uint8_t *eaddr, *mta; 682 if_t ifp; 683 684 AWG_ASSERT_LOCKED(sc); 685 686 ifp = sc->ifp; 687 val = 0; 688 hash[0] = hash[1] = 0; 689 690 mc_count = if_multiaddr_count(ifp, -1); 691 692 if (if_getflags(ifp) & IFF_PROMISC) 693 val |= DIS_ADDR_FILTER; 694 else if (if_getflags(ifp) & IFF_ALLMULTI) { 695 val |= RX_ALL_MULTICAST; 696 hash[0] = hash[1] = ~0; 697 } else if (mc_count > 0) { 698 val |= HASH_MULTICAST; 699 700 mta = malloc(sizeof(unsigned char) * ETHER_ADDR_LEN * mc_count, 701 M_DEVBUF, M_NOWAIT); 702 if (mta == NULL) { 703 if_printf(ifp, 704 "failed to allocate temporary multicast list\n"); 705 return; 706 } 707 708 if_multiaddr_array(ifp, mta, &mcnt, mc_count); 709 for (i = 0; i < mcnt; i++) { 710 crc = ether_crc32_le(mta + (i * ETHER_ADDR_LEN), 711 ETHER_ADDR_LEN) & 0x7f; 712 crc = bitrev32(~crc) >> 26; 713 hashreg = (crc >> 5); 714 hashbit = (crc & 0x1f); 715 hash[hashreg] |= (1 << hashbit); 716 } 717 718 free(mta, M_DEVBUF); 719 } 720 721 /* Write our unicast address */ 722 eaddr = IF_LLADDR(ifp); 723 machi = (eaddr[5] << 8) | eaddr[4]; 724 maclo = (eaddr[3] << 24) | (eaddr[2] << 16) | (eaddr[1] << 8) | 725 (eaddr[0] << 0); 726 WR4(sc, EMAC_ADDR_HIGH(0), machi); 727 WR4(sc, EMAC_ADDR_LOW(0), maclo); 728 729 /* Multicast hash filters */ 730 WR4(sc, EMAC_RX_HASH_0, hash[1]); 731 WR4(sc, EMAC_RX_HASH_1, hash[0]); 732 733 /* RX frame filter config */ 734 WR4(sc, EMAC_RX_FRM_FLT, val); 735 } 736 737 static void 738 awg_enable_intr(struct awg_softc *sc) 739 { 740 /* Enable interrupts */ 741 WR4(sc, EMAC_INT_EN, RX_INT_EN | TX_INT_EN | TX_BUF_UA_INT_EN); 742 } 743 744 static void 745 awg_disable_intr(struct awg_softc *sc) 746 { 747 /* Disable interrupts */ 748 WR4(sc, EMAC_INT_EN, 0); 749 } 750 751 static void 752 awg_init_locked(struct awg_softc *sc) 753 { 754 struct mii_data *mii; 755 uint32_t val; 756 if_t ifp; 
757 758 mii = device_get_softc(sc->miibus); 759 ifp = sc->ifp; 760 761 AWG_ASSERT_LOCKED(sc); 762 763 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) 764 return; 765 766 awg_setup_rxfilter(sc); 767 768 /* Configure DMA burst length and priorities */ 769 val = awg_burst_len << BASIC_CTL_BURST_LEN_SHIFT; 770 if (awg_rx_tx_pri) 771 val |= BASIC_CTL_RX_TX_PRI; 772 WR4(sc, EMAC_BASIC_CTL_1, val); 773 774 /* Enable interrupts */ 775 #ifdef DEVICE_POLLING 776 if ((if_getcapenable(ifp) & IFCAP_POLLING) == 0) 777 awg_enable_intr(sc); 778 else 779 awg_disable_intr(sc); 780 #else 781 awg_enable_intr(sc); 782 #endif 783 784 /* Enable transmit DMA */ 785 val = RD4(sc, EMAC_TX_CTL_1); 786 WR4(sc, EMAC_TX_CTL_1, val | TX_DMA_EN | TX_MD | TX_NEXT_FRAME); 787 788 /* Enable receive DMA */ 789 val = RD4(sc, EMAC_RX_CTL_1); 790 WR4(sc, EMAC_RX_CTL_1, val | RX_DMA_EN | RX_MD); 791 792 /* Enable transmitter */ 793 val = RD4(sc, EMAC_TX_CTL_0); 794 WR4(sc, EMAC_TX_CTL_0, val | TX_EN); 795 796 /* Enable receiver */ 797 val = RD4(sc, EMAC_RX_CTL_0); 798 WR4(sc, EMAC_RX_CTL_0, val | RX_EN | CHECK_CRC); 799 800 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); 801 802 mii_mediachg(mii); 803 callout_reset(&sc->stat_ch, hz, awg_tick, sc); 804 } 805 806 static void 807 awg_init(void *softc) 808 { 809 struct awg_softc *sc; 810 811 sc = softc; 812 813 AWG_LOCK(sc); 814 awg_init_locked(sc); 815 AWG_UNLOCK(sc); 816 } 817 818 static void 819 awg_stop(struct awg_softc *sc) 820 { 821 if_t ifp; 822 uint32_t val; 823 int i; 824 825 AWG_ASSERT_LOCKED(sc); 826 827 ifp = sc->ifp; 828 829 callout_stop(&sc->stat_ch); 830 831 /* Stop transmit DMA and flush data in the TX FIFO */ 832 val = RD4(sc, EMAC_TX_CTL_1); 833 val &= ~TX_DMA_EN; 834 val |= FLUSH_TX_FIFO; 835 WR4(sc, EMAC_TX_CTL_1, val); 836 837 /* Disable transmitter */ 838 val = RD4(sc, EMAC_TX_CTL_0); 839 WR4(sc, EMAC_TX_CTL_0, val & ~TX_EN); 840 841 /* Disable receiver */ 842 val = RD4(sc, EMAC_RX_CTL_0); 843 WR4(sc, EMAC_RX_CTL_0, val & ~RX_EN); 
844 845 /* Disable interrupts */ 846 awg_disable_intr(sc); 847 848 /* Disable transmit DMA */ 849 val = RD4(sc, EMAC_TX_CTL_1); 850 WR4(sc, EMAC_TX_CTL_1, val & ~TX_DMA_EN); 851 852 /* Disable receive DMA */ 853 val = RD4(sc, EMAC_RX_CTL_1); 854 WR4(sc, EMAC_RX_CTL_1, val & ~RX_DMA_EN); 855 856 sc->link = 0; 857 858 /* Finish handling transmitted buffers */ 859 awg_txeof(sc); 860 861 /* Release any untransmitted buffers. */ 862 for (i = sc->tx.next; sc->tx.queued > 0; i = TX_NEXT(i)) { 863 val = le32toh(sc->tx.desc_ring[i].status); 864 if ((val & TX_DESC_CTL) != 0) 865 break; 866 awg_clean_txbuf(sc, i); 867 } 868 sc->tx.next = i; 869 for (; sc->tx.queued > 0; i = TX_NEXT(i)) { 870 sc->tx.desc_ring[i].status = 0; 871 awg_clean_txbuf(sc, i); 872 } 873 sc->tx.cur = sc->tx.next; 874 bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map, 875 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 876 877 /* Setup RX buffers for reuse */ 878 bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map, 879 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 880 881 for (i = sc->rx.cur; ; i = RX_NEXT(i)) { 882 val = le32toh(sc->rx.desc_ring[i].status); 883 if ((val & RX_DESC_CTL) != 0) 884 break; 885 awg_reuse_rxdesc(sc, i); 886 } 887 sc->rx.cur = i; 888 bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map, 889 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 890 891 if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 892 } 893 894 static int 895 awg_rxintr(struct awg_softc *sc) 896 { 897 if_t ifp; 898 struct mbuf *m, *mh, *mt; 899 int error, index, len, cnt, npkt; 900 uint32_t status; 901 902 ifp = sc->ifp; 903 mh = mt = NULL; 904 cnt = 0; 905 npkt = 0; 906 907 bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map, 908 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 909 910 for (index = sc->rx.cur; ; index = RX_NEXT(index)) { 911 status = le32toh(sc->rx.desc_ring[index].status); 912 if ((status & RX_DESC_CTL) != 0) 913 break; 914 915 len = (status & RX_FRM_LEN) >> RX_FRM_LEN_SHIFT; 916 917 if (len == 0) { 
918 if ((status & (RX_NO_ENOUGH_BUF_ERR | RX_OVERFLOW_ERR)) != 0) 919 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 920 awg_reuse_rxdesc(sc, index); 921 continue; 922 } 923 924 m = sc->rx.buf_map[index].mbuf; 925 926 error = awg_newbuf_rx(sc, index); 927 if (error != 0) { 928 if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); 929 awg_reuse_rxdesc(sc, index); 930 continue; 931 } 932 933 m->m_pkthdr.rcvif = ifp; 934 m->m_pkthdr.len = len; 935 m->m_len = len; 936 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); 937 938 if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0 && 939 (status & RX_FRM_TYPE) != 0) { 940 m->m_pkthdr.csum_flags = CSUM_IP_CHECKED; 941 if ((status & RX_HEADER_ERR) == 0) 942 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 943 if ((status & RX_PAYLOAD_ERR) == 0) { 944 m->m_pkthdr.csum_flags |= 945 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 946 m->m_pkthdr.csum_data = 0xffff; 947 } 948 } 949 950 m->m_nextpkt = NULL; 951 if (mh == NULL) 952 mh = m; 953 else 954 mt->m_nextpkt = m; 955 mt = m; 956 ++cnt; 957 ++npkt; 958 959 if (cnt == awg_rx_batch) { 960 AWG_UNLOCK(sc); 961 if_input(ifp, mh); 962 AWG_LOCK(sc); 963 mh = mt = NULL; 964 cnt = 0; 965 } 966 } 967 968 if (index != sc->rx.cur) { 969 bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map, 970 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 971 } 972 973 if (mh != NULL) { 974 AWG_UNLOCK(sc); 975 if_input(ifp, mh); 976 AWG_LOCK(sc); 977 } 978 979 sc->rx.cur = index; 980 981 return (npkt); 982 } 983 984 static void 985 awg_txeof(struct awg_softc *sc) 986 { 987 struct emac_desc *desc; 988 uint32_t status, size; 989 if_t ifp; 990 int i, prog; 991 992 AWG_ASSERT_LOCKED(sc); 993 994 bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map, 995 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 996 997 ifp = sc->ifp; 998 999 prog = 0; 1000 for (i = sc->tx.next; sc->tx.queued > 0; i = TX_NEXT(i)) { 1001 desc = &sc->tx.desc_ring[i]; 1002 status = le32toh(desc->status); 1003 if ((status & TX_DESC_CTL) != 0) 1004 break; 1005 size = le32toh(desc->size); 1006 if 
(size & TX_LAST_DESC) { 1007 if ((status & (TX_HEADER_ERR | TX_PAYLOAD_ERR)) != 0) 1008 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 1009 else 1010 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); 1011 } 1012 prog++; 1013 awg_clean_txbuf(sc, i); 1014 } 1015 1016 if (prog > 0) { 1017 sc->tx.next = i; 1018 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); 1019 } 1020 } 1021 1022 static void 1023 awg_intr(void *arg) 1024 { 1025 struct awg_softc *sc; 1026 uint32_t val; 1027 1028 sc = arg; 1029 1030 AWG_LOCK(sc); 1031 val = RD4(sc, EMAC_INT_STA); 1032 WR4(sc, EMAC_INT_STA, val); 1033 1034 if (val & RX_INT) 1035 awg_rxintr(sc); 1036 1037 if (val & TX_INT) 1038 awg_txeof(sc); 1039 1040 if (val & (TX_INT | TX_BUF_UA_INT)) { 1041 if (!if_sendq_empty(sc->ifp)) 1042 awg_start_locked(sc); 1043 } 1044 1045 AWG_UNLOCK(sc); 1046 } 1047 1048 #ifdef DEVICE_POLLING 1049 static int 1050 awg_poll(if_t ifp, enum poll_cmd cmd, int count) 1051 { 1052 struct awg_softc *sc; 1053 uint32_t val; 1054 int rx_npkts; 1055 1056 sc = if_getsoftc(ifp); 1057 rx_npkts = 0; 1058 1059 AWG_LOCK(sc); 1060 1061 if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) { 1062 AWG_UNLOCK(sc); 1063 return (0); 1064 } 1065 1066 rx_npkts = awg_rxintr(sc); 1067 awg_txeof(sc); 1068 if (!if_sendq_empty(ifp)) 1069 awg_start_locked(sc); 1070 1071 if (cmd == POLL_AND_CHECK_STATUS) { 1072 val = RD4(sc, EMAC_INT_STA); 1073 if (val != 0) 1074 WR4(sc, EMAC_INT_STA, val); 1075 } 1076 1077 AWG_UNLOCK(sc); 1078 1079 return (rx_npkts); 1080 } 1081 #endif 1082 1083 static int 1084 awg_ioctl(if_t ifp, u_long cmd, caddr_t data) 1085 { 1086 struct awg_softc *sc; 1087 struct mii_data *mii; 1088 struct ifreq *ifr; 1089 int flags, mask, error; 1090 1091 sc = if_getsoftc(ifp); 1092 mii = device_get_softc(sc->miibus); 1093 ifr = (struct ifreq *)data; 1094 error = 0; 1095 1096 switch (cmd) { 1097 case SIOCSIFFLAGS: 1098 AWG_LOCK(sc); 1099 if (if_getflags(ifp) & IFF_UP) { 1100 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 1101 flags = if_getflags(ifp) ^ 
sc->if_flags; 1102 if ((flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0) 1103 awg_setup_rxfilter(sc); 1104 } else 1105 awg_init_locked(sc); 1106 } else { 1107 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) 1108 awg_stop(sc); 1109 } 1110 sc->if_flags = if_getflags(ifp); 1111 AWG_UNLOCK(sc); 1112 break; 1113 case SIOCADDMULTI: 1114 case SIOCDELMULTI: 1115 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 1116 AWG_LOCK(sc); 1117 awg_setup_rxfilter(sc); 1118 AWG_UNLOCK(sc); 1119 } 1120 break; 1121 case SIOCSIFMEDIA: 1122 case SIOCGIFMEDIA: 1123 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); 1124 break; 1125 case SIOCSIFCAP: 1126 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); 1127 #ifdef DEVICE_POLLING 1128 if (mask & IFCAP_POLLING) { 1129 if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) { 1130 error = ether_poll_register(awg_poll, ifp); 1131 if (error != 0) 1132 break; 1133 AWG_LOCK(sc); 1134 awg_disable_intr(sc); 1135 if_setcapenablebit(ifp, IFCAP_POLLING, 0); 1136 AWG_UNLOCK(sc); 1137 } else { 1138 error = ether_poll_deregister(ifp); 1139 AWG_LOCK(sc); 1140 awg_enable_intr(sc); 1141 if_setcapenablebit(ifp, 0, IFCAP_POLLING); 1142 AWG_UNLOCK(sc); 1143 } 1144 } 1145 #endif 1146 if (mask & IFCAP_VLAN_MTU) 1147 if_togglecapenable(ifp, IFCAP_VLAN_MTU); 1148 if (mask & IFCAP_RXCSUM) 1149 if_togglecapenable(ifp, IFCAP_RXCSUM); 1150 if (mask & IFCAP_TXCSUM) 1151 if_togglecapenable(ifp, IFCAP_TXCSUM); 1152 if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0) 1153 if_sethwassistbits(ifp, CSUM_IP | CSUM_UDP | CSUM_TCP, 0); 1154 else 1155 if_sethwassistbits(ifp, 0, CSUM_IP | CSUM_UDP | CSUM_TCP); 1156 break; 1157 default: 1158 error = ether_ioctl(ifp, cmd, data); 1159 break; 1160 } 1161 1162 return (error); 1163 } 1164 1165 static uint32_t 1166 syscon_read_emac_clk_reg(device_t dev) 1167 { 1168 struct awg_softc *sc; 1169 1170 sc = device_get_softc(dev); 1171 if (sc->syscon != NULL) 1172 return (SYSCON_READ_4(sc->syscon, EMAC_CLK_REG)); 1173 else if (sc->res[_RES_SYSCON] != NULL) 1174 return 
(bus_read_4(sc->res[_RES_SYSCON], 0)); 1175 1176 return (0); 1177 } 1178 1179 static void 1180 syscon_write_emac_clk_reg(device_t dev, uint32_t val) 1181 { 1182 struct awg_softc *sc; 1183 1184 sc = device_get_softc(dev); 1185 if (sc->syscon != NULL) 1186 SYSCON_WRITE_4(sc->syscon, EMAC_CLK_REG, val); 1187 else if (sc->res[_RES_SYSCON] != NULL) 1188 bus_write_4(sc->res[_RES_SYSCON], 0, val); 1189 } 1190 1191 static phandle_t 1192 awg_get_phy_node(device_t dev) 1193 { 1194 phandle_t node; 1195 pcell_t phy_handle; 1196 1197 node = ofw_bus_get_node(dev); 1198 if (OF_getencprop(node, "phy-handle", (void *)&phy_handle, 1199 sizeof(phy_handle)) <= 0) 1200 return (0); 1201 1202 return (OF_node_from_xref(phy_handle)); 1203 } 1204 1205 static bool 1206 awg_has_internal_phy(device_t dev) 1207 { 1208 phandle_t node, phy_node; 1209 1210 node = ofw_bus_get_node(dev); 1211 /* Legacy binding */ 1212 if (OF_hasprop(node, "allwinner,use-internal-phy")) 1213 return (true); 1214 1215 phy_node = awg_get_phy_node(dev); 1216 return (phy_node != 0 && ofw_bus_node_is_compatible(OF_parent(phy_node), 1217 "allwinner,sun8i-h3-mdio-internal") != 0); 1218 } 1219 1220 static int 1221 awg_setup_phy(device_t dev) 1222 { 1223 struct awg_softc *sc; 1224 clk_t clk_tx, clk_tx_parent; 1225 const char *tx_parent_name; 1226 char *phy_type; 1227 phandle_t node; 1228 uint32_t reg, tx_delay, rx_delay; 1229 int error; 1230 bool use_syscon; 1231 1232 sc = device_get_softc(dev); 1233 node = ofw_bus_get_node(dev); 1234 use_syscon = false; 1235 1236 if (OF_getprop_alloc(node, "phy-mode", 1, (void **)&phy_type) == 0) 1237 return (0); 1238 1239 if (sc->syscon != NULL || sc->res[_RES_SYSCON] != NULL) 1240 use_syscon = true; 1241 1242 if (bootverbose) 1243 device_printf(dev, "PHY type: %s, conf mode: %s\n", phy_type, 1244 use_syscon ? "reg" : "clk"); 1245 1246 if (use_syscon) { 1247 /* 1248 * Abstract away writing to syscon for devices like the pine64. 
1249 * For the pine64, we get dtb from U-Boot and it still uses the 1250 * legacy setup of specifying syscon register in emac node 1251 * rather than as its own node and using an xref in emac. 1252 * These abstractions can go away once U-Boot dts is up-to-date. 1253 */ 1254 reg = syscon_read_emac_clk_reg(dev); 1255 reg &= ~(EMAC_CLK_PIT | EMAC_CLK_SRC | EMAC_CLK_RMII_EN); 1256 if (strncmp(phy_type, "rgmii", 5) == 0) 1257 reg |= EMAC_CLK_PIT_RGMII | EMAC_CLK_SRC_RGMII; 1258 else if (strcmp(phy_type, "rmii") == 0) 1259 reg |= EMAC_CLK_RMII_EN; 1260 else 1261 reg |= EMAC_CLK_PIT_MII | EMAC_CLK_SRC_MII; 1262 1263 if (OF_getencprop(node, "tx-delay", &tx_delay, 1264 sizeof(tx_delay)) > 0) { 1265 reg &= ~EMAC_CLK_ETXDC; 1266 reg |= (tx_delay << EMAC_CLK_ETXDC_SHIFT); 1267 } 1268 if (OF_getencprop(node, "rx-delay", &rx_delay, 1269 sizeof(rx_delay)) > 0) { 1270 reg &= ~EMAC_CLK_ERXDC; 1271 reg |= (rx_delay << EMAC_CLK_ERXDC_SHIFT); 1272 } 1273 1274 if (sc->type == EMAC_H3) { 1275 if (awg_has_internal_phy(dev)) { 1276 reg |= EMAC_CLK_EPHY_SELECT; 1277 reg &= ~EMAC_CLK_EPHY_SHUTDOWN; 1278 if (OF_hasprop(node, 1279 "allwinner,leds-active-low")) 1280 reg |= EMAC_CLK_EPHY_LED_POL; 1281 else 1282 reg &= ~EMAC_CLK_EPHY_LED_POL; 1283 1284 /* Set internal PHY addr to 1 */ 1285 reg &= ~EMAC_CLK_EPHY_ADDR; 1286 reg |= (1 << EMAC_CLK_EPHY_ADDR_SHIFT); 1287 } else { 1288 reg &= ~EMAC_CLK_EPHY_SELECT; 1289 } 1290 } 1291 1292 if (bootverbose) 1293 device_printf(dev, "EMAC clock: 0x%08x\n", reg); 1294 syscon_write_emac_clk_reg(dev, reg); 1295 } else { 1296 if (strncmp(phy_type, "rgmii", 5) == 0) 1297 tx_parent_name = "emac_int_tx"; 1298 else 1299 tx_parent_name = "mii_phy_tx"; 1300 1301 /* Get the TX clock */ 1302 error = clk_get_by_ofw_name(dev, 0, "tx", &clk_tx); 1303 if (error != 0) { 1304 device_printf(dev, "cannot get tx clock\n"); 1305 goto fail; 1306 } 1307 1308 /* Find the desired parent clock based on phy-mode property */ 1309 error = clk_get_by_name(dev, tx_parent_name, 
		    &clk_tx_parent);
		if (error != 0) {
			device_printf(dev, "cannot get clock '%s'\n",
			    tx_parent_name);
			goto fail;
		}

		/* Set TX clock parent */
		error = clk_set_parent_by_clk(clk_tx, clk_tx_parent);
		if (error != 0) {
			device_printf(dev, "cannot set tx clock parent\n");
			goto fail;
		}

		/* Enable TX clock */
		error = clk_enable(clk_tx);
		if (error != 0) {
			device_printf(dev, "cannot enable tx clock\n");
			goto fail;
		}
	}

	error = 0;

fail:
	/* Fall-through on success as well; only the property is freed here. */
	OF_prop_free(phy_type);
	return (error);
}

/*
 * Acquire and enable the external resources the MAC depends on: AHB (and
 * optional EPHY) clocks and resets, the optional syscon handle, the PHY
 * supply regulator, and the MDC divider derived from the AHB clock
 * frequency.  On failure, all resources acquired so far are released.
 */
static int
awg_setup_extres(device_t dev)
{
	struct awg_softc *sc;
	phandle_t node, phy_node;
	hwreset_t rst_ahb, rst_ephy;
	clk_t clk_ahb, clk_ephy;
	regulator_t reg;
	uint64_t freq;
	int error, div;

	sc = device_get_softc(dev);
	rst_ahb = rst_ephy = NULL;
	clk_ahb = clk_ephy = NULL;
	reg = NULL;
	node = ofw_bus_get_node(dev);
	phy_node = awg_get_phy_node(dev);

	if (phy_node == 0 && OF_hasprop(node, "phy-handle")) {
		error = ENXIO;
		device_printf(dev, "cannot get phy handle\n");
		goto fail;
	}

	/* Get AHB clock and reset resources */
	error = hwreset_get_by_ofw_name(dev, 0, "stmmaceth", &rst_ahb);
	if (error != 0)
		error = hwreset_get_by_ofw_name(dev, 0, "ahb", &rst_ahb);
	if (error != 0) {
		device_printf(dev, "cannot get ahb reset\n");
		goto fail;
	}
	/* The EPHY reset is optional; try the PHY node's index 0 fallback. */
	if (hwreset_get_by_ofw_name(dev, 0, "ephy", &rst_ephy) != 0)
		if (phy_node == 0 || hwreset_get_by_ofw_idx(dev, phy_node, 0,
		    &rst_ephy) != 0)
			rst_ephy = NULL;
	error = clk_get_by_ofw_name(dev, 0, "stmmaceth", &clk_ahb);
	if (error != 0)
		error = clk_get_by_ofw_name(dev, 0, "ahb", &clk_ahb);
	if (error != 0) {
		device_printf(dev, "cannot get ahb clock\n");
		goto fail;
	}
	/* The EPHY clock is likewise optional. */
	if (clk_get_by_ofw_name(dev, 0, "ephy", &clk_ephy) != 0)
		if (phy_node
== 0 || clk_get_by_ofw_index(dev, phy_node, 0, 1383 &clk_ephy) != 0) 1384 clk_ephy = NULL; 1385 1386 if (OF_hasprop(node, "syscon") && syscon_get_by_ofw_property(dev, node, 1387 "syscon", &sc->syscon) != 0) { 1388 device_printf(dev, "cannot get syscon driver handle\n"); 1389 goto fail; 1390 } 1391 1392 /* Configure PHY for MII or RGMII mode */ 1393 if (awg_setup_phy(dev) != 0) 1394 goto fail; 1395 1396 /* Enable clocks */ 1397 error = clk_enable(clk_ahb); 1398 if (error != 0) { 1399 device_printf(dev, "cannot enable ahb clock\n"); 1400 goto fail; 1401 } 1402 if (clk_ephy != NULL) { 1403 error = clk_enable(clk_ephy); 1404 if (error != 0) { 1405 device_printf(dev, "cannot enable ephy clock\n"); 1406 goto fail; 1407 } 1408 } 1409 1410 /* De-assert reset */ 1411 error = hwreset_deassert(rst_ahb); 1412 if (error != 0) { 1413 device_printf(dev, "cannot de-assert ahb reset\n"); 1414 goto fail; 1415 } 1416 if (rst_ephy != NULL) { 1417 error = hwreset_deassert(rst_ephy); 1418 if (error != 0) { 1419 device_printf(dev, "cannot de-assert ephy reset\n"); 1420 goto fail; 1421 } 1422 } 1423 1424 /* Enable PHY regulator if applicable */ 1425 if (regulator_get_by_ofw_property(dev, 0, "phy-supply", ®) == 0) { 1426 error = regulator_enable(reg); 1427 if (error != 0) { 1428 device_printf(dev, "cannot enable PHY regulator\n"); 1429 goto fail; 1430 } 1431 } 1432 1433 /* Determine MDC clock divide ratio based on AHB clock */ 1434 error = clk_get_freq(clk_ahb, &freq); 1435 if (error != 0) { 1436 device_printf(dev, "cannot get AHB clock frequency\n"); 1437 goto fail; 1438 } 1439 div = freq / MDIO_FREQ; 1440 if (div <= 16) 1441 sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_16; 1442 else if (div <= 32) 1443 sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_32; 1444 else if (div <= 64) 1445 sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_64; 1446 else if (div <= 128) 1447 sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_128; 1448 else { 1449 device_printf(dev, "cannot determine MDC clock divide ratio\n"); 1450 error = ENXIO; 1451 
goto fail;
	}

	if (bootverbose)
		device_printf(dev, "AHB frequency %ju Hz, MDC div: 0x%x\n",
		    (uintmax_t)freq, sc->mdc_div_ratio_m);

	return (0);

fail:
	/* Release everything acquired so far, in reverse order. */
	if (reg != NULL)
		regulator_release(reg);
	if (clk_ephy != NULL)
		clk_release(clk_ephy);
	if (clk_ahb != NULL)
		clk_release(clk_ahb);
	if (rst_ephy != NULL)
		hwreset_release(rst_ephy);
	if (rst_ahb != NULL)
		hwreset_release(rst_ahb);
	return (error);
}

/*
 * Read the factory MAC address from the EMAC address registers.  If the
 * hardware holds no valid address, derive one from the SID root key, or
 * as a last resort generate a random locally-administered address.
 */
static void
awg_get_eaddr(device_t dev, uint8_t *eaddr)
{
	struct awg_softc *sc;
	uint32_t maclo, machi, rnd;
	u_char rootkey[16];

	sc = device_get_softc(dev);

	machi = RD4(sc, EMAC_ADDR_HIGH(0)) & 0xffff;
	maclo = RD4(sc, EMAC_ADDR_LOW(0));

	if (maclo == 0xffffffff && machi == 0xffff) {
		/* MAC address in hardware is invalid, create one */
		if (aw_sid_get_rootkey(rootkey) == 0 &&
		    (rootkey[3] | rootkey[12] | rootkey[13] | rootkey[14] |
		    rootkey[15]) != 0) {
			/* MAC address is derived from the root key in SID */
			/* 0x02 marks the address as locally administered. */
			maclo = (rootkey[13] << 24) | (rootkey[12] << 16) |
			    (rootkey[3] << 8) | 0x02;
			machi = (rootkey[15] << 8) | rootkey[14];
		} else {
			/* Create one */
			rnd = arc4random();
			maclo = 0x00f2 | (rnd & 0xffff0000);
			machi = rnd & 0xffff;
		}
	}

	/* Registers hold the address in little-endian byte order. */
	eaddr[0] = maclo & 0xff;
	eaddr[1] = (maclo >> 8) & 0xff;
	eaddr[2] = (maclo >> 16) & 0xff;
	eaddr[3] = (maclo >> 24) & 0xff;
	eaddr[4] = machi & 0xff;
	eaddr[5] = (machi >> 8) & 0xff;
}

#ifdef AWG_DEBUG
/* Dump the EMAC register file (debug builds only). */
static void
awg_dump_regs(device_t dev)
{
	static const struct {
		const char *name;
		u_int reg;
	} regs[] = {
		{ "BASIC_CTL_0", EMAC_BASIC_CTL_0 },
		{ "BASIC_CTL_1", EMAC_BASIC_CTL_1 },
		{ "INT_STA", EMAC_INT_STA },
		{ "INT_EN", EMAC_INT_EN },
		{ "TX_CTL_0", EMAC_TX_CTL_0 },
		{ "TX_CTL_1", EMAC_TX_CTL_1 },
		{
"TX_FLOW_CTL", EMAC_TX_FLOW_CTL },
		{ "TX_DMA_LIST", EMAC_TX_DMA_LIST },
		{ "RX_CTL_0", EMAC_RX_CTL_0 },
		{ "RX_CTL_1", EMAC_RX_CTL_1 },
		{ "RX_DMA_LIST", EMAC_RX_DMA_LIST },
		{ "RX_FRM_FLT", EMAC_RX_FRM_FLT },
		{ "RX_HASH_0", EMAC_RX_HASH_0 },
		{ "RX_HASH_1", EMAC_RX_HASH_1 },
		{ "MII_CMD", EMAC_MII_CMD },
		{ "ADDR_HIGH0", EMAC_ADDR_HIGH(0) },
		{ "ADDR_LOW0", EMAC_ADDR_LOW(0) },
		{ "TX_DMA_STA", EMAC_TX_DMA_STA },
		{ "TX_DMA_CUR_DESC", EMAC_TX_DMA_CUR_DESC },
		{ "TX_DMA_CUR_BUF", EMAC_TX_DMA_CUR_BUF },
		{ "RX_DMA_STA", EMAC_RX_DMA_STA },
		{ "RX_DMA_CUR_DESC", EMAC_RX_DMA_CUR_DESC },
		{ "RX_DMA_CUR_BUF", EMAC_RX_DMA_CUR_BUF },
		{ "RGMII_STA", EMAC_RGMII_STA },
	};
	struct awg_softc *sc;
	unsigned int n;

	sc = device_get_softc(dev);

	for (n = 0; n < nitems(regs); n++)
		device_printf(dev, " %-20s %08x\n", regs[n].name,
		    RD4(sc, regs[n].reg));
}
#endif

/* Matches the devicetree GPIO flag for an active-low pin. */
#define GPIO_ACTIVE_LOW 1

/*
 * Toggle the optional PHY hardware reset line described by the legacy
 * "allwinner,reset-gpio" / "allwinner,reset-delays-us" properties.
 * Returns 0 when no reset GPIO is configured (nothing to do).
 */
static int
awg_phy_reset(device_t dev)
{
	pcell_t gpio_prop[4], delay_prop[3];
	phandle_t node, gpio_node;
	device_t gpio;
	uint32_t pin, flags;
	uint32_t pin_value;

	node = ofw_bus_get_node(dev);
	if (OF_getencprop(node, "allwinner,reset-gpio", gpio_prop,
	    sizeof(gpio_prop)) <= 0)
		return (0);

	/* A reset GPIO without the matching delays is a broken DTB. */
	if (OF_getencprop(node, "allwinner,reset-delays-us", delay_prop,
	    sizeof(delay_prop)) <= 0)
		return (ENXIO);

	gpio_node = OF_node_from_xref(gpio_prop[0]);
	if ((gpio = OF_device_from_xref(gpio_prop[0])) == NULL)
		return (ENXIO);

	if (GPIO_MAP_GPIOS(gpio, node, gpio_node, nitems(gpio_prop) - 1,
	    gpio_prop + 1, &pin, &flags) != 0)
		return (ENXIO);

	/* Start level is inverted by both reset-active-low and GPIO flags. */
	pin_value = GPIO_PIN_LOW;
	if (OF_hasprop(node, "allwinner,reset-active-low"))
		pin_value = GPIO_PIN_HIGH;

	if (flags & GPIO_ACTIVE_LOW)
		pin_value = !pin_value;

	GPIO_PIN_SETFLAGS(gpio,
pin, GPIO_PIN_OUTPUT);
	/* Toggle the reset line using the three DTB-specified delays (us). */
	GPIO_PIN_SET(gpio, pin, pin_value);
	DELAY(delay_prop[0]);
	GPIO_PIN_SET(gpio, pin, !pin_value);
	DELAY(delay_prop[1]);
	GPIO_PIN_SET(gpio, pin, pin_value);
	DELAY(delay_prop[2]);

	return (0);
}

/*
 * Soft-reset the EMAC core (resetting the external PHY first if a reset
 * GPIO is configured).  Returns ETIMEDOUT if the soft-reset bit does not
 * self-clear within SOFT_RST_RETRY polls.
 */
static int
awg_reset(device_t dev)
{
	struct awg_softc *sc;
	int retry;

	sc = device_get_softc(dev);

	/* Reset PHY if necessary */
	if (awg_phy_reset(dev) != 0) {
		device_printf(dev, "failed to reset PHY\n");
		return (ENXIO);
	}

	/* Soft reset all registers and logic */
	WR4(sc, EMAC_BASIC_CTL_1, BASIC_CTL_SOFT_RST);

	/* Wait for soft reset bit to self-clear */
	for (retry = SOFT_RST_RETRY; retry > 0; retry--) {
		if ((RD4(sc, EMAC_BASIC_CTL_1) & BASIC_CTL_SOFT_RST) == 0)
			break;
		DELAY(10);
	}
	if (retry == 0) {
		device_printf(dev, "soft reset timed out\n");
#ifdef AWG_DEBUG
		awg_dump_regs(dev);
#endif
		return (ETIMEDOUT);
	}

	return (0);
}

/* bus_dmamap_load(9) callback: record the single segment's bus address. */
static void
awg_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	if (error != 0)
		return;
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

/*
 * Allocate and initialize the TX and RX DMA descriptor rings and their
 * buffer maps, then program the ring base addresses into the hardware.
 */
static int
awg_setup_dma(device_t dev)
{
	struct awg_softc *sc;
	int error, i;

	sc = device_get_softc(dev);

	/* Setup TX ring */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    DESC_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    TX_DESC_SIZE, 1,		/* maxsize, nsegs */
	    TX_DESC_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->tx.desc_tag);
	if (error != 0) {
		device_printf(dev, "cannot create TX descriptor ring tag\n");
		return (error);
	}

	error = bus_dmamem_alloc(sc->tx.desc_tag,
(void **)&sc->tx.desc_ring,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->tx.desc_map);
	if (error != 0) {
		device_printf(dev, "cannot allocate TX descriptor ring\n");
		return (error);
	}

	error = bus_dmamap_load(sc->tx.desc_tag, sc->tx.desc_map,
	    sc->tx.desc_ring, TX_DESC_SIZE, awg_dmamap_cb,
	    &sc->tx.desc_ring_paddr, 0);
	if (error != 0) {
		device_printf(dev, "cannot load TX descriptor ring\n");
		return (error);
	}

	/* Chain the TX descriptors into a circular ring. */
	for (i = 0; i < TX_DESC_COUNT; i++)
		sc->tx.desc_ring[i].next =
		    htole32(sc->tx.desc_ring_paddr + DESC_OFF(TX_NEXT(i)));

	/* Tag for TX mbuf buffers, up to TX_MAX_SEGS segments each. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, TX_MAX_SEGS,	/* maxsize, nsegs */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->tx.buf_tag);
	if (error != 0) {
		device_printf(dev, "cannot create TX buffer tag\n");
		return (error);
	}

	sc->tx.queued = 0;
	for (i = 0; i < TX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->tx.buf_tag, 0,
		    &sc->tx.buf_map[i].map);
		if (error != 0) {
			device_printf(dev, "cannot create TX buffer map\n");
			return (error);
		}
	}

	/* Setup RX ring */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    DESC_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    RX_DESC_SIZE, 1,		/* maxsize, nsegs */
	    RX_DESC_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rx.desc_tag);
	if (error != 0) {
		device_printf(dev, "cannot create RX descriptor ring tag\n");
		return (error);
	}

	error =
bus_dmamem_alloc(sc->rx.desc_tag, (void **)&sc->rx.desc_ring,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->rx.desc_map);
	if (error != 0) {
		device_printf(dev, "cannot allocate RX descriptor ring\n");
		return (error);
	}

	error = bus_dmamap_load(sc->rx.desc_tag, sc->rx.desc_map,
	    sc->rx.desc_ring, RX_DESC_SIZE, awg_dmamap_cb,
	    &sc->rx.desc_ring_paddr, 0);
	if (error != 0) {
		device_printf(dev, "cannot load RX descriptor ring\n");
		return (error);
	}

	/* Tag for RX mbuf cluster buffers, one segment each. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, 1,		/* maxsize, nsegs */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rx.buf_tag);
	if (error != 0) {
		device_printf(dev, "cannot create RX buffer tag\n");
		return (error);
	}

	/* Spare map used when swapping in a freshly allocated RX buffer. */
	error = bus_dmamap_create(sc->rx.buf_tag, 0, &sc->rx.buf_spare_map);
	if (error != 0) {
		device_printf(dev,
		    "cannot create RX buffer spare map\n");
		return (error);
	}

	/* Chain RX descriptors into a ring and attach an mbuf to each. */
	for (i = 0; i < RX_DESC_COUNT; i++) {
		sc->rx.desc_ring[i].next =
		    htole32(sc->rx.desc_ring_paddr + DESC_OFF(RX_NEXT(i)));

		error = bus_dmamap_create(sc->rx.buf_tag, 0,
		    &sc->rx.buf_map[i].map);
		if (error != 0) {
			device_printf(dev, "cannot create RX buffer map\n");
			return (error);
		}
		sc->rx.buf_map[i].mbuf = NULL;
		error = awg_newbuf_rx(sc, i);
		if (error != 0) {
			device_printf(dev, "cannot create RX buffer\n");
			return (error);
		}
	}
	bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map,
	    BUS_DMASYNC_PREWRITE);

	/* Write transmit and receive descriptor base address registers */
	WR4(sc, EMAC_TX_DMA_LIST, sc->tx.desc_ring_paddr);
	WR4(sc, EMAC_RX_DMA_LIST,
sc->rx.desc_ring_paddr);

	return (0);
}

/* Match against the compat_data table and announce the device. */
static int
awg_probe(device_t dev)
{
	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
		return (ENXIO);

	device_set_desc(dev, "Allwinner Gigabit Ethernet");
	return (BUS_PROBE_DEFAULT);
}

/*
 * Attach: allocate bus resources, bring up clocks/resets/regulators,
 * soft-reset the MAC, set up the DMA rings, hook the interrupt, and
 * create and attach the network interface.
 *
 * NOTE(review): the error returns below leave the mutex, resources and
 * any earlier allocations in place — a detach-style unwind would be
 * needed for full cleanup; confirm against the driver's detach path.
 */
static int
awg_attach(device_t dev)
{
	uint8_t eaddr[ETHER_ADDR_LEN];
	struct awg_softc *sc;
	int error;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->type = ofw_bus_search_compatible(dev, compat_data)->ocd_data;

	if (bus_alloc_resources(dev, awg_spec, sc->res) != 0) {
		device_printf(dev, "cannot allocate resources for device\n");
		return (ENXIO);
	}

	mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
	callout_init_mtx(&sc->stat_ch, &sc->mtx, 0);
	TASK_INIT(&sc->link_task, 0, awg_link_task, sc);

	/* Setup clocks and regulators */
	error = awg_setup_extres(dev);
	if (error != 0)
		return (error);

	/* Read MAC address before resetting the chip */
	awg_get_eaddr(dev, eaddr);

	/* Soft reset EMAC core */
	error = awg_reset(dev);
	if (error != 0)
		return (error);

	/* Setup DMA descriptors */
	error = awg_setup_dma(dev);
	if (error != 0)
		return (error);

	/* Install interrupt handler */
	error = bus_setup_intr(dev, sc->res[_RES_IRQ],
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, awg_intr, sc, &sc->ih);
	if (error != 0) {
		device_printf(dev, "cannot setup interrupt handler\n");
		return (error);
	}

	/* Setup ethernet interface */
	sc->ifp = if_alloc(IFT_ETHER);
	if_setsoftc(sc->ifp, sc);
	if_initname(sc->ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(sc->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setstartfn(sc->ifp, awg_start);
if_setioctlfn(sc->ifp, awg_ioctl);
	if_setinitfn(sc->ifp, awg_init);
	if_setsendqlen(sc->ifp, TX_DESC_COUNT - 1);
	if_setsendqready(sc->ifp);
	/* IP/UDP/TCP checksum offload is enabled by default. */
	if_sethwassist(sc->ifp, CSUM_IP | CSUM_UDP | CSUM_TCP);
	if_setcapabilities(sc->ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM);
	if_setcapenable(sc->ifp, if_getcapabilities(sc->ifp));
#ifdef DEVICE_POLLING
	if_setcapabilitiesbit(sc->ifp, IFCAP_POLLING, 0);
#endif

	/* Attach MII driver */
	error = mii_attach(dev, &sc->miibus, sc->ifp, awg_media_change,
	    awg_media_status, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);
	if (error != 0) {
		device_printf(dev, "cannot attach PHY\n");
		return (error);
	}

	/* Attach ethernet interface */
	ether_ifattach(sc->ifp, eaddr);

	return (0);
}

static device_method_t awg_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		awg_probe),
	DEVMETHOD(device_attach,	awg_attach),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	awg_miibus_readreg),
	DEVMETHOD(miibus_writereg,	awg_miibus_writereg),
	DEVMETHOD(miibus_statchg,	awg_miibus_statchg),

	DEVMETHOD_END
};

static driver_t awg_driver = {
	"awg",
	awg_methods,
	sizeof(struct awg_softc),
};

static devclass_t awg_devclass;

DRIVER_MODULE(awg, simplebus, awg_driver, awg_devclass, 0, 0);
DRIVER_MODULE(miibus, awg, miibus_driver, miibus_devclass, 0, 0);

/* Module dependencies: generic ethernet and the MII bus framework. */
MODULE_DEPEND(awg, ether, 1, 1, 1);
MODULE_DEPEND(awg, miibus, 1, 1, 1);