1 /*- 2 * Copyright (c) 2016 Jared McNeill <jmcneill@invisible.ca> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 15 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 16 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 17 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 18 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 19 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 20 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 21 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 22 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 
25 * 26 * $FreeBSD$ 27 */ 28 29 /* 30 * Allwinner Gigabit Ethernet MAC (EMAC) controller 31 */ 32 33 #include "opt_device_polling.h" 34 35 #include <sys/cdefs.h> 36 __FBSDID("$FreeBSD$"); 37 38 #include <sys/param.h> 39 #include <sys/systm.h> 40 #include <sys/bus.h> 41 #include <sys/rman.h> 42 #include <sys/kernel.h> 43 #include <sys/endian.h> 44 #include <sys/mbuf.h> 45 #include <sys/socket.h> 46 #include <sys/sockio.h> 47 #include <sys/module.h> 48 #include <sys/taskqueue.h> 49 #include <sys/gpio.h> 50 51 #include <net/bpf.h> 52 #include <net/if.h> 53 #include <net/ethernet.h> 54 #include <net/if_dl.h> 55 #include <net/if_media.h> 56 #include <net/if_types.h> 57 #include <net/if_var.h> 58 59 #include <machine/bus.h> 60 61 #include <dev/ofw/ofw_bus.h> 62 #include <dev/ofw/ofw_bus_subr.h> 63 64 #include <arm/allwinner/if_awgreg.h> 65 #include <arm/allwinner/aw_sid.h> 66 #include <dev/mii/mii.h> 67 #include <dev/mii/miivar.h> 68 69 #include <dev/extres/clk/clk.h> 70 #include <dev/extres/hwreset/hwreset.h> 71 #include <dev/extres/regulator/regulator.h> 72 73 #include "miibus_if.h" 74 #include "gpio_if.h" 75 76 #define RD4(sc, reg) bus_read_4((sc)->res[_RES_EMAC], (reg)) 77 #define WR4(sc, reg, val) bus_write_4((sc)->res[_RES_EMAC], (reg), (val)) 78 79 #define AWG_LOCK(sc) mtx_lock(&(sc)->mtx) 80 #define AWG_UNLOCK(sc) mtx_unlock(&(sc)->mtx); 81 #define AWG_ASSERT_LOCKED(sc) mtx_assert(&(sc)->mtx, MA_OWNED) 82 #define AWG_ASSERT_UNLOCKED(sc) mtx_assert(&(sc)->mtx, MA_NOTOWNED) 83 84 #define DESC_ALIGN 4 85 #define TX_DESC_COUNT 1024 86 #define TX_DESC_SIZE (sizeof(struct emac_desc) * TX_DESC_COUNT) 87 #define RX_DESC_COUNT 256 88 #define RX_DESC_SIZE (sizeof(struct emac_desc) * RX_DESC_COUNT) 89 90 #define DESC_OFF(n) ((n) * sizeof(struct emac_desc)) 91 #define TX_NEXT(n) (((n) + 1) & (TX_DESC_COUNT - 1)) 92 #define TX_SKIP(n, o) (((n) + (o)) & (TX_DESC_COUNT - 1)) 93 #define RX_NEXT(n) (((n) + 1) & (RX_DESC_COUNT - 1)) 94 95 #define TX_MAX_SEGS 20 96 97 #define 
SOFT_RST_RETRY 1000 98 #define MII_BUSY_RETRY 1000 99 #define MDIO_FREQ 2500000 100 101 #define BURST_LEN_DEFAULT 8 102 #define RX_TX_PRI_DEFAULT 0 103 #define PAUSE_TIME_DEFAULT 0x400 104 #define TX_INTERVAL_DEFAULT 64 105 #define RX_BATCH_DEFAULT 64 106 107 /* syscon EMAC clock register */ 108 #define EMAC_CLK_EPHY_ADDR (0x1f << 20) /* H3 */ 109 #define EMAC_CLK_EPHY_ADDR_SHIFT 20 110 #define EMAC_CLK_EPHY_LED_POL (1 << 17) /* H3 */ 111 #define EMAC_CLK_EPHY_SHUTDOWN (1 << 16) /* H3 */ 112 #define EMAC_CLK_EPHY_SELECT (1 << 15) /* H3 */ 113 #define EMAC_CLK_RMII_EN (1 << 13) 114 #define EMAC_CLK_ETXDC (0x7 << 10) 115 #define EMAC_CLK_ETXDC_SHIFT 10 116 #define EMAC_CLK_ERXDC (0x1f << 5) 117 #define EMAC_CLK_ERXDC_SHIFT 5 118 #define EMAC_CLK_PIT (0x1 << 2) 119 #define EMAC_CLK_PIT_MII (0 << 2) 120 #define EMAC_CLK_PIT_RGMII (1 << 2) 121 #define EMAC_CLK_SRC (0x3 << 0) 122 #define EMAC_CLK_SRC_MII (0 << 0) 123 #define EMAC_CLK_SRC_EXT_RGMII (1 << 0) 124 #define EMAC_CLK_SRC_RGMII (2 << 0) 125 126 /* Burst length of RX and TX DMA transfers */ 127 static int awg_burst_len = BURST_LEN_DEFAULT; 128 TUNABLE_INT("hw.awg.burst_len", &awg_burst_len); 129 130 /* RX / TX DMA priority. If 1, RX DMA has priority over TX DMA. 
*/ 131 static int awg_rx_tx_pri = RX_TX_PRI_DEFAULT; 132 TUNABLE_INT("hw.awg.rx_tx_pri", &awg_rx_tx_pri); 133 134 /* Pause time field in the transmitted control frame */ 135 static int awg_pause_time = PAUSE_TIME_DEFAULT; 136 TUNABLE_INT("hw.awg.pause_time", &awg_pause_time); 137 138 /* Request a TX interrupt every <n> descriptors */ 139 static int awg_tx_interval = TX_INTERVAL_DEFAULT; 140 TUNABLE_INT("hw.awg.tx_interval", &awg_tx_interval); 141 142 /* Maximum number of mbufs to send to if_input */ 143 static int awg_rx_batch = RX_BATCH_DEFAULT; 144 TUNABLE_INT("hw.awg.rx_batch", &awg_rx_batch); 145 146 enum awg_type { 147 EMAC_A83T = 1, 148 EMAC_H3, 149 EMAC_A64, 150 }; 151 152 static struct ofw_compat_data compat_data[] = { 153 { "allwinner,sun8i-a83t-emac", EMAC_A83T }, 154 { "allwinner,sun8i-h3-emac", EMAC_H3 }, 155 { "allwinner,sun50i-a64-emac", EMAC_A64 }, 156 { NULL, 0 } 157 }; 158 159 struct awg_bufmap { 160 bus_dmamap_t map; 161 struct mbuf *mbuf; 162 }; 163 164 struct awg_txring { 165 bus_dma_tag_t desc_tag; 166 bus_dmamap_t desc_map; 167 struct emac_desc *desc_ring; 168 bus_addr_t desc_ring_paddr; 169 bus_dma_tag_t buf_tag; 170 struct awg_bufmap buf_map[TX_DESC_COUNT]; 171 u_int cur, next, queued; 172 }; 173 174 struct awg_rxring { 175 bus_dma_tag_t desc_tag; 176 bus_dmamap_t desc_map; 177 struct emac_desc *desc_ring; 178 bus_addr_t desc_ring_paddr; 179 bus_dma_tag_t buf_tag; 180 struct awg_bufmap buf_map[RX_DESC_COUNT]; 181 u_int cur; 182 }; 183 184 enum { 185 _RES_EMAC, 186 _RES_IRQ, 187 _RES_SYSCON, 188 _RES_NITEMS 189 }; 190 191 struct awg_softc { 192 struct resource *res[_RES_NITEMS]; 193 struct mtx mtx; 194 if_t ifp; 195 device_t dev; 196 device_t miibus; 197 struct callout stat_ch; 198 struct task link_task; 199 void *ih; 200 u_int mdc_div_ratio_m; 201 int link; 202 int if_flags; 203 enum awg_type type; 204 205 struct awg_txring tx; 206 struct awg_rxring rx; 207 }; 208 209 static struct resource_spec awg_spec[] = { 210 { SYS_RES_MEMORY, 0, 
RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ SYS_RES_MEMORY,	1,	RF_ACTIVE | RF_OPTIONAL },
	{ -1, 0 }
};

/*
 * Read a PHY register over the MDIO interface.  Busy-waits (up to
 * MII_BUSY_RETRY * 10us) for the controller to clear MII_BUSY, then
 * returns the value read; returns 0 on timeout.
 */
static int
awg_miibus_readreg(device_t dev, int phy, int reg)
{
	struct awg_softc *sc;
	int retry, val;

	sc = device_get_softc(dev);
	val = 0;

	WR4(sc, EMAC_MII_CMD,
	    (sc->mdc_div_ratio_m << MDC_DIV_RATIO_M_SHIFT) |
	    (phy << PHY_ADDR_SHIFT) |
	    (reg << PHY_REG_ADDR_SHIFT) |
	    MII_BUSY);
	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		if ((RD4(sc, EMAC_MII_CMD) & MII_BUSY) == 0) {
			val = RD4(sc, EMAC_MII_DATA);
			break;
		}
		DELAY(10);
	}

	if (retry == 0)
		device_printf(dev, "phy read timeout, phy=%d reg=%d\n",
		    phy, reg);

	return (val);
}

/*
 * Write a PHY register over the MDIO interface.  Same busy-wait scheme
 * as awg_miibus_readreg(); a timeout is logged but not reported to the
 * caller (always returns 0).
 */
static int
awg_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct awg_softc *sc;
	int retry;

	sc = device_get_softc(dev);

	WR4(sc, EMAC_MII_DATA, val);
	WR4(sc, EMAC_MII_CMD,
	    (sc->mdc_div_ratio_m << MDC_DIV_RATIO_M_SHIFT) |
	    (phy << PHY_ADDR_SHIFT) |
	    (reg << PHY_REG_ADDR_SHIFT) |
	    MII_WR | MII_BUSY);
	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		if ((RD4(sc, EMAC_MII_CMD) & MII_BUSY) == 0)
			break;
		DELAY(10);
	}

	if (retry == 0)
		device_printf(dev, "phy write timeout, phy=%d reg=%d\n",
		    phy, reg);

	return (0);
}

/*
 * Refresh sc->link from the current mii state and, if the link is up,
 * reprogram the MAC's speed, duplex and flow-control registers to match
 * the negotiated media.  Caller must hold the softc lock.
 */
static void
awg_update_link_locked(struct awg_softc *sc)
{
	struct mii_data *mii;
	uint32_t val;

	AWG_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0)
		return;
	mii = device_get_softc(sc->miibus);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_T:
		case IFM_1000_SX:
		case IFM_100_TX:
		case IFM_10_T:
			sc->link = 1;
			break;
		default:
			sc->link = 0;
			break;
		}
	} else
		sc->link = 0;

	if (sc->link == 0)
		return;

	val = RD4(sc, EMAC_BASIC_CTL_0);
	val &= ~(BASIC_CTL_SPEED | BASIC_CTL_DUPLEX);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
		val |= BASIC_CTL_SPEED_1000 << BASIC_CTL_SPEED_SHIFT;
	else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
		val |= BASIC_CTL_SPEED_100 << BASIC_CTL_SPEED_SHIFT;
	else
		val |= BASIC_CTL_SPEED_10 << BASIC_CTL_SPEED_SHIFT;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
		val |= BASIC_CTL_DUPLEX;

	WR4(sc, EMAC_BASIC_CTL_0, val);

	/* RX pause: honour only when the negotiated media advertises it. */
	val = RD4(sc, EMAC_RX_CTL_0);
	val &= ~RX_FLOW_CTL_EN;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
		val |= RX_FLOW_CTL_EN;
	WR4(sc, EMAC_RX_CTL_0, val);

	/* TX pause: pause time is only meaningful in full duplex. */
	val = RD4(sc, EMAC_TX_FLOW_CTL);
	val &= ~(PAUSE_TIME|TX_FLOW_CTL_EN);
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
		val |= TX_FLOW_CTL_EN;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
		val |= awg_pause_time << PAUSE_TIME_SHIFT;
	WR4(sc, EMAC_TX_FLOW_CTL, val);
}

/*
 * Deferred link-change handler, run from taskqueue_swi so the MII
 * status-change callback does not need to take the softc lock itself.
 */
static void
awg_link_task(void *arg, int pending)
{
	struct awg_softc *sc;

	sc = arg;

	AWG_LOCK(sc);
	awg_update_link_locked(sc);
	AWG_UNLOCK(sc);
}

/* miibus status-change callback: defer the real work to awg_link_task(). */
static void
awg_miibus_statchg(device_t dev)
{
	struct awg_softc *sc;

	sc = device_get_softc(dev);

	taskqueue_enqueue(taskqueue_swi, &sc->link_task);
}

/* ifmedia status callback: report current media from the mii layer. */
static void
awg_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct awg_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);

	AWG_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	AWG_UNLOCK(sc);
}

/* ifmedia change callback: push the requested media to the mii layer. */
static int
awg_media_change(if_t ifp)
{
	struct awg_softc *sc;
	struct mii_data
*mii;
	int error;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);

	AWG_LOCK(sc);
	error = mii_mediachg(mii);
	AWG_UNLOCK(sc);

	return (error);
}

/*
 * Write one TX descriptor.  A zero paddr/len pair clears the slot and
 * decrements tx.queued; otherwise the slot is armed (TX_DESC_CTL hands
 * it to the hardware) and tx.queued is incremented.  The status word is
 * written last so the DMA engine never sees a half-built descriptor.
 * NOTE(review): the TX_INT_CTL test assumes awg_tx_interval is a power
 * of two — TODO confirm the tunable is validated somewhere.
 */
static void
awg_setup_txdesc(struct awg_softc *sc, int index, int flags, bus_addr_t paddr,
    u_int len)
{
	uint32_t status, size;

	if (paddr == 0 || len == 0) {
		status = 0;
		size = 0;
		--sc->tx.queued;
	} else {
		status = TX_DESC_CTL;
		size = flags | len;
		if ((index & (awg_tx_interval - 1)) == 0)
			size |= TX_INT_CTL;
		++sc->tx.queued;
	}

	sc->tx.desc_ring[index].addr = htole32((uint32_t)paddr);
	sc->tx.desc_ring[index].size = htole32(size);
	sc->tx.desc_ring[index].status = htole32(status);
}

/*
 * DMA-map an outgoing mbuf chain starting at ring slot 'index' and fill
 * the corresponding descriptors (first gets TX_FIR_DESC and any checksum
 * offload flags, last gets TX_LAST_DESC).  Returns the number of
 * descriptors consumed, or 0 on mapping failure.  On an EFBIG first
 * attempt the chain is collapsed and *mp updated for the caller.
 */
static int
awg_setup_txbuf(struct awg_softc *sc, int index, struct mbuf **mp)
{
	bus_dma_segment_t segs[TX_MAX_SEGS];
	int error, nsegs, cur, i, flags;
	u_int csum_flags;
	struct mbuf *m;

	m = *mp;
	error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag,
	    sc->tx.buf_map[index].map, m, segs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* Too many segments: collapse and retry once. */
		m = m_collapse(m, M_NOWAIT, TX_MAX_SEGS);
		if (m == NULL) {
			device_printf(sc->dev, "awg_setup_txbuf: m_collapse failed\n");
			return (0);
		}
		*mp = m;
		error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag,
		    sc->tx.buf_map[index].map, m, segs, &nsegs, BUS_DMA_NOWAIT);
	}
	if (error != 0) {
		device_printf(sc->dev, "awg_setup_txbuf: bus_dmamap_load_mbuf_sg failed\n");
		return (0);
	}

	bus_dmamap_sync(sc->tx.buf_tag, sc->tx.buf_map[index].map,
	    BUS_DMASYNC_PREWRITE);

	flags = TX_FIR_DESC;
	if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) {
		/* FULL covers IP + TCP/UDP checksum; IP is header-only. */
		if ((m->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) != 0)
			csum_flags = TX_CHECKSUM_CTL_FULL;
		else
			csum_flags = TX_CHECKSUM_CTL_IP;
		flags |= (csum_flags << TX_CHECKSUM_CTL_SHIFT);
	}

	for (cur = index, i = 0; i < nsegs; i++) {
		/* Only the first slot owns the mbuf for later freeing. */
		sc->tx.buf_map[cur].mbuf = (i == 0 ? m : NULL);
		if (i == nsegs - 1)
			flags |= TX_LAST_DESC;
		awg_setup_txdesc(sc, cur, flags, segs[i].ds_addr,
		    segs[i].ds_len);
		flags &= ~TX_FIR_DESC;
		cur = TX_NEXT(cur);
	}

	return (nsegs);
}

/*
 * Write one RX descriptor pointing at a freshly loaded buffer.  The
 * status word (RX_DESC_CTL hands the slot to the hardware) is written
 * after the other fields.
 */
static void
awg_setup_rxdesc(struct awg_softc *sc, int index, bus_addr_t paddr)
{
	uint32_t status, size;

	status = RX_DESC_CTL;
	size = MCLBYTES - 1;

	sc->rx.desc_ring[index].addr = htole32((uint32_t)paddr);
	sc->rx.desc_ring[index].size = htole32(size);
	sc->rx.desc_ring[index].next =
	    htole32(sc->rx.desc_ring_paddr + DESC_OFF(RX_NEXT(index)));
	sc->rx.desc_ring[index].status = htole32(status);
}

/*
 * DMA-map a receive mbuf cluster into ring slot 'index' and arm the
 * descriptor.  The payload is offset by ETHER_ALIGN so the IP header
 * ends up 4-byte aligned.  Returns 0 or the busdma error.
 */
static int
awg_setup_rxbuf(struct awg_softc *sc, int index, struct mbuf *m)
{
	bus_dma_segment_t seg;
	int error, nsegs;

	m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_sg(sc->rx.buf_tag,
	    sc->rx.buf_map[index].map, m, &seg, &nsegs, 0);
	if (error != 0)
		return (error);

	bus_dmamap_sync(sc->rx.buf_tag, sc->rx.buf_map[index].map,
	    BUS_DMASYNC_PREREAD);

	sc->rx.buf_map[index].mbuf = m;
	awg_setup_rxdesc(sc, index, seg.ds_addr);

	return (0);
}

/*
 * Allocate an mbuf cluster sized to its full external buffer, for use
 * as an RX buffer.  Returns NULL if allocation fails.
 */
static struct mbuf *
awg_alloc_mbufcl(struct awg_softc *sc)
{
	struct mbuf *m;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m != NULL)
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

	return (m);
}

/*
 * Drain the interface send queue into the TX ring and kick the DMA
 * engine.  Caller must hold the softc lock; does nothing when the link
 * is down or the interface is not running.
 */
static void
awg_start_locked(struct awg_softc *sc)
{
	struct mbuf *m;
	uint32_t val;
	if_t ifp;
	int cnt, nsegs;

	AWG_ASSERT_LOCKED(sc);

	if (!sc->link)
		return;

	ifp = sc->ifp;

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	for (cnt = 0; ; cnt++) {
		/* Stop early if a max-sized packet could not fit. */
		if (sc->tx.queued >= TX_DESC_COUNT - TX_MAX_SEGS) {
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}

		m =
if_dequeue(ifp);
		if (m == NULL)
			break;

		nsegs = awg_setup_txbuf(sc, sc->tx.cur, &m);
		if (nsegs == 0) {
			/* Mapping failed: requeue and retry later. */
			if_sendq_prepend(ifp, m);
			break;
		}
		if_bpfmtap(ifp, m);
		sc->tx.cur = TX_SKIP(sc->tx.cur, nsegs);
	}

	if (cnt != 0) {
		bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Start and run TX DMA */
		val = RD4(sc, EMAC_TX_CTL_1);
		WR4(sc, EMAC_TX_CTL_1, val | TX_DMA_START);
	}
}

/* if_start entry point: take the lock and run awg_start_locked(). */
static void
awg_start(if_t ifp)
{
	struct awg_softc *sc;

	sc = if_getsoftc(ifp);

	AWG_LOCK(sc);
	awg_start_locked(sc);
	AWG_UNLOCK(sc);
}

/*
 * Once-per-second callout (runs with the softc lock held via the
 * callout mutex): poll the PHY, and restart transmission if the link
 * just came up.  Reschedules itself while the interface is running.
 */
static void
awg_tick(void *softc)
{
	struct awg_softc *sc;
	struct mii_data *mii;
	if_t ifp;
	int link;

	sc = softc;
	ifp = sc->ifp;
	mii = device_get_softc(sc->miibus);

	AWG_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	link = sc->link;
	mii_tick(mii);
	if (sc->link && !link)
		awg_start_locked(sc);

	callout_reset(&sc->stat_ch, hz, awg_tick, sc);
}

/* Bit Reversal - http://aggregate.org/MAGIC/#Bit%20Reversal */
static uint32_t
bitrev32(uint32_t x)
{
	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));

	return (x >> 16) | (x << 16);
}

/*
 * Program the unicast address, multicast hash table and RX frame filter
 * from the current interface state (promiscuous / allmulti / multicast
 * membership).  Caller must hold the softc lock.
 */
static void
awg_setup_rxfilter(struct awg_softc *sc)
{
	uint32_t val, crc, hashreg, hashbit, hash[2], machi, maclo;
	int mc_count, mcnt, i;
	uint8_t *eaddr, *mta;
	if_t ifp;

	AWG_ASSERT_LOCKED(sc);

	ifp = sc->ifp;
	val = 0;
	hash[0] = hash[1] = 0;

	mc_count = if_multiaddr_count(ifp, -1);

	if (if_getflags(ifp) & IFF_PROMISC)
		val |= DIS_ADDR_FILTER;
	else if (if_getflags(ifp) & IFF_ALLMULTI) {
		val |= RX_ALL_MULTICAST;
		hash[0] = hash[1] = ~0;
	} else if (mc_count > 0) {
		val |= HASH_MULTICAST;

		mta = malloc(sizeof(unsigned char) * ETHER_ADDR_LEN * mc_count,
		    M_DEVBUF, M_NOWAIT);
		if (mta == NULL) {
			if_printf(ifp,
			    "failed to allocate temporary multicast list\n");
			return;
		}

		if_multiaddr_array(ifp, mta, &mcnt, mc_count);
		for (i = 0; i < mcnt; i++) {
			/* Hash bit = top 6 bits of the bit-reversed CRC. */
			crc = ether_crc32_le(mta + (i * ETHER_ADDR_LEN),
			    ETHER_ADDR_LEN) & 0x7f;
			crc = bitrev32(~crc) >> 26;
			hashreg = (crc >> 5);
			hashbit = (crc & 0x1f);
			hash[hashreg] |= (1 << hashbit);
		}

		free(mta, M_DEVBUF);
	}

	/* Write our unicast address */
	eaddr = IF_LLADDR(ifp);
	machi = (eaddr[5] << 8) | eaddr[4];
	maclo = (eaddr[3] << 24) | (eaddr[2] << 16) | (eaddr[1] << 8) |
	    (eaddr[0] << 0);
	WR4(sc, EMAC_ADDR_HIGH(0), machi);
	WR4(sc, EMAC_ADDR_LOW(0), maclo);

	/* Multicast hash filters */
	WR4(sc, EMAC_RX_HASH_0, hash[1]);
	WR4(sc, EMAC_RX_HASH_1, hash[0]);

	/* RX frame filter config */
	WR4(sc, EMAC_RX_FRM_FLT, val);
}

static void
awg_enable_intr(struct awg_softc *sc)
{
	/* Enable interrupts */
	WR4(sc, EMAC_INT_EN, RX_INT_EN | TX_INT_EN | TX_BUF_UA_INT_EN);
}

static void
awg_disable_intr(struct awg_softc *sc)
{
	/* Disable interrupts */
	WR4(sc, EMAC_INT_EN, 0);
}

/*
 * Bring the interface up: program the RX filter, DMA parameters and
 * interrupt state, then enable TX/RX DMA and the MAC, and start the
 * periodic tick.  Caller must hold the softc lock; no-op if already
 * running.
 */
static void
awg_init_locked(struct awg_softc *sc)
{
	struct mii_data *mii;
	uint32_t val;
	if_t ifp;

	mii = device_get_softc(sc->miibus);
	ifp = sc->ifp;

	AWG_ASSERT_LOCKED(sc);

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		return;

	awg_setup_rxfilter(sc);

	/* Configure DMA burst length and priorities */
	val = awg_burst_len << BASIC_CTL_BURST_LEN_SHIFT;
	if (awg_rx_tx_pri)
		val |= BASIC_CTL_RX_TX_PRI;
	WR4(sc, EMAC_BASIC_CTL_1, val);

	/* Enable interrupts (unless polling is active on this interface) */
#ifdef DEVICE_POLLING
	if ((if_getcapenable(ifp) & IFCAP_POLLING) == 0)
		awg_enable_intr(sc);
	else
		awg_disable_intr(sc);
#else
	awg_enable_intr(sc);
#endif

	/* Enable transmit DMA */
	val = RD4(sc, EMAC_TX_CTL_1);
	WR4(sc, EMAC_TX_CTL_1, val | TX_DMA_EN | TX_MD | TX_NEXT_FRAME);

	/* Enable receive DMA */
	val = RD4(sc, EMAC_RX_CTL_1);
	WR4(sc, EMAC_RX_CTL_1, val | RX_DMA_EN | RX_MD);

	/* Enable transmitter */
	val = RD4(sc, EMAC_TX_CTL_0);
	WR4(sc, EMAC_TX_CTL_0, val | TX_EN);

	/* Enable receiver */
	val = RD4(sc, EMAC_RX_CTL_0);
	WR4(sc, EMAC_RX_CTL_0, val | RX_EN | CHECK_CRC);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	mii_mediachg(mii);
	callout_reset(&sc->stat_ch, hz, awg_tick, sc);
}

/* if_init entry point: take the lock and run awg_init_locked(). */
static void
awg_init(void *softc)
{
	struct awg_softc *sc;

	sc = softc;

	AWG_LOCK(sc);
	awg_init_locked(sc);
	AWG_UNLOCK(sc);
}

/*
 * Bring the interface down: stop the tick, quiesce TX (flushing the
 * FIFO), disable the MAC and both DMA engines, and mark the interface
 * not running.  Caller must hold the softc lock.
 */
static void
awg_stop(struct awg_softc *sc)
{
	if_t ifp;
	uint32_t val;

	AWG_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	callout_stop(&sc->stat_ch);

	/* Stop transmit DMA and flush data in the TX FIFO */
	val = RD4(sc, EMAC_TX_CTL_1);
	val &= ~TX_DMA_EN;
	val |= FLUSH_TX_FIFO;
	WR4(sc, EMAC_TX_CTL_1, val);

	/* Disable transmitter */
	val = RD4(sc, EMAC_TX_CTL_0);
	WR4(sc, EMAC_TX_CTL_0, val & ~TX_EN);

	/* Disable receiver */
	val = RD4(sc, EMAC_RX_CTL_0);
	WR4(sc, EMAC_RX_CTL_0, val & ~RX_EN);

	/* Disable interrupts */
	awg_disable_intr(sc);

	/* Disable transmit DMA */
	val = RD4(sc, EMAC_TX_CTL_1);
	WR4(sc, EMAC_TX_CTL_1, val & ~TX_DMA_EN);

	/* Disable receive DMA */
	val = RD4(sc, EMAC_RX_CTL_1);
	WR4(sc, EMAC_RX_CTL_1, val & ~RX_DMA_EN);

	sc->link = 0;

	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}

static int
awg_rxintr(struct awg_softc *sc)
{
	if_t ifp;
	struct mbuf *m, *m0, *mh, *mt;
	int error, index, len, cnt, npkt;
	uint32_t status;

	/*
	 * Harvest completed RX descriptors: hand each received frame up
	 * the stack (in batches of awg_rx_batch, dropping the softc lock
	 * around if_input) and re-arm each slot with a fresh cluster.
	 * Returns the number of packets received.
	 */
	ifp = sc->ifp;
	mh = mt = NULL;		/* head/tail of the pending input chain */
	cnt = 0;
	npkt = 0;

	bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (index = sc->rx.cur; ; index = RX_NEXT(index)) {
		status = le32toh(sc->rx.desc_ring[index].status);
		/* RX_DESC_CTL still set means hardware owns the slot. */
		if ((status & RX_DESC_CTL) != 0)
			break;

		bus_dmamap_sync(sc->rx.buf_tag, sc->rx.buf_map[index].map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->rx.buf_tag, sc->rx.buf_map[index].map);

		len = (status & RX_FRM_LEN) >> RX_FRM_LEN_SHIFT;
		if (len != 0) {
			m = sc->rx.buf_map[index].mbuf;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = len;
			m->m_len = len;
			if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

			/* Translate hardware checksum status, if enabled. */
			if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0 &&
			    (status & RX_FRM_TYPE) != 0) {
				m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
				if ((status & RX_HEADER_ERR) == 0)
					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				if ((status & RX_PAYLOAD_ERR) == 0) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

			m->m_nextpkt = NULL;
			if (mh == NULL)
				mh = m;
			else
				mt->m_nextpkt = m;
			mt = m;
			++cnt;
			++npkt;

			if (cnt == awg_rx_batch) {
				/* Drop the lock while the stack runs. */
				AWG_UNLOCK(sc);
				if_input(ifp, mh);
				AWG_LOCK(sc);
				mh = mt = NULL;
				cnt = 0;
			}

		}

		if ((m0 = awg_alloc_mbufcl(sc)) != NULL) {
			error = awg_setup_rxbuf(sc, index, m0);
			if (error != 0) {
				/* XXX hole in RX ring */
			}
		} else
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
	}

	if (index != sc->rx.cur) {
		bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map,
		    BUS_DMASYNC_PREWRITE);
	}

	/* Flush any remaining partial batch. */
	if (mh != NULL) {
		AWG_UNLOCK(sc);
		if_input(ifp, mh);
		AWG_LOCK(sc);
	}

	sc->rx.cur = index;

	return (npkt);
}

/*
 * Reclaim completed TX descriptors: unload and free the transmitted
 * mbufs, clear the slots, and allow the send queue to resume.  Caller
 * must hold the softc lock.
 */
static void
awg_txintr(struct awg_softc *sc)
{
	struct awg_bufmap *bmap;
	struct emac_desc *desc;
	uint32_t status;
	if_t ifp;
	int i;

	AWG_ASSERT_LOCKED(sc);

	bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	ifp = sc->ifp;
	for (i = sc->tx.next; sc->tx.queued > 0; i = TX_NEXT(i)) {
		desc = &sc->tx.desc_ring[i];
		status = le32toh(desc->status);
		/* TX_DESC_CTL still set means hardware owns the slot. */
		if ((status & TX_DESC_CTL) != 0)
			break;
		bmap = &sc->tx.buf_map[i];
		if (bmap->mbuf != NULL) {
			bus_dmamap_sync(sc->tx.buf_tag, bmap->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->tx.buf_tag, bmap->map);
			m_freem(bmap->mbuf);
			bmap->mbuf = NULL;
		}
		awg_setup_txdesc(sc, i, 0, 0, 0);
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
	}

	sc->tx.next = i;

	bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map,
	    BUS_DMASYNC_PREWRITE);
}

/*
 * Interrupt handler: acknowledge all pending causes, then service RX
 * and TX completions and restart transmission if packets are queued.
 */
static void
awg_intr(void *arg)
{
	struct awg_softc *sc;
	uint32_t val;

	sc = arg;

	AWG_LOCK(sc);
	val = RD4(sc, EMAC_INT_STA);
	WR4(sc, EMAC_INT_STA, val);	/* write-1-to-clear ack */

	if (val & RX_INT)
		awg_rxintr(sc);

	if (val & (TX_INT|TX_BUF_UA_INT)) {
		awg_txintr(sc);
		if (!if_sendq_empty(sc->ifp))
			awg_start_locked(sc);
	}

	AWG_UNLOCK(sc);
}

#ifdef DEVICE_POLLING
/*
 * DEVICE_POLLING handler: service RX/TX without interrupts; on
 * POLL_AND_CHECK_STATUS also acknowledge any latched causes.  Returns
 * the number of packets received.
 */
static int
awg_poll(if_t ifp, enum poll_cmd cmd, int count)
{
	struct awg_softc *sc;
	uint32_t val;
	int rx_npkts;

	sc = if_getsoftc(ifp);
	rx_npkts = 0;

	AWG_LOCK(sc);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
		AWG_UNLOCK(sc);
		return (0);
	}

	rx_npkts = awg_rxintr(sc);
	awg_txintr(sc);
	if (!if_sendq_empty(ifp))
		awg_start_locked(sc);

	if (cmd == POLL_AND_CHECK_STATUS) {
		val = RD4(sc, EMAC_INT_STA);
		if (val != 0)
			WR4(sc, EMAC_INT_STA, val);
	}

	AWG_UNLOCK(sc);

	return (rx_npkts);
}
#endif

/*
 * ioctl entry point: interface flags, multicast membership, media and
 * capability changes; everything else goes to ether_ioctl().
 */
static int
awg_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct awg_softc *sc;
	struct mii_data *mii;
	struct ifreq *ifr;
	int flags, mask, error;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);
	ifr = (struct ifreq *)data;
	error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		AWG_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				/* Only reprogram the filter if it changed. */
				flags = if_getflags(ifp) ^ sc->if_flags;
				if ((flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0)
					awg_setup_rxfilter(sc);
			} else
				awg_init_locked(sc);
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				awg_stop(sc);
		}
		sc->if_flags = if_getflags(ifp);
		AWG_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			AWG_LOCK(sc);
			awg_setup_rxfilter(sc);
			AWG_UNLOCK(sc);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
				error = ether_poll_register(awg_poll, ifp);
				if (error != 0)
					break;
				AWG_LOCK(sc);
				awg_disable_intr(sc);
				if_setcapenablebit(ifp, IFCAP_POLLING, 0);
				AWG_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				AWG_LOCK(sc);
				awg_enable_intr(sc);
				if_setcapenablebit(ifp, 0, IFCAP_POLLING);
				AWG_UNLOCK(sc);
			}
		}
#endif
		if (mask & IFCAP_VLAN_MTU)
			if_togglecapenable(ifp, IFCAP_VLAN_MTU);
		if (mask & IFCAP_RXCSUM)
			if_togglecapenable(ifp, IFCAP_RXCSUM);
		if (mask & IFCAP_TXCSUM)
			if_togglecapenable(ifp, IFCAP_TXCSUM);
		if
((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
			if_sethwassistbits(ifp, CSUM_IP | CSUM_UDP | CSUM_TCP, 0);
		else
			if_sethwassistbits(ifp, 0, CSUM_IP | CSUM_UDP | CSUM_TCP);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

/*
 * Configure the PHY interface (MII/RMII/RGMII) from the FDT "phy-mode"
 * property, either through the syscon EMAC clock register (when a
 * syscon resource is present) or by reparenting/enabling the TX clock.
 * Returns 0 on success (including when "phy-mode" is absent) or a clock
 * framework error.
 */
static int
awg_setup_phy(device_t dev)
{
	struct awg_softc *sc;
	clk_t clk_tx, clk_tx_parent;
	const char *tx_parent_name;
	char *phy_type;
	phandle_t node;
	uint32_t reg, tx_delay, rx_delay;
	int error;

	sc = device_get_softc(dev);
	node = ofw_bus_get_node(dev);

	if (OF_getprop_alloc(node, "phy-mode", 1, (void **)&phy_type) == 0)
		return (0);

	if (bootverbose)
		device_printf(dev, "PHY type: %s, conf mode: %s\n", phy_type,
		    sc->res[_RES_SYSCON] != NULL ? "reg" : "clk");

	if (sc->res[_RES_SYSCON] != NULL) {
		reg = bus_read_4(sc->res[_RES_SYSCON], 0);
		reg &= ~(EMAC_CLK_PIT | EMAC_CLK_SRC | EMAC_CLK_RMII_EN);
		if (strcmp(phy_type, "rgmii") == 0)
			reg |= EMAC_CLK_PIT_RGMII | EMAC_CLK_SRC_RGMII;
		else if (strcmp(phy_type, "rmii") == 0)
			reg |= EMAC_CLK_RMII_EN;
		else
			reg |= EMAC_CLK_PIT_MII | EMAC_CLK_SRC_MII;

		/* Optional TX/RX clock delays from the device tree. */
		if (OF_getencprop(node, "tx-delay", &tx_delay,
		    sizeof(tx_delay)) > 0) {
			reg &= ~EMAC_CLK_ETXDC;
			reg |= (tx_delay << EMAC_CLK_ETXDC_SHIFT);
		}
		if (OF_getencprop(node, "rx-delay", &rx_delay,
		    sizeof(rx_delay)) > 0) {
			reg &= ~EMAC_CLK_ERXDC;
			reg |= (rx_delay << EMAC_CLK_ERXDC_SHIFT);
		}

		/* H3: select and power up the internal PHY if requested. */
		if (sc->type == EMAC_H3) {
			if (OF_hasprop(node, "allwinner,use-internal-phy")) {
				reg |= EMAC_CLK_EPHY_SELECT;
				reg &= ~EMAC_CLK_EPHY_SHUTDOWN;
				if (OF_hasprop(node,
				    "allwinner,leds-active-low"))
					reg |= EMAC_CLK_EPHY_LED_POL;
				else
					reg &= ~EMAC_CLK_EPHY_LED_POL;

				/* Set internal PHY addr to 1 */
				reg &= ~EMAC_CLK_EPHY_ADDR;
				reg |= (1 << EMAC_CLK_EPHY_ADDR_SHIFT);
			} else {
				reg &= ~EMAC_CLK_EPHY_SELECT;
			}
		}

		if (bootverbose)
			device_printf(dev, "EMAC clock: 0x%08x\n", reg);
		bus_write_4(sc->res[_RES_SYSCON], 0, reg);
	} else {
		if (strcmp(phy_type, "rgmii") == 0)
			tx_parent_name = "emac_int_tx";
		else
			tx_parent_name = "mii_phy_tx";

		/* Get the TX clock */
		error = clk_get_by_ofw_name(dev, 0, "tx", &clk_tx);
		if (error != 0) {
			device_printf(dev, "cannot get tx clock\n");
			goto fail;
		}

		/* Find the desired parent clock based on phy-mode property */
		error = clk_get_by_name(dev, tx_parent_name, &clk_tx_parent);
		if (error != 0) {
			device_printf(dev, "cannot get clock '%s'\n",
			    tx_parent_name);
			goto fail;
		}

		/* Set TX clock parent */
		error = clk_set_parent_by_clk(clk_tx, clk_tx_parent);
		if (error != 0) {
			device_printf(dev, "cannot set tx clock parent\n");
			goto fail;
		}

		/* Enable TX clock */
		error = clk_enable(clk_tx);
		if (error != 0) {
			device_printf(dev, "cannot enable tx clock\n");
			goto fail;
		}
	}

	error = 0;

fail:
	OF_prop_free(phy_type);
	return (error);
}

/*
 * Acquire and enable the external resources the EMAC depends on: AHB
 * (and optional EPHY) clocks and resets, PHY mode configuration, an
 * optional PHY supply regulator, and finally the MDC divider derived
 * from the AHB clock frequency.  On failure all acquired resources are
 * released.
 */
static int
awg_setup_extres(device_t dev)
{
	struct awg_softc *sc;
	hwreset_t rst_ahb, rst_ephy;
	clk_t clk_ahb, clk_ephy;
	regulator_t reg;
	phandle_t node;
	uint64_t freq;
	int error, div;

	sc = device_get_softc(dev);
	node = ofw_bus_get_node(dev);
	rst_ahb = rst_ephy = NULL;
	clk_ahb = clk_ephy = NULL;
	reg = NULL;

	/* Get AHB clock and reset resources */
	error = hwreset_get_by_ofw_name(dev, 0, "ahb", &rst_ahb);
	if (error != 0) {
		device_printf(dev, "cannot get ahb reset\n");
		goto fail;
	}
	/* The EPHY reset/clock only exist on variants with an internal PHY. */
	if (hwreset_get_by_ofw_name(dev, 0, "ephy", &rst_ephy) != 0)
		rst_ephy = NULL;
	error = clk_get_by_ofw_name(dev, 0, "ahb", &clk_ahb);
	if (error != 0) {
		device_printf(dev, "cannot get ahb clock\n");
		goto fail;
	}
	if (clk_get_by_ofw_name(dev, 0, "ephy", &clk_ephy) != 0)
		clk_ephy = NULL;

	/* Configure PHY for MII or RGMII mode */
	if (awg_setup_phy(dev) != 0)
		goto fail;

	/* Enable clocks */
	error = clk_enable(clk_ahb);
	if (error != 0) {
		device_printf(dev, "cannot enable ahb clock\n");
		goto fail;
	}
	if (clk_ephy != NULL) {
		error = clk_enable(clk_ephy);
		if (error != 0) {
			device_printf(dev, "cannot enable ephy clock\n");
			goto fail;
		}
	}

	/* De-assert reset */
	error = hwreset_deassert(rst_ahb);
	if (error != 0) {
		device_printf(dev, "cannot de-assert ahb reset\n");
		goto fail;
	}
	if (rst_ephy != NULL) {
		error = hwreset_deassert(rst_ephy);
		if (error != 0) {
			device_printf(dev, "cannot de-assert ephy reset\n");
			goto fail;
		}
	}

	/* Enable PHY regulator if applicable */
	if (regulator_get_by_ofw_property(dev, 0, "phy-supply", &reg) == 0) {
		error = regulator_enable(reg);
		if (error != 0) {
			device_printf(dev, "cannot enable PHY regulator\n");
			goto fail;
		}
	}

	/* Determine MDC clock divide ratio based on AHB clock */
	error = clk_get_freq(clk_ahb, &freq);
	if (error != 0) {
		device_printf(dev, "cannot get AHB clock frequency\n");
		goto fail;
	}
	div = freq / MDIO_FREQ;
	if (div <= 16)
		sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_16;
	else if (div <= 32)
		sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_32;
	else if (div <= 64)
		sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_64;
	else if (div <= 128)
		sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_128;
	else {
		device_printf(dev, "cannot determine MDC clock divide ratio\n");
		error = ENXIO;
		goto fail;
	}

	if (bootverbose)
		device_printf(dev, "AHB frequency %ju Hz, MDC 
div: 0x%x\n", 1265 (uintmax_t)freq, sc->mdc_div_ratio_m); 1266 1267 return (0); 1268 1269 fail: 1270 if (reg != NULL) 1271 regulator_release(reg); 1272 if (clk_ephy != NULL) 1273 clk_release(clk_ephy); 1274 if (clk_ahb != NULL) 1275 clk_release(clk_ahb); 1276 if (rst_ephy != NULL) 1277 hwreset_release(rst_ephy); 1278 if (rst_ahb != NULL) 1279 hwreset_release(rst_ahb); 1280 return (error); 1281 } 1282 1283 static void 1284 awg_get_eaddr(device_t dev, uint8_t *eaddr) 1285 { 1286 struct awg_softc *sc; 1287 uint32_t maclo, machi, rnd; 1288 u_char rootkey[16]; 1289 1290 sc = device_get_softc(dev); 1291 1292 machi = RD4(sc, EMAC_ADDR_HIGH(0)) & 0xffff; 1293 maclo = RD4(sc, EMAC_ADDR_LOW(0)); 1294 1295 if (maclo == 0xffffffff && machi == 0xffff) { 1296 /* MAC address in hardware is invalid, create one */ 1297 if (aw_sid_get_rootkey(rootkey) == 0 && 1298 (rootkey[3] | rootkey[12] | rootkey[13] | rootkey[14] | 1299 rootkey[15]) != 0) { 1300 /* MAC address is derived from the root key in SID */ 1301 maclo = (rootkey[13] << 24) | (rootkey[12] << 16) | 1302 (rootkey[3] << 8) | 0x02; 1303 machi = (rootkey[15] << 8) | rootkey[14]; 1304 } else { 1305 /* Create one */ 1306 rnd = arc4random(); 1307 maclo = 0x00f2 | (rnd & 0xffff0000); 1308 machi = rnd & 0xffff; 1309 } 1310 } 1311 1312 eaddr[0] = maclo & 0xff; 1313 eaddr[1] = (maclo >> 8) & 0xff; 1314 eaddr[2] = (maclo >> 16) & 0xff; 1315 eaddr[3] = (maclo >> 24) & 0xff; 1316 eaddr[4] = machi & 0xff; 1317 eaddr[5] = (machi >> 8) & 0xff; 1318 } 1319 1320 #ifdef AWG_DEBUG 1321 static void 1322 awg_dump_regs(device_t dev) 1323 { 1324 static const struct { 1325 const char *name; 1326 u_int reg; 1327 } regs[] = { 1328 { "BASIC_CTL_0", EMAC_BASIC_CTL_0 }, 1329 { "BASIC_CTL_1", EMAC_BASIC_CTL_1 }, 1330 { "INT_STA", EMAC_INT_STA }, 1331 { "INT_EN", EMAC_INT_EN }, 1332 { "TX_CTL_0", EMAC_TX_CTL_0 }, 1333 { "TX_CTL_1", EMAC_TX_CTL_1 }, 1334 { "TX_FLOW_CTL", EMAC_TX_FLOW_CTL }, 1335 { "TX_DMA_LIST", EMAC_TX_DMA_LIST }, 1336 { "RX_CTL_0", 
EMAC_RX_CTL_0 }, 1337 { "RX_CTL_1", EMAC_RX_CTL_1 }, 1338 { "RX_DMA_LIST", EMAC_RX_DMA_LIST }, 1339 { "RX_FRM_FLT", EMAC_RX_FRM_FLT }, 1340 { "RX_HASH_0", EMAC_RX_HASH_0 }, 1341 { "RX_HASH_1", EMAC_RX_HASH_1 }, 1342 { "MII_CMD", EMAC_MII_CMD }, 1343 { "ADDR_HIGH0", EMAC_ADDR_HIGH(0) }, 1344 { "ADDR_LOW0", EMAC_ADDR_LOW(0) }, 1345 { "TX_DMA_STA", EMAC_TX_DMA_STA }, 1346 { "TX_DMA_CUR_DESC", EMAC_TX_DMA_CUR_DESC }, 1347 { "TX_DMA_CUR_BUF", EMAC_TX_DMA_CUR_BUF }, 1348 { "RX_DMA_STA", EMAC_RX_DMA_STA }, 1349 { "RX_DMA_CUR_DESC", EMAC_RX_DMA_CUR_DESC }, 1350 { "RX_DMA_CUR_BUF", EMAC_RX_DMA_CUR_BUF }, 1351 { "RGMII_STA", EMAC_RGMII_STA }, 1352 }; 1353 struct awg_softc *sc; 1354 unsigned int n; 1355 1356 sc = device_get_softc(dev); 1357 1358 for (n = 0; n < nitems(regs); n++) 1359 device_printf(dev, " %-20s %08x\n", regs[n].name, 1360 RD4(sc, regs[n].reg)); 1361 } 1362 #endif 1363 1364 #define GPIO_ACTIVE_LOW 1 1365 1366 static int 1367 awg_phy_reset(device_t dev) 1368 { 1369 pcell_t gpio_prop[4], delay_prop[3]; 1370 phandle_t node, gpio_node; 1371 device_t gpio; 1372 uint32_t pin, flags; 1373 uint32_t pin_value; 1374 1375 node = ofw_bus_get_node(dev); 1376 if (OF_getencprop(node, "allwinner,reset-gpio", gpio_prop, 1377 sizeof(gpio_prop)) <= 0) 1378 return (0); 1379 1380 if (OF_getencprop(node, "allwinner,reset-delays-us", delay_prop, 1381 sizeof(delay_prop)) <= 0) 1382 return (ENXIO); 1383 1384 gpio_node = OF_node_from_xref(gpio_prop[0]); 1385 if ((gpio = OF_device_from_xref(gpio_prop[0])) == NULL) 1386 return (ENXIO); 1387 1388 if (GPIO_MAP_GPIOS(gpio, node, gpio_node, nitems(gpio_prop) - 1, 1389 gpio_prop + 1, &pin, &flags) != 0) 1390 return (ENXIO); 1391 1392 pin_value = GPIO_PIN_LOW; 1393 if (OF_hasprop(node, "allwinner,reset-active-low")) 1394 pin_value = GPIO_PIN_HIGH; 1395 1396 if (flags & GPIO_ACTIVE_LOW) 1397 pin_value = !pin_value; 1398 1399 GPIO_PIN_SETFLAGS(gpio, pin, GPIO_PIN_OUTPUT); 1400 GPIO_PIN_SET(gpio, pin, pin_value); 1401 DELAY(delay_prop[0]); 1402 
GPIO_PIN_SET(gpio, pin, !pin_value); 1403 DELAY(delay_prop[1]); 1404 GPIO_PIN_SET(gpio, pin, pin_value); 1405 DELAY(delay_prop[2]); 1406 1407 return (0); 1408 } 1409 1410 static int 1411 awg_reset(device_t dev) 1412 { 1413 struct awg_softc *sc; 1414 int retry; 1415 1416 sc = device_get_softc(dev); 1417 1418 /* Reset PHY if necessary */ 1419 if (awg_phy_reset(dev) != 0) { 1420 device_printf(dev, "failed to reset PHY\n"); 1421 return (ENXIO); 1422 } 1423 1424 /* Soft reset all registers and logic */ 1425 WR4(sc, EMAC_BASIC_CTL_1, BASIC_CTL_SOFT_RST); 1426 1427 /* Wait for soft reset bit to self-clear */ 1428 for (retry = SOFT_RST_RETRY; retry > 0; retry--) { 1429 if ((RD4(sc, EMAC_BASIC_CTL_1) & BASIC_CTL_SOFT_RST) == 0) 1430 break; 1431 DELAY(10); 1432 } 1433 if (retry == 0) { 1434 device_printf(dev, "soft reset timed out\n"); 1435 #ifdef AWG_DEBUG 1436 awg_dump_regs(dev); 1437 #endif 1438 return (ETIMEDOUT); 1439 } 1440 1441 return (0); 1442 } 1443 1444 static void 1445 awg_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1446 { 1447 if (error != 0) 1448 return; 1449 *(bus_addr_t *)arg = segs[0].ds_addr; 1450 } 1451 1452 static int 1453 awg_setup_dma(device_t dev) 1454 { 1455 struct awg_softc *sc; 1456 struct mbuf *m; 1457 int error, i; 1458 1459 sc = device_get_softc(dev); 1460 1461 /* Setup TX ring */ 1462 error = bus_dma_tag_create( 1463 bus_get_dma_tag(dev), /* Parent tag */ 1464 DESC_ALIGN, 0, /* alignment, boundary */ 1465 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1466 BUS_SPACE_MAXADDR, /* highaddr */ 1467 NULL, NULL, /* filter, filterarg */ 1468 TX_DESC_SIZE, 1, /* maxsize, nsegs */ 1469 TX_DESC_SIZE, /* maxsegsize */ 1470 0, /* flags */ 1471 NULL, NULL, /* lockfunc, lockarg */ 1472 &sc->tx.desc_tag); 1473 if (error != 0) { 1474 device_printf(dev, "cannot create TX descriptor ring tag\n"); 1475 return (error); 1476 } 1477 1478 error = bus_dmamem_alloc(sc->tx.desc_tag, (void **)&sc->tx.desc_ring, 1479 BUS_DMA_COHERENT | BUS_DMA_WAITOK | 
BUS_DMA_ZERO, &sc->tx.desc_map); 1480 if (error != 0) { 1481 device_printf(dev, "cannot allocate TX descriptor ring\n"); 1482 return (error); 1483 } 1484 1485 error = bus_dmamap_load(sc->tx.desc_tag, sc->tx.desc_map, 1486 sc->tx.desc_ring, TX_DESC_SIZE, awg_dmamap_cb, 1487 &sc->tx.desc_ring_paddr, 0); 1488 if (error != 0) { 1489 device_printf(dev, "cannot load TX descriptor ring\n"); 1490 return (error); 1491 } 1492 1493 for (i = 0; i < TX_DESC_COUNT; i++) 1494 sc->tx.desc_ring[i].next = 1495 htole32(sc->tx.desc_ring_paddr + DESC_OFF(TX_NEXT(i))); 1496 1497 error = bus_dma_tag_create( 1498 bus_get_dma_tag(dev), /* Parent tag */ 1499 1, 0, /* alignment, boundary */ 1500 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1501 BUS_SPACE_MAXADDR, /* highaddr */ 1502 NULL, NULL, /* filter, filterarg */ 1503 MCLBYTES, TX_MAX_SEGS, /* maxsize, nsegs */ 1504 MCLBYTES, /* maxsegsize */ 1505 0, /* flags */ 1506 NULL, NULL, /* lockfunc, lockarg */ 1507 &sc->tx.buf_tag); 1508 if (error != 0) { 1509 device_printf(dev, "cannot create TX buffer tag\n"); 1510 return (error); 1511 } 1512 1513 sc->tx.queued = TX_DESC_COUNT; 1514 for (i = 0; i < TX_DESC_COUNT; i++) { 1515 error = bus_dmamap_create(sc->tx.buf_tag, 0, 1516 &sc->tx.buf_map[i].map); 1517 if (error != 0) { 1518 device_printf(dev, "cannot create TX buffer map\n"); 1519 return (error); 1520 } 1521 awg_setup_txdesc(sc, i, 0, 0, 0); 1522 } 1523 1524 /* Setup RX ring */ 1525 error = bus_dma_tag_create( 1526 bus_get_dma_tag(dev), /* Parent tag */ 1527 DESC_ALIGN, 0, /* alignment, boundary */ 1528 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1529 BUS_SPACE_MAXADDR, /* highaddr */ 1530 NULL, NULL, /* filter, filterarg */ 1531 RX_DESC_SIZE, 1, /* maxsize, nsegs */ 1532 RX_DESC_SIZE, /* maxsegsize */ 1533 0, /* flags */ 1534 NULL, NULL, /* lockfunc, lockarg */ 1535 &sc->rx.desc_tag); 1536 if (error != 0) { 1537 device_printf(dev, "cannot create RX descriptor ring tag\n"); 1538 return (error); 1539 } 1540 1541 error = 
bus_dmamem_alloc(sc->rx.desc_tag, (void **)&sc->rx.desc_ring, 1542 BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->rx.desc_map); 1543 if (error != 0) { 1544 device_printf(dev, "cannot allocate RX descriptor ring\n"); 1545 return (error); 1546 } 1547 1548 error = bus_dmamap_load(sc->rx.desc_tag, sc->rx.desc_map, 1549 sc->rx.desc_ring, RX_DESC_SIZE, awg_dmamap_cb, 1550 &sc->rx.desc_ring_paddr, 0); 1551 if (error != 0) { 1552 device_printf(dev, "cannot load RX descriptor ring\n"); 1553 return (error); 1554 } 1555 1556 error = bus_dma_tag_create( 1557 bus_get_dma_tag(dev), /* Parent tag */ 1558 1, 0, /* alignment, boundary */ 1559 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1560 BUS_SPACE_MAXADDR, /* highaddr */ 1561 NULL, NULL, /* filter, filterarg */ 1562 MCLBYTES, 1, /* maxsize, nsegs */ 1563 MCLBYTES, /* maxsegsize */ 1564 0, /* flags */ 1565 NULL, NULL, /* lockfunc, lockarg */ 1566 &sc->rx.buf_tag); 1567 if (error != 0) { 1568 device_printf(dev, "cannot create RX buffer tag\n"); 1569 return (error); 1570 } 1571 1572 for (i = 0; i < RX_DESC_COUNT; i++) { 1573 error = bus_dmamap_create(sc->rx.buf_tag, 0, 1574 &sc->rx.buf_map[i].map); 1575 if (error != 0) { 1576 device_printf(dev, "cannot create RX buffer map\n"); 1577 return (error); 1578 } 1579 if ((m = awg_alloc_mbufcl(sc)) == NULL) { 1580 device_printf(dev, "cannot allocate RX mbuf\n"); 1581 return (ENOMEM); 1582 } 1583 error = awg_setup_rxbuf(sc, i, m); 1584 if (error != 0) { 1585 device_printf(dev, "cannot create RX buffer\n"); 1586 return (error); 1587 } 1588 } 1589 bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map, 1590 BUS_DMASYNC_PREWRITE); 1591 1592 /* Write transmit and receive descriptor base address registers */ 1593 WR4(sc, EMAC_TX_DMA_LIST, sc->tx.desc_ring_paddr); 1594 WR4(sc, EMAC_RX_DMA_LIST, sc->rx.desc_ring_paddr); 1595 1596 return (0); 1597 } 1598 1599 static int 1600 awg_probe(device_t dev) 1601 { 1602 if (!ofw_bus_status_okay(dev)) 1603 return (ENXIO); 1604 1605 if 
(ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) 1606 return (ENXIO); 1607 1608 device_set_desc(dev, "Allwinner Gigabit Ethernet"); 1609 return (BUS_PROBE_DEFAULT); 1610 } 1611 1612 static int 1613 awg_attach(device_t dev) 1614 { 1615 uint8_t eaddr[ETHER_ADDR_LEN]; 1616 struct awg_softc *sc; 1617 phandle_t node; 1618 int error; 1619 1620 sc = device_get_softc(dev); 1621 sc->dev = dev; 1622 sc->type = ofw_bus_search_compatible(dev, compat_data)->ocd_data; 1623 node = ofw_bus_get_node(dev); 1624 1625 if (bus_alloc_resources(dev, awg_spec, sc->res) != 0) { 1626 device_printf(dev, "cannot allocate resources for device\n"); 1627 return (ENXIO); 1628 } 1629 1630 mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); 1631 callout_init_mtx(&sc->stat_ch, &sc->mtx, 0); 1632 TASK_INIT(&sc->link_task, 0, awg_link_task, sc); 1633 1634 /* Setup clocks and regulators */ 1635 error = awg_setup_extres(dev); 1636 if (error != 0) 1637 return (error); 1638 1639 /* Read MAC address before resetting the chip */ 1640 awg_get_eaddr(dev, eaddr); 1641 1642 /* Soft reset EMAC core */ 1643 error = awg_reset(dev); 1644 if (error != 0) 1645 return (error); 1646 1647 /* Setup DMA descriptors */ 1648 error = awg_setup_dma(dev); 1649 if (error != 0) 1650 return (error); 1651 1652 /* Install interrupt handler */ 1653 error = bus_setup_intr(dev, sc->res[_RES_IRQ], 1654 INTR_TYPE_NET | INTR_MPSAFE, NULL, awg_intr, sc, &sc->ih); 1655 if (error != 0) { 1656 device_printf(dev, "cannot setup interrupt handler\n"); 1657 return (error); 1658 } 1659 1660 /* Setup ethernet interface */ 1661 sc->ifp = if_alloc(IFT_ETHER); 1662 if_setsoftc(sc->ifp, sc); 1663 if_initname(sc->ifp, device_get_name(dev), device_get_unit(dev)); 1664 if_setflags(sc->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); 1665 if_setstartfn(sc->ifp, awg_start); 1666 if_setioctlfn(sc->ifp, awg_ioctl); 1667 if_setinitfn(sc->ifp, awg_init); 1668 if_setsendqlen(sc->ifp, TX_DESC_COUNT - 1); 1669 
if_setsendqready(sc->ifp); 1670 if_sethwassist(sc->ifp, CSUM_IP | CSUM_UDP | CSUM_TCP); 1671 if_setcapabilities(sc->ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM); 1672 if_setcapenable(sc->ifp, if_getcapabilities(sc->ifp)); 1673 #ifdef DEVICE_POLLING 1674 if_setcapabilitiesbit(sc->ifp, IFCAP_POLLING, 0); 1675 #endif 1676 1677 /* Attach MII driver */ 1678 error = mii_attach(dev, &sc->miibus, sc->ifp, awg_media_change, 1679 awg_media_status, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 1680 MIIF_DOPAUSE); 1681 if (error != 0) { 1682 device_printf(dev, "cannot attach PHY\n"); 1683 return (error); 1684 } 1685 1686 /* Attach ethernet interface */ 1687 ether_ifattach(sc->ifp, eaddr); 1688 1689 return (0); 1690 } 1691 1692 static device_method_t awg_methods[] = { 1693 /* Device interface */ 1694 DEVMETHOD(device_probe, awg_probe), 1695 DEVMETHOD(device_attach, awg_attach), 1696 1697 /* MII interface */ 1698 DEVMETHOD(miibus_readreg, awg_miibus_readreg), 1699 DEVMETHOD(miibus_writereg, awg_miibus_writereg), 1700 DEVMETHOD(miibus_statchg, awg_miibus_statchg), 1701 1702 DEVMETHOD_END 1703 }; 1704 1705 static driver_t awg_driver = { 1706 "awg", 1707 awg_methods, 1708 sizeof(struct awg_softc), 1709 }; 1710 1711 static devclass_t awg_devclass; 1712 1713 DRIVER_MODULE(awg, simplebus, awg_driver, awg_devclass, 0, 0); 1714 DRIVER_MODULE(miibus, awg, miibus_driver, miibus_devclass, 0, 0); 1715 1716 MODULE_DEPEND(awg, ether, 1, 1, 1); 1717 MODULE_DEPEND(awg, miibus, 1, 1, 1); 1718