/*-
 * Copyright (c) 2016 Jared McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Allwinner Gigabit Ethernet MAC (EMAC) controller
 */

#include "opt_device_polling.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/kernel.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/module.h>
#include <sys/taskqueue.h>
#include <sys/gpio.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>

#include <machine/bus.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <arm/allwinner/if_awgreg.h>
#include <arm/allwinner/aw_sid.h>
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/extres/clk/clk.h>
#include <dev/extres/hwreset/hwreset.h>
#include <dev/extres/regulator/regulator.h>

#include "miibus_if.h"
#include "gpio_if.h"

#define	RD4(sc, reg)		bus_read_4((sc)->res[_RES_EMAC], (reg))
#define	WR4(sc, reg, val)	bus_write_4((sc)->res[_RES_EMAC], (reg), (val))

#define	AWG_LOCK(sc)		mtx_lock(&(sc)->mtx)
#define	AWG_UNLOCK(sc)		mtx_unlock(&(sc)->mtx)
#define	AWG_ASSERT_LOCKED(sc)	mtx_assert(&(sc)->mtx, MA_OWNED)
#define	AWG_ASSERT_UNLOCKED(sc)	mtx_assert(&(sc)->mtx, MA_NOTOWNED)

#define	DESC_ALIGN		4
#define	TX_DESC_COUNT		1024
#define	TX_DESC_SIZE		(sizeof(struct emac_desc) * TX_DESC_COUNT)
#define	RX_DESC_COUNT		256
#define	RX_DESC_SIZE		(sizeof(struct emac_desc) * RX_DESC_COUNT)

#define	DESC_OFF(n)		((n) * sizeof(struct emac_desc))
#define	TX_NEXT(n)		(((n) + 1) & (TX_DESC_COUNT - 1))
#define	TX_SKIP(n, o)		(((n) + (o)) & (TX_DESC_COUNT - 1))
#define	RX_NEXT(n)		(((n) + 1) & (RX_DESC_COUNT - 1))

#define	TX_MAX_SEGS		10

#define	SOFT_RST_RETRY		1000
#define	MII_BUSY_RETRY		1000
#define	MDIO_FREQ		2500000

#define	BURST_LEN_DEFAULT	8
#define	RX_TX_PRI_DEFAULT	0
#define	PAUSE_TIME_DEFAULT	0x400
#define	TX_INTERVAL_DEFAULT	64
#define	RX_BATCH_DEFAULT	64

/* syscon EMAC clock register */
#define	EMAC_CLK_EPHY_ADDR	(0x1f << 20)	/* H3 */
#define	EMAC_CLK_EPHY_ADDR_SHIFT 20
#define	EMAC_CLK_EPHY_LED_POL	(1 << 17)	/* H3 */
#define	EMAC_CLK_EPHY_SHUTDOWN	(1 << 16)	/* H3 */
#define	EMAC_CLK_EPHY_SELECT	(1 << 15)	/* H3 */
#define	EMAC_CLK_RMII_EN	(1 << 13)
#define	EMAC_CLK_ETXDC		(0x7 << 10)
#define	EMAC_CLK_ETXDC_SHIFT	10
#define	EMAC_CLK_ERXDC		(0x1f << 5)
#define	EMAC_CLK_ERXDC_SHIFT	5
#define	EMAC_CLK_PIT		(0x1 << 2)
#define	EMAC_CLK_PIT_MII	(0 << 2)
#define	EMAC_CLK_PIT_RGMII	(1 << 2)
#define	EMAC_CLK_SRC		(0x3 << 0)
#define	EMAC_CLK_SRC_MII	(0 << 0)
#define	EMAC_CLK_SRC_EXT_RGMII	(1 << 0)
#define	EMAC_CLK_SRC_RGMII	(2 << 0)

/* Burst length of RX and TX DMA transfers */
static int awg_burst_len = BURST_LEN_DEFAULT;
TUNABLE_INT("hw.awg.burst_len", &awg_burst_len);

/* RX / TX DMA priority. If 1, RX DMA has priority over TX DMA. */
static int awg_rx_tx_pri = RX_TX_PRI_DEFAULT;
TUNABLE_INT("hw.awg.rx_tx_pri", &awg_rx_tx_pri);

/* Pause time field in the transmitted control frame */
static int awg_pause_time = PAUSE_TIME_DEFAULT;
TUNABLE_INT("hw.awg.pause_time", &awg_pause_time);

/* Request a TX interrupt every <n> descriptors */
static int awg_tx_interval = TX_INTERVAL_DEFAULT;
TUNABLE_INT("hw.awg.tx_interval", &awg_tx_interval);

/* Maximum number of mbufs to send to if_input */
static int awg_rx_batch = RX_BATCH_DEFAULT;
TUNABLE_INT("hw.awg.rx_batch", &awg_rx_batch);

enum awg_type {
	EMAC_A83T = 1,
	EMAC_H3,
	EMAC_A64,
};

static struct ofw_compat_data compat_data[] = {
	{ "allwinner,sun8i-a83t-emac",	EMAC_A83T },
	{ "allwinner,sun8i-h3-emac",	EMAC_H3 },
	{ "allwinner,sun50i-a64-emac",	EMAC_A64 },
	{ NULL,				0 }
};

struct awg_bufmap {
	bus_dmamap_t		map;
	struct mbuf		*mbuf;
};

struct awg_txring {
	bus_dma_tag_t		desc_tag;
	bus_dmamap_t		desc_map;
	struct emac_desc	*desc_ring;
	bus_addr_t		desc_ring_paddr;
	bus_dma_tag_t		buf_tag;
	struct awg_bufmap	buf_map[TX_DESC_COUNT];
	u_int			cur, next, queued;
};

struct awg_rxring {
	bus_dma_tag_t		desc_tag;
	bus_dmamap_t		desc_map;
	struct emac_desc	*desc_ring;
	bus_addr_t		desc_ring_paddr;
	bus_dma_tag_t		buf_tag;
	struct awg_bufmap	buf_map[RX_DESC_COUNT];
	u_int			cur;
};

enum {
	_RES_EMAC,
	_RES_IRQ,
	_RES_SYSCON,
	_RES_NITEMS
};

struct awg_softc {
	struct resource		*res[_RES_NITEMS];
	struct mtx		mtx;
	if_t			ifp;
	device_t		miibus;
	struct callout		stat_ch;
	struct task		link_task;
	void			*ih;
	u_int			mdc_div_ratio_m;
	int			link;
	int			if_flags;
	enum awg_type		type;

	struct awg_txring	tx;
	struct awg_rxring	rx;
};

static struct resource_spec awg_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ SYS_RES_MEMORY,	1,	RF_ACTIVE | RF_OPTIONAL },
	{ -1, 0 }
};

static int
awg_miibus_readreg(device_t dev, int phy, int reg)
{
	struct awg_softc *sc;
	int retry, val;

	sc = device_get_softc(dev);
	val = 0;

	WR4(sc, EMAC_MII_CMD,
	    (sc->mdc_div_ratio_m << MDC_DIV_RATIO_M_SHIFT) |
	    (phy << PHY_ADDR_SHIFT) |
	    (reg << PHY_REG_ADDR_SHIFT) |
	    MII_BUSY);
	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		if ((RD4(sc, EMAC_MII_CMD) & MII_BUSY) == 0) {
			val = RD4(sc, EMAC_MII_DATA);
			break;
		}
		DELAY(10);
	}

	if (retry == 0)
		device_printf(dev, "phy read timeout, phy=%d reg=%d\n",
		    phy, reg);

	return (val);
}

static int
awg_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct awg_softc *sc;
	int retry;

	sc = device_get_softc(dev);

	WR4(sc, EMAC_MII_DATA, val);
	WR4(sc, EMAC_MII_CMD,
	    (sc->mdc_div_ratio_m << MDC_DIV_RATIO_M_SHIFT) |
	    (phy << PHY_ADDR_SHIFT) |
	    (reg << PHY_REG_ADDR_SHIFT) |
	    MII_WR | MII_BUSY);
	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		if ((RD4(sc, EMAC_MII_CMD) & MII_BUSY) == 0)
			break;
		DELAY(10);
	}

	if (retry == 0)
		device_printf(dev, "phy write timeout, phy=%d reg=%d\n",
		    phy, reg);

	return (0);
}

static void
awg_update_link_locked(struct awg_softc *sc)
{
	struct mii_data *mii;
	uint32_t val;

	AWG_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0)
		return;
	mii = device_get_softc(sc->miibus);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_T:
		case IFM_1000_SX:
		case IFM_100_TX:
		case IFM_10_T:
			sc->link = 1;
			break;
		default:
			sc->link = 0;
			break;
		}
	} else
		sc->link = 0;

	if (sc->link == 0)
		return;

	val = RD4(sc, EMAC_BASIC_CTL_0);
	val &= ~(BASIC_CTL_SPEED | BASIC_CTL_DUPLEX);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
		val |= BASIC_CTL_SPEED_1000 << BASIC_CTL_SPEED_SHIFT;
	else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
		val |= BASIC_CTL_SPEED_100 << BASIC_CTL_SPEED_SHIFT;
	else
		val |= BASIC_CTL_SPEED_10 << BASIC_CTL_SPEED_SHIFT;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
		val |= BASIC_CTL_DUPLEX;

	WR4(sc, EMAC_BASIC_CTL_0, val);

	val = RD4(sc, EMAC_RX_CTL_0);
	val &= ~RX_FLOW_CTL_EN;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
		val |= RX_FLOW_CTL_EN;
	WR4(sc, EMAC_RX_CTL_0, val);

	val = RD4(sc, EMAC_TX_FLOW_CTL);
	val &= ~(PAUSE_TIME|TX_FLOW_CTL_EN);
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
		val |= TX_FLOW_CTL_EN;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
		val |= awg_pause_time << PAUSE_TIME_SHIFT;
	WR4(sc, EMAC_TX_FLOW_CTL, val);
}

static void
awg_link_task(void *arg, int pending)
{
	struct awg_softc *sc;

	sc = arg;

	AWG_LOCK(sc);
	awg_update_link_locked(sc);
	AWG_UNLOCK(sc);
}

static void
awg_miibus_statchg(device_t dev)
{
	struct awg_softc *sc;

	sc = device_get_softc(dev);

	taskqueue_enqueue(taskqueue_swi, &sc->link_task);
}

static void
awg_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct awg_softc *sc;
	struct mii_data *mii;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);

	AWG_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	AWG_UNLOCK(sc);
}

static int
awg_media_change(if_t ifp)
{
	struct awg_softc *sc;
	struct mii_data *mii;
	int error;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);

	AWG_LOCK(sc);
	error = mii_mediachg(mii);
	AWG_UNLOCK(sc);

	return (error);
}

static void
awg_setup_txdesc(struct awg_softc *sc, int index, int flags, bus_addr_t paddr,
    u_int len)
{
	uint32_t status, size;

	if (paddr == 0 || len == 0) {
		status = 0;
		size = 0;
		--sc->tx.queued;
	} else {
		status = TX_DESC_CTL;
		size = flags | len;
		if ((index & (awg_tx_interval - 1)) == 0)
			size |= TX_INT_CTL;
		++sc->tx.queued;
	}

	sc->tx.desc_ring[index].addr = htole32((uint32_t)paddr);
	sc->tx.desc_ring[index].size = htole32(size);
	sc->tx.desc_ring[index].status = htole32(status);
}

static int
awg_setup_txbuf(struct awg_softc *sc, int index, struct mbuf **mp)
{
	bus_dma_segment_t segs[TX_MAX_SEGS];
	int error, nsegs, cur, i, flags;
	u_int csum_flags;
	struct mbuf *m;

	m = *mp;
	error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag,
	    sc->tx.buf_map[index].map, m, segs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = m_collapse(m, M_NOWAIT, TX_MAX_SEGS);
		if (m == NULL)
			return (0);
		*mp = m;
		error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag,
		    sc->tx.buf_map[index].map, m, segs, &nsegs, BUS_DMA_NOWAIT);
	}
	if (error != 0)
		return (0);

	bus_dmamap_sync(sc->tx.buf_tag, sc->tx.buf_map[index].map,
	    BUS_DMASYNC_PREWRITE);

	flags = TX_FIR_DESC;
	if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) {
		if ((m->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) != 0)
			csum_flags = TX_CHECKSUM_CTL_FULL;
		else
			csum_flags = TX_CHECKSUM_CTL_IP;
		flags |= (csum_flags << TX_CHECKSUM_CTL_SHIFT);
	}

	for (cur = index, i = 0; i < nsegs; i++) {
		sc->tx.buf_map[cur].mbuf = (i == 0 ? m : NULL);
		if (i == nsegs - 1)
			flags |= TX_LAST_DESC;
		awg_setup_txdesc(sc, cur, flags, segs[i].ds_addr,
		    segs[i].ds_len);
		flags &= ~TX_FIR_DESC;
		cur = TX_NEXT(cur);
	}

	return (nsegs);
}

static void
awg_setup_rxdesc(struct awg_softc *sc, int index, bus_addr_t paddr)
{
	uint32_t status, size;

	status = RX_DESC_CTL;
	size = MCLBYTES - 1;

	sc->rx.desc_ring[index].addr = htole32((uint32_t)paddr);
	sc->rx.desc_ring[index].size = htole32(size);
	sc->rx.desc_ring[index].next =
	    htole32(sc->rx.desc_ring_paddr + DESC_OFF(RX_NEXT(index)));
	sc->rx.desc_ring[index].status = htole32(status);
}

static int
awg_setup_rxbuf(struct awg_softc *sc, int index, struct mbuf *m)
{
	bus_dma_segment_t seg;
	int error, nsegs;

	m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_sg(sc->rx.buf_tag,
	    sc->rx.buf_map[index].map, m, &seg, &nsegs, 0);
	if (error != 0)
		return (error);

	bus_dmamap_sync(sc->rx.buf_tag, sc->rx.buf_map[index].map,
	    BUS_DMASYNC_PREREAD);

	sc->rx.buf_map[index].mbuf = m;
	awg_setup_rxdesc(sc, index, seg.ds_addr);

	return (0);
}

static struct mbuf *
awg_alloc_mbufcl(struct awg_softc *sc)
{
	struct mbuf *m;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m != NULL)
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

	return (m);
}

static void
awg_start_locked(struct awg_softc *sc)
{
	struct mbuf *m;
	uint32_t val;
	if_t ifp;
	int cnt, nsegs;

	AWG_ASSERT_LOCKED(sc);

	if (!sc->link)
		return;

	ifp = sc->ifp;

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	for (cnt = 0; ; cnt++) {
		if (sc->tx.queued >= TX_DESC_COUNT - TX_MAX_SEGS) {
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}

		m = if_dequeue(ifp);
		if (m == NULL)
			break;

		nsegs = awg_setup_txbuf(sc, sc->tx.cur, &m);
		if (nsegs == 0) {
			if_sendq_prepend(ifp, m);
			break;
		}
		if_bpfmtap(ifp, m);
		sc->tx.cur = TX_SKIP(sc->tx.cur, nsegs);
	}

	if (cnt != 0) {
		bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Start and run TX DMA */
		val = RD4(sc, EMAC_TX_CTL_1);
		WR4(sc, EMAC_TX_CTL_1, val | TX_DMA_START);
	}
}

static void
awg_start(if_t ifp)
{
	struct awg_softc *sc;

	sc = if_getsoftc(ifp);

	AWG_LOCK(sc);
	awg_start_locked(sc);
	AWG_UNLOCK(sc);
}

static void
awg_tick(void *softc)
{
	struct awg_softc *sc;
	struct mii_data *mii;
	if_t ifp;
	int link;

	sc = softc;
	ifp = sc->ifp;
	mii = device_get_softc(sc->miibus);

	AWG_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	link = sc->link;
	mii_tick(mii);
	if (sc->link && !link)
		awg_start_locked(sc);

	callout_reset(&sc->stat_ch, hz, awg_tick, sc);
}

/* Bit Reversal - http://aggregate.org/MAGIC/#Bit%20Reversal */
static uint32_t
bitrev32(uint32_t x)
{
	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));

	return (x >> 16) | (x << 16);
}

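/*
 * Program the receive filter: write the station (unicast) address,
 * the multicast hash table and the RX frame filter mode (promiscuous,
 * all-multicast or hash-filtered) from the current interface state.
 */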
static void
awg_setup_rxfilter(struct awg_softc *sc)
{
	uint32_t val, crc, hashreg, hashbit, hash[2], machi, maclo;
	int mc_count, mcnt, i;
	uint8_t *eaddr, *mta;
	if_t ifp;

	AWG_ASSERT_LOCKED(sc);

	ifp = sc->ifp;
	val = 0;
	hash[0] = hash[1] = 0;

	mc_count = if_multiaddr_count(ifp, -1);

	if (if_getflags(ifp) & IFF_PROMISC)
		val |= DIS_ADDR_FILTER;
	else if (if_getflags(ifp) & IFF_ALLMULTI) {
		val |= RX_ALL_MULTICAST;
		hash[0] = hash[1] = ~0;
	} else if (mc_count > 0) {
		val |= HASH_MULTICAST;

		mta = malloc(sizeof(unsigned char) * ETHER_ADDR_LEN * mc_count,
		    M_DEVBUF, M_NOWAIT);
		if (mta == NULL) {
			if_printf(ifp,
			    "failed to allocate temporary multicast list\n");
			return;
		}

		if_multiaddr_array(ifp, mta, &mcnt, mc_count);
		for (i = 0; i < mcnt; i++) {
			crc = ether_crc32_le(mta + (i * ETHER_ADDR_LEN),
			    ETHER_ADDR_LEN) & 0x7f;
			crc = bitrev32(~crc) >> 26;
			hashreg = (crc >> 5);
			hashbit = (crc & 0x1f);
			hash[hashreg] |= (1 << hashbit);
		}

		free(mta, M_DEVBUF);
	}

	/* Write our unicast address */
	eaddr = IF_LLADDR(ifp);
	machi = (eaddr[5] << 8) | eaddr[4];
	maclo = (eaddr[3] << 24) | (eaddr[2] << 16) | (eaddr[1] << 8) |
	    (eaddr[0] << 0);
	WR4(sc, EMAC_ADDR_HIGH(0), machi);
	WR4(sc, EMAC_ADDR_LOW(0), maclo);

	/* Multicast hash filters */
	WR4(sc, EMAC_RX_HASH_0, hash[1]);
	WR4(sc, EMAC_RX_HASH_1, hash[0]);

	/* RX frame filter config */
	WR4(sc, EMAC_RX_FRM_FLT, val);
}

static void
awg_enable_intr(struct awg_softc *sc)
{
	/* Enable interrupts */
	WR4(sc, EMAC_INT_EN, RX_INT_EN | TX_INT_EN | TX_BUF_UA_INT_EN);
}

static void
awg_disable_intr(struct awg_softc *sc)
{
	/* Disable interrupts */
	WR4(sc, EMAC_INT_EN, 0);
}

static void
awg_init_locked(struct awg_softc *sc)
{
	struct mii_data *mii;
	uint32_t val;
	if_t ifp;

	mii = device_get_softc(sc->miibus);
	ifp = sc->ifp;

	AWG_ASSERT_LOCKED(sc);

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		return;

	awg_setup_rxfilter(sc);

	/* Configure DMA burst length and priorities */
	val = awg_burst_len << BASIC_CTL_BURST_LEN_SHIFT;
	if (awg_rx_tx_pri)
		val |= BASIC_CTL_RX_TX_PRI;
	WR4(sc, EMAC_BASIC_CTL_1, val);

	/* Enable interrupts */
#ifdef DEVICE_POLLING
	if ((if_getcapenable(ifp) & IFCAP_POLLING) == 0)
		awg_enable_intr(sc);
	else
		awg_disable_intr(sc);
#else
	awg_enable_intr(sc);
#endif

	/* Enable transmit DMA */
	val = RD4(sc, EMAC_TX_CTL_1);
	WR4(sc, EMAC_TX_CTL_1, val | TX_DMA_EN | TX_MD | TX_NEXT_FRAME);

	/* Enable receive DMA */
	val = RD4(sc, EMAC_RX_CTL_1);
	WR4(sc, EMAC_RX_CTL_1, val | RX_DMA_EN | RX_MD);

	/* Enable transmitter */
	val = RD4(sc, EMAC_TX_CTL_0);
	WR4(sc, EMAC_TX_CTL_0, val | TX_EN);

	/* Enable receiver */
	val = RD4(sc, EMAC_RX_CTL_0);
	WR4(sc, EMAC_RX_CTL_0, val | RX_EN | CHECK_CRC);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	mii_mediachg(mii);
	callout_reset(&sc->stat_ch, hz, awg_tick, sc);
}

static void
awg_init(void *softc)
{
	struct awg_softc *sc;

	sc = softc;

	AWG_LOCK(sc);
	awg_init_locked(sc);
	AWG_UNLOCK(sc);
}

static void
awg_stop(struct awg_softc *sc)
{
	if_t ifp;
	uint32_t val;

	AWG_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	callout_stop(&sc->stat_ch);

	/* Stop transmit DMA and flush data in the TX FIFO */
	val = RD4(sc, EMAC_TX_CTL_1);
	val &= ~TX_DMA_EN;
	val |= FLUSH_TX_FIFO;
	WR4(sc, EMAC_TX_CTL_1, val);

	/* Disable transmitter */
	val = RD4(sc, EMAC_TX_CTL_0);
	WR4(sc, EMAC_TX_CTL_0, val & ~TX_EN);

	/* Disable receiver */
	val = RD4(sc, EMAC_RX_CTL_0);
	WR4(sc, EMAC_RX_CTL_0, val & ~RX_EN);

	/* Disable interrupts */
	awg_disable_intr(sc);

	/* Disable transmit DMA */
	val = RD4(sc, EMAC_TX_CTL_1);
	WR4(sc, EMAC_TX_CTL_1, val & ~TX_DMA_EN);

	/* Disable receive DMA */
	val = RD4(sc, EMAC_RX_CTL_1);
	WR4(sc, EMAC_RX_CTL_1, val & ~RX_DMA_EN);

	sc->link = 0;

	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}

static int
awg_rxintr(struct awg_softc *sc)
{
	if_t ifp;
	struct mbuf *m, *m0, *mh, *mt;
	int error, index, len, cnt, npkt;
	uint32_t status;

	ifp = sc->ifp;
	mh = mt = NULL;
	cnt = 0;
	npkt = 0;

	bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (index = sc->rx.cur; ; index = RX_NEXT(index)) {
		status = le32toh(sc->rx.desc_ring[index].status);
		if ((status & RX_DESC_CTL) != 0)
			break;

		bus_dmamap_sync(sc->rx.buf_tag, sc->rx.buf_map[index].map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->rx.buf_tag, sc->rx.buf_map[index].map);

		len = (status & RX_FRM_LEN) >> RX_FRM_LEN_SHIFT;
		if (len != 0) {
			m = sc->rx.buf_map[index].mbuf;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = len;
			m->m_len = len;
			if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

			if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0 &&
			    (status & RX_FRM_TYPE) != 0) {
				m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
				if ((status & RX_HEADER_ERR) == 0)
					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				if ((status & RX_PAYLOAD_ERR) == 0) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

			m->m_nextpkt = NULL;
			if (mh == NULL)
				mh = m;
			else
				mt->m_nextpkt = m;
			mt = m;
			++cnt;
			++npkt;

			if (cnt == awg_rx_batch) {
				AWG_UNLOCK(sc);
				if_input(ifp, mh);
				AWG_LOCK(sc);
				mh = mt = NULL;
				cnt = 0;
			}

		}

		if ((m0 = awg_alloc_mbufcl(sc)) != NULL) {
			error = awg_setup_rxbuf(sc, index, m0);
			if (error != 0) {
				/* XXX hole in RX ring */
			}
		} else
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
	}

	if (index != sc->rx.cur) {
		bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map,
		    BUS_DMASYNC_PREWRITE);
	}

	if (mh != NULL) {
		AWG_UNLOCK(sc);
		if_input(ifp, mh);
		AWG_LOCK(sc);
	}

	sc->rx.cur = index;

	return (npkt);
}

static void
awg_txintr(struct awg_softc *sc)
{
	struct awg_bufmap *bmap;
	struct emac_desc *desc;
	uint32_t status;
	if_t ifp;
	int i;

	AWG_ASSERT_LOCKED(sc);

	bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	ifp = sc->ifp;
	for (i = sc->tx.next; sc->tx.queued > 0; i = TX_NEXT(i)) {
		desc = &sc->tx.desc_ring[i];
		status = le32toh(desc->status);
		if ((status & TX_DESC_CTL) != 0)
			break;
		bmap = &sc->tx.buf_map[i];
		if (bmap->mbuf != NULL) {
			bus_dmamap_sync(sc->tx.buf_tag, bmap->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->tx.buf_tag, bmap->map);
			m_freem(bmap->mbuf);
			bmap->mbuf = NULL;
		}
		awg_setup_txdesc(sc, i, 0, 0, 0);
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
	}

	sc->tx.next = i;

	bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map,
	    BUS_DMASYNC_PREWRITE);
}

static void
awg_intr(void *arg)
{
	struct awg_softc *sc;
	uint32_t val;

	sc = arg;

	AWG_LOCK(sc);
	val = RD4(sc, EMAC_INT_STA);
	WR4(sc, EMAC_INT_STA, val);

	if (val & RX_INT)
		awg_rxintr(sc);

	if (val & (TX_INT|TX_BUF_UA_INT)) {
		awg_txintr(sc);
		if (!if_sendq_empty(sc->ifp))
			awg_start_locked(sc);
	}

	AWG_UNLOCK(sc);
}

#ifdef DEVICE_POLLING
static int
awg_poll(if_t ifp, enum poll_cmd cmd, int count)
{
	struct awg_softc *sc;
	uint32_t val;
	int rx_npkts;

	sc = if_getsoftc(ifp);
	rx_npkts = 0;

	AWG_LOCK(sc);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
		AWG_UNLOCK(sc);
		return (0);
	}

	rx_npkts = awg_rxintr(sc);
	awg_txintr(sc);
	if (!if_sendq_empty(ifp))
		awg_start_locked(sc);

	if (cmd == POLL_AND_CHECK_STATUS) {
		val = RD4(sc, EMAC_INT_STA);
		if (val != 0)
			WR4(sc, EMAC_INT_STA, val);
	}

	AWG_UNLOCK(sc);

	return (rx_npkts);
}
#endif

static int
awg_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct awg_softc *sc;
	struct mii_data *mii;
	struct ifreq *ifr;
	int flags, mask, error;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);
	ifr = (struct ifreq *)data;
	error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		AWG_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				flags = if_getflags(ifp) ^ sc->if_flags;
				if ((flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0)
					awg_setup_rxfilter(sc);
			} else
				awg_init_locked(sc);
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				awg_stop(sc);
		}
		sc->if_flags = if_getflags(ifp);
		AWG_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			AWG_LOCK(sc);
			awg_setup_rxfilter(sc);
			AWG_UNLOCK(sc);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
				error = ether_poll_register(awg_poll, ifp);
				if (error != 0)
					break;
				AWG_LOCK(sc);
				awg_disable_intr(sc);
				if_setcapenablebit(ifp, IFCAP_POLLING, 0);
				AWG_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				AWG_LOCK(sc);
				awg_enable_intr(sc);
				if_setcapenablebit(ifp, 0, IFCAP_POLLING);
				AWG_UNLOCK(sc);
			}
		}
#endif
		if (mask & IFCAP_VLAN_MTU)
			if_togglecapenable(ifp, IFCAP_VLAN_MTU);
		if (mask & IFCAP_RXCSUM)
			if_togglecapenable(ifp, IFCAP_RXCSUM);
		if (mask & IFCAP_TXCSUM)
			if_togglecapenable(ifp, IFCAP_TXCSUM);
		if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0)
			if_sethwassistbits(ifp, CSUM_IP | CSUM_UDP | CSUM_TCP, 0);
		else
			if_sethwassistbits(ifp, 0, CSUM_IP | CSUM_UDP | CSUM_TCP);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

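/*
 * Configure the PHY interface (MII, RMII or RGMII) from the "phy-mode"
 * FDT property, either through the syscon EMAC clock register or by
 * reparenting and enabling the EMAC TX clock.
 */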
static int
awg_setup_phy(device_t dev)
{
	struct awg_softc *sc;
	clk_t clk_tx, clk_tx_parent;
	const char *tx_parent_name;
	char *phy_type;
	phandle_t node;
	uint32_t reg, tx_delay, rx_delay;
	int error;

	sc = device_get_softc(dev);
	node = ofw_bus_get_node(dev);

	if (OF_getprop_alloc(node, "phy-mode", 1, (void **)&phy_type) == 0)
		return (0);

	if (bootverbose)
		device_printf(dev, "PHY type: %s, conf mode: %s\n", phy_type,
		    sc->res[_RES_SYSCON] != NULL ? "reg" : "clk");

	if (sc->res[_RES_SYSCON] != NULL) {
		reg = bus_read_4(sc->res[_RES_SYSCON], 0);
		reg &= ~(EMAC_CLK_PIT | EMAC_CLK_SRC | EMAC_CLK_RMII_EN);
		if (strcmp(phy_type, "rgmii") == 0)
			reg |= EMAC_CLK_PIT_RGMII | EMAC_CLK_SRC_RGMII;
		else if (strcmp(phy_type, "rmii") == 0)
			reg |= EMAC_CLK_RMII_EN;
		else
			reg |= EMAC_CLK_PIT_MII | EMAC_CLK_SRC_MII;

		if (OF_getencprop(node, "tx-delay", &tx_delay,
		    sizeof(tx_delay)) > 0) {
			reg &= ~EMAC_CLK_ETXDC;
			reg |= (tx_delay << EMAC_CLK_ETXDC_SHIFT);
		}
		if (OF_getencprop(node, "rx-delay", &rx_delay,
		    sizeof(rx_delay)) > 0) {
			reg &= ~EMAC_CLK_ERXDC;
			reg |= (rx_delay << EMAC_CLK_ERXDC_SHIFT);
		}

		if (sc->type == EMAC_H3) {
			if (OF_hasprop(node, "allwinner,use-internal-phy")) {
				reg |= EMAC_CLK_EPHY_SELECT;
				reg &= ~EMAC_CLK_EPHY_SHUTDOWN;
				if (OF_hasprop(node,
				    "allwinner,leds-active-low"))
					reg |= EMAC_CLK_EPHY_LED_POL;
				else
					reg &= ~EMAC_CLK_EPHY_LED_POL;

				/* Set internal PHY addr to 1 */
				reg &= ~EMAC_CLK_EPHY_ADDR;
				reg |= (1 << EMAC_CLK_EPHY_ADDR_SHIFT);
			} else {
				reg &= ~EMAC_CLK_EPHY_SELECT;
			}
		}

		if (bootverbose)
			device_printf(dev, "EMAC clock: 0x%08x\n", reg);
		bus_write_4(sc->res[_RES_SYSCON], 0, reg);
	} else {
		if (strcmp(phy_type, "rgmii") == 0)
			tx_parent_name = "emac_int_tx";
		else
			tx_parent_name = "mii_phy_tx";

		/* Get the TX clock */
		error = clk_get_by_ofw_name(dev, 0, "tx", &clk_tx);
		if (error != 0) {
			device_printf(dev, "cannot get tx clock\n");
			goto fail;
		}

		/* Find the desired parent clock based on phy-mode property */
		error = clk_get_by_name(dev, tx_parent_name, &clk_tx_parent);
		if (error != 0) {
			device_printf(dev, "cannot get clock '%s'\n",
			    tx_parent_name);
			goto fail;
		}

		/* Set TX clock parent */
		error = clk_set_parent_by_clk(clk_tx, clk_tx_parent);
		if (error != 0) {
			device_printf(dev, "cannot set tx clock parent\n");
			goto fail;
		}

		/* Enable TX clock */
		error = clk_enable(clk_tx);
		if (error != 0) {
			device_printf(dev, "cannot enable tx clock\n");
			goto fail;
		}
	}

	error = 0;

fail:
	OF_prop_free(phy_type);
	return (error);
}

static int
awg_setup_extres(device_t dev)
{
	struct awg_softc *sc;
	hwreset_t rst_ahb, rst_ephy;
	clk_t clk_ahb, clk_ephy;
	regulator_t reg;
	phandle_t node;
	uint64_t freq;
	int error, div;

	sc = device_get_softc(dev);
	node = ofw_bus_get_node(dev);
	rst_ahb = rst_ephy = NULL;
	clk_ahb = clk_ephy = NULL;
	reg = NULL;

	/* Get AHB clock and reset resources */
	error = hwreset_get_by_ofw_name(dev, 0, "ahb", &rst_ahb);
	if (error != 0) {
		device_printf(dev, "cannot get ahb reset\n");
		goto fail;
	}
	if (hwreset_get_by_ofw_name(dev, 0, "ephy", &rst_ephy) != 0)
		rst_ephy = NULL;
	error = clk_get_by_ofw_name(dev, 0, "ahb", &clk_ahb);
	if (error != 0) {
		device_printf(dev, "cannot get ahb clock\n");
		goto fail;
	}
	if (clk_get_by_ofw_name(dev, 0, "ephy", &clk_ephy) != 0)
		clk_ephy = NULL;

	/* Configure PHY for MII or RGMII mode */
	if (awg_setup_phy(dev) != 0)
		goto fail;

	/* Enable clocks */
	error = clk_enable(clk_ahb);
	if (error != 0) {
		device_printf(dev, "cannot enable ahb clock\n");
		goto fail;
	}
	if (clk_ephy != NULL) {
		error = clk_enable(clk_ephy);
		if (error != 0) {
			device_printf(dev, "cannot enable ephy clock\n");
			goto fail;
		}
	}

	/* De-assert reset */
	error = hwreset_deassert(rst_ahb);
	if (error != 0) {
		device_printf(dev, "cannot de-assert ahb reset\n");
		goto fail;
	}
	if (rst_ephy != NULL) {
		error = hwreset_deassert(rst_ephy);
		if (error != 0) {
			device_printf(dev, "cannot de-assert ephy reset\n");
			goto fail;
		}
	}

	/* Enable PHY regulator if applicable */
	if (regulator_get_by_ofw_property(dev, 0, "phy-supply", &reg) == 0) {
		error = regulator_enable(reg);
		if (error != 0) {
			device_printf(dev, "cannot enable PHY regulator\n");
			goto fail;
		}
	}

	/* Determine MDC clock divide ratio based on AHB clock */
	error = clk_get_freq(clk_ahb, &freq);
	if (error != 0) {
		device_printf(dev, "cannot get AHB clock frequency\n");
		goto fail;
	}
	div = freq / MDIO_FREQ;
	if (div <= 16)
		sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_16;
	else if (div <= 32)
		sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_32;
	else if (div <= 64)
		sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_64;
	else if (div <= 128)
		sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_128;
	else {
		device_printf(dev, "cannot determine MDC clock divide ratio\n");
		error = ENXIO;
		goto fail;
	}

	if (bootverbose)
		device_printf(dev, "AHB frequency %ju Hz, MDC div: 0x%x\n",
		    (uintmax_t)freq, sc->mdc_div_ratio_m);

	return (0);

fail:
	if (reg != NULL)
		regulator_release(reg);
	if (clk_ephy != NULL)
		clk_release(clk_ephy);
	if (clk_ahb != NULL)
		clk_release(clk_ahb);
	if (rst_ephy != NULL)
		hwreset_release(rst_ephy);
	if (rst_ahb != NULL)
		hwreset_release(rst_ahb);
	return (error);
}

static void
awg_get_eaddr(device_t dev, uint8_t *eaddr)
{
	struct awg_softc *sc;
	uint32_t maclo, machi, rnd;
	u_char rootkey[16];

	sc = device_get_softc(dev);

	machi = RD4(sc, EMAC_ADDR_HIGH(0)) & 0xffff;
	maclo = RD4(sc, EMAC_ADDR_LOW(0));

	if (maclo == 0xffffffff && machi == 0xffff) {
		/* MAC address in hardware is invalid, create one */
		if (aw_sid_get_rootkey(rootkey) == 0 &&
		    (rootkey[3] | rootkey[12] | rootkey[13] | rootkey[14] |
		     rootkey[15]) != 0) {
			/* MAC address is derived from the root key in SID */
			maclo = (rootkey[13] << 24) | (rootkey[12] << 16) |
			    (rootkey[3] << 8) | 0x02;
			machi = (rootkey[15] << 8) | rootkey[14];
		} else {
			/* Create one */
			rnd = arc4random();
			maclo = 0x00f2 | (rnd & 0xffff0000);
			machi = rnd & 0xffff;
		}
	}

	eaddr[0] = maclo & 0xff;
	eaddr[1] = (maclo >> 8) & 0xff;
	eaddr[2] = (maclo >> 16) & 0xff;
	eaddr[3] = (maclo >> 24) & 0xff;
	eaddr[4] = machi & 0xff;
	eaddr[5] = (machi >> 8) & 0xff;
}

#ifdef AWG_DEBUG
static void
awg_dump_regs(device_t dev)
{
	static const struct {
		const char *name;
		u_int reg;
	} regs[] = {
		{ "BASIC_CTL_0", EMAC_BASIC_CTL_0 },
		{ "BASIC_CTL_1", EMAC_BASIC_CTL_1 },
		{ "INT_STA", EMAC_INT_STA },
		{ "INT_EN", EMAC_INT_EN },
		{ "TX_CTL_0", EMAC_TX_CTL_0 },
		{ "TX_CTL_1", EMAC_TX_CTL_1 },
		{ "TX_FLOW_CTL", EMAC_TX_FLOW_CTL },
		{ "TX_DMA_LIST", EMAC_TX_DMA_LIST },
		{ "RX_CTL_0", EMAC_RX_CTL_0 },
		{ "RX_CTL_1", EMAC_RX_CTL_1 },
		{ "RX_DMA_LIST", EMAC_RX_DMA_LIST },
		{ "RX_FRM_FLT", EMAC_RX_FRM_FLT },
		{ "RX_HASH_0", EMAC_RX_HASH_0 },
		{ "RX_HASH_1", EMAC_RX_HASH_1 },
		{ "MII_CMD", EMAC_MII_CMD },
		{ "ADDR_HIGH0", EMAC_ADDR_HIGH(0) },
		{ "ADDR_LOW0", EMAC_ADDR_LOW(0) },
		{ "TX_DMA_STA", EMAC_TX_DMA_STA },
		{ "TX_DMA_CUR_DESC", EMAC_TX_DMA_CUR_DESC },
		{ "TX_DMA_CUR_BUF", EMAC_TX_DMA_CUR_BUF },
		{ "RX_DMA_STA", EMAC_RX_DMA_STA },
		{ "RX_DMA_CUR_DESC", EMAC_RX_DMA_CUR_DESC },
		{ "RX_DMA_CUR_BUF", EMAC_RX_DMA_CUR_BUF },
		{ "RGMII_STA", EMAC_RGMII_STA },
	};
	struct awg_softc *sc;
	unsigned int n;

	sc = device_get_softc(dev);

	for (n = 0; n < nitems(regs); n++)
		device_printf(dev, " %-20s %08x\n", regs[n].name,
		    RD4(sc, regs[n].reg));
}
#endif

#define	GPIO_ACTIVE_LOW		1

static int
awg_phy_reset(device_t dev)
{
	pcell_t gpio_prop[4], delay_prop[3];
	phandle_t node, gpio_node;
	device_t gpio;
	uint32_t pin, flags;
	uint32_t pin_value;

	node = ofw_bus_get_node(dev);
	if (OF_getencprop(node, "allwinner,reset-gpio", gpio_prop,
	    sizeof(gpio_prop)) <= 0)
		return (0);

	if (OF_getencprop(node, "allwinner,reset-delays-us", delay_prop,
	    sizeof(delay_prop)) <= 0)
		return (ENXIO);

	gpio_node = OF_node_from_xref(gpio_prop[0]);
	if ((gpio = OF_device_from_xref(gpio_prop[0])) == NULL)
		return (ENXIO);

	if (GPIO_MAP_GPIOS(gpio, node, gpio_node, nitems(gpio_prop) - 1,
	    gpio_prop + 1, &pin, &flags) != 0)
		return (ENXIO);

	pin_value = GPIO_PIN_LOW;
	if (OF_hasprop(node, "allwinner,reset-active-low"))
		pin_value = GPIO_PIN_HIGH;

	if (flags & GPIO_ACTIVE_LOW)
		pin_value = !pin_value;

	GPIO_PIN_SETFLAGS(gpio, pin, GPIO_PIN_OUTPUT);
	GPIO_PIN_SET(gpio, pin, pin_value);
	DELAY(delay_prop[0]);
	GPIO_PIN_SET(gpio, pin, !pin_value);
	DELAY(delay_prop[1]);
	GPIO_PIN_SET(gpio, pin, pin_value);
	DELAY(delay_prop[2]);

	return (0);
}

static int
awg_reset(device_t dev)
{
	struct awg_softc *sc;
	int retry;

	sc = device_get_softc(dev);

	/* Reset PHY if necessary */
	if (awg_phy_reset(dev) != 0) {
		device_printf(dev, "failed to reset PHY\n");
		return (ENXIO);
	}

	/* Soft reset all registers and logic */
	WR4(sc, EMAC_BASIC_CTL_1, BASIC_CTL_SOFT_RST);

	/* Wait for soft reset bit to self-clear */
	for (retry = SOFT_RST_RETRY; retry > 0; retry--) {
		if ((RD4(sc, EMAC_BASIC_CTL_1) & BASIC_CTL_SOFT_RST) == 0)
			break;
		DELAY(10);
	}
	if (retry == 0) {
		device_printf(dev, "soft reset timed out\n");
#ifdef AWG_DEBUG
		awg_dump_regs(dev);
#endif
		return (ETIMEDOUT);
	}

	return (0);
}

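/* bus_dmamap_load() callback: record the physical address of a descriptor ring. */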
static void
awg_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	if (error != 0)
		return;
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

static int
awg_setup_dma(device_t dev)
{
	struct awg_softc *sc;
	struct mbuf *m;
	int error, i;

	sc = device_get_softc(dev);

	/* Setup TX ring */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    DESC_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    TX_DESC_SIZE, 1,		/* maxsize, nsegs */
	    TX_DESC_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->tx.desc_tag);
	if (error != 0) {
		device_printf(dev, "cannot create TX descriptor ring tag\n");
		return (error);
	}

	error = bus_dmamem_alloc(sc->tx.desc_tag, (void **)&sc->tx.desc_ring,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->tx.desc_map);
	if (error != 0) {
		device_printf(dev, "cannot allocate TX descriptor ring\n");
		return (error);
	}

	error = bus_dmamap_load(sc->tx.desc_tag, sc->tx.desc_map,
	    sc->tx.desc_ring, TX_DESC_SIZE, awg_dmamap_cb,
	    &sc->tx.desc_ring_paddr, 0);
	if (error != 0) {
		device_printf(dev, "cannot load TX descriptor ring\n");
		return (error);
	}

	for (i = 0; i < TX_DESC_COUNT; i++)
		sc->tx.desc_ring[i].next =
		    htole32(sc->tx.desc_ring_paddr + DESC_OFF(TX_NEXT(i)));

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, TX_MAX_SEGS,	/* maxsize, nsegs */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->tx.buf_tag);
	if (error != 0) {
		device_printf(dev, "cannot create TX buffer tag\n");
		return (error);
	}

	sc->tx.queued = TX_DESC_COUNT;
	for (i = 0; i < TX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->tx.buf_tag, 0,
		    &sc->tx.buf_map[i].map);
		if (error != 0) {
			device_printf(dev, "cannot create TX buffer map\n");
			return (error);
		}
		awg_setup_txdesc(sc, i, 0, 0, 0);
	}

	/* Setup RX ring */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    DESC_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    RX_DESC_SIZE, 1,		/* maxsize, nsegs */
	    RX_DESC_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rx.desc_tag);
	if (error != 0) {
		device_printf(dev, "cannot create RX descriptor ring tag\n");
		return (error);
	}

	error = bus_dmamem_alloc(sc->rx.desc_tag, (void **)&sc->rx.desc_ring,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->rx.desc_map);
	if (error != 0) {
		device_printf(dev, "cannot allocate RX descriptor ring\n");
		return (error);
	}

	error = bus_dmamap_load(sc->rx.desc_tag, sc->rx.desc_map,
	    sc->rx.desc_ring, RX_DESC_SIZE, awg_dmamap_cb,
	    &sc->rx.desc_ring_paddr, 0);
	if (error != 0) {
		device_printf(dev, "cannot load RX descriptor ring\n");
		return (error);
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, 1,		/* maxsize, nsegs */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rx.buf_tag);
	if (error != 0) {
		device_printf(dev, "cannot create RX buffer tag\n");
		return (error);
	}

	for (i = 0; i < RX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->rx.buf_tag, 0,
		    &sc->rx.buf_map[i].map);
		if (error != 0) {
			device_printf(dev, "cannot create RX buffer map\n");
			return (error);
		}
		if ((m = awg_alloc_mbufcl(sc)) == NULL) {
			device_printf(dev, "cannot allocate RX mbuf\n");
			return (ENOMEM);
		}
		error = awg_setup_rxbuf(sc, i, m);
		if (error != 0) {
			device_printf(dev, "cannot create RX buffer\n");
			return (error);
		}
	}
	bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map,
	    BUS_DMASYNC_PREWRITE);

	/* Write transmit and receive descriptor base address registers */
	WR4(sc, EMAC_TX_DMA_LIST, sc->tx.desc_ring_paddr);
	WR4(sc, EMAC_RX_DMA_LIST, sc->rx.desc_ring_paddr);

	return (0);
}

static int
awg_probe(device_t dev)
{
	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
		return (ENXIO);

	device_set_desc(dev, "Allwinner Gigabit Ethernet");
	return (BUS_PROBE_DEFAULT);
}

static int
awg_attach(device_t dev)
{
	uint8_t eaddr[ETHER_ADDR_LEN];
	struct awg_softc *sc;
	phandle_t node;
	int error;

	sc = device_get_softc(dev);
	sc->type = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
	node = ofw_bus_get_node(dev);

	if (bus_alloc_resources(dev, awg_spec, sc->res) != 0) {
		device_printf(dev, "cannot allocate resources for device\n");
		return (ENXIO);
	}

	mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
	callout_init_mtx(&sc->stat_ch, &sc->mtx, 0);
	TASK_INIT(&sc->link_task, 0, awg_link_task, sc);

	/* Setup clocks and regulators */
	error = awg_setup_extres(dev);
	if (error != 0)
		return (error);

	/* Read MAC address before resetting the chip */
	awg_get_eaddr(dev, eaddr);

	/* Soft reset EMAC core */
	error = awg_reset(dev);
	if (error != 0)
		return (error);

	/* Setup DMA descriptors */
	error = awg_setup_dma(dev);
	if (error != 0)
		return (error);

	/* Install interrupt handler */
	error = bus_setup_intr(dev, sc->res[_RES_IRQ],
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, awg_intr, sc, &sc->ih);
	if (error != 0) {
		device_printf(dev, "cannot setup interrupt handler\n");
		return (error);
	}

	/* Setup ethernet interface */
	sc->ifp = if_alloc(IFT_ETHER);
	if_setsoftc(sc->ifp, sc);
	if_initname(sc->ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(sc->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setstartfn(sc->ifp, awg_start);
	if_setioctlfn(sc->ifp, awg_ioctl);
	if_setinitfn(sc->ifp, awg_init);
	if_setsendqlen(sc->ifp, TX_DESC_COUNT - 1);
	if_setsendqready(sc->ifp);
	if_sethwassist(sc->ifp, CSUM_IP | CSUM_UDP | CSUM_TCP);
	if_setcapabilities(sc->ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM);
	if_setcapenable(sc->ifp, if_getcapabilities(sc->ifp));
#ifdef DEVICE_POLLING
	if_setcapabilitiesbit(sc->ifp, IFCAP_POLLING, 0);
#endif

	/* Attach MII driver */
	error = mii_attach(dev, &sc->miibus, sc->ifp, awg_media_change,
	    awg_media_status, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);
	if (error != 0) {
		device_printf(dev, "cannot attach PHY\n");
		return (error);
	}

	/* Attach ethernet interface */
	ether_ifattach(sc->ifp, eaddr);

	return (0);
}

static device_method_t awg_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		awg_probe),
	DEVMETHOD(device_attach,	awg_attach),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	awg_miibus_readreg),
	DEVMETHOD(miibus_writereg,	awg_miibus_writereg),
	DEVMETHOD(miibus_statchg,	awg_miibus_statchg),

	DEVMETHOD_END
};

static driver_t awg_driver = {
	"awg",
	awg_methods,
	sizeof(struct awg_softc),
};

static devclass_t awg_devclass;

DRIVER_MODULE(awg, simplebus, awg_driver, awg_devclass, 0, 0);
DRIVER_MODULE(miibus, awg, miibus_driver, miibus_devclass, 0, 0);

MODULE_DEPEND(awg, ether, 1, 1, 1);
MODULE_DEPEND(awg, miibus, 1, 1, 1);