1 /*- 2 * Copyright (c) 2016 Jared McNeill <jmcneill@invisible.ca> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 15 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 16 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 17 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 18 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 19 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 20 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 21 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 22 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 
25 * 26 * $FreeBSD$ 27 */ 28 29 /* 30 * Allwinner Gigabit Ethernet MAC (EMAC) controller 31 */ 32 33 #include "opt_device_polling.h" 34 35 #include <sys/cdefs.h> 36 __FBSDID("$FreeBSD$"); 37 38 #include <sys/param.h> 39 #include <sys/systm.h> 40 #include <sys/bus.h> 41 #include <sys/rman.h> 42 #include <sys/kernel.h> 43 #include <sys/endian.h> 44 #include <sys/mbuf.h> 45 #include <sys/socket.h> 46 #include <sys/sockio.h> 47 #include <sys/module.h> 48 #include <sys/taskqueue.h> 49 #include <sys/gpio.h> 50 51 #include <net/bpf.h> 52 #include <net/if.h> 53 #include <net/ethernet.h> 54 #include <net/if_dl.h> 55 #include <net/if_media.h> 56 #include <net/if_types.h> 57 #include <net/if_var.h> 58 59 #include <machine/bus.h> 60 61 #include <dev/ofw/ofw_bus.h> 62 #include <dev/ofw/ofw_bus_subr.h> 63 64 #include <arm/allwinner/if_awgreg.h> 65 #include <dev/mii/mii.h> 66 #include <dev/mii/miivar.h> 67 68 #include <dev/extres/clk/clk.h> 69 #include <dev/extres/hwreset/hwreset.h> 70 #include <dev/extres/regulator/regulator.h> 71 72 #include "miibus_if.h" 73 #include "gpio_if.h" 74 75 #define RD4(sc, reg) bus_read_4((sc)->res[_RES_EMAC], (reg)) 76 #define WR4(sc, reg, val) bus_write_4((sc)->res[_RES_EMAC], (reg), (val)) 77 78 #define AWG_LOCK(sc) mtx_lock(&(sc)->mtx) 79 #define AWG_UNLOCK(sc) mtx_unlock(&(sc)->mtx); 80 #define AWG_ASSERT_LOCKED(sc) mtx_assert(&(sc)->mtx, MA_OWNED) 81 #define AWG_ASSERT_UNLOCKED(sc) mtx_assert(&(sc)->mtx, MA_NOTOWNED) 82 83 #define DESC_ALIGN 4 84 #define TX_DESC_COUNT 1024 85 #define TX_DESC_SIZE (sizeof(struct emac_desc) * TX_DESC_COUNT) 86 #define RX_DESC_COUNT 256 87 #define RX_DESC_SIZE (sizeof(struct emac_desc) * RX_DESC_COUNT) 88 89 #define DESC_OFF(n) ((n) * sizeof(struct emac_desc)) 90 #define TX_NEXT(n) (((n) + 1) & (TX_DESC_COUNT - 1)) 91 #define TX_SKIP(n, o) (((n) + (o)) & (TX_DESC_COUNT - 1)) 92 #define RX_NEXT(n) (((n) + 1) & (RX_DESC_COUNT - 1)) 93 94 #define TX_MAX_SEGS 10 95 96 #define SOFT_RST_RETRY 1000 97 #define 
MII_BUSY_RETRY 1000 98 #define MDIO_FREQ 2500000 99 100 #define BURST_LEN_DEFAULT 8 101 #define RX_TX_PRI_DEFAULT 0 102 #define PAUSE_TIME_DEFAULT 0x400 103 #define TX_INTERVAL_DEFAULT 64 104 #define RX_BATCH_DEFAULT 64 105 106 /* syscon EMAC clock register */ 107 #define EMAC_CLK_EPHY_ADDR (0x1f << 20) /* H3 */ 108 #define EMAC_CLK_EPHY_ADDR_SHIFT 20 109 #define EMAC_CLK_EPHY_LED_POL (1 << 17) /* H3 */ 110 #define EMAC_CLK_EPHY_SHUTDOWN (1 << 16) /* H3 */ 111 #define EMAC_CLK_EPHY_SELECT (1 << 15) /* H3 */ 112 #define EMAC_CLK_RMII_EN (1 << 13) 113 #define EMAC_CLK_ETXDC (0x7 << 10) 114 #define EMAC_CLK_ETXDC_SHIFT 10 115 #define EMAC_CLK_ERXDC (0x1f << 5) 116 #define EMAC_CLK_ERXDC_SHIFT 5 117 #define EMAC_CLK_PIT (0x1 << 2) 118 #define EMAC_CLK_PIT_MII (0 << 2) 119 #define EMAC_CLK_PIT_RGMII (1 << 2) 120 #define EMAC_CLK_SRC (0x3 << 0) 121 #define EMAC_CLK_SRC_MII (0 << 0) 122 #define EMAC_CLK_SRC_EXT_RGMII (1 << 0) 123 #define EMAC_CLK_SRC_RGMII (2 << 0) 124 125 /* Burst length of RX and TX DMA transfers */ 126 static int awg_burst_len = BURST_LEN_DEFAULT; 127 TUNABLE_INT("hw.awg.burst_len", &awg_burst_len); 128 129 /* RX / TX DMA priority. If 1, RX DMA has priority over TX DMA. 
*/ 130 static int awg_rx_tx_pri = RX_TX_PRI_DEFAULT; 131 TUNABLE_INT("hw.awg.rx_tx_pri", &awg_rx_tx_pri); 132 133 /* Pause time field in the transmitted control frame */ 134 static int awg_pause_time = PAUSE_TIME_DEFAULT; 135 TUNABLE_INT("hw.awg.pause_time", &awg_pause_time); 136 137 /* Request a TX interrupt every <n> descriptors */ 138 static int awg_tx_interval = TX_INTERVAL_DEFAULT; 139 TUNABLE_INT("hw.awg.tx_interval", &awg_tx_interval); 140 141 /* Maximum number of mbufs to send to if_input */ 142 static int awg_rx_batch = RX_BATCH_DEFAULT; 143 TUNABLE_INT("hw.awg.rx_batch", &awg_rx_batch); 144 145 enum awg_type { 146 EMAC_A83T = 1, 147 EMAC_H3, 148 }; 149 150 static struct ofw_compat_data compat_data[] = { 151 { "allwinner,sun8i-a83t-emac", EMAC_A83T }, 152 { "allwinner,sun8i-h3-emac", EMAC_H3 }, 153 { NULL, 0 } 154 }; 155 156 struct awg_bufmap { 157 bus_dmamap_t map; 158 struct mbuf *mbuf; 159 }; 160 161 struct awg_txring { 162 bus_dma_tag_t desc_tag; 163 bus_dmamap_t desc_map; 164 struct emac_desc *desc_ring; 165 bus_addr_t desc_ring_paddr; 166 bus_dma_tag_t buf_tag; 167 struct awg_bufmap buf_map[TX_DESC_COUNT]; 168 u_int cur, next, queued; 169 }; 170 171 struct awg_rxring { 172 bus_dma_tag_t desc_tag; 173 bus_dmamap_t desc_map; 174 struct emac_desc *desc_ring; 175 bus_addr_t desc_ring_paddr; 176 bus_dma_tag_t buf_tag; 177 struct awg_bufmap buf_map[RX_DESC_COUNT]; 178 u_int cur; 179 }; 180 181 enum { 182 _RES_EMAC, 183 _RES_IRQ, 184 _RES_SYSCON, 185 _RES_NITEMS 186 }; 187 188 struct awg_softc { 189 struct resource *res[_RES_NITEMS]; 190 struct mtx mtx; 191 if_t ifp; 192 device_t miibus; 193 struct callout stat_ch; 194 struct task link_task; 195 void *ih; 196 u_int mdc_div_ratio_m; 197 int link; 198 int if_flags; 199 enum awg_type type; 200 201 struct awg_txring tx; 202 struct awg_rxring rx; 203 }; 204 205 static struct resource_spec awg_spec[] = { 206 { SYS_RES_MEMORY, 0, RF_ACTIVE }, 207 { SYS_RES_IRQ, 0, RF_ACTIVE }, 208 { SYS_RES_MEMORY, 1, RF_ACTIVE | 
RF_OPTIONAL }, 209 { -1, 0 } 210 }; 211 212 static int 213 awg_miibus_readreg(device_t dev, int phy, int reg) 214 { 215 struct awg_softc *sc; 216 int retry, val; 217 218 sc = device_get_softc(dev); 219 val = 0; 220 221 WR4(sc, EMAC_MII_CMD, 222 (sc->mdc_div_ratio_m << MDC_DIV_RATIO_M_SHIFT) | 223 (phy << PHY_ADDR_SHIFT) | 224 (reg << PHY_REG_ADDR_SHIFT) | 225 MII_BUSY); 226 for (retry = MII_BUSY_RETRY; retry > 0; retry--) { 227 if ((RD4(sc, EMAC_MII_CMD) & MII_BUSY) == 0) { 228 val = RD4(sc, EMAC_MII_DATA); 229 break; 230 } 231 DELAY(10); 232 } 233 234 if (retry == 0) 235 device_printf(dev, "phy read timeout, phy=%d reg=%d\n", 236 phy, reg); 237 238 return (val); 239 } 240 241 static int 242 awg_miibus_writereg(device_t dev, int phy, int reg, int val) 243 { 244 struct awg_softc *sc; 245 int retry; 246 247 sc = device_get_softc(dev); 248 249 WR4(sc, EMAC_MII_DATA, val); 250 WR4(sc, EMAC_MII_CMD, 251 (sc->mdc_div_ratio_m << MDC_DIV_RATIO_M_SHIFT) | 252 (phy << PHY_ADDR_SHIFT) | 253 (reg << PHY_REG_ADDR_SHIFT) | 254 MII_WR | MII_BUSY); 255 for (retry = MII_BUSY_RETRY; retry > 0; retry--) { 256 if ((RD4(sc, EMAC_MII_CMD) & MII_BUSY) == 0) 257 break; 258 DELAY(10); 259 } 260 261 if (retry == 0) 262 device_printf(dev, "phy write timeout, phy=%d reg=%d\n", 263 phy, reg); 264 265 return (0); 266 } 267 268 static void 269 awg_update_link_locked(struct awg_softc *sc) 270 { 271 struct mii_data *mii; 272 uint32_t val; 273 274 AWG_ASSERT_LOCKED(sc); 275 276 if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0) 277 return; 278 mii = device_get_softc(sc->miibus); 279 280 if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == 281 (IFM_ACTIVE | IFM_AVALID)) { 282 switch (IFM_SUBTYPE(mii->mii_media_active)) { 283 case IFM_1000_T: 284 case IFM_1000_SX: 285 case IFM_100_TX: 286 case IFM_10_T: 287 sc->link = 1; 288 break; 289 default: 290 sc->link = 0; 291 break; 292 } 293 } else 294 sc->link = 0; 295 296 if (sc->link == 0) 297 return; 298 299 val = RD4(sc, EMAC_BASIC_CTL_0); 300 
val &= ~(BASIC_CTL_SPEED | BASIC_CTL_DUPLEX); 301 302 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || 303 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) 304 val |= BASIC_CTL_SPEED_1000 << BASIC_CTL_SPEED_SHIFT; 305 else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) 306 val |= BASIC_CTL_SPEED_100 << BASIC_CTL_SPEED_SHIFT; 307 else 308 val |= BASIC_CTL_SPEED_10 << BASIC_CTL_SPEED_SHIFT; 309 310 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) 311 val |= BASIC_CTL_DUPLEX; 312 313 WR4(sc, EMAC_BASIC_CTL_0, val); 314 315 val = RD4(sc, EMAC_RX_CTL_0); 316 val &= ~RX_FLOW_CTL_EN; 317 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) 318 val |= RX_FLOW_CTL_EN; 319 WR4(sc, EMAC_RX_CTL_0, val); 320 321 val = RD4(sc, EMAC_TX_FLOW_CTL); 322 val &= ~(PAUSE_TIME|TX_FLOW_CTL_EN); 323 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) 324 val |= TX_FLOW_CTL_EN; 325 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) 326 val |= awg_pause_time << PAUSE_TIME_SHIFT; 327 WR4(sc, EMAC_TX_FLOW_CTL, val); 328 } 329 330 static void 331 awg_link_task(void *arg, int pending) 332 { 333 struct awg_softc *sc; 334 335 sc = arg; 336 337 AWG_LOCK(sc); 338 awg_update_link_locked(sc); 339 AWG_UNLOCK(sc); 340 } 341 342 static void 343 awg_miibus_statchg(device_t dev) 344 { 345 struct awg_softc *sc; 346 347 sc = device_get_softc(dev); 348 349 taskqueue_enqueue(taskqueue_swi, &sc->link_task); 350 } 351 352 static void 353 awg_media_status(if_t ifp, struct ifmediareq *ifmr) 354 { 355 struct awg_softc *sc; 356 struct mii_data *mii; 357 358 sc = if_getsoftc(ifp); 359 mii = device_get_softc(sc->miibus); 360 361 AWG_LOCK(sc); 362 mii_pollstat(mii); 363 ifmr->ifm_active = mii->mii_media_active; 364 ifmr->ifm_status = mii->mii_media_status; 365 AWG_UNLOCK(sc); 366 } 367 368 static int 369 awg_media_change(if_t ifp) 370 { 371 struct awg_softc *sc; 372 struct mii_data *mii; 373 int error; 374 375 sc = if_getsoftc(ifp); 376 mii = 
/*
 * Fill one TX descriptor.  A zero paddr/len clears the slot and releases
 * one queued count; otherwise the slot is handed to hardware (TX_DESC_CTL
 * set in the status word, written last so the DMA engine never sees a
 * half-initialized descriptor).  Every awg_tx_interval-th descriptor also
 * requests a TX completion interrupt.
 */
static void
awg_setup_txdesc(struct awg_softc *sc, int index, int flags, bus_addr_t paddr,
    u_int len)
{
	uint32_t status, size;

	if (paddr == 0 || len == 0) {
		status = 0;
		size = 0;
		--sc->tx.queued;
	} else {
		status = TX_DESC_CTL;
		size = flags | len;
		if ((index & (awg_tx_interval - 1)) == 0)
			size |= TX_INT_CTL;
		++sc->tx.queued;
	}

	sc->tx.desc_ring[index].addr = htole32((uint32_t)paddr);
	sc->tx.desc_ring[index].size = htole32(size);
	sc->tx.desc_ring[index].status = htole32(status);
}

/*
 * DMA-map an outgoing mbuf chain starting at ring slot 'index' and build
 * the descriptor chain for it (first descriptor flagged TX_FIR_DESC, last
 * flagged TX_LAST_DESC, optional checksum-offload flags from the mbuf).
 * If the chain has too many segments it is collapsed first; *mp is updated
 * in that case.  Returns the number of descriptors consumed, or 0 on
 * failure (caller requeues the mbuf).
 */
static int
awg_setup_txbuf(struct awg_softc *sc, int index, struct mbuf **mp)
{
	bus_dma_segment_t segs[TX_MAX_SEGS];
	int error, nsegs, cur, i, flags;
	u_int csum_flags;
	struct mbuf *m;

	m = *mp;
	error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag,
	    sc->tx.buf_map[index].map, m, segs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		m = m_collapse(m, M_NOWAIT, TX_MAX_SEGS);
		if (m == NULL)
			return (0);
		*mp = m;
		error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag,
		    sc->tx.buf_map[index].map, m, segs, &nsegs, BUS_DMA_NOWAIT);
	}
	if (error != 0)
		return (0);

	bus_dmamap_sync(sc->tx.buf_tag, sc->tx.buf_map[index].map,
	    BUS_DMASYNC_PREWRITE);

	flags = TX_FIR_DESC;
	if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) {
		if ((m->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) != 0)
			csum_flags = TX_CHECKSUM_CTL_FULL;
		else
			csum_flags = TX_CHECKSUM_CTL_IP;
		flags |= (csum_flags << TX_CHECKSUM_CTL_SHIFT);
	}

	/* The mbuf pointer is stored only in the first slot of the chain. */
	for (cur = index, i = 0; i < nsegs; i++) {
		sc->tx.buf_map[cur].mbuf = (i == 0 ? m : NULL);
		if (i == nsegs - 1)
			flags |= TX_LAST_DESC;
		awg_setup_txdesc(sc, cur, flags, segs[i].ds_addr,
		    segs[i].ds_len);
		flags &= ~TX_FIR_DESC;
		cur = TX_NEXT(cur);
	}

	return (nsegs);
}

/*
 * Initialize one RX descriptor to point at a fresh buffer and hand it
 * back to the hardware (status written last).
 */
static void
awg_setup_rxdesc(struct awg_softc *sc, int index, bus_addr_t paddr)
{
	uint32_t status, size;

	status = RX_DESC_CTL;
	size = MCLBYTES - 1;

	sc->rx.desc_ring[index].addr = htole32((uint32_t)paddr);
	sc->rx.desc_ring[index].size = htole32(size);
	sc->rx.desc_ring[index].next =
	    htole32(sc->rx.desc_ring_paddr + DESC_OFF(RX_NEXT(index)));
	sc->rx.desc_ring[index].status = htole32(status);
}

/*
 * DMA-map an RX mbuf cluster into ring slot 'index'.  The payload is
 * offset by ETHER_ALIGN so the IP header ends up 32-bit aligned.
 * Returns 0 on success or the bus_dma error.
 */
static int
awg_setup_rxbuf(struct awg_softc *sc, int index, struct mbuf *m)
{
	bus_dma_segment_t seg;
	int error, nsegs;

	m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_sg(sc->rx.buf_tag,
	    sc->rx.buf_map[index].map, m, &seg, &nsegs, 0);
	if (error != 0)
		return (error);

	bus_dmamap_sync(sc->rx.buf_tag, sc->rx.buf_map[index].map,
	    BUS_DMASYNC_PREREAD);

	sc->rx.buf_map[index].mbuf = m;
	awg_setup_rxdesc(sc, index, seg.ds_addr);

	return (0);
}

/* Allocate a cluster-backed mbuf sized to the full cluster, or NULL. */
static struct mbuf *
awg_alloc_mbufcl(struct awg_softc *sc)
{
	struct mbuf *m;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m != NULL)
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

	return (m);
}

/*
 * Drain the interface send queue into the TX ring and kick the DMA engine.
 * Stops early (setting IFF_DRV_OACTIVE) when fewer than TX_MAX_SEGS free
 * descriptors remain.  Requires the softc lock and an up link.
 */
static void
awg_start_locked(struct awg_softc *sc)
{
	struct mbuf *m;
	uint32_t val;
	if_t ifp;
	int cnt, nsegs;

	AWG_ASSERT_LOCKED(sc);

	if (!sc->link)
		return;

	ifp = sc->ifp;

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	for (cnt = 0; ; cnt++) {
		if (sc->tx.queued >= TX_DESC_COUNT - TX_MAX_SEGS) {
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}

		m = if_dequeue(ifp);
		if (m == NULL)
			break;

		nsegs = awg_setup_txbuf(sc, sc->tx.cur, &m);
		if (nsegs == 0) {
			if_sendq_prepend(ifp, m);
			break;
		}
		if_bpfmtap(ifp, m);
		sc->tx.cur = TX_SKIP(sc->tx.cur, nsegs);
	}

	if (cnt != 0) {
		bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Start and run TX DMA */
		val = RD4(sc, EMAC_TX_CTL_1);
		WR4(sc, EMAC_TX_CTL_1, val | TX_DMA_START);
	}
}
/* if_start entry point: take the lock and run the locked transmit path. */
static void
awg_start(if_t ifp)
{
	struct awg_softc *sc;

	sc = if_getsoftc(ifp);

	AWG_LOCK(sc);
	awg_start_locked(sc);
	AWG_UNLOCK(sc);
}

/*
 * Periodic (1 Hz) callout: tick the MII state machine and, if the link
 * just came up, restart transmission.  Runs with the softc lock held
 * (callout initialized with the mutex).
 */
static void
awg_tick(void *softc)
{
	struct awg_softc *sc;
	struct mii_data *mii;
	if_t ifp;
	int link;

	sc = softc;
	ifp = sc->ifp;
	mii = device_get_softc(sc->miibus);

	AWG_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	link = sc->link;
	mii_tick(mii);
	if (sc->link && !link)
		awg_start_locked(sc);

	callout_reset(&sc->stat_ch, hz, awg_tick, sc);
}

/* Bit Reversal - http://aggregate.org/MAGIC/#Bit%20Reversal */
static uint32_t
bitrev32(uint32_t x)
{
	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));

	return (x >> 16) | (x << 16);
}

/*
 * Program the RX frame filter: unicast station address, promiscuous /
 * all-multicast modes, and the 64-bit multicast hash table computed from
 * the current multicast membership.  Caller holds the softc lock.
 * NOTE(review): hash[1] goes to EMAC_RX_HASH_0 and hash[0] to
 * EMAC_RX_HASH_1 — the swap mirrors the hardware's register layout.
 */
static void
awg_setup_rxfilter(struct awg_softc *sc)
{
	uint32_t val, crc, hashreg, hashbit, hash[2], machi, maclo;
	int mc_count, mcnt, i;
	uint8_t *eaddr, *mta;
	if_t ifp;

	AWG_ASSERT_LOCKED(sc);

	ifp = sc->ifp;
	val = 0;
	hash[0] = hash[1] = 0;

	mc_count = if_multiaddr_count(ifp, -1);

	if (if_getflags(ifp) & IFF_PROMISC)
		val |= DIS_ADDR_FILTER;
	else if (if_getflags(ifp) & IFF_ALLMULTI) {
		val |= RX_ALL_MULTICAST;
		hash[0] = hash[1] = ~0;
	} else if (mc_count > 0) {
		val |= HASH_MULTICAST;

		mta = malloc(sizeof(unsigned char) * ETHER_ADDR_LEN * mc_count,
		    M_DEVBUF, M_NOWAIT);
		if (mta == NULL) {
			if_printf(ifp,
			    "failed to allocate temporary multicast list\n");
			return;
		}

		if_multiaddr_array(ifp, mta, &mcnt, mc_count);
		for (i = 0; i < mcnt; i++) {
			crc = ether_crc32_le(mta + (i * ETHER_ADDR_LEN),
			    ETHER_ADDR_LEN) & 0x7f;
			crc = bitrev32(~crc) >> 26;
			hashreg = (crc >> 5);
			hashbit = (crc & 0x1f);
			hash[hashreg] |= (1 << hashbit);
		}

		free(mta, M_DEVBUF);
	}

	/* Write our unicast address */
	eaddr = IF_LLADDR(ifp);
	machi = (eaddr[5] << 8) | eaddr[4];
	maclo = (eaddr[3] << 24) | (eaddr[2] << 16) | (eaddr[1] << 8) |
	    (eaddr[0] << 0);
	WR4(sc, EMAC_ADDR_HIGH(0), machi);
	WR4(sc, EMAC_ADDR_LOW(0), maclo);

	/* Multicast hash filters */
	WR4(sc, EMAC_RX_HASH_0, hash[1]);
	WR4(sc, EMAC_RX_HASH_1, hash[0]);

	/* RX frame filter config */
	WR4(sc, EMAC_RX_FRM_FLT, val);
}

static void
awg_enable_intr(struct awg_softc *sc)
{
	/* Enable interrupts */
	WR4(sc, EMAC_INT_EN, RX_INT_EN | TX_INT_EN | TX_BUF_UA_INT_EN);
}

static void
awg_disable_intr(struct awg_softc *sc)
{
	/* Disable interrupts */
	WR4(sc, EMAC_INT_EN, 0);
}

/*
 * Bring the interface up: program the RX filter and DMA parameters,
 * enable interrupts (unless polling is active), start the TX/RX DMA
 * engines and MAC, then kick off media negotiation and the stat callout.
 * No-op if already running.  Caller holds the softc lock.
 */
static void
awg_init_locked(struct awg_softc *sc)
{
	struct mii_data *mii;
	uint32_t val;
	if_t ifp;

	mii = device_get_softc(sc->miibus);
	ifp = sc->ifp;

	AWG_ASSERT_LOCKED(sc);

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		return;

	awg_setup_rxfilter(sc);

	/* Configure DMA burst length and priorities */
	val = awg_burst_len << BASIC_CTL_BURST_LEN_SHIFT;
	if (awg_rx_tx_pri)
		val |= BASIC_CTL_RX_TX_PRI;
	WR4(sc, EMAC_BASIC_CTL_1, val);

	/* Enable interrupts */
#ifdef DEVICE_POLLING
	if ((if_getcapenable(ifp) & IFCAP_POLLING) == 0)
		awg_enable_intr(sc);
	else
		awg_disable_intr(sc);
#else
	awg_enable_intr(sc);
#endif

	/* Enable transmit DMA */
	val = RD4(sc, EMAC_TX_CTL_1);
	WR4(sc, EMAC_TX_CTL_1, val | TX_DMA_EN | TX_MD | TX_NEXT_FRAME);

	/* Enable receive DMA */
	val = RD4(sc, EMAC_RX_CTL_1);
	WR4(sc, EMAC_RX_CTL_1, val | RX_DMA_EN | RX_MD);

	/* Enable transmitter */
	val = RD4(sc, EMAC_TX_CTL_0);
	WR4(sc, EMAC_TX_CTL_0, val | TX_EN);

	/* Enable receiver */
	val = RD4(sc, EMAC_RX_CTL_0);
	WR4(sc, EMAC_RX_CTL_0, val | RX_EN | CHECK_CRC);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	mii_mediachg(mii);
	callout_reset(&sc->stat_ch, hz, awg_tick, sc);
}
/* if_init entry point: take the lock and run the locked init path. */
static void
awg_init(void *softc)
{
	struct awg_softc *sc;

	sc = softc;

	AWG_LOCK(sc);
	awg_init_locked(sc);
	AWG_UNLOCK(sc);
}

/*
 * Bring the interface down: stop the callout, halt and flush TX, disable
 * the MAC transmitter/receiver, mask interrupts, and stop both DMA
 * engines.  Caller holds the softc lock.
 */
static void
awg_stop(struct awg_softc *sc)
{
	if_t ifp;
	uint32_t val;

	AWG_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	callout_stop(&sc->stat_ch);

	/* Stop transmit DMA and flush data in the TX FIFO */
	val = RD4(sc, EMAC_TX_CTL_1);
	val &= ~TX_DMA_EN;
	val |= FLUSH_TX_FIFO;
	WR4(sc, EMAC_TX_CTL_1, val);

	/* Disable transmitter */
	val = RD4(sc, EMAC_TX_CTL_0);
	WR4(sc, EMAC_TX_CTL_0, val & ~TX_EN);

	/* Disable receiver */
	val = RD4(sc, EMAC_RX_CTL_0);
	WR4(sc, EMAC_RX_CTL_0, val & ~RX_EN);

	/* Disable interrupts */
	awg_disable_intr(sc);

	/* Disable transmit DMA */
	val = RD4(sc, EMAC_TX_CTL_1);
	WR4(sc, EMAC_TX_CTL_1, val & ~TX_DMA_EN);

	/* Disable receive DMA */
	val = RD4(sc, EMAC_RX_CTL_1);
	WR4(sc, EMAC_RX_CTL_1, val & ~RX_DMA_EN);

	sc->link = 0;

	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}

/*
 * Harvest completed RX descriptors.  Received frames are chained and
 * handed to if_input() in batches of at most awg_rx_batch, with the softc
 * lock dropped around each if_input() call.  Each consumed slot is
 * immediately refilled with a fresh cluster; on allocation failure the
 * frame is counted as an input-queue drop.  Returns the number of frames
 * received (used by the polling path).
 */
static int
awg_rxintr(struct awg_softc *sc)
{
	if_t ifp;
	struct mbuf *m, *m0, *mh, *mt;
	int error, index, len, cnt, npkt;
	uint32_t status;

	ifp = sc->ifp;
	mh = mt = NULL;
	cnt = 0;
	npkt = 0;

	bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (index = sc->rx.cur; ; index = RX_NEXT(index)) {
		status = le32toh(sc->rx.desc_ring[index].status);
		/* RX_DESC_CTL still set means hardware owns the slot. */
		if ((status & RX_DESC_CTL) != 0)
			break;

		bus_dmamap_sync(sc->rx.buf_tag, sc->rx.buf_map[index].map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->rx.buf_tag, sc->rx.buf_map[index].map);

		len = (status & RX_FRM_LEN) >> RX_FRM_LEN_SHIFT;
		if (len != 0) {
			m = sc->rx.buf_map[index].mbuf;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = len;
			m->m_len = len;
			if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

			/* Translate hardware checksum status to mbuf flags. */
			if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0 &&
			    (status & RX_FRM_TYPE) != 0) {
				m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
				if ((status & RX_HEADER_ERR) == 0)
					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				if ((status & RX_PAYLOAD_ERR) == 0) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

			m->m_nextpkt = NULL;
			if (mh == NULL)
				mh = m;
			else
				mt->m_nextpkt = m;
			mt = m;
			++cnt;
			++npkt;

			if (cnt == awg_rx_batch) {
				AWG_UNLOCK(sc);
				if_input(ifp, mh);
				AWG_LOCK(sc);
				mh = mt = NULL;
				cnt = 0;
			}

		}

		if ((m0 = awg_alloc_mbufcl(sc)) != NULL) {
			error = awg_setup_rxbuf(sc, index, m0);
			if (error != 0) {
				/* XXX hole in RX ring */
			}
		} else
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
	}

	if (index != sc->rx.cur) {
		bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map,
		    BUS_DMASYNC_PREWRITE);
	}

	/* Flush any remaining partial batch. */
	if (mh != NULL) {
		AWG_UNLOCK(sc);
		if_input(ifp, mh);
		AWG_LOCK(sc);
	}

	sc->rx.cur = index;

	return (npkt);
}
/*
 * Reclaim completed TX descriptors: sync the ring, free the mbufs of
 * finished chains, clear the slots, and lift IFF_DRV_OACTIVE so
 * transmission can resume.  Caller holds the softc lock.
 */
static void
awg_txintr(struct awg_softc *sc)
{
	struct awg_bufmap *bmap;
	struct emac_desc *desc;
	uint32_t status;
	if_t ifp;
	int i;

	AWG_ASSERT_LOCKED(sc);

	bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	ifp = sc->ifp;
	for (i = sc->tx.next; sc->tx.queued > 0; i = TX_NEXT(i)) {
		desc = &sc->tx.desc_ring[i];
		status = le32toh(desc->status);
		/* Stop at the first descriptor still owned by hardware. */
		if ((status & TX_DESC_CTL) != 0)
			break;
		bmap = &sc->tx.buf_map[i];
		if (bmap->mbuf != NULL) {
			bus_dmamap_sync(sc->tx.buf_tag, bmap->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->tx.buf_tag, bmap->map);
			m_freem(bmap->mbuf);
			bmap->mbuf = NULL;
		}
		awg_setup_txdesc(sc, i, 0, 0, 0);
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
	}

	sc->tx.next = i;

	bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map,
	    BUS_DMASYNC_PREWRITE);
}

/*
 * Interrupt handler: acknowledge the status register, then service RX
 * and/or TX completions and restart transmission if the send queue has
 * pending packets.
 */
static void
awg_intr(void *arg)
{
	struct awg_softc *sc;
	uint32_t val;

	sc = arg;

	AWG_LOCK(sc);
	val = RD4(sc, EMAC_INT_STA);
	WR4(sc, EMAC_INT_STA, val);

	if (val & RX_INT)
		awg_rxintr(sc);

	if (val & (TX_INT|TX_BUF_UA_INT)) {
		awg_txintr(sc);
		if (!if_sendq_empty(sc->ifp))
			awg_start_locked(sc);
	}

	AWG_UNLOCK(sc);
}

#ifdef DEVICE_POLLING
/*
 * DEVICE_POLLING entry point: service RX/TX without interrupts; on
 * POLL_AND_CHECK_STATUS also acknowledge any latched interrupt causes.
 * Returns the number of frames received.
 */
static int
awg_poll(if_t ifp, enum poll_cmd cmd, int count)
{
	struct awg_softc *sc;
	uint32_t val;
	int rx_npkts;

	sc = if_getsoftc(ifp);
	rx_npkts = 0;

	AWG_LOCK(sc);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
		AWG_UNLOCK(sc);
		return (0);
	}

	rx_npkts = awg_rxintr(sc);
	awg_txintr(sc);
	if (!if_sendq_empty(ifp))
		awg_start_locked(sc);

	if (cmd == POLL_AND_CHECK_STATUS) {
		val = RD4(sc, EMAC_INT_STA);
		if (val != 0)
			WR4(sc, EMAC_INT_STA, val);
	}

	AWG_UNLOCK(sc);

	return (rx_npkts);
}
#endif

/*
 * ioctl handler: interface flags (up/down, promisc), multicast membership,
 * media, and capability toggles (polling, VLAN MTU, RX/TX checksum
 * offload with matching hwassist bits).
 */
static int
awg_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct awg_softc *sc;
	struct mii_data *mii;
	struct ifreq *ifr;
	int flags, mask, error;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);
	ifr = (struct ifreq *)data;
	error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		AWG_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				/* Only re-filter if a filter flag changed. */
				flags = if_getflags(ifp) ^ sc->if_flags;
				if ((flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0)
					awg_setup_rxfilter(sc);
			} else
				awg_init_locked(sc);
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				awg_stop(sc);
		}
		sc->if_flags = if_getflags(ifp);
		AWG_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			AWG_LOCK(sc);
			awg_setup_rxfilter(sc);
			AWG_UNLOCK(sc);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) {
				error = ether_poll_register(awg_poll, ifp);
				if (error != 0)
					break;
				AWG_LOCK(sc);
				awg_disable_intr(sc);
				if_setcapenablebit(ifp, IFCAP_POLLING, 0);
				AWG_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				AWG_LOCK(sc);
				awg_enable_intr(sc);
				if_setcapenablebit(ifp, 0, IFCAP_POLLING);
				AWG_UNLOCK(sc);
			}
		}
#endif
		if (mask & IFCAP_VLAN_MTU)
			if_togglecapenable(ifp, IFCAP_VLAN_MTU);
		if (mask & IFCAP_RXCSUM)
			if_togglecapenable(ifp, IFCAP_RXCSUM);
		if (mask & IFCAP_TXCSUM)
			if_togglecapenable(ifp, IFCAP_TXCSUM);
		if ((if_getcapenable(ifp) & (IFCAP_RXCSUM|IFCAP_TXCSUM)) != 0)
			if_sethwassistbits(ifp, CSUM_IP, 0);
		else
			if_sethwassistbits(ifp, 0, CSUM_IP);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
if_sethwassistbits(ifp, CSUM_IP, 0); 1043 else 1044 if_sethwassistbits(ifp, 0, CSUM_IP); 1045 break; 1046 default: 1047 error = ether_ioctl(ifp, cmd, data); 1048 break; 1049 } 1050 1051 return (error); 1052 } 1053 1054 static int 1055 awg_setup_phy(device_t dev) 1056 { 1057 struct awg_softc *sc; 1058 clk_t clk_tx, clk_tx_parent; 1059 const char *tx_parent_name; 1060 char *phy_type; 1061 phandle_t node; 1062 uint32_t reg, tx_delay, rx_delay; 1063 int error; 1064 1065 sc = device_get_softc(dev); 1066 node = ofw_bus_get_node(dev); 1067 1068 if (OF_getprop_alloc(node, "phy-mode", 1, (void **)&phy_type) == 0) 1069 return (0); 1070 1071 if (bootverbose) 1072 device_printf(dev, "PHY type: %s, conf mode: %s\n", phy_type, 1073 sc->res[_RES_SYSCON] != NULL ? "reg" : "clk"); 1074 1075 if (sc->res[_RES_SYSCON] != NULL) { 1076 reg = bus_read_4(sc->res[_RES_SYSCON], 0); 1077 reg &= ~(EMAC_CLK_PIT | EMAC_CLK_SRC | EMAC_CLK_RMII_EN); 1078 if (strcmp(phy_type, "rgmii") == 0) 1079 reg |= EMAC_CLK_PIT_RGMII | EMAC_CLK_SRC_RGMII; 1080 else if (strcmp(phy_type, "rmii") == 0) 1081 reg |= EMAC_CLK_RMII_EN; 1082 else 1083 reg |= EMAC_CLK_PIT_MII | EMAC_CLK_SRC_MII; 1084 1085 if (OF_getencprop(node, "tx-delay", &tx_delay, 1086 sizeof(tx_delay)) > 0) { 1087 reg &= ~EMAC_CLK_ETXDC; 1088 reg |= (tx_delay << EMAC_CLK_ETXDC_SHIFT); 1089 } 1090 if (OF_getencprop(node, "rx-delay", &rx_delay, 1091 sizeof(rx_delay)) > 0) { 1092 reg &= ~EMAC_CLK_ERXDC; 1093 reg |= (rx_delay << EMAC_CLK_ERXDC_SHIFT); 1094 } 1095 1096 if (sc->type == EMAC_H3) { 1097 if (OF_hasprop(node, "allwinner,use-internal-phy")) { 1098 reg |= EMAC_CLK_EPHY_SELECT; 1099 reg &= ~EMAC_CLK_EPHY_SHUTDOWN; 1100 if (OF_hasprop(node, 1101 "allwinner,leds-active-low")) 1102 reg |= EMAC_CLK_EPHY_LED_POL; 1103 else 1104 reg &= ~EMAC_CLK_EPHY_LED_POL; 1105 1106 /* Set internal PHY addr to 1 */ 1107 reg &= ~EMAC_CLK_EPHY_ADDR; 1108 reg |= (1 << EMAC_CLK_EPHY_ADDR_SHIFT); 1109 } else { 1110 reg &= ~EMAC_CLK_EPHY_SELECT; 1111 } 1112 } 1113 1114 
if (bootverbose) 1115 device_printf(dev, "EMAC clock: 0x%08x\n", reg); 1116 bus_write_4(sc->res[_RES_SYSCON], 0, reg); 1117 } else { 1118 if (strcmp(phy_type, "rgmii") == 0) 1119 tx_parent_name = "emac_int_tx"; 1120 else 1121 tx_parent_name = "mii_phy_tx"; 1122 1123 /* Get the TX clock */ 1124 error = clk_get_by_ofw_name(dev, 0, "tx", &clk_tx); 1125 if (error != 0) { 1126 device_printf(dev, "cannot get tx clock\n"); 1127 goto fail; 1128 } 1129 1130 /* Find the desired parent clock based on phy-mode property */ 1131 error = clk_get_by_name(dev, tx_parent_name, &clk_tx_parent); 1132 if (error != 0) { 1133 device_printf(dev, "cannot get clock '%s'\n", 1134 tx_parent_name); 1135 goto fail; 1136 } 1137 1138 /* Set TX clock parent */ 1139 error = clk_set_parent_by_clk(clk_tx, clk_tx_parent); 1140 if (error != 0) { 1141 device_printf(dev, "cannot set tx clock parent\n"); 1142 goto fail; 1143 } 1144 1145 /* Enable TX clock */ 1146 error = clk_enable(clk_tx); 1147 if (error != 0) { 1148 device_printf(dev, "cannot enable tx clock\n"); 1149 goto fail; 1150 } 1151 } 1152 1153 error = 0; 1154 1155 fail: 1156 OF_prop_free(phy_type); 1157 return (error); 1158 } 1159 1160 static int 1161 awg_setup_extres(device_t dev) 1162 { 1163 struct awg_softc *sc; 1164 hwreset_t rst_ahb, rst_ephy; 1165 clk_t clk_ahb, clk_ephy; 1166 regulator_t reg; 1167 phandle_t node; 1168 uint64_t freq; 1169 int error, div; 1170 1171 sc = device_get_softc(dev); 1172 node = ofw_bus_get_node(dev); 1173 rst_ahb = rst_ephy = NULL; 1174 clk_ahb = clk_ephy = NULL; 1175 reg = NULL; 1176 1177 /* Get AHB clock and reset resources */ 1178 error = hwreset_get_by_ofw_name(dev, 0, "ahb", &rst_ahb); 1179 if (error != 0) { 1180 device_printf(dev, "cannot get ahb reset\n"); 1181 goto fail; 1182 } 1183 if (hwreset_get_by_ofw_name(dev, 0, "ephy", &rst_ephy) != 0) 1184 rst_ephy = NULL; 1185 error = clk_get_by_ofw_name(dev, 0, "ahb", &clk_ahb); 1186 if (error != 0) { 1187 device_printf(dev, "cannot get ahb clock\n"); 1188 goto 
fail; 1189 } 1190 if (clk_get_by_ofw_name(dev, 0, "ephy", &clk_ephy) != 0) 1191 clk_ephy = NULL; 1192 1193 /* Configure PHY for MII or RGMII mode */ 1194 if (awg_setup_phy(dev) != 0) 1195 goto fail; 1196 1197 /* Enable clocks */ 1198 error = clk_enable(clk_ahb); 1199 if (error != 0) { 1200 device_printf(dev, "cannot enable ahb clock\n"); 1201 goto fail; 1202 } 1203 if (clk_ephy != NULL) { 1204 error = clk_enable(clk_ephy); 1205 if (error != 0) { 1206 device_printf(dev, "cannot enable ephy clock\n"); 1207 goto fail; 1208 } 1209 } 1210 1211 /* De-assert reset */ 1212 error = hwreset_deassert(rst_ahb); 1213 if (error != 0) { 1214 device_printf(dev, "cannot de-assert ahb reset\n"); 1215 goto fail; 1216 } 1217 if (rst_ephy != NULL) { 1218 error = hwreset_deassert(rst_ephy); 1219 if (error != 0) { 1220 device_printf(dev, "cannot de-assert ephy reset\n"); 1221 goto fail; 1222 } 1223 } 1224 1225 /* Enable PHY regulator if applicable */ 1226 if (regulator_get_by_ofw_property(dev, 0, "phy-supply", ®) == 0) { 1227 error = regulator_enable(reg); 1228 if (error != 0) { 1229 device_printf(dev, "cannot enable PHY regulator\n"); 1230 goto fail; 1231 } 1232 } 1233 1234 /* Determine MDC clock divide ratio based on AHB clock */ 1235 error = clk_get_freq(clk_ahb, &freq); 1236 if (error != 0) { 1237 device_printf(dev, "cannot get AHB clock frequency\n"); 1238 goto fail; 1239 } 1240 div = freq / MDIO_FREQ; 1241 if (div <= 16) 1242 sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_16; 1243 else if (div <= 32) 1244 sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_32; 1245 else if (div <= 64) 1246 sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_64; 1247 else if (div <= 128) 1248 sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_128; 1249 else { 1250 device_printf(dev, "cannot determine MDC clock divide ratio\n"); 1251 error = ENXIO; 1252 goto fail; 1253 } 1254 1255 if (bootverbose) 1256 device_printf(dev, "AHB frequency %ju Hz, MDC div: 0x%x\n", 1257 (uintmax_t)freq, sc->mdc_div_ratio_m); 1258 1259 return (0); 1260 1261 fail: 1262 if 
(reg != NULL)
		regulator_release(reg);
	if (clk_ephy != NULL)
		clk_release(clk_ephy);
	if (clk_ahb != NULL)
		clk_release(clk_ahb);
	if (rst_ephy != NULL)
		hwreset_release(rst_ephy);
	if (rst_ahb != NULL)
		hwreset_release(rst_ahb);
	return (error);
}

/*
 * Read the MAC address out of the EMAC address registers into eaddr[].
 * If the registers hold the all-ones (invalid) pattern, synthesize a
 * random address whose first octet is 0xf2 (locally administered,
 * unicast).
 */
static void
awg_get_eaddr(device_t dev, uint8_t *eaddr)
{
	struct awg_softc *sc;
	uint32_t maclo, machi, rnd;

	sc = device_get_softc(dev);

	/* ADDR_HIGH carries the two most significant octets, ADDR_LOW the rest */
	machi = RD4(sc, EMAC_ADDR_HIGH(0)) & 0xffff;
	maclo = RD4(sc, EMAC_ADDR_LOW(0));

	if (maclo == 0xffffffff && machi == 0xffff) {
		/* MAC address in hardware is invalid, create one */
		rnd = arc4random();
		maclo = 0x00f2 | (rnd & 0xffff0000);
		machi = rnd & 0xffff;
	}

	/* eaddr[0] is the low byte of ADDR_LOW, eaddr[5] the high byte of ADDR_HIGH */
	eaddr[0] = maclo & 0xff;
	eaddr[1] = (maclo >> 8) & 0xff;
	eaddr[2] = (maclo >> 16) & 0xff;
	eaddr[3] = (maclo >> 24) & 0xff;
	eaddr[4] = machi & 0xff;
	eaddr[5] = (machi >> 8) & 0xff;
}

#ifdef AWG_DEBUG
/* Dump the EMAC register set to the console (debug builds only). */
static void
awg_dump_regs(device_t dev)
{
	static const struct {
		const char *name;
		u_int reg;
	} regs[] = {
		{ "BASIC_CTL_0", EMAC_BASIC_CTL_0 },
		{ "BASIC_CTL_1", EMAC_BASIC_CTL_1 },
		{ "INT_STA", EMAC_INT_STA },
		{ "INT_EN", EMAC_INT_EN },
		{ "TX_CTL_0", EMAC_TX_CTL_0 },
		{ "TX_CTL_1", EMAC_TX_CTL_1 },
		{ "TX_FLOW_CTL", EMAC_TX_FLOW_CTL },
		{ "TX_DMA_LIST", EMAC_TX_DMA_LIST },
		{ "RX_CTL_0", EMAC_RX_CTL_0 },
		{ "RX_CTL_1", EMAC_RX_CTL_1 },
		{ "RX_DMA_LIST", EMAC_RX_DMA_LIST },
		{ "RX_FRM_FLT", EMAC_RX_FRM_FLT },
		{ "RX_HASH_0", EMAC_RX_HASH_0 },
		{ "RX_HASH_1", EMAC_RX_HASH_1 },
		{ "MII_CMD", EMAC_MII_CMD },
		{ "ADDR_HIGH0", EMAC_ADDR_HIGH(0) },
		{ "ADDR_LOW0", EMAC_ADDR_LOW(0) },
		{ "TX_DMA_STA", EMAC_TX_DMA_STA
},
		{ "TX_DMA_CUR_DESC", EMAC_TX_DMA_CUR_DESC },
		{ "TX_DMA_CUR_BUF", EMAC_TX_DMA_CUR_BUF },
		{ "RX_DMA_STA", EMAC_RX_DMA_STA },
		{ "RX_DMA_CUR_DESC", EMAC_RX_DMA_CUR_DESC },
		{ "RX_DMA_CUR_BUF", EMAC_RX_DMA_CUR_BUF },
		{ "RGMII_STA", EMAC_RGMII_STA },
	};
	struct awg_softc *sc;
	unsigned int n;

	sc = device_get_softc(dev);

	for (n = 0; n < nitems(regs); n++)
		device_printf(dev, " %-20s %08x\n", regs[n].name,
		    RD4(sc, regs[n].reg));
}
#endif

#define GPIO_ACTIVE_LOW 1

/*
 * Pulse the PHY reset GPIO described by the "allwinner,reset-gpio" and
 * "allwinner,reset-delays-us" FDT properties.  Returns 0 when no reset
 * GPIO is configured (nothing to do) or on success; ENXIO when the
 * properties are incomplete or the GPIO cannot be mapped.
 */
static int
awg_phy_reset(device_t dev)
{
	pcell_t gpio_prop[4], delay_prop[3];
	phandle_t node, gpio_node;
	device_t gpio;
	uint32_t pin, flags;
	uint32_t pin_value;

	node = ofw_bus_get_node(dev);
	/* Absent reset GPIO property is not an error */
	if (OF_getencprop(node, "allwinner,reset-gpio", gpio_prop,
	    sizeof(gpio_prop)) <= 0)
		return (0);

	/* Three delays (microseconds), one per phase of the pulse below */
	if (OF_getencprop(node, "allwinner,reset-delays-us", delay_prop,
	    sizeof(delay_prop)) <= 0)
		return (ENXIO);

	gpio_node = OF_node_from_xref(gpio_prop[0]);
	if ((gpio = OF_device_from_xref(gpio_prop[0])) == NULL)
		return (ENXIO);

	if (GPIO_MAP_GPIOS(gpio, node, gpio_node, nitems(gpio_prop) - 1,
	    gpio_prop + 1, &pin, &flags) != 0)
		return (ENXIO);

	/*
	 * Compute the pin level driven before and after the pulse; both
	 * the "allwinner,reset-active-low" property and the GPIO mapping
	 * flags invert the polarity.  The middle GPIO_PIN_SET drives the
	 * opposite level.
	 */
	pin_value = GPIO_PIN_LOW;
	if (OF_hasprop(node, "allwinner,reset-active-low"))
		pin_value = GPIO_PIN_HIGH;

	if (flags & GPIO_ACTIVE_LOW)
		pin_value = !pin_value;

	GPIO_PIN_SETFLAGS(gpio, pin, GPIO_PIN_OUTPUT);
	GPIO_PIN_SET(gpio, pin, pin_value);
	DELAY(delay_prop[0]);
	GPIO_PIN_SET(gpio, pin, !pin_value);
	DELAY(delay_prop[1]);
	GPIO_PIN_SET(gpio, pin, pin_value);
	DELAY(delay_prop[2]);

	return (0);
}

/*
 * Reset the EMAC: pulse the external PHY reset GPIO (if configured),
 * then issue a register-level soft reset and wait for it to complete.
 * Returns 0, ENXIO (PHY reset failed) or ETIMEDOUT.
 */
static int
awg_reset(device_t dev)
{
	struct awg_softc *sc;
	int retry;

	sc = device_get_softc(dev);

	/* Reset PHY if necessary */
	if (awg_phy_reset(dev) != 0) {
		device_printf(dev, "failed to reset PHY\n");
		return (ENXIO);
	}

	/* Soft reset all registers and logic */
	WR4(sc, EMAC_BASIC_CTL_1, BASIC_CTL_SOFT_RST);

	/* Wait for soft reset bit to self-clear (up to SOFT_RST_RETRY polls) */
	for (retry = SOFT_RST_RETRY; retry > 0; retry--) {
		if ((RD4(sc, EMAC_BASIC_CTL_1) & BASIC_CTL_SOFT_RST) == 0)
			break;
		DELAY(10);
	}
	if (retry == 0) {
		device_printf(dev, "soft reset timed out\n");
#ifdef AWG_DEBUG
		awg_dump_regs(dev);
#endif
		return (ETIMEDOUT);
	}

	return (0);
}

/*
 * bus_dmamap_load(9) callback: store the physical address of the single
 * segment into the bus_addr_t pointed to by 'arg'.
 */
static void
awg_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	if (error != 0)
		return;
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

/*
 * Allocate and initialize the TX and RX descriptor rings plus their
 * per-slot buffer DMA maps, then program the DMA list base registers.
 *
 * NOTE(review): the error paths return without releasing tags, maps or
 * descriptor memory already allocated, so a mid-setup failure leaks
 * those resources — confirm whether attach failure handling covers this.
 */
static int
awg_setup_dma(device_t dev)
{
	struct awg_softc *sc;
	struct mbuf *m;
	int error, i;

	sc = device_get_softc(dev);

	/* Setup TX ring */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    DESC_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    TX_DESC_SIZE, 1,		/* maxsize, nsegs */
	    TX_DESC_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->tx.desc_tag);
	if (error != 0) {
		device_printf(dev, "cannot create TX descriptor ring tag\n");
		return (error);
	}

	error = bus_dmamem_alloc(sc->tx.desc_tag, (void **)&sc->tx.desc_ring,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->tx.desc_map);
	if (error != 0) {
		device_printf(dev, "cannot allocate TX descriptor ring\n");
		return (error);
	}

	error = bus_dmamap_load(sc->tx.desc_tag, sc->tx.desc_map,
	    sc->tx.desc_ring, TX_DESC_SIZE, awg_dmamap_cb,
	    &sc->tx.desc_ring_paddr, 0);
	if (error != 0) {
		device_printf(dev, "cannot load TX descriptor ring\n");
		return (error);
	}

	/* Chain each TX descriptor to the next, forming a circular ring */
	for (i = 0; i < TX_DESC_COUNT; i++)
		sc->tx.desc_ring[i].next =
		    htole32(sc->tx.desc_ring_paddr + DESC_OFF(TX_NEXT(i)));

	/* Tag for TX packet buffers: up to TX_MAX_SEGS segments per packet */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, TX_MAX_SEGS,	/* maxsize, nsegs */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->tx.buf_tag);
	if (error != 0) {
		device_printf(dev, "cannot create TX buffer tag\n");
		return (error);
	}

	/*
	 * Create a DMA map per TX slot and initialize each descriptor via
	 * awg_setup_txdesc(sc, i, 0, 0, 0) — presumably marking the slot
	 * empty (helper defined elsewhere in this file).
	 */
	sc->tx.queued = TX_DESC_COUNT;
	for (i = 0; i < TX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->tx.buf_tag, 0,
		    &sc->tx.buf_map[i].map);
		if (error != 0) {
			device_printf(dev, "cannot create TX buffer map\n");
			return (error);
		}
		awg_setup_txdesc(sc, i, 0, 0, 0);
	}

	/* Setup RX ring */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    DESC_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    RX_DESC_SIZE, 1,		/* maxsize, nsegs */
	    RX_DESC_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rx.desc_tag);
	if (error != 0) {
		device_printf(dev, "cannot create RX descriptor ring tag\n");
		return (error);
	}

	error = bus_dmamem_alloc(sc->rx.desc_tag, (void **)&sc->rx.desc_ring,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->rx.desc_map);
	if (error != 0) {
		device_printf(dev, "cannot allocate RX descriptor ring\n");
		return (error);
	}

	error = bus_dmamap_load(sc->rx.desc_tag, sc->rx.desc_map,
	    sc->rx.desc_ring, RX_DESC_SIZE, awg_dmamap_cb,
	    &sc->rx.desc_ring_paddr, 0);
	if (error != 0) {
		device_printf(dev, "cannot load RX descriptor ring\n");
		return (error);
	}

	/* Tag for RX buffers: one mbuf cluster (single segment) per slot */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, 1,		/* maxsize, nsegs */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rx.buf_tag);
	if (error != 0) {
		device_printf(dev, "cannot create RX buffer tag\n");
		return (error);
	}

	/* Populate every RX slot with a DMA map and a fresh mbuf cluster */
	for (i = 0; i < RX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->rx.buf_tag, 0,
		    &sc->rx.buf_map[i].map);
		if (error != 0) {
			device_printf(dev, "cannot create RX buffer map\n");
			return (error);
		}
		if ((m = awg_alloc_mbufcl(sc)) == NULL) {
			device_printf(dev, "cannot allocate RX mbuf\n");
			return (ENOMEM);
		}
		error = awg_setup_rxbuf(sc, i, m);
		if (error != 0) {
			device_printf(dev, "cannot create RX buffer\n");
			return (error);
		}
	}
	/* Flush descriptor writes before handing the ring to the hardware */
	bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map,
	    BUS_DMASYNC_PREWRITE);

	/* Write transmit and receive descriptor base address registers */
	WR4(sc, EMAC_TX_DMA_LIST, sc->tx.desc_ring_paddr);
	WR4(sc, EMAC_RX_DMA_LIST, sc->rx.desc_ring_paddr);

	return (0);
}

/*
 * Probe: match the FDT compatible string and require the node's
 * "status" to be okay.
 */
static int
awg_probe(device_t dev)
{
	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
		return (ENXIO);

	device_set_desc(dev, "Allwinner Gigabit Ethernet");
	return (BUS_PROBE_DEFAULT);
}

/*
 * Attach: allocate bus resources, bring up clocks/resets/regulator,
 * soft-reset the core, build the DMA rings, install the interrupt
 * handler and create the network interface.
 *
 * NOTE(review): failure paths after bus_alloc_resources()/mtx_init()
 * return without releasing the resources or destroying the mutex —
 * confirm against the (unseen) detach/teardown code.
 */
static int
awg_attach(device_t dev)
{
	uint8_t eaddr[ETHER_ADDR_LEN];
	struct awg_softc *sc;
	phandle_t node;
	int error;

	sc = device_get_softc(dev);
	/* ocd_data distinguishes the supported EMAC variants */
	sc->type = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
	node = ofw_bus_get_node(dev);

	if (bus_alloc_resources(dev, awg_spec, sc->res) != 0) {
		device_printf(dev, "cannot allocate resources for device\n");
		return (ENXIO);
	}

	mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
	callout_init_mtx(&sc->stat_ch, &sc->mtx, 0);
	TASK_INIT(&sc->link_task, 0, awg_link_task, sc);

	/* Setup clocks and regulators */
	error = awg_setup_extres(dev);
	if (error != 0)
		return (error);

	/* Read MAC address before resetting the chip */
	awg_get_eaddr(dev, eaddr);

	/* Soft reset EMAC core */
	error = awg_reset(dev);
	if (error != 0)
		return (error);

	/* Setup DMA descriptors */
	error = awg_setup_dma(dev);
	if (error != 0)
		return (error);

	/* Install interrupt handler */
	error = bus_setup_intr(dev, sc->res[_RES_IRQ],
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, awg_intr, sc, &sc->ih);
	if (error != 0) {
		device_printf(dev, "cannot setup interrupt handler\n");
		return (error);
	}

	/* Setup ethernet interface */
	sc->ifp = if_alloc(IFT_ETHER);
	if_setsoftc(sc->ifp, sc);
	if_initname(sc->ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(sc->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setstartfn(sc->ifp, awg_start);
	if_setioctlfn(sc->ifp, awg_ioctl);
	if_setinitfn(sc->ifp, awg_init);
	/* Send queue sized to the TX ring, one slot kept in reserve */
	if_setsendqlen(sc->ifp, TX_DESC_COUNT - 1);
	if_setsendqready(sc->ifp);
	/* Advertise IP/UDP/TCP transmit checksum offload */
	if_sethwassist(sc->ifp, CSUM_IP | CSUM_UDP | CSUM_TCP);
	if_setcapabilities(sc->ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM);
	if_setcapenable(sc->ifp, if_getcapabilities(sc->ifp));
#ifdef DEVICE_POLLING
	if_setcapabilitiesbit(sc->ifp, IFCAP_POLLING, 0);
#endif

	/* Attach MII driver */
	error = mii_attach(dev, &sc->miibus, sc->ifp, awg_media_change,
	    awg_media_status, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
	    MIIF_DOPAUSE);
	if (error != 0) {
		device_printf(dev, "cannot attach PHY\n");
		return (error);
	}

	/* Attach ethernet interface */
	ether_ifattach(sc->ifp, eaddr);

	return (0);
}

/*
 * newbus method table.
 * NOTE(review): no device_detach method is registered here — the driver
 * as shown cannot be cleanly unloaded; verify whether that is intended.
 */
static device_method_t awg_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		awg_probe),
	DEVMETHOD(device_attach,	awg_attach),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	awg_miibus_readreg),
	DEVMETHOD(miibus_writereg,	awg_miibus_writereg),
	DEVMETHOD(miibus_statchg,	awg_miibus_statchg),

	DEVMETHOD_END
};

static driver_t awg_driver = {
	"awg",
	awg_methods,
	sizeof(struct awg_softc),
};

static devclass_t awg_devclass;

/* Attach under simplebus; miibus hangs off this driver for the PHY */
DRIVER_MODULE(awg, simplebus, awg_driver, awg_devclass, 0, 0);
DRIVER_MODULE(miibus, awg, miibus_driver, miibus_devclass, 0, 0);

MODULE_DEPEND(awg, ether, 1, 1, 1);
MODULE_DEPEND(awg, miibus, 1, 1, 1);