xref: /freebsd/sys/arm/allwinner/if_awg.c (revision 430f7286a566b1407c7b32ce13585caf5aa59b92)
1 /*-
2  * Copyright (c) 2016 Jared McNeill <jmcneill@invisible.ca>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
15  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
16  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
17  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
18  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
19  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
20  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
21  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
22  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24  * SUCH DAMAGE.
25  *
26  * $FreeBSD$
27  */
28 
29 /*
30  * Allwinner Gigabit Ethernet MAC (EMAC) controller
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/bus.h>
39 #include <sys/rman.h>
40 #include <sys/kernel.h>
41 #include <sys/endian.h>
42 #include <sys/mbuf.h>
43 #include <sys/socket.h>
44 #include <sys/sockio.h>
45 #include <sys/module.h>
46 #include <sys/taskqueue.h>
47 
48 #include <net/bpf.h>
49 #include <net/if.h>
50 #include <net/ethernet.h>
51 #include <net/if_dl.h>
52 #include <net/if_media.h>
53 #include <net/if_types.h>
54 #include <net/if_var.h>
55 
56 #include <machine/bus.h>
57 
58 #include <dev/ofw/ofw_bus.h>
59 #include <dev/ofw/ofw_bus_subr.h>
60 
61 #include <arm/allwinner/if_awgreg.h>
62 #include <dev/mii/mii.h>
63 #include <dev/mii/miivar.h>
64 
65 #include <dev/extres/clk/clk.h>
66 #include <dev/extres/hwreset/hwreset.h>
67 #include <dev/extres/regulator/regulator.h>
68 
69 #include "miibus_if.h"
70 
/* Register accessors; res[0] is the controller's memory resource. */
#define	RD4(sc, reg)		bus_read_4((sc)->res[0], (reg))
#define	WR4(sc, reg, val)	bus_write_4((sc)->res[0], (reg), (val))

/*
 * Softc mutex helpers.  No trailing semicolon in the expansions so the
 * macros behave as ordinary statements (the original AWG_UNLOCK expanded
 * with a stray ';', which is unsafe in unbraced if/else bodies).
 */
#define	AWG_LOCK(sc)		mtx_lock(&(sc)->mtx)
#define	AWG_UNLOCK(sc)		mtx_unlock(&(sc)->mtx)
#define	AWG_ASSERT_LOCKED(sc)	mtx_assert(&(sc)->mtx, MA_OWNED)
#define	AWG_ASSERT_UNLOCKED(sc)	mtx_assert(&(sc)->mtx, MA_NOTOWNED)

#define	DESC_ALIGN		4
#define	TX_DESC_COUNT		256
#define	TX_DESC_SIZE		(sizeof(struct emac_desc) * TX_DESC_COUNT)
#define	RX_DESC_COUNT		256
#define	RX_DESC_SIZE		(sizeof(struct emac_desc) * RX_DESC_COUNT)

/* Ring index arithmetic; the masking requires power-of-two ring sizes. */
#define	DESC_OFF(n)		((n) * sizeof(struct emac_desc))
#define	TX_NEXT(n)		(((n) + 1) & (TX_DESC_COUNT - 1))
#define	TX_SKIP(n, o)		(((n) + (o)) & (TX_DESC_COUNT - 1))
#define	RX_NEXT(n)		(((n) + 1) & (RX_DESC_COUNT - 1))

#define	TX_MAX_SEGS		10

#define	SOFT_RST_RETRY		1000
#define	MII_BUSY_RETRY		1000
#define	MDIO_FREQ		2500000

#define	BURST_LEN_DEFAULT	8
#define	RX_TX_PRI_DEFAULT	0
#define	PAUSE_TIME_DEFAULT	0x400
#define	TX_INTERVAL_DEFAULT	64
100 
/* Burst length of RX and TX DMA transfers (loader tunable hw.awg.burst_len) */
static int awg_burst_len = BURST_LEN_DEFAULT;
TUNABLE_INT("hw.awg.burst_len", &awg_burst_len);

/* RX / TX DMA priority. If 1, RX DMA has priority over TX DMA. */
static int awg_rx_tx_pri = RX_TX_PRI_DEFAULT;
TUNABLE_INT("hw.awg.rx_tx_pri", &awg_rx_tx_pri);

/* Pause time field in the transmitted control frame */
static int awg_pause_time = PAUSE_TIME_DEFAULT;
TUNABLE_INT("hw.awg.pause_time", &awg_pause_time);

/*
 * Request a TX interrupt every <n> descriptors.  Must be a power of two:
 * awg_setup_txdesc() tests (index & (awg_tx_interval - 1)).
 */
static int awg_tx_interval = TX_INTERVAL_DEFAULT;
TUNABLE_INT("hw.awg.tx_interval", &awg_tx_interval);
116 
/* FDT compatible strings; nonzero ocd_data marks a supported controller. */
static struct ofw_compat_data compat_data[] = {
	{ "allwinner,sun8i-a83t-emac",		1 },
	{ NULL,					0 }
};
121 
/* Per-slot DMA state for one ring entry. */
struct awg_bufmap {
	bus_dmamap_t		map;	/* DMA map for this slot's mbuf */
	struct mbuf		*mbuf;	/* mbuf loaded into the map, or NULL */
};

/* Transmit descriptor ring bookkeeping. */
struct awg_txring {
	bus_dma_tag_t		desc_tag;
	bus_dmamap_t		desc_map;
	struct emac_desc	*desc_ring;
	bus_addr_t		desc_ring_paddr;
	bus_dma_tag_t		buf_tag;
	struct awg_bufmap	buf_map[TX_DESC_COUNT];
	/* cur: next slot to fill; next: next slot to reclaim; queued: in use */
	u_int			cur, next, queued;
};

/* Receive descriptor ring bookkeeping. */
struct awg_rxring {
	bus_dma_tag_t		desc_tag;
	bus_dmamap_t		desc_map;
	struct emac_desc	*desc_ring;
	bus_addr_t		desc_ring_paddr;
	bus_dma_tag_t		buf_tag;
	struct awg_bufmap	buf_map[RX_DESC_COUNT];
	u_int			cur;	/* next slot to examine */
};

/* Per-device software context. */
struct awg_softc {
	struct resource		*res[2];	/* memory + IRQ (awg_spec) */
	struct mtx		mtx;
	if_t			ifp;
	device_t		miibus;
	struct callout		stat_ch;	/* awg_tick() callout */
	struct task		link_task;	/* deferred link-state work */
	void			*ih;
	u_int			mdc_div_ratio_m; /* MDC clock divide ratio */
	int			link;		/* nonzero when link is up */
	int			if_flags;	/* last seen if_getflags() */

	struct awg_txring	tx;
	struct awg_rxring	rx;
};

/* One memory and one interrupt resource, in that order (see res[] above). */
static struct resource_spec awg_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ -1, 0 }
};
168 
169 static int
170 awg_miibus_readreg(device_t dev, int phy, int reg)
171 {
172 	struct awg_softc *sc;
173 	int retry, val;
174 
175 	sc = device_get_softc(dev);
176 	val = 0;
177 
178 	WR4(sc, EMAC_MII_CMD,
179 	    (sc->mdc_div_ratio_m << MDC_DIV_RATIO_M_SHIFT) |
180 	    (phy << PHY_ADDR_SHIFT) |
181 	    (reg << PHY_REG_ADDR_SHIFT) |
182 	    MII_BUSY);
183 	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
184 		if ((RD4(sc, EMAC_MII_CMD) & MII_BUSY) == 0) {
185 			val = RD4(sc, EMAC_MII_DATA);
186 			break;
187 		}
188 		DELAY(10);
189 	}
190 
191 	if (retry == 0)
192 		device_printf(dev, "phy read timeout, phy=%d reg=%d\n",
193 		    phy, reg);
194 
195 	return (val);
196 }
197 
198 static int
199 awg_miibus_writereg(device_t dev, int phy, int reg, int val)
200 {
201 	struct awg_softc *sc;
202 	int retry;
203 
204 	sc = device_get_softc(dev);
205 
206 	WR4(sc, EMAC_MII_DATA, val);
207 	WR4(sc, EMAC_MII_CMD,
208 	    (sc->mdc_div_ratio_m << MDC_DIV_RATIO_M_SHIFT) |
209 	    (phy << PHY_ADDR_SHIFT) |
210 	    (reg << PHY_REG_ADDR_SHIFT) |
211 	    MII_WR | MII_BUSY);
212 	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
213 		if ((RD4(sc, EMAC_MII_CMD) & MII_BUSY) == 0)
214 			break;
215 		DELAY(10);
216 	}
217 
218 	if (retry == 0)
219 		device_printf(dev, "phy write timeout, phy=%d reg=%d\n",
220 		    phy, reg);
221 
222 	return (0);
223 }
224 
/*
 * Resolve the current PHY media status into sc->link and, when the link is
 * up, program the MAC speed/duplex and flow-control registers to match.
 * Called with the driver lock held.
 */
static void
awg_update_link_locked(struct awg_softc *sc)
{
	struct mii_data *mii;
	uint32_t val;

	AWG_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0)
		return;
	mii = device_get_softc(sc->miibus);

	/* Link counts as up only for the media subtypes handled below. */
	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_T:
		case IFM_1000_SX:
		case IFM_100_TX:
		case IFM_10_T:
			sc->link = 1;
			break;
		default:
			sc->link = 0;
			break;
		}
	} else
		sc->link = 0;

	if (sc->link == 0)
		return;

	/* Read-modify-write the speed/duplex field in BASIC_CTL_0. */
	val = RD4(sc, EMAC_BASIC_CTL_0);
	val &= ~(BASIC_CTL_SPEED | BASIC_CTL_DUPLEX);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
		val |= BASIC_CTL_SPEED_1000 << BASIC_CTL_SPEED_SHIFT;
	else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
		val |= BASIC_CTL_SPEED_100 << BASIC_CTL_SPEED_SHIFT;
	else
		val |= BASIC_CTL_SPEED_10 << BASIC_CTL_SPEED_SHIFT;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
		val |= BASIC_CTL_DUPLEX;

	WR4(sc, EMAC_BASIC_CTL_0, val);

	/* Honour RX pause only if the negotiated media requests it. */
	val = RD4(sc, EMAC_RX_CTL_0);
	val &= ~RX_FLOW_CTL_EN;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
		val |= RX_FLOW_CTL_EN;
	WR4(sc, EMAC_RX_CTL_0, val);

	/* TX pause enable plus, on full duplex, the advertised pause time. */
	val = RD4(sc, EMAC_TX_FLOW_CTL);
	val &= ~(PAUSE_TIME|TX_FLOW_CTL_EN);
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
		val |= TX_FLOW_CTL_EN;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
		val |= awg_pause_time << PAUSE_TIME_SHIFT;
	WR4(sc, EMAC_TX_FLOW_CTL, val);
}
286 
/* Taskqueue handler: apply a PHY link change under the driver lock. */
static void
awg_link_task(void *arg, int pending)
{
	struct awg_softc *sc = arg;

	AWG_LOCK(sc);
	awg_update_link_locked(sc);
	AWG_UNLOCK(sc);
}
298 
299 static void
300 awg_miibus_statchg(device_t dev)
301 {
302 	struct awg_softc *sc;
303 
304 	sc = device_get_softc(dev);
305 
306 	taskqueue_enqueue(taskqueue_swi, &sc->link_task);
307 }
308 
309 static void
310 awg_media_status(if_t ifp, struct ifmediareq *ifmr)
311 {
312 	struct awg_softc *sc;
313 	struct mii_data *mii;
314 
315 	sc = if_getsoftc(ifp);
316 	mii = device_get_softc(sc->miibus);
317 
318 	AWG_LOCK(sc);
319 	mii_pollstat(mii);
320 	ifmr->ifm_active = mii->mii_media_active;
321 	ifmr->ifm_status = mii->mii_media_status;
322 	AWG_UNLOCK(sc);
323 }
324 
325 static int
326 awg_media_change(if_t ifp)
327 {
328 	struct awg_softc *sc;
329 	struct mii_data *mii;
330 	int error;
331 
332 	sc = if_getsoftc(ifp);
333 	mii = device_get_softc(sc->miibus);
334 
335 	AWG_LOCK(sc);
336 	error = mii_mediachg(mii);
337 	AWG_UNLOCK(sc);
338 
339 	return (error);
340 }
341 
342 static void
343 awg_setup_txdesc(struct awg_softc *sc, int index, int flags, bus_addr_t paddr,
344     u_int len)
345 {
346 	uint32_t status, size;
347 
348 	if (paddr == 0 || len == 0) {
349 		status = 0;
350 		size = 0;
351 		--sc->tx.queued;
352 	} else {
353 		status = TX_DESC_CTL;
354 		size = flags | len;
355 		if ((index & (awg_tx_interval - 1)) == 0)
356 			size |= htole32(TX_INT_CTL);
357 		++sc->tx.queued;
358 	}
359 
360 	sc->tx.desc_ring[index].addr = htole32((uint32_t)paddr);
361 	sc->tx.desc_ring[index].size = htole32(size);
362 	sc->tx.desc_ring[index].status = htole32(status);
363 }
364 
/*
 * DMA-load an outgoing mbuf chain and fill one TX descriptor per segment,
 * starting at ring slot 'index'.  Returns the number of descriptors
 * consumed, or 0 on failure.  On an EFBIG load the chain is compacted with
 * m_collapse() and *mp is updated to the new chain.
 * NOTE(review): a 0 return does not tell the caller whether the chain is
 * still valid — confirm m_collapse()'s failure semantics before relying on
 * the caller's re-queue of *mp.
 */
static int
awg_setup_txbuf(struct awg_softc *sc, int index, struct mbuf **mp)
{
	bus_dma_segment_t segs[TX_MAX_SEGS];
	int error, nsegs, cur, i, flags;
	u_int csum_flags;
	struct mbuf *m;

	m = *mp;
	error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag,
	    sc->tx.buf_map[index].map, m, segs, &nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* Too many segments: compact the chain and retry once. */
		m = m_collapse(m, M_NOWAIT, TX_MAX_SEGS);
		if (m == NULL)
			return (0);
		*mp = m;
		error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag,
		    sc->tx.buf_map[index].map, m, segs, &nsegs, BUS_DMA_NOWAIT);
	}
	if (error != 0)
		return (0);

	bus_dmamap_sync(sc->tx.buf_tag, sc->tx.buf_map[index].map,
	    BUS_DMASYNC_PREWRITE);

	/* Ask the hardware for checksum insertion when the stack wants it. */
	flags = TX_FIR_DESC;
	if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) {
		if ((m->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) != 0)
			csum_flags = TX_CHECKSUM_CTL_FULL;
		else
			csum_flags = TX_CHECKSUM_CTL_IP;
		flags |= (csum_flags << TX_CHECKSUM_CTL_SHIFT);
	}

	/*
	 * One descriptor per DMA segment.  Only the first slot keeps the
	 * mbuf pointer; awg_txintr() later frees the chain from that slot.
	 */
	for (cur = index, i = 0; i < nsegs; i++) {
		sc->tx.buf_map[cur].mbuf = (i == 0 ? m : NULL);
		if (i == nsegs - 1)
			flags |= TX_LAST_DESC;
		awg_setup_txdesc(sc, cur, flags, segs[i].ds_addr,
		    segs[i].ds_len);
		flags &= ~TX_FIR_DESC;
		cur = TX_NEXT(cur);
	}

	return (nsegs);
}
411 
412 static void
413 awg_setup_rxdesc(struct awg_softc *sc, int index, bus_addr_t paddr)
414 {
415 	uint32_t status, size;
416 
417 	status = RX_DESC_CTL;
418 	size = MCLBYTES - 1;
419 
420 	sc->rx.desc_ring[index].addr = htole32((uint32_t)paddr);
421 	sc->rx.desc_ring[index].size = htole32(size);
422 	sc->rx.desc_ring[index].next =
423 	    htole32(sc->rx.desc_ring_paddr + DESC_OFF(RX_NEXT(index)));
424 	sc->rx.desc_ring[index].status = htole32(status);
425 }
426 
427 static int
428 awg_setup_rxbuf(struct awg_softc *sc, int index, struct mbuf *m)
429 {
430 	bus_dma_segment_t seg;
431 	int error, nsegs;
432 
433 	m_adj(m, ETHER_ALIGN);
434 
435 	error = bus_dmamap_load_mbuf_sg(sc->rx.buf_tag,
436 	    sc->rx.buf_map[index].map, m, &seg, &nsegs, 0);
437 	if (error != 0)
438 		return (error);
439 
440 	bus_dmamap_sync(sc->rx.buf_tag, sc->rx.buf_map[index].map,
441 	    BUS_DMASYNC_PREREAD);
442 
443 	sc->rx.buf_map[index].mbuf = m;
444 	awg_setup_rxdesc(sc, index, seg.ds_addr);
445 
446 	return (0);
447 }
448 
449 static struct mbuf *
450 awg_alloc_mbufcl(struct awg_softc *sc)
451 {
452 	struct mbuf *m;
453 
454 	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
455 	if (m != NULL)
456 		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
457 
458 	return (m);
459 }
460 
/*
 * Move packets from the interface send queue into the TX ring and kick the
 * TX DMA engine.  Called with the driver lock held; does nothing unless
 * the link is up and the interface is running and not marked OACTIVE.
 */
static void
awg_start_locked(struct awg_softc *sc)
{
	struct mbuf *m;
	uint32_t val;
	if_t ifp;
	int cnt, nsegs;

	AWG_ASSERT_LOCKED(sc);

	if (!sc->link)
		return;

	ifp = sc->ifp;

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	for (cnt = 0; ; cnt++) {
		/* Stop while a maximally fragmented packet can still fit. */
		if (sc->tx.queued >= TX_DESC_COUNT - TX_MAX_SEGS) {
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}

		m = if_dequeue(ifp);
		if (m == NULL)
			break;

		nsegs = awg_setup_txbuf(sc, sc->tx.cur, &m);
		if (nsegs == 0) {
			/* Mapping failed; re-queue the packet and stop. */
			if_sendq_prepend(ifp, m);
			break;
		}
		if_bpfmtap(ifp, m);
		sc->tx.cur = TX_SKIP(sc->tx.cur, nsegs);
	}

	if (cnt != 0) {
		bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Start and run TX DMA */
		val = RD4(sc, EMAC_TX_CTL_1);
		WR4(sc, EMAC_TX_CTL_1, val | TX_DMA_START);
	}
}
508 
509 static void
510 awg_start(if_t ifp)
511 {
512 	struct awg_softc *sc;
513 
514 	sc = if_getsoftc(ifp);
515 
516 	AWG_LOCK(sc);
517 	awg_start_locked(sc);
518 	AWG_UNLOCK(sc);
519 }
520 
521 static void
522 awg_tick(void *softc)
523 {
524 	struct awg_softc *sc;
525 	struct mii_data *mii;
526 	if_t ifp;
527 	int link;
528 
529 	sc = softc;
530 	ifp = sc->ifp;
531 	mii = device_get_softc(sc->miibus);
532 
533 	AWG_ASSERT_LOCKED(sc);
534 
535 	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
536 		return;
537 
538 	link = sc->link;
539 	mii_tick(mii);
540 	if (sc->link && !link)
541 		awg_start_locked(sc);
542 
543 	callout_reset(&sc->stat_ch, hz, awg_tick, sc);
544 }
545 
/* Bit Reversal - http://aggregate.org/MAGIC/#Bit%20Reversal */
static uint32_t
bitrev32(uint32_t x)
{
	/* Swap progressively larger groups: bits, pairs, nibbles, bytes. */
	x = ((x >> 1) & 0x55555555) | ((x & 0x55555555) << 1);
	x = ((x >> 2) & 0x33333333) | ((x & 0x33333333) << 2);
	x = ((x >> 4) & 0x0f0f0f0f) | ((x & 0x0f0f0f0f) << 4);
	x = ((x >> 8) & 0x00ff00ff) | ((x & 0x00ff00ff) << 8);

	/* Finally exchange the two 16-bit halves. */
	return ((x >> 16) | (x << 16));
}
557 
/*
 * Program the unicast address, the 64-bit multicast hash table and the RX
 * frame filter from current interface state.  Called with the driver lock
 * held.  On a failed temporary allocation the filter registers are left
 * untouched (best-effort).
 */
static void
awg_setup_rxfilter(struct awg_softc *sc)
{
	uint32_t val, crc, hashreg, hashbit, hash[2], machi, maclo;
	int mc_count, mcnt, i;
	uint8_t *eaddr, *mta;
	if_t ifp;

	AWG_ASSERT_LOCKED(sc);

	ifp = sc->ifp;
	val = 0;
	hash[0] = hash[1] = 0;

	mc_count = if_multiaddr_count(ifp, -1);

	if (if_getflags(ifp) & IFF_PROMISC)
		val |= DIS_ADDR_FILTER;
	else if (if_getflags(ifp) & IFF_ALLMULTI) {
		val |= RX_ALL_MULTICAST;
		hash[0] = hash[1] = ~0;
	} else if (mc_count > 0) {
		val |= HASH_MULTICAST;

		/* Snapshot the multicast list into a temporary array. */
		mta = malloc(sizeof(unsigned char) * ETHER_ADDR_LEN * mc_count,
		    M_DEVBUF, M_NOWAIT);
		if (mta == NULL) {
			if_printf(ifp,
			    "failed to allocate temporary multicast list\n");
			return;
		}

		if_multiaddr_array(ifp, mta, &mcnt, mc_count);
		for (i = 0; i < mcnt; i++) {
			/*
			 * Hash index is derived from the bit-reversed,
			 * inverted CRC32.  NOTE(review): the & 0x7f mask
			 * applied before bitrev32() looks unusual — verify
			 * against the EMAC datasheet.
			 */
			crc = ether_crc32_le(mta + (i * ETHER_ADDR_LEN),
			    ETHER_ADDR_LEN) & 0x7f;
			crc = bitrev32(~crc) >> 26;
			hashreg = (crc >> 5);
			hashbit = (crc & 0x1f);
			hash[hashreg] |= (1 << hashbit);
		}

		free(mta, M_DEVBUF);
	}

	/* Write our unicast address */
	eaddr = IF_LLADDR(ifp);
	machi = (eaddr[5] << 8) | eaddr[4];
	maclo = (eaddr[3] << 24) | (eaddr[2] << 16) | (eaddr[1] << 8) |
	   (eaddr[0] << 0);
	WR4(sc, EMAC_ADDR_HIGH(0), machi);
	WR4(sc, EMAC_ADDR_LOW(0), maclo);

	/* Multicast hash filters (note the deliberately swapped halves) */
	WR4(sc, EMAC_RX_HASH_0, hash[1]);
	WR4(sc, EMAC_RX_HASH_1, hash[0]);

	/* RX frame filter config */
	WR4(sc, EMAC_RX_FRM_FLT, val);
}
618 
/*
 * Bring the interface up: program the RX filter and DMA parameters, enable
 * interrupts, the DMA engines and the MAC, then restart the PHY and the
 * housekeeping callout.  Called with the driver lock held; a no-op if the
 * interface is already running.
 */
static void
awg_init_locked(struct awg_softc *sc)
{
	struct mii_data *mii;
	uint32_t val;
	if_t ifp;

	mii = device_get_softc(sc->miibus);
	ifp = sc->ifp;

	AWG_ASSERT_LOCKED(sc);

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		return;

	awg_setup_rxfilter(sc);

	/* Configure DMA burst length and priorities */
	val = awg_burst_len << BASIC_CTL_BURST_LEN_SHIFT;
	if (awg_rx_tx_pri)
		val |= BASIC_CTL_RX_TX_PRI;
	WR4(sc, EMAC_BASIC_CTL_1, val);

	/* Enable interrupts */
	WR4(sc, EMAC_INT_EN, RX_INT_EN | TX_INT_EN | TX_BUF_UA_INT_EN);

	/* Enable transmit DMA */
	val = RD4(sc, EMAC_TX_CTL_1);
	WR4(sc, EMAC_TX_CTL_1, val | TX_DMA_EN | TX_MD);

	/* Enable receive DMA */
	val = RD4(sc, EMAC_RX_CTL_1);
	WR4(sc, EMAC_RX_CTL_1, val | RX_DMA_EN | RX_MD);

	/* Enable transmitter */
	val = RD4(sc, EMAC_TX_CTL_0);
	WR4(sc, EMAC_TX_CTL_0, val | TX_EN);

	/* Enable receiver */
	val = RD4(sc, EMAC_RX_CTL_0);
	WR4(sc, EMAC_RX_CTL_0, val | RX_EN | CHECK_CRC);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	/* Kick the PHY and start the 1 Hz awg_tick() callout. */
	mii_mediachg(mii);
	callout_reset(&sc->stat_ch, hz, awg_tick, sc);
}
666 
/* if_init entry point: run the locked initialization path. */
static void
awg_init(void *softc)
{
	struct awg_softc *sc = softc;

	AWG_LOCK(sc);
	awg_init_locked(sc);
	AWG_UNLOCK(sc);
}
678 
/*
 * Take the interface down: stop the housekeeping callout, halt and flush
 * the TX path, disable the MAC, interrupts and both DMA engines, then mark
 * the link down.  Called with the driver lock held.
 */
static void
awg_stop(struct awg_softc *sc)
{
	if_t ifp;
	uint32_t val;

	AWG_ASSERT_LOCKED(sc);

	ifp = sc->ifp;

	callout_stop(&sc->stat_ch);

	/* Stop transmit DMA and flush data in the TX FIFO */
	val = RD4(sc, EMAC_TX_CTL_1);
	val &= ~TX_DMA_EN;
	val |= FLUSH_TX_FIFO;
	WR4(sc, EMAC_TX_CTL_1, val);

	/* Disable transmitter */
	val = RD4(sc, EMAC_TX_CTL_0);
	WR4(sc, EMAC_TX_CTL_0, val & ~TX_EN);

	/* Disable receiver */
	val = RD4(sc, EMAC_RX_CTL_0);
	WR4(sc, EMAC_RX_CTL_0, val & ~RX_EN);

	/* Disable interrupts */
	WR4(sc, EMAC_INT_EN, 0);

	/* Disable transmit DMA */
	val = RD4(sc, EMAC_TX_CTL_1);
	WR4(sc, EMAC_TX_CTL_1, val & ~TX_DMA_EN);

	/* Disable receive DMA */
	val = RD4(sc, EMAC_RX_CTL_1);
	WR4(sc, EMAC_RX_CTL_1, val & ~RX_DMA_EN);

	sc->link = 0;

	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}
720 
/*
 * Drain completed RX descriptors: hand received frames to the network
 * stack (driver lock dropped around if_input()) and refill each slot with
 * a fresh mbuf cluster.  Called with the driver lock held.
 */
static void
awg_rxintr(struct awg_softc *sc)
{
	if_t ifp;
	struct mbuf *m, *m0;
	int error, index, len;
	uint32_t status;

	ifp = sc->ifp;

	bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (index = sc->rx.cur; ; index = RX_NEXT(index)) {
		status = le32toh(sc->rx.desc_ring[index].status);
		/* RX_DESC_CTL still set: this slot is not completed yet. */
		if ((status & RX_DESC_CTL) != 0)
			break;

		bus_dmamap_sync(sc->rx.buf_tag, sc->rx.buf_map[index].map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->rx.buf_tag, sc->rx.buf_map[index].map);

		len = (status & RX_FRM_LEN) >> RX_FRM_LEN_SHIFT;
		if (len != 0) {
			m = sc->rx.buf_map[index].mbuf;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = len;
			m->m_len = len;
			if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

			/* Translate hardware checksum status into mbuf flags. */
			if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0 &&
			    (status & RX_FRM_TYPE) != 0) {
				m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
				if ((status & RX_HEADER_ERR) == 0)
					m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
				if ((status & RX_PAYLOAD_ERR) == 0) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

			/* Pass the frame up without holding the driver lock. */
			AWG_UNLOCK(sc);
			if_input(ifp, m);
			AWG_LOCK(sc);
		}

		/*
		 * Refill the slot.  NOTE(review): when len == 0 the old
		 * mbuf is neither passed up nor freed before its pointer is
		 * overwritten by awg_setup_rxbuf() — verify this cannot leak.
		 */
		if ((m0 = awg_alloc_mbufcl(sc)) != NULL) {
			error = awg_setup_rxbuf(sc, index, m0);
			if (error != 0) {
				/* XXX hole in RX ring */
			}
		} else
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
	}

	if (index != sc->rx.cur) {
		bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map,
		    BUS_DMASYNC_PREWRITE);
	}

	sc->rx.cur = index;
}
784 
/*
 * Reclaim completed TX descriptors: free transmitted mbufs, clear the
 * descriptors (which shrinks tx.queued) and drop IFF_DRV_OACTIVE so
 * transmission can resume.  Called with the driver lock held.
 */
static void
awg_txintr(struct awg_softc *sc)
{
	struct awg_bufmap *bmap;
	struct emac_desc *desc;
	uint32_t status;
	if_t ifp;
	int i;

	AWG_ASSERT_LOCKED(sc);

	bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	ifp = sc->ifp;
	for (i = sc->tx.next; sc->tx.queued > 0; i = TX_NEXT(i)) {
		desc = &sc->tx.desc_ring[i];
		status = le32toh(desc->status);
		/* TX_DESC_CTL still set: transmission not finished yet. */
		if ((status & TX_DESC_CTL) != 0)
			break;
		bmap = &sc->tx.buf_map[i];
		/* Only a packet's first descriptor carries the mbuf pointer. */
		if (bmap->mbuf != NULL) {
			bus_dmamap_sync(sc->tx.buf_tag, bmap->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->tx.buf_tag, bmap->map);
			m_freem(bmap->mbuf);
			bmap->mbuf = NULL;
		}
		/* Clearing the slot also decrements tx.queued. */
		awg_setup_txdesc(sc, i, 0, 0, 0);
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
	}

	sc->tx.next = i;

	bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map,
	    BUS_DMASYNC_PREWRITE);
}
823 
824 static void
825 awg_intr(void *arg)
826 {
827 	struct awg_softc *sc;
828 	uint32_t val;
829 
830 	sc = arg;
831 
832 	AWG_LOCK(sc);
833 	val = RD4(sc, EMAC_INT_STA);
834 	WR4(sc, EMAC_INT_STA, val);
835 
836 	if (val & RX_INT)
837 		awg_rxintr(sc);
838 
839 	if (val & (TX_INT|TX_BUF_UA_INT)) {
840 		awg_txintr(sc);
841 		if (!if_sendq_empty(sc->ifp))
842 			awg_start_locked(sc);
843 	}
844 
845 	AWG_UNLOCK(sc);
846 }
847 
/*
 * Interface ioctl handler: interface flags, multicast list, media and
 * capability changes; everything else is forwarded to ether_ioctl().
 */
static int
awg_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct awg_softc *sc;
	struct mii_data *mii;
	struct ifreq *ifr;
	int flags, mask, error;

	sc = if_getsoftc(ifp);
	mii = device_get_softc(sc->miibus);
	ifr = (struct ifreq *)data;
	error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		AWG_LOCK(sc);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				/*
				 * Already running: only reprogram the RX
				 * filter when a filter-related flag changed.
				 */
				flags = if_getflags(ifp) ^ sc->if_flags;
				if ((flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0)
					awg_setup_rxfilter(sc);
			} else
				awg_init_locked(sc);
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				awg_stop(sc);
		}
		/* Remember the flags for the next delta computation. */
		sc->if_flags = if_getflags(ifp);
		AWG_UNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			AWG_LOCK(sc);
			awg_setup_rxfilter(sc);
			AWG_UNLOCK(sc);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;
	case SIOCSIFCAP:
		/* Toggle only the capabilities whose requested state changed. */
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
		if (mask & IFCAP_VLAN_MTU)
			if_togglecapenable(ifp, IFCAP_VLAN_MTU);
		if (mask & IFCAP_RXCSUM)
			if_togglecapenable(ifp, IFCAP_RXCSUM);
		if (mask & IFCAP_TXCSUM)
			if_togglecapenable(ifp, IFCAP_TXCSUM);
		/* Keep if_hwassist in sync with the checksum capabilities. */
		if ((if_getcapenable(ifp) & (IFCAP_RXCSUM|IFCAP_TXCSUM)) != 0)
			if_sethwassistbits(ifp, CSUM_IP, 0);
		else
			if_sethwassistbits(ifp, 0, CSUM_IP);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
910 
/*
 * Acquire and configure the external resources the controller depends on:
 * the AHB reset and clock, the TX clock (reparented according to the
 * phy-mode property), an optional PHY supply regulator, and the MDC clock
 * divide ratio derived from the AHB frequency.  Returns 0 on success or an
 * errno; on failure every resource acquired so far is released.
 */
static int
awg_setup_extres(device_t dev)
{
	struct awg_softc *sc;
	hwreset_t rst_ahb;
	clk_t clk_ahb, clk_tx, clk_tx_parent;
	regulator_t reg;
	const char *tx_parent_name;
	char *phy_type;
	phandle_t node;
	uint64_t freq;
	int error, div;

	sc = device_get_softc(dev);
	node = ofw_bus_get_node(dev);
	/* NULL-initialize everything so the fail path can release safely. */
	rst_ahb = NULL;
	clk_ahb = NULL;
	clk_tx = NULL;
	clk_tx_parent = NULL;
	reg = NULL;
	phy_type = NULL;

	/* Get AHB clock and reset resources */
	error = hwreset_get_by_ofw_name(dev, "ahb", &rst_ahb);
	if (error != 0) {
		device_printf(dev, "cannot get ahb reset\n");
		goto fail;
	}
	error = clk_get_by_ofw_name(dev, "ahb", &clk_ahb);
	if (error != 0) {
		device_printf(dev, "cannot get ahb clock\n");
		goto fail;
	}

	/* Configure PHY for MII or RGMII mode */
	if (OF_getprop_alloc(node, "phy-mode", 1, (void **)&phy_type)) {
		if (bootverbose)
			device_printf(dev, "PHY type: %s\n", phy_type);

		if (strcmp(phy_type, "rgmii") == 0)
			tx_parent_name = "emac_int_tx";
		else
			tx_parent_name = "mii_phy_tx";
		free(phy_type, M_OFWPROP);

		/* Get the TX clock */
		error = clk_get_by_ofw_name(dev, "tx", &clk_tx);
		if (error != 0) {
			device_printf(dev, "cannot get tx clock\n");
			goto fail;
		}

		/* Find the desired parent clock based on phy-mode property */
		error = clk_get_by_name(dev, tx_parent_name, &clk_tx_parent);
		if (error != 0) {
			device_printf(dev, "cannot get clock '%s'\n",
			    tx_parent_name);
			goto fail;
		}

		/* Set TX clock parent */
		error = clk_set_parent_by_clk(clk_tx, clk_tx_parent);
		if (error != 0) {
			device_printf(dev, "cannot set tx clock parent\n");
			goto fail;
		}

		/* Enable TX clock */
		error = clk_enable(clk_tx);
		if (error != 0) {
			device_printf(dev, "cannot enable tx clock\n");
			goto fail;
		}
	}

	/* Enable AHB clock */
	error = clk_enable(clk_ahb);
	if (error != 0) {
		device_printf(dev, "cannot enable ahb clock\n");
		goto fail;
	}

	/* De-assert reset */
	error = hwreset_deassert(rst_ahb);
	if (error != 0) {
		device_printf(dev, "cannot de-assert ahb reset\n");
		goto fail;
	}

	/* Enable PHY regulator if applicable */
	if (regulator_get_by_ofw_property(dev, "phy-supply", &reg) == 0) {
		error = regulator_enable(reg);
		if (error != 0) {
			device_printf(dev, "cannot enable PHY regulator\n");
			goto fail;
		}
	}

	/* Determine MDC clock divide ratio based on AHB clock */
	error = clk_get_freq(clk_ahb, &freq);
	if (error != 0) {
		device_printf(dev, "cannot get AHB clock frequency\n");
		goto fail;
	}
	/* Pick the smallest divider that brings MDC at or below MDIO_FREQ. */
	div = freq / MDIO_FREQ;
	if (div <= 16)
		sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_16;
	else if (div <= 32)
		sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_32;
	else if (div <= 64)
		sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_64;
	else if (div <= 128)
		sc->mdc_div_ratio_m = MDC_DIV_RATIO_M_128;
	else {
		device_printf(dev, "cannot determine MDC clock divide ratio\n");
		error = ENXIO;
		goto fail;
	}

	if (bootverbose)
		device_printf(dev, "AHB frequency %llu Hz, MDC div: 0x%x\n",
		    freq, sc->mdc_div_ratio_m);

	return (0);

fail:
	/* free(NULL) is a no-op; phy_type is cleared after its normal free. */
	free(phy_type, M_OFWPROP);

	if (reg != NULL)
		regulator_release(reg);
	if (clk_tx_parent != NULL)
		clk_release(clk_tx_parent);
	if (clk_tx != NULL)
		clk_release(clk_tx);
	if (clk_ahb != NULL)
		clk_release(clk_ahb);
	if (rst_ahb != NULL)
		hwreset_release(rst_ahb);
	return (error);
}
1051 
1052 static void
1053 awg_get_eaddr(device_t dev, uint8_t *eaddr)
1054 {
1055 	struct awg_softc *sc;
1056 	uint32_t maclo, machi, rnd;
1057 
1058 	sc = device_get_softc(dev);
1059 
1060 	machi = RD4(sc, EMAC_ADDR_HIGH(0)) & 0xffff;
1061 	maclo = RD4(sc, EMAC_ADDR_LOW(0));
1062 
1063 	if (maclo == 0xffffffff && machi == 0xffff) {
1064 		/* MAC address in hardware is invalid, create one */
1065 		rnd = arc4random();
1066 		maclo = 0x00f2 | (rnd & 0xffff0000);
1067 		machi = rnd & 0xffff;
1068 	}
1069 
1070 	eaddr[0] = maclo & 0xff;
1071 	eaddr[1] = (maclo >> 8) & 0xff;
1072 	eaddr[2] = (maclo >> 16) & 0xff;
1073 	eaddr[3] = (maclo >> 24) & 0xff;
1074 	eaddr[4] = machi & 0xff;
1075 	eaddr[5] = (machi >> 8) & 0xff;
1076 }
1077 
#ifdef AWG_DEBUG
/* Debug helper: print every interesting EMAC register with its name. */
static void
awg_dump_regs(device_t dev)
{
	/* Name/offset table of the registers worth dumping. */
	static const struct {
		const char *name;
		u_int reg;
	} regs[] = {
		{ "BASIC_CTL_0", EMAC_BASIC_CTL_0 },
		{ "BASIC_CTL_1", EMAC_BASIC_CTL_1 },
		{ "INT_STA", EMAC_INT_STA },
		{ "INT_EN", EMAC_INT_EN },
		{ "TX_CTL_0", EMAC_TX_CTL_0 },
		{ "TX_CTL_1", EMAC_TX_CTL_1 },
		{ "TX_FLOW_CTL", EMAC_TX_FLOW_CTL },
		{ "TX_DMA_LIST", EMAC_TX_DMA_LIST },
		{ "RX_CTL_0", EMAC_RX_CTL_0 },
		{ "RX_CTL_1", EMAC_RX_CTL_1 },
		{ "RX_DMA_LIST", EMAC_RX_DMA_LIST },
		{ "RX_FRM_FLT", EMAC_RX_FRM_FLT },
		{ "RX_HASH_0", EMAC_RX_HASH_0 },
		{ "RX_HASH_1", EMAC_RX_HASH_1 },
		{ "MII_CMD", EMAC_MII_CMD },
		{ "ADDR_HIGH0", EMAC_ADDR_HIGH(0) },
		{ "ADDR_LOW0", EMAC_ADDR_LOW(0) },
		{ "TX_DMA_STA", EMAC_TX_DMA_STA },
		{ "TX_DMA_CUR_DESC", EMAC_TX_DMA_CUR_DESC },
		{ "TX_DMA_CUR_BUF", EMAC_TX_DMA_CUR_BUF },
		{ "RX_DMA_STA", EMAC_RX_DMA_STA },
		{ "RX_DMA_CUR_DESC", EMAC_RX_DMA_CUR_DESC },
		{ "RX_DMA_CUR_BUF", EMAC_RX_DMA_CUR_BUF },
		{ "RGMII_STA", EMAC_RGMII_STA },
	};
	struct awg_softc *sc = device_get_softc(dev);
	unsigned int i;

	for (i = 0; i < nitems(regs); i++)
		device_printf(dev, "  %-20s %08x\n", regs[i].name,
		    RD4(sc, regs[i].reg));
}
#endif
1121 
1122 static int
1123 awg_reset(device_t dev)
1124 {
1125 	struct awg_softc *sc;
1126 	int retry;
1127 
1128 	sc = device_get_softc(dev);
1129 
1130 	/* Soft reset all registers and logic */
1131 	WR4(sc, EMAC_BASIC_CTL_1, BASIC_CTL_SOFT_RST);
1132 
1133 	/* Wait for soft reset bit to self-clear */
1134 	for (retry = SOFT_RST_RETRY; retry > 0; retry--) {
1135 		if ((RD4(sc, EMAC_BASIC_CTL_1) & BASIC_CTL_SOFT_RST) == 0)
1136 			break;
1137 		DELAY(10);
1138 	}
1139 	if (retry == 0) {
1140 		device_printf(dev, "soft reset timed out\n");
1141 #ifdef AWG_DEBUG
1142 		awg_dump_regs(dev);
1143 #endif
1144 		return (ETIMEDOUT);
1145 	}
1146 
1147 	return (0);
1148 }
1149 
1150 static void
1151 awg_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1152 {
1153 	if (error != 0)
1154 		return;
1155 	*(bus_addr_t *)arg = segs[0].ds_addr;
1156 }
1157 
/*
 * Allocate and initialize the TX and RX DMA descriptor rings and buffer
 * maps, pre-populate the RX ring with mbuf clusters, and program the
 * controller's descriptor list base address registers.
 *
 * Returns 0 on success, or a busdma errno / ENOMEM on failure.
 *
 * NOTE(review): on failure, tags/maps/memory allocated earlier in this
 * function are not released here — presumably cleaned up (or leaked) by
 * the attach failure path; confirm against the rest of the driver.
 */
static int
awg_setup_dma(device_t dev)
{
	struct awg_softc *sc;
	struct mbuf *m;
	int error, i;

	sc = device_get_softc(dev);

	/* Setup TX ring */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    DESC_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    TX_DESC_SIZE, 1,		/* maxsize, nsegs */
	    TX_DESC_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->tx.desc_tag);
	if (error != 0) {
		device_printf(dev, "cannot create TX descriptor ring tag\n");
		return (error);
	}

	/* Descriptor memory is coherent: no explicit syncs needed for TX ring. */
	error = bus_dmamem_alloc(sc->tx.desc_tag, (void **)&sc->tx.desc_ring,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->tx.desc_map);
	if (error != 0) {
		device_printf(dev, "cannot allocate TX descriptor ring\n");
		return (error);
	}

	error = bus_dmamap_load(sc->tx.desc_tag, sc->tx.desc_map,
	    sc->tx.desc_ring, TX_DESC_SIZE, awg_dmamap_cb,
	    &sc->tx.desc_ring_paddr, 0);
	if (error != 0) {
		device_printf(dev, "cannot load TX descriptor ring\n");
		return (error);
	}

	/* Chain each TX descriptor to the next, forming a circular ring. */
	for (i = 0; i < TX_DESC_COUNT; i++)
		sc->tx.desc_ring[i].next =
		    htole32(sc->tx.desc_ring_paddr + DESC_OFF(TX_NEXT(i)));

	/* Tag for TX mbuf chains: up to TX_MAX_SEGS segments per packet. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, TX_MAX_SEGS,	/* maxsize, nsegs */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->tx.buf_tag);
	if (error != 0) {
		device_printf(dev, "cannot create TX buffer tag\n");
		return (error);
	}

	/*
	 * NOTE(review): queued starts at TX_DESC_COUNT even though no
	 * packets are pending — presumably reset/consumed by the TX
	 * start/interrupt paths; confirm against awg_start/awg_txintr.
	 */
	sc->tx.queued = TX_DESC_COUNT;
	for (i = 0; i < TX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->tx.buf_tag, 0,
		    &sc->tx.buf_map[i].map);
		if (error != 0) {
			device_printf(dev, "cannot create TX buffer map\n");
			return (error);
		}
		/* Initialize each slot as an empty descriptor. */
		awg_setup_txdesc(sc, i, 0, 0, 0);
	}

	/* Setup RX ring */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    DESC_ALIGN, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    RX_DESC_SIZE, 1,		/* maxsize, nsegs */
	    RX_DESC_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rx.desc_tag);
	if (error != 0) {
		device_printf(dev, "cannot create RX descriptor ring tag\n");
		return (error);
	}

	error = bus_dmamem_alloc(sc->rx.desc_tag, (void **)&sc->rx.desc_ring,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->rx.desc_map);
	if (error != 0) {
		device_printf(dev, "cannot allocate RX descriptor ring\n");
		return (error);
	}

	error = bus_dmamap_load(sc->rx.desc_tag, sc->rx.desc_map,
	    sc->rx.desc_ring, RX_DESC_SIZE, awg_dmamap_cb,
	    &sc->rx.desc_ring_paddr, 0);
	if (error != 0) {
		device_printf(dev, "cannot load RX descriptor ring\n");
		return (error);
	}

	/* Tag for RX buffers: one mbuf cluster (single segment) per slot. */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* Parent tag */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, 1,		/* maxsize, nsegs */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rx.buf_tag);
	if (error != 0) {
		device_printf(dev, "cannot create RX buffer tag\n");
		return (error);
	}

	/* Attach a freshly-allocated mbuf cluster to every RX descriptor. */
	for (i = 0; i < RX_DESC_COUNT; i++) {
		error = bus_dmamap_create(sc->rx.buf_tag, 0,
		    &sc->rx.buf_map[i].map);
		if (error != 0) {
			device_printf(dev, "cannot create RX buffer map\n");
			return (error);
		}
		if ((m = awg_alloc_mbufcl(sc)) == NULL) {
			device_printf(dev, "cannot allocate RX mbuf\n");
			return (ENOMEM);
		}
		error = awg_setup_rxbuf(sc, i, m);
		if (error != 0) {
			device_printf(dev, "cannot create RX buffer\n");
			return (error);
		}
	}
	/* Flush the initialized RX descriptors before hardware sees them. */
	bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map,
	    BUS_DMASYNC_PREWRITE);

	/* Write transmit and receive descriptor base address registers */
	WR4(sc, EMAC_TX_DMA_LIST, sc->tx.desc_ring_paddr);
	WR4(sc, EMAC_RX_DMA_LIST, sc->rx.desc_ring_paddr);

	return (0);
}
1304 
1305 static int
1306 awg_probe(device_t dev)
1307 {
1308 	if (!ofw_bus_status_okay(dev))
1309 		return (ENXIO);
1310 
1311 	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
1312 		return (ENXIO);
1313 
1314 	device_set_desc(dev, "Allwinner Gigabit Ethernet");
1315 	return (BUS_PROBE_DEFAULT);
1316 }
1317 
1318 static int
1319 awg_attach(device_t dev)
1320 {
1321 	uint8_t eaddr[ETHER_ADDR_LEN];
1322 	struct awg_softc *sc;
1323 	phandle_t node;
1324 	int error;
1325 
1326 	sc = device_get_softc(dev);
1327 	node = ofw_bus_get_node(dev);
1328 
1329 	if (bus_alloc_resources(dev, awg_spec, sc->res) != 0) {
1330 		device_printf(dev, "cannot allocate resources for device\n");
1331 		return (ENXIO);
1332 	}
1333 
1334 	mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF);
1335 	callout_init_mtx(&sc->stat_ch, &sc->mtx, 0);
1336 	TASK_INIT(&sc->link_task, 0, awg_link_task, sc);
1337 
1338 	/* Setup clocks and regulators */
1339 	error = awg_setup_extres(dev);
1340 	if (error != 0)
1341 		return (error);
1342 
1343 	/* Read MAC address before resetting the chip */
1344 	awg_get_eaddr(dev, eaddr);
1345 
1346 	/* Soft reset EMAC core */
1347 	error = awg_reset(dev);
1348 	if (error != 0)
1349 		return (error);
1350 
1351 	/* Setup DMA descriptors */
1352 	error = awg_setup_dma(dev);
1353 	if (error != 0)
1354 		return (error);
1355 
1356 	/* Install interrupt handler */
1357 	error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
1358 	    NULL, awg_intr, sc, &sc->ih);
1359 	if (error != 0) {
1360 		device_printf(dev, "cannot setup interrupt handler\n");
1361 		return (error);
1362 	}
1363 
1364 	/* Setup ethernet interface */
1365 	sc->ifp = if_alloc(IFT_ETHER);
1366 	if_setsoftc(sc->ifp, sc);
1367 	if_initname(sc->ifp, device_get_name(dev), device_get_unit(dev));
1368 	if_setflags(sc->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
1369 	if_setstartfn(sc->ifp, awg_start);
1370 	if_setioctlfn(sc->ifp, awg_ioctl);
1371 	if_setinitfn(sc->ifp, awg_init);
1372 	if_setsendqlen(sc->ifp, TX_DESC_COUNT - 1);
1373 	if_setsendqready(sc->ifp);
1374 	if_sethwassist(sc->ifp, CSUM_IP | CSUM_UDP | CSUM_TCP);
1375 	if_setcapabilities(sc->ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM);
1376 	if_setcapenable(sc->ifp, if_getcapabilities(sc->ifp));
1377 
1378 	/* Attach MII driver */
1379 	error = mii_attach(dev, &sc->miibus, sc->ifp, awg_media_change,
1380 	    awg_media_status, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY,
1381 	    MIIF_DOPAUSE);
1382 	if (error != 0) {
1383 		device_printf(dev, "cannot attach PHY\n");
1384 		return (error);
1385 	}
1386 
1387 	/* Attach ethernet interface */
1388 	ether_ifattach(sc->ifp, eaddr);
1389 
1390 	return (0);
1391 }
1392 
/* newbus/miibus method dispatch table for the awg driver. */
static device_method_t awg_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		awg_probe),
	DEVMETHOD(device_attach,	awg_attach),
	/*
	 * NOTE(review): no device_detach method — the driver cannot be
	 * unloaded cleanly; confirm this is intentional.
	 */

	/* MII interface */
	DEVMETHOD(miibus_readreg,	awg_miibus_readreg),
	DEVMETHOD(miibus_writereg,	awg_miibus_writereg),
	DEVMETHOD(miibus_statchg,	awg_miibus_statchg),

	DEVMETHOD_END
};
1405 
/* newbus driver description: name, method table, and softc size. */
static driver_t awg_driver = {
	"awg",
	awg_methods,
	sizeof(struct awg_softc),
};
1411 
1412 static devclass_t awg_devclass;
1413 
1414 DRIVER_MODULE(awg, simplebus, awg_driver, awg_devclass, 0, 0);
1415 DRIVER_MODULE(miibus, awg, miibus_driver, miibus_devclass, 0, 0);
1416 
1417 MODULE_DEPEND(awg, ether, 1, 1, 1);
1418 MODULE_DEPEND(awg, miibus, 1, 1, 1);
1419