/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2022 Soren Schmidt <sos@deepcore.dk>
 * Copyright (c) 2022 Jared McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: eqos.c 1059 2022-12-08 19:32:32Z sos $
 */

/*
 * DesignWare Ethernet Quality-of-Service controller
 */

#include "opt_platform.h"
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/endian.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <machine/bus.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/bpf.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include "miibus_if.h"
#include "if_eqos_if.h"

#ifdef FDT
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/extres/clk/clk.h>
#endif

#include <dev/eqos/if_eqos_reg.h>
#include <dev/eqos/if_eqos_var.h>

#define	DESC_BOUNDARY		(1ULL << 32)
#define	DESC_ALIGN		sizeof(struct eqos_dma_desc)
#define	DESC_OFFSET(n)		((n) * sizeof(struct eqos_dma_desc))

#define	TX_DESC_COUNT		EQOS_DMA_DESC_COUNT
#define	TX_DESC_SIZE		(TX_DESC_COUNT * DESC_ALIGN)
#define	TX_MAX_SEGS		(TX_DESC_COUNT / 2)
#define	TX_NEXT(n)		(((n) + 1) % TX_DESC_COUNT)
#define	TX_QUEUED(h, t)		((((h) - (t)) + TX_DESC_COUNT) % TX_DESC_COUNT)

#define	RX_DESC_COUNT		EQOS_DMA_DESC_COUNT
#define	RX_DESC_SIZE		(RX_DESC_COUNT * DESC_ALIGN)
#define	RX_NEXT(n)		(((n) + 1) % RX_DESC_COUNT)

#define	MII_BUSY_RETRY		1000
#define	WATCHDOG_TIMEOUT_SECS	3

#define	EQOS_LOCK(sc)		mtx_lock(&(sc)->lock)
#define	EQOS_UNLOCK(sc)		mtx_unlock(&(sc)->lock)
#define	EQOS_ASSERT_LOCKED(sc)	mtx_assert(&(sc)->lock, MA_OWNED)

#define	RD4(sc, o)		bus_read_4(sc->res[EQOS_RES_MEM], (o))
#define	WR4(sc, o, v)		bus_write_4(sc->res[EQOS_RES_MEM], (o), (v))


static struct resource_spec eqos_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ -1, 0 }
};

static void eqos_tick(void *softc);


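/*
 * Read a PHY register over MDIO: program MAC_MDIO_ADDRESS with the
 * PHY/register address and the GOC_READ command, then poll until the
 * GB (busy) bit clears before fetching the result from MAC_MDIO_DATA.
 */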
static int
eqos_miibus_readreg(device_t dev, int phy, int reg)
{
	struct eqos_softc *sc = device_get_softc(dev);
	uint32_t addr;
	int retry, val;

	addr = sc->csr_clock_range |
	    (phy << GMAC_MAC_MDIO_ADDRESS_PA_SHIFT) |
	    (reg << GMAC_MAC_MDIO_ADDRESS_RDA_SHIFT) |
	    GMAC_MAC_MDIO_ADDRESS_GOC_READ | GMAC_MAC_MDIO_ADDRESS_GB;
	WR4(sc, GMAC_MAC_MDIO_ADDRESS, addr);

	DELAY(100);

	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		addr = RD4(sc, GMAC_MAC_MDIO_ADDRESS);
		if (!(addr & GMAC_MAC_MDIO_ADDRESS_GB)) {
			val = RD4(sc, GMAC_MAC_MDIO_DATA) & 0xFFFF;
			break;
		}
		DELAY(10);
	}
	if (!retry) {
		device_printf(dev, "phy read timeout, phy=%d reg=%d\n",
		    phy, reg);
		return (ETIMEDOUT);
	}
	return (val);
}

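/*
 * Write a PHY register over MDIO: stage the value in MAC_MDIO_DATA,
 * issue a GOC_WRITE command, and poll until the GB (busy) bit clears.
 */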
static int
eqos_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct eqos_softc *sc = device_get_softc(dev);
	uint32_t addr;
	int retry;

	WR4(sc, GMAC_MAC_MDIO_DATA, val);

	addr = sc->csr_clock_range |
	    (phy << GMAC_MAC_MDIO_ADDRESS_PA_SHIFT) |
	    (reg << GMAC_MAC_MDIO_ADDRESS_RDA_SHIFT) |
	    GMAC_MAC_MDIO_ADDRESS_GOC_WRITE | GMAC_MAC_MDIO_ADDRESS_GB;
	WR4(sc, GMAC_MAC_MDIO_ADDRESS, addr);

	DELAY(100);

	for (retry = MII_BUSY_RETRY; retry > 0; retry--) {
		addr = RD4(sc, GMAC_MAC_MDIO_ADDRESS);
		if (!(addr & GMAC_MAC_MDIO_ADDRESS_GB))
			break;
		DELAY(10);
	}
	if (!retry) {
		device_printf(dev, "phy write timeout, phy=%d reg=%d\n",
		    phy, reg);
		return (ETIMEDOUT);
	}
	return (0);
}

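/*
 * Link-state callback from miibus: translate the negotiated media into
 * the MAC_CONFIGURATION port-select (PS), fast-ethernet (FES) and
 * duplex-mode (DM) bits, then let the platform layer adjust clocking.
 */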
static void
eqos_miibus_statchg(device_t dev)
{
	struct eqos_softc *sc = device_get_softc(dev);
	struct mii_data *mii = device_get_softc(sc->miibus);
	uint32_t reg;

	EQOS_ASSERT_LOCKED(sc);

	if (mii->mii_media_status & IFM_ACTIVE)
		sc->link_up = true;
	else
		sc->link_up = false;

	reg = RD4(sc, GMAC_MAC_CONFIGURATION);

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_10_T:
		reg |= GMAC_MAC_CONFIGURATION_PS;
		reg &= ~GMAC_MAC_CONFIGURATION_FES;
		break;
	case IFM_100_TX:
		reg |= GMAC_MAC_CONFIGURATION_PS;
		reg |= GMAC_MAC_CONFIGURATION_FES;
		break;
	case IFM_1000_T:
	case IFM_1000_SX:
		reg &= ~GMAC_MAC_CONFIGURATION_PS;
		reg &= ~GMAC_MAC_CONFIGURATION_FES;
		break;
	case IFM_2500_T:
	case IFM_2500_SX:
		reg &= ~GMAC_MAC_CONFIGURATION_PS;
		reg |= GMAC_MAC_CONFIGURATION_FES;
		break;
	default:
		sc->link_up = false;
		return;
	}

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX))
		reg |= GMAC_MAC_CONFIGURATION_DM;
	else
		reg &= ~GMAC_MAC_CONFIGURATION_DM;

	WR4(sc, GMAC_MAC_CONFIGURATION, reg);

	IF_EQOS_SET_SPEED(dev, IFM_SUBTYPE(mii->mii_media_active));

	WR4(sc, GMAC_MAC_1US_TIC_COUNTER, (sc->csr_clock / 1000000) - 1);
}

static void
eqos_media_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct eqos_softc *sc = if_getsoftc(ifp);
	struct mii_data *mii = device_get_softc(sc->miibus);

	EQOS_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
	EQOS_UNLOCK(sc);
}

static int
eqos_media_change(if_t ifp)
{
	struct eqos_softc *sc = if_getsoftc(ifp);
	int error;

	EQOS_LOCK(sc);
	error = mii_mediachg(device_get_softc(sc->miibus));
	EQOS_UNLOCK(sc);
	return (error);
}

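/*
 * Fill in one TX DMA descriptor.  des0/des1 carry the 64-bit buffer
 * address, des2 the segment length (plus IOC on the final segment of a
 * packet so the DMA engine raises a completion interrupt), and des3
 * the flags and total packet length.
 */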
static void
eqos_setup_txdesc(struct eqos_softc *sc, int index, int flags,
    bus_addr_t paddr, u_int len, u_int total_len)
{
	uint32_t tdes2, tdes3;

	if (!paddr || !len) {
		tdes2 = 0;
		tdes3 = flags;
	} else {
		tdes2 = (flags & EQOS_TDES3_LD) ? EQOS_TDES2_IOC : 0;
		tdes3 = flags;
	}
	bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map, BUS_DMASYNC_PREWRITE);
	sc->tx.desc_ring[index].des0 = htole32((uint32_t)paddr);
	sc->tx.desc_ring[index].des1 = htole32((uint32_t)(paddr >> 32));
	sc->tx.desc_ring[index].des2 = htole32(tdes2 | len);
	sc->tx.desc_ring[index].des3 = htole32(tdes3 | total_len);
}

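/*
 * Map an outgoing mbuf chain for DMA and post one descriptor per
 * segment.  If the chain has too many segments it is defragmented
 * first; FD marks the first descriptor, LD the last, and OWN on the
 * first descriptor is deferred until all the others are in place.
 */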
static int
eqos_setup_txbuf(struct eqos_softc *sc, struct mbuf *m)
{
	bus_dma_segment_t segs[TX_MAX_SEGS];
	int first = sc->tx.head;
	int error, nsegs, idx;
	uint32_t flags;

	error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag,
	    sc->tx.buf_map[first].map, m, segs, &nsegs, 0);
	if (error == EFBIG) {
		struct mbuf *mb;

		device_printf(sc->dev, "TX packet too big, trying defrag\n");
		bus_dmamap_unload(sc->tx.buf_tag, sc->tx.buf_map[first].map);
		if (!(mb = m_defrag(m, M_NOWAIT)))
			return (ENOMEM);
		m = mb;
		error = bus_dmamap_load_mbuf_sg(sc->tx.buf_tag,
		    sc->tx.buf_map[first].map, m, segs, &nsegs, 0);
	}
	if (error)
		return (ENOMEM);

	if (TX_QUEUED(sc->tx.head, sc->tx.tail) + nsegs > TX_DESC_COUNT) {
		bus_dmamap_unload(sc->tx.buf_tag, sc->tx.buf_map[first].map);
		device_printf(sc->dev, "no more TX queue space\n");
		return (ENOMEM);
	}

	bus_dmamap_sync(sc->tx.buf_tag, sc->tx.buf_map[first].map,
	    BUS_DMASYNC_PREWRITE);

	sc->tx.buf_map[first].mbuf = m;

	for (flags = EQOS_TDES3_FD, idx = 0; idx < nsegs; idx++) {
		if (idx == (nsegs - 1))
			flags |= EQOS_TDES3_LD;
		eqos_setup_txdesc(sc, sc->tx.head, flags, segs[idx].ds_addr,
		    segs[idx].ds_len, m->m_pkthdr.len);
		flags &= ~EQOS_TDES3_FD;
		flags |= EQOS_TDES3_OWN;
		sc->tx.head = TX_NEXT(sc->tx.head);
	}

	/*
	 * Defer setting the OWN bit on the first descriptor
	 * until all descriptors have been updated.
	 */
	bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map, BUS_DMASYNC_PREWRITE);
	sc->tx.desc_ring[first].des3 |= htole32(EQOS_TDES3_OWN);

	return (0);
}

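/*
 * Hand an RX descriptor (back) to the hardware: store the buffer
 * address, then set OWN together with IOC and BUF1V in des3 so the
 * DMA engine may fill the buffer and interrupt on completion.
 */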
static void
eqos_setup_rxdesc(struct eqos_softc *sc, int index, bus_addr_t paddr)
{

	sc->rx.desc_ring[index].des0 = htole32((uint32_t)paddr);
	sc->rx.desc_ring[index].des1 = htole32((uint32_t)(paddr >> 32));
	sc->rx.desc_ring[index].des2 = htole32(0);
	bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map, BUS_DMASYNC_PREWRITE);
	sc->rx.desc_ring[index].des3 = htole32(EQOS_RDES3_OWN | EQOS_RDES3_IOC |
	    EQOS_RDES3_BUF1V);
}

static int
eqos_setup_rxbuf(struct eqos_softc *sc, int index, struct mbuf *m)
{
	struct bus_dma_segment seg;
	int error, nsegs;

	m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_sg(sc->rx.buf_tag,
	    sc->rx.buf_map[index].map, m, &seg, &nsegs, 0);
	if (error)
		return (error);

	bus_dmamap_sync(sc->rx.buf_tag, sc->rx.buf_map[index].map,
	    BUS_DMASYNC_PREREAD);

	sc->rx.buf_map[index].mbuf = m;
	eqos_setup_rxdesc(sc, index, seg.ds_addr);

	return (0);
}

static struct mbuf *
eqos_alloc_mbufcl(struct eqos_softc *sc)
{
	struct mbuf *m;

	if ((m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR)))
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
	return (m);
}

static void
eqos_enable_intr(struct eqos_softc *sc)
{

	WR4(sc, GMAC_DMA_CHAN0_INTR_ENABLE,
	    GMAC_DMA_CHAN0_INTR_ENABLE_NIE | GMAC_DMA_CHAN0_INTR_ENABLE_AIE |
	    GMAC_DMA_CHAN0_INTR_ENABLE_FBE | GMAC_DMA_CHAN0_INTR_ENABLE_RIE |
	    GMAC_DMA_CHAN0_INTR_ENABLE_TIE);
}

static void
eqos_disable_intr(struct eqos_softc *sc)
{

	WR4(sc, GMAC_DMA_CHAN0_INTR_ENABLE, 0);
}

static uint32_t
eqos_bitrev32(uint32_t x)
{

	x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1));
	x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2));
	x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4));
	x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8));
	return ((x >> 16) | (x << 16));
}

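/*
 * if_foreach_llmaddr() callback: hash one multicast address.  The low
 * CRC32 bits of the address are complemented and bit-reversed to form
 * a 6-bit index into the 64-bit hash table held in two registers.
 */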
static u_int
eqos_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
	uint32_t crc, *hash = arg;

	crc = ether_crc32_le(LLADDR(sdl), ETHER_ADDR_LEN);
	crc &= 0x7f;
	crc = eqos_bitrev32(~crc) >> 26;
	hash[crc >> 5] |= 1 << (crc & 0x1f);
	return (1);
}

static void
eqos_setup_rxfilter(struct eqos_softc *sc)
{
	if_t ifp = sc->ifp;
	uint32_t pfil, hash[2];
	const uint8_t *eaddr;
	uint32_t val;

	EQOS_ASSERT_LOCKED(sc);

	pfil = RD4(sc, GMAC_MAC_PACKET_FILTER);
	pfil &= ~(GMAC_MAC_PACKET_FILTER_PR |
	    GMAC_MAC_PACKET_FILTER_PM |
	    GMAC_MAC_PACKET_FILTER_HMC |
	    GMAC_MAC_PACKET_FILTER_PCF_MASK);
	hash[0] = hash[1] = 0xffffffff;

	if ((if_getflags(ifp) & IFF_PROMISC)) {
		pfil |= GMAC_MAC_PACKET_FILTER_PR |
		    GMAC_MAC_PACKET_FILTER_PCF_ALL;
	} else if ((if_getflags(ifp) & IFF_ALLMULTI)) {
		pfil |= GMAC_MAC_PACKET_FILTER_PM;
	} else {
		hash[0] = hash[1] = 0;
		pfil |= GMAC_MAC_PACKET_FILTER_HMC;
		if_foreach_llmaddr(ifp, eqos_hash_maddr, hash);
	}

	/* Write our unicast address */
	eaddr = if_getlladdr(ifp);
	val = eaddr[4] | (eaddr[5] << 8);
	WR4(sc, GMAC_MAC_ADDRESS0_HIGH, val);
	val = eaddr[0] | (eaddr[1] << 8) | (eaddr[2] << 16) |
	    (eaddr[3] << 24);
	WR4(sc, GMAC_MAC_ADDRESS0_LOW, val);

	/* Multicast hash filters */
	WR4(sc, GMAC_MAC_HASH_TABLE_REG0, hash[1]);
	WR4(sc, GMAC_MAC_HASH_TABLE_REG1, hash[0]);

	/* Packet filter config */
	WR4(sc, GMAC_MAC_PACKET_FILTER, pfil);
}

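/*
 * Soft-reset the controller by setting the SWR bit and waiting up to
 * two seconds for the hardware to clear it again.
 */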
static int
eqos_reset(struct eqos_softc *sc)
{
	uint32_t val;
	int retry;

	WR4(sc, GMAC_DMA_MODE, GMAC_DMA_MODE_SWR);
	for (retry = 2000; retry > 0; retry--) {
		DELAY(1000);
		val = RD4(sc, GMAC_DMA_MODE);
		if (!(val & GMAC_DMA_MODE_SWR))
			return (0);
	}
	return (ETIMEDOUT);
}

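/*
 * Program the TX and RX descriptor ring base addresses and lengths
 * into the channel 0 DMA registers.
 */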
static void
eqos_init_rings(struct eqos_softc *sc)
{

	WR4(sc, GMAC_DMA_CHAN0_TX_BASE_ADDR_HI,
	    (uint32_t)(sc->tx.desc_ring_paddr >> 32));
	WR4(sc, GMAC_DMA_CHAN0_TX_BASE_ADDR,
	    (uint32_t)sc->tx.desc_ring_paddr);
	WR4(sc, GMAC_DMA_CHAN0_TX_RING_LEN, TX_DESC_COUNT - 1);

	WR4(sc, GMAC_DMA_CHAN0_RX_BASE_ADDR_HI,
	    (uint32_t)(sc->rx.desc_ring_paddr >> 32));
	WR4(sc, GMAC_DMA_CHAN0_RX_BASE_ADDR,
	    (uint32_t)sc->rx.desc_ring_paddr);
	WR4(sc, GMAC_DMA_CHAN0_RX_RING_LEN, RX_DESC_COUNT - 1);

	WR4(sc, GMAC_DMA_CHAN0_RX_END_ADDR,
	    (uint32_t)sc->rx.desc_ring_paddr + DESC_OFFSET(RX_DESC_COUNT));
}

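/*
 * ifnet init callback: bring the interface up.  Rings, the RX filter
 * and DMA are set up, MTL queue modes and flow control configured,
 * then the transmitter and receiver are enabled and the tick callout
 * started.
 */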
static void
eqos_init(void *if_softc)
{
	struct eqos_softc *sc = if_softc;
	if_t ifp = sc->ifp;
	struct mii_data *mii = device_get_softc(sc->miibus);
	uint32_t val;

	if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
		return;

	EQOS_LOCK(sc);

	eqos_init_rings(sc);

	eqos_setup_rxfilter(sc);

	WR4(sc, GMAC_MAC_1US_TIC_COUNTER, (sc->csr_clock / 1000000) - 1);

	/* Enable transmit and receive DMA */
	val = RD4(sc, GMAC_DMA_CHAN0_CONTROL);
	val &= ~GMAC_DMA_CHAN0_CONTROL_DSL_MASK;
	val |= ((DESC_ALIGN - 16) / 8) << GMAC_DMA_CHAN0_CONTROL_DSL_SHIFT;
	val |= GMAC_DMA_CHAN0_CONTROL_PBLX8;
	WR4(sc, GMAC_DMA_CHAN0_CONTROL, val);
	val = RD4(sc, GMAC_DMA_CHAN0_TX_CONTROL);
	val |= GMAC_DMA_CHAN0_TX_CONTROL_OSP;
	val |= GMAC_DMA_CHAN0_TX_CONTROL_START;
	WR4(sc, GMAC_DMA_CHAN0_TX_CONTROL, val);
	val = RD4(sc, GMAC_DMA_CHAN0_RX_CONTROL);
	val &= ~GMAC_DMA_CHAN0_RX_CONTROL_RBSZ_MASK;
	val |= (MCLBYTES << GMAC_DMA_CHAN0_RX_CONTROL_RBSZ_SHIFT);
	val |= GMAC_DMA_CHAN0_RX_CONTROL_START;
	WR4(sc, GMAC_DMA_CHAN0_RX_CONTROL, val);

	/* Disable counters */
	WR4(sc, GMAC_MMC_CONTROL,
	    GMAC_MMC_CONTROL_CNTFREEZ |
	    GMAC_MMC_CONTROL_CNTPRST |
	    GMAC_MMC_CONTROL_CNTPRSTLVL);

	/* Configure operation modes */
	WR4(sc, GMAC_MTL_TXQ0_OPERATION_MODE,
	    GMAC_MTL_TXQ0_OPERATION_MODE_TSF |
	    GMAC_MTL_TXQ0_OPERATION_MODE_TXQEN_EN);
	WR4(sc, GMAC_MTL_RXQ0_OPERATION_MODE,
	    GMAC_MTL_RXQ0_OPERATION_MODE_RSF |
	    GMAC_MTL_RXQ0_OPERATION_MODE_FEP |
	    GMAC_MTL_RXQ0_OPERATION_MODE_FUP);

	/* Enable flow control */
	val = RD4(sc, GMAC_MAC_Q0_TX_FLOW_CTRL);
	val |= 0xFFFFU << GMAC_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT;
	val |= GMAC_MAC_Q0_TX_FLOW_CTRL_TFE;
	WR4(sc, GMAC_MAC_Q0_TX_FLOW_CTRL, val);
	val = RD4(sc, GMAC_MAC_RX_FLOW_CTRL);
	val |= GMAC_MAC_RX_FLOW_CTRL_RFE;
	WR4(sc, GMAC_MAC_RX_FLOW_CTRL, val);

	/* Set RX queue mode; the queue must be in DCB mode */
	WR4(sc, GMAC_RXQ_CTRL0, (GMAC_RXQ_CTRL0_EN_MASK << 16) |
	    GMAC_RXQ_CTRL0_EN_DCB);

	/* Enable transmitter and receiver */
	val = RD4(sc, GMAC_MAC_CONFIGURATION);
	val |= GMAC_MAC_CONFIGURATION_BE;
	val |= GMAC_MAC_CONFIGURATION_JD;
	val |= GMAC_MAC_CONFIGURATION_JE;
	val |= GMAC_MAC_CONFIGURATION_DCRS;
	val |= GMAC_MAC_CONFIGURATION_TE;
	val |= GMAC_MAC_CONFIGURATION_RE;
	WR4(sc, GMAC_MAC_CONFIGURATION, val);

	eqos_enable_intr(sc);

	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	mii_mediachg(mii);
	callout_reset(&sc->callout, hz, eqos_tick, sc);

	EQOS_UNLOCK(sc);
}

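/*
 * Dequeue packets and post them to the TX ring, stopping early if the
 * ring is close to full.  The TX end-address write at the bottom is
 * what tells the DMA engine that new descriptors are ready.
 */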
static void
eqos_start_locked(if_t ifp)
{
	struct eqos_softc *sc = if_getsoftc(ifp);
	struct mbuf *m;
	int pending = 0;

	if (!sc->link_up)
		return;

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	while (true) {
		if (TX_QUEUED(sc->tx.head, sc->tx.tail) >=
		    TX_DESC_COUNT - TX_MAX_SEGS) {
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}

		if (!(m = if_dequeue(ifp)))
			break;

		if (eqos_setup_txbuf(sc, m)) {
			if_sendq_prepend(ifp, m);
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}
		if_bpfmtap(ifp, m);
		pending++;
	}

	if (pending) {
		bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Start and run TX DMA */
		WR4(sc, GMAC_DMA_CHAN0_TX_END_ADDR,
		    (uint32_t)sc->tx.desc_ring_paddr + DESC_OFFSET(sc->tx.head));
		sc->tx_watchdog = WATCHDOG_TIMEOUT_SECS;
	}
}

static void
eqos_start(if_t ifp)
{
	struct eqos_softc *sc = if_getsoftc(ifp);

	EQOS_LOCK(sc);
	eqos_start_locked(ifp);
	EQOS_UNLOCK(sc);
}

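/*
 * Stop the interface: disable the receiver, stop both DMA channels,
 * flush whatever is left in the TX FIFO, and only then disable the
 * transmitter and interrupts.
 */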
static void
eqos_stop(struct eqos_softc *sc)
{
	if_t ifp = sc->ifp;
	uint32_t val;
	int retry;

	EQOS_LOCK(sc);

	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	callout_stop(&sc->callout);

	/* Disable receiver */
	val = RD4(sc, GMAC_MAC_CONFIGURATION);
	val &= ~GMAC_MAC_CONFIGURATION_RE;
	WR4(sc, GMAC_MAC_CONFIGURATION, val);

	/* Stop receive DMA */
	val = RD4(sc, GMAC_DMA_CHAN0_RX_CONTROL);
	val &= ~GMAC_DMA_CHAN0_RX_CONTROL_START;
	WR4(sc, GMAC_DMA_CHAN0_RX_CONTROL, val);

	/* Stop transmit DMA */
	val = RD4(sc, GMAC_DMA_CHAN0_TX_CONTROL);
	val &= ~GMAC_DMA_CHAN0_TX_CONTROL_START;
	WR4(sc, GMAC_DMA_CHAN0_TX_CONTROL, val);

	/* Flush data in the TX FIFO */
	val = RD4(sc, GMAC_MTL_TXQ0_OPERATION_MODE);
	val |= GMAC_MTL_TXQ0_OPERATION_MODE_FTQ;
	WR4(sc, GMAC_MTL_TXQ0_OPERATION_MODE, val);
	for (retry = 10000; retry > 0; retry--) {
		val = RD4(sc, GMAC_MTL_TXQ0_OPERATION_MODE);
		if (!(val & GMAC_MTL_TXQ0_OPERATION_MODE_FTQ))
			break;
		DELAY(10);
	}
	if (!retry)
		device_printf(sc->dev, "timeout flushing TX queue\n");

	/* Disable transmitter */
	val = RD4(sc, GMAC_MAC_CONFIGURATION);
	val &= ~GMAC_MAC_CONFIGURATION_TE;
	WR4(sc, GMAC_MAC_CONFIGURATION, val);

	eqos_disable_intr(sc);

	EQOS_UNLOCK(sc);
}

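/*
 * Harvest received packets: walk the RX ring until a descriptor still
 * owned by the hardware is found, pass each completed buffer up the
 * stack, and immediately recycle a fresh mbuf cluster into the slot.
 */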
static void
eqos_rxintr(struct eqos_softc *sc)
{
	if_t ifp = sc->ifp;
	struct mbuf *m;
	uint32_t rdes3;
	int error, length;

	while (true) {
		rdes3 = le32toh(sc->rx.desc_ring[sc->rx.head].des3);
		if ((rdes3 & EQOS_RDES3_OWN))
			break;

		if (rdes3 & (EQOS_RDES3_OE | EQOS_RDES3_RE))
			printf("Receive error rdes3=%08x\n", rdes3);

		bus_dmamap_sync(sc->rx.buf_tag,
		    sc->rx.buf_map[sc->rx.head].map, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->rx.buf_tag,
		    sc->rx.buf_map[sc->rx.head].map);

		length = rdes3 & EQOS_RDES3_LENGTH_MASK;
		if (length) {
			m = sc->rx.buf_map[sc->rx.head].mbuf;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = length;
			m->m_len = length;
			m->m_nextpkt = NULL;

			/* Remove trailing FCS */
			m_adj(m, -ETHER_CRC_LEN);

			EQOS_UNLOCK(sc);
			if_input(ifp, m);
			EQOS_LOCK(sc);
		}

		if ((m = eqos_alloc_mbufcl(sc))) {
			if ((error = eqos_setup_rxbuf(sc, sc->rx.head, m)))
				printf("ERROR: Hole in RX ring!!\n");
		} else
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);

		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

		WR4(sc, GMAC_DMA_CHAN0_RX_END_ADDR,
		    (uint32_t)sc->rx.desc_ring_paddr + DESC_OFFSET(sc->rx.head));

		sc->rx.head = RX_NEXT(sc->rx.head);
	}
}

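/*
 * Reclaim completed TX descriptors: unload and free the mbufs, reset
 * the descriptors, count errors from the last descriptor of each
 * packet, and restart transmission if more packets are queued.
 */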
static void
eqos_txintr(struct eqos_softc *sc)
{
	if_t ifp = sc->ifp;
	struct eqos_bufmap *bmap;
	uint32_t tdes3;

	EQOS_ASSERT_LOCKED(sc);

	while (sc->tx.tail != sc->tx.head) {
		tdes3 = le32toh(sc->tx.desc_ring[sc->tx.tail].des3);
		if ((tdes3 & EQOS_TDES3_OWN))
			break;

		bmap = &sc->tx.buf_map[sc->tx.tail];
		if (bmap->mbuf) {
			bus_dmamap_sync(sc->tx.buf_tag, bmap->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->tx.buf_tag, bmap->map);
			m_freem(bmap->mbuf);
			bmap->mbuf = NULL;
		}

		eqos_setup_txdesc(sc, sc->tx.tail, 0, 0, 0, 0);

		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);

		/* Last descriptor in a packet contains DMA status */
		if ((tdes3 & EQOS_TDES3_LD)) {
			if ((tdes3 & EQOS_TDES3_DE)) {
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			} else if ((tdes3 & EQOS_TDES3_ES)) {
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			} else {
				if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
			}
		}
		sc->tx.tail = TX_NEXT(sc->tx.tail);
	}
	if (sc->tx.tail == sc->tx.head)
		sc->tx_watchdog = 0;
	eqos_start_locked(sc->ifp);
}

static void
eqos_intr_mtl(struct eqos_softc *sc, uint32_t mtl_status)
{
	uint32_t mtl_istat = 0;

	if ((mtl_status & GMAC_MTL_INTERRUPT_STATUS_Q0IS)) {
		uint32_t mtl_clear = 0;

		mtl_istat = RD4(sc, GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS);
		if ((mtl_istat & GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_RXOVFIS)) {
			mtl_clear |= GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_RXOVFIS;
		}
		if ((mtl_istat & GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_TXUNFIS)) {
			mtl_clear |= GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_TXUNFIS;
		}
		if (mtl_clear) {
			mtl_clear |= (mtl_istat &
			    (GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_RXOIE |
			    GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS_TXUIE));
			WR4(sc, GMAC_MTL_Q0_INTERRUPT_CTRL_STATUS, mtl_clear);
		}
	}
	if (bootverbose)
		device_printf(sc->dev,
		    "GMAC_MTL_INTERRUPT_STATUS = 0x%08X, "
		    "GMAC_MTL_INTERRUPT_STATUS_Q0IS = 0x%08X\n",
		    mtl_status, mtl_istat);
}

static void
eqos_tick(void *softc)
{
	struct eqos_softc *sc = softc;
	struct mii_data *mii = device_get_softc(sc->miibus);
	bool link_status;

	EQOS_ASSERT_LOCKED(sc);

	if (sc->tx_watchdog > 0)
		if (!--sc->tx_watchdog) {
			device_printf(sc->dev, "watchdog timeout\n");
			eqos_txintr(sc);
		}

	link_status = sc->link_up;
	mii_tick(mii);
	if (sc->link_up && !link_status)
		eqos_start_locked(sc->ifp);

	callout_reset(&sc->callout, hz, eqos_tick, sc);
}

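/*
 * Interrupt handler: acknowledge the channel 0 DMA status bits, then
 * service RX and TX completions under the driver lock.  MAC and MTL
 * status are read as well so a spurious interrupt can be reported.
 */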
static void
eqos_intr(void *arg)
{
	struct eqos_softc *sc = arg;
	uint32_t mac_status, mtl_status, dma_status, rx_tx_status;

	mac_status = RD4(sc, GMAC_MAC_INTERRUPT_STATUS);
	mac_status &= RD4(sc, GMAC_MAC_INTERRUPT_ENABLE);

	if (mac_status)
		device_printf(sc->dev, "MAC interrupt\n");

	if ((mtl_status = RD4(sc, GMAC_MTL_INTERRUPT_STATUS)))
		eqos_intr_mtl(sc, mtl_status);

	dma_status = RD4(sc, GMAC_DMA_CHAN0_STATUS);
	dma_status &= RD4(sc, GMAC_DMA_CHAN0_INTR_ENABLE);

	if (dma_status)
		WR4(sc, GMAC_DMA_CHAN0_STATUS, dma_status);

	EQOS_LOCK(sc);

	if (dma_status & GMAC_DMA_CHAN0_STATUS_RI)
		eqos_rxintr(sc);

	if (dma_status & GMAC_DMA_CHAN0_STATUS_TI)
		eqos_txintr(sc);

	EQOS_UNLOCK(sc);

	if (!(mac_status | mtl_status | dma_status)) {
		device_printf(sc->dev,
		    "spurious interrupt mac=%08x mtl=%08x dma=%08x\n",
		    RD4(sc, GMAC_MAC_INTERRUPT_STATUS),
		    RD4(sc, GMAC_MTL_INTERRUPT_STATUS),
		    RD4(sc, GMAC_DMA_CHAN0_STATUS));
	}
	if ((rx_tx_status = RD4(sc, GMAC_MAC_RX_TX_STATUS)))
		device_printf(sc->dev, "RX/TX status interrupt\n");
}

static int
eqos_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
	struct eqos_softc *sc = if_getsoftc(ifp);
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int flags, mask;
	int error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				flags = if_getflags(ifp);
				if ((flags & (IFF_PROMISC|IFF_ALLMULTI))) {
					EQOS_LOCK(sc);
					eqos_setup_rxfilter(sc);
					EQOS_UNLOCK(sc);
				}
			} else {
				eqos_init(sc);
			}
		} else {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
				eqos_stop(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
			EQOS_LOCK(sc);
			eqos_setup_rxfilter(sc);
			EQOS_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
		if (mask & IFCAP_VLAN_MTU)
			if_togglecapenable(ifp, IFCAP_VLAN_MTU);
		if (mask & IFCAP_RXCSUM)
			if_togglecapenable(ifp, IFCAP_RXCSUM);
		if (mask & IFCAP_TXCSUM)
			if_togglecapenable(ifp, IFCAP_TXCSUM);
		if ((if_getcapenable(ifp) & IFCAP_TXCSUM))
			if_sethwassistbits(ifp,
			    CSUM_IP | CSUM_UDP | CSUM_TCP, 0);
		else
			if_sethwassistbits(ifp,
			    0, CSUM_IP | CSUM_UDP | CSUM_TCP);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

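/*
 * Recover the MAC address the bootloader may have left in the address
 * filter registers; if none is set, generate a random locally
 * administered address instead.
 */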
static void
eqos_get_eaddr(struct eqos_softc *sc, uint8_t *eaddr)
{
	uint32_t maclo, machi;

	maclo = htobe32(RD4(sc, GMAC_MAC_ADDRESS0_LOW));
	machi = htobe16(RD4(sc, GMAC_MAC_ADDRESS0_HIGH) & 0xFFFF);

	/* If no valid MAC address was found, generate a random one */
	if (maclo == 0xffffffff && machi == 0xffff) {
		maclo = 0xf2 | (arc4random() & 0xffff0000);
		machi = arc4random() & 0x0000ffff;
	}
	eaddr[0] = maclo & 0xff;
	eaddr[1] = (maclo >> 8) & 0xff;
	eaddr[2] = (maclo >> 16) & 0xff;
	eaddr[3] = (maclo >> 24) & 0xff;
	eaddr[4] = machi & 0xff;
	eaddr[5] = (machi >> 8) & 0xff;
}

static void
eqos_axi_configure(struct eqos_softc *sc)
{
	uint32_t val;

	val = RD4(sc, GMAC_DMA_SYSBUS_MODE);

	/* Max Write Outstanding Req Limit */
	val &= ~GMAC_DMA_SYSBUS_MODE_WR_OSR_LMT_MASK;
	val |= 0x03 << GMAC_DMA_SYSBUS_MODE_WR_OSR_LMT_SHIFT;

	/* Max Read Outstanding Req Limit */
	val &= ~GMAC_DMA_SYSBUS_MODE_RD_OSR_LMT_MASK;
	val |= 0x07 << GMAC_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT;

	/* Allowed Burst Lengths */
	val |= GMAC_DMA_SYSBUS_MODE_BLEN16;
	val |= GMAC_DMA_SYSBUS_MODE_BLEN8;
	val |= GMAC_DMA_SYSBUS_MODE_BLEN4;

	/* Fixed Burst Length */
	val |= GMAC_DMA_SYSBUS_MODE_MB;

	WR4(sc, GMAC_DMA_SYSBUS_MODE, val);
}

static void
eqos_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{

	if (!error)
		*(bus_addr_t *)arg = segs[0].ds_addr;
}

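/*
 * Allocate the TX and RX descriptor rings plus a DMA map (and, for
 * RX, an mbuf cluster) per ring slot.  The rings are constrained by
 * DESC_BOUNDARY so a ring never crosses a 32-bit address boundary;
 * the end-address registers are written with the low 32 bits only.
 */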
static int
eqos_setup_dma(struct eqos_softc *sc)
{
	struct mbuf *m;
	int error, i;

	/* Set up TX descriptor ring, descriptors, and dma maps */
	if ((error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
					DESC_ALIGN, DESC_BOUNDARY,
					BUS_SPACE_MAXADDR_32BIT,
					BUS_SPACE_MAXADDR, NULL, NULL,
					TX_DESC_SIZE, 1, TX_DESC_SIZE, 0,
					NULL, NULL, &sc->tx.desc_tag))) {
		device_printf(sc->dev, "could not create TX ring DMA tag\n");
		return (error);
	}

	if ((error = bus_dmamem_alloc(sc->tx.desc_tag,
	    (void**)&sc->tx.desc_ring,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->tx.desc_map))) {
		device_printf(sc->dev,
		    "could not allocate TX descriptor ring.\n");
		return (error);
	}

	if ((error = bus_dmamap_load(sc->tx.desc_tag, sc->tx.desc_map,
	    sc->tx.desc_ring,
	    TX_DESC_SIZE, eqos_get1paddr, &sc->tx.desc_ring_paddr, 0))) {
		device_printf(sc->dev,
		    "could not load TX descriptor ring map.\n");
		return (error);
	}

	if ((error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
					BUS_SPACE_MAXADDR_32BIT,
					BUS_SPACE_MAXADDR, NULL, NULL,
					MCLBYTES*TX_MAX_SEGS, TX_MAX_SEGS,
					MCLBYTES, 0, NULL, NULL,
					&sc->tx.buf_tag))) {
		device_printf(sc->dev, "could not create TX buffer DMA tag.\n");
		return (error);
	}

	for (i = 0; i < TX_DESC_COUNT; i++) {
		if ((error = bus_dmamap_create(sc->tx.buf_tag, BUS_DMA_COHERENT,
		    &sc->tx.buf_map[i].map))) {
			device_printf(sc->dev, "cannot create TX buffer map\n");
			return (error);
		}
		eqos_setup_txdesc(sc, i, EQOS_TDES3_OWN, 0, 0, 0);
	}

	/* Set up RX descriptor ring, descriptors, dma maps, and mbufs */
	if ((error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
					DESC_ALIGN, DESC_BOUNDARY,
					BUS_SPACE_MAXADDR_32BIT,
					BUS_SPACE_MAXADDR, NULL, NULL,
					RX_DESC_SIZE, 1, RX_DESC_SIZE, 0,
					NULL, NULL, &sc->rx.desc_tag))) {
		device_printf(sc->dev, "could not create RX ring DMA tag.\n");
		return (error);
	}

	if ((error = bus_dmamem_alloc(sc->rx.desc_tag,
	    (void **)&sc->rx.desc_ring,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->rx.desc_map))) {
		device_printf(sc->dev,
		    "could not allocate RX descriptor ring.\n");
		return (error);
	}

	if ((error = bus_dmamap_load(sc->rx.desc_tag, sc->rx.desc_map,
	    sc->rx.desc_ring, RX_DESC_SIZE, eqos_get1paddr,
	    &sc->rx.desc_ring_paddr, 0))) {
		device_printf(sc->dev,
		    "could not load RX descriptor ring map.\n");
		return (error);
	}

	if ((error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
					BUS_SPACE_MAXADDR_32BIT,
					BUS_SPACE_MAXADDR, NULL, NULL,
					MCLBYTES, 1,
					MCLBYTES, 0, NULL, NULL,
					&sc->rx.buf_tag))) {
		device_printf(sc->dev, "could not create RX buf DMA tag.\n");
		return (error);
	}

	for (i = 0; i < RX_DESC_COUNT; i++) {
		if ((error = bus_dmamap_create(sc->rx.buf_tag, BUS_DMA_COHERENT,
		    &sc->rx.buf_map[i].map))) {
			device_printf(sc->dev, "cannot create RX buffer map\n");
			return (error);
		}
		if (!(m = eqos_alloc_mbufcl(sc))) {
			device_printf(sc->dev, "cannot allocate RX mbuf\n");
			return (ENOMEM);
		}
		if ((error = eqos_setup_rxbuf(sc, i, m))) {
			device_printf(sc->dev, "cannot create RX buffer\n");
			return (error);
		}
	}

	if (bootverbose)
		device_printf(sc->dev, "TX ring @ 0x%lx, RX ring @ 0x%lx\n",
		    sc->tx.desc_ring_paddr, sc->rx.desc_ring_paddr);
	return (0);
}

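/*
 * Device attach: map resources, verify the Synopsys IP version, reset
 * and configure the core, set up DMA, hook up the interrupt, and
 * attach the miibus and ifnet layers.
 */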
static int
eqos_attach(device_t dev)
{
	struct eqos_softc *sc = device_get_softc(dev);
	if_t ifp;
	uint32_t ver;
	uint8_t eaddr[ETHER_ADDR_LEN];
	u_int userver, snpsver;
	int error;
	int n;

	/* setup resources */
	if (bus_alloc_resources(dev, eqos_spec, sc->res)) {
		device_printf(dev, "Could not allocate resources\n");
		bus_release_resources(dev, eqos_spec, sc->res);
		return (ENXIO);
	}

	sc->dev = dev;
	ver = RD4(sc, GMAC_MAC_VERSION);
	userver = (ver & GMAC_MAC_VERSION_USERVER_MASK) >>
	    GMAC_MAC_VERSION_USERVER_SHIFT;
	snpsver = ver & GMAC_MAC_VERSION_SNPSVER_MASK;

	if (snpsver != 0x51) {
		device_printf(dev, "EQOS version 0x%02x not supported\n",
		    snpsver);
		return (ENXIO);
	}

	for (n = 0; n < 4; n++)
		sc->hw_feature[n] = RD4(sc, GMAC_MAC_HW_FEATURE(n));

	if (bootverbose) {
		device_printf(dev, "DesignWare EQOS ver 0x%02x (0x%02x)\n",
		    snpsver, userver);
		device_printf(dev, "hw features %08x %08x %08x %08x\n",
		    sc->hw_feature[0], sc->hw_feature[1],
		    sc->hw_feature[2], sc->hw_feature[3]);
	}

	if ((error = IF_EQOS_INIT(dev)))
		return (error);

	mtx_init(&sc->lock, "eqos lock", MTX_NETWORK_LOCK, MTX_DEF);
	callout_init_mtx(&sc->callout, &sc->lock, 0);

	eqos_get_eaddr(sc, eaddr);
	if (bootverbose)
		device_printf(sc->dev, "Ethernet address %6D\n", eaddr, ":");

	/* Soft reset EMAC core */
	if ((error = eqos_reset(sc))) {
		device_printf(sc->dev, "reset timeout!\n");
		return (error);
	}

	/* Configure AXI Bus mode parameters */
	eqos_axi_configure(sc);

	/* Setup DMA descriptors */
	if (eqos_setup_dma(sc)) {
		device_printf(sc->dev, "failed to setup DMA descriptors\n");
		return (EINVAL);
	}

	/* setup interrupt delivery */
	if ((bus_setup_intr(dev, sc->res[EQOS_RES_IRQ0], EQOS_INTR_FLAGS,
	    NULL, eqos_intr, sc, &sc->irq_handle))) {
		device_printf(dev, "unable to setup 1st interrupt\n");
		bus_release_resources(dev, eqos_spec, sc->res);
		return (ENXIO);
	}

	/* Setup ethernet interface */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
	if_setflags(sc->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setstartfn(ifp, eqos_start);
	if_setioctlfn(ifp, eqos_ioctl);
	if_setinitfn(ifp, eqos_init);
	if_setsendqlen(ifp, TX_DESC_COUNT - 1);
	if_setsendqready(ifp);
	if_setcapabilities(ifp, IFCAP_VLAN_MTU /*| IFCAP_HWCSUM*/);
	if_setcapenable(ifp, if_getcapabilities(ifp));

	/* Attach MII driver */
	if ((error = mii_attach(sc->dev, &sc->miibus, ifp, eqos_media_change,
	    eqos_media_status, BMSR_DEFCAPMASK, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0))) {
		device_printf(sc->dev, "PHY attach failed\n");
		return (ENXIO);
	}

	/* Attach ethernet interface */
	ether_ifattach(ifp, eaddr);

	return (0);
}

static int
eqos_detach(device_t dev)
{
	struct eqos_softc *sc = device_get_softc(dev);
	int i;

	if (device_is_attached(dev)) {
		EQOS_LOCK(sc);
		eqos_stop(sc);
		EQOS_UNLOCK(sc);
		if_setflagbits(sc->ifp, 0, IFF_UP);
		ether_ifdetach(sc->ifp);
	}

	if (sc->miibus)
		device_delete_child(dev, sc->miibus);
	bus_generic_detach(dev);

	if (sc->irq_handle)
		bus_teardown_intr(dev, sc->res[EQOS_RES_IRQ0],
		    sc->irq_handle);

	if (sc->ifp)
		if_free(sc->ifp);

	bus_release_resources(dev, eqos_spec, sc->res);

	if (sc->tx.desc_tag) {
		if (sc->tx.desc_map) {
			bus_dmamap_unload(sc->tx.desc_tag, sc->tx.desc_map);
			bus_dmamem_free(sc->tx.desc_tag, sc->tx.desc_ring,
			    sc->tx.desc_map);
		}
		bus_dma_tag_destroy(sc->tx.desc_tag);
	}
	if (sc->tx.buf_tag) {
		for (i = 0; i < TX_DESC_COUNT; i++) {
			m_free(sc->tx.buf_map[i].mbuf);
			bus_dmamap_destroy(sc->tx.buf_tag,
			    sc->tx.buf_map[i].map);
		}
		bus_dma_tag_destroy(sc->tx.buf_tag);
	}

	if (sc->rx.desc_tag) {
		if (sc->rx.desc_map) {
			bus_dmamap_unload(sc->rx.desc_tag, sc->rx.desc_map);
			bus_dmamem_free(sc->rx.desc_tag, sc->rx.desc_ring,
			    sc->rx.desc_map);
		}
		bus_dma_tag_destroy(sc->rx.desc_tag);
	}
	if (sc->rx.buf_tag) {
		for (i = 0; i < RX_DESC_COUNT; i++) {
			m_free(sc->rx.buf_map[i].mbuf);
			bus_dmamap_destroy(sc->rx.buf_tag,
			    sc->rx.buf_map[i].map);
		}
		bus_dma_tag_destroy(sc->rx.buf_tag);
	}

	mtx_destroy(&sc->lock);

	return (0);
}


static device_method_t eqos_methods[] = {
	/* Device Interface */
	DEVMETHOD(device_attach,	eqos_attach),
	DEVMETHOD(device_detach,	eqos_detach),

	/* MII Interface */
	DEVMETHOD(miibus_readreg,	eqos_miibus_readreg),
	DEVMETHOD(miibus_writereg,	eqos_miibus_writereg),
	DEVMETHOD(miibus_statchg,	eqos_miibus_statchg),

	DEVMETHOD_END
};

driver_t eqos_driver = {
	"eqos",
	eqos_methods,
	sizeof(struct eqos_softc),
};

DRIVER_MODULE(miibus, eqos, miibus_driver, 0, 0);