xref: /freebsd/sys/dev/dwc/dwc1000_dma.c (revision 972adf0f97ac5e2f3a6024c25623a908403aff9b)
1*972adf0fSEmmanuel Vadot /*-
2*972adf0fSEmmanuel Vadot  * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
3*972adf0fSEmmanuel Vadot  *
4*972adf0fSEmmanuel Vadot  * This software was developed by SRI International and the University of
5*972adf0fSEmmanuel Vadot  * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
6*972adf0fSEmmanuel Vadot  * ("CTSRD"), as part of the DARPA CRASH research programme.
7*972adf0fSEmmanuel Vadot  *
8*972adf0fSEmmanuel Vadot  * Redistribution and use in source and binary forms, with or without
9*972adf0fSEmmanuel Vadot  * modification, are permitted provided that the following conditions
10*972adf0fSEmmanuel Vadot  * are met:
11*972adf0fSEmmanuel Vadot  * 1. Redistributions of source code must retain the above copyright
12*972adf0fSEmmanuel Vadot  *    notice, this list of conditions and the following disclaimer.
13*972adf0fSEmmanuel Vadot  * 2. Redistributions in binary form must reproduce the above copyright
14*972adf0fSEmmanuel Vadot  *    notice, this list of conditions and the following disclaimer in the
15*972adf0fSEmmanuel Vadot  *    documentation and/or other materials provided with the distribution.
16*972adf0fSEmmanuel Vadot  *
17*972adf0fSEmmanuel Vadot  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18*972adf0fSEmmanuel Vadot  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19*972adf0fSEmmanuel Vadot  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20*972adf0fSEmmanuel Vadot  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21*972adf0fSEmmanuel Vadot  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22*972adf0fSEmmanuel Vadot  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23*972adf0fSEmmanuel Vadot  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24*972adf0fSEmmanuel Vadot  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25*972adf0fSEmmanuel Vadot  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26*972adf0fSEmmanuel Vadot  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27*972adf0fSEmmanuel Vadot  * SUCH DAMAGE.
28*972adf0fSEmmanuel Vadot  */
29*972adf0fSEmmanuel Vadot 
30*972adf0fSEmmanuel Vadot #include <sys/cdefs.h>
31*972adf0fSEmmanuel Vadot #include <sys/param.h>
32*972adf0fSEmmanuel Vadot #include <sys/systm.h>
33*972adf0fSEmmanuel Vadot #include <sys/bus.h>
34*972adf0fSEmmanuel Vadot #include <sys/kernel.h>
35*972adf0fSEmmanuel Vadot #include <sys/lock.h>
36*972adf0fSEmmanuel Vadot #include <sys/malloc.h>
37*972adf0fSEmmanuel Vadot #include <sys/mbuf.h>
38*972adf0fSEmmanuel Vadot #include <sys/module.h>
39*972adf0fSEmmanuel Vadot #include <sys/mutex.h>
40*972adf0fSEmmanuel Vadot #include <sys/rman.h>
41*972adf0fSEmmanuel Vadot #include <sys/socket.h>
42*972adf0fSEmmanuel Vadot 
43*972adf0fSEmmanuel Vadot #include <net/bpf.h>
44*972adf0fSEmmanuel Vadot #include <net/if.h>
45*972adf0fSEmmanuel Vadot #include <net/ethernet.h>
46*972adf0fSEmmanuel Vadot #include <net/if_dl.h>
47*972adf0fSEmmanuel Vadot #include <net/if_media.h>
48*972adf0fSEmmanuel Vadot #include <net/if_types.h>
49*972adf0fSEmmanuel Vadot #include <net/if_var.h>
50*972adf0fSEmmanuel Vadot 
51*972adf0fSEmmanuel Vadot #include <machine/bus.h>
52*972adf0fSEmmanuel Vadot 
53*972adf0fSEmmanuel Vadot #include <dev/extres/clk/clk.h>
54*972adf0fSEmmanuel Vadot #include <dev/extres/hwreset/hwreset.h>
55*972adf0fSEmmanuel Vadot 
56*972adf0fSEmmanuel Vadot #include <dev/ofw/ofw_bus.h>
57*972adf0fSEmmanuel Vadot #include <dev/ofw/ofw_bus_subr.h>
58*972adf0fSEmmanuel Vadot 
59*972adf0fSEmmanuel Vadot #include <dev/dwc/if_dwcvar.h>
60*972adf0fSEmmanuel Vadot #include <dev/dwc/dwc1000_reg.h>
61*972adf0fSEmmanuel Vadot #include <dev/dwc/dwc1000_dma.h>
62*972adf0fSEmmanuel Vadot 
63*972adf0fSEmmanuel Vadot static inline uint32_t
64*972adf0fSEmmanuel Vadot next_rxidx(struct dwc_softc *sc, uint32_t curidx)
65*972adf0fSEmmanuel Vadot {
66*972adf0fSEmmanuel Vadot 
67*972adf0fSEmmanuel Vadot 	return ((curidx + 1) % RX_DESC_COUNT);
68*972adf0fSEmmanuel Vadot }
69*972adf0fSEmmanuel Vadot 
70*972adf0fSEmmanuel Vadot static void
71*972adf0fSEmmanuel Vadot dwc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
72*972adf0fSEmmanuel Vadot {
73*972adf0fSEmmanuel Vadot 
74*972adf0fSEmmanuel Vadot 	if (error != 0)
75*972adf0fSEmmanuel Vadot 		return;
76*972adf0fSEmmanuel Vadot 	*(bus_addr_t *)arg = segs[0].ds_addr;
77*972adf0fSEmmanuel Vadot }
78*972adf0fSEmmanuel Vadot 
/*
 * Hand TX descriptor 'idx' over to the DMA engine by setting its OWN
 * bit.  The surrounding barriers ensure the rest of the descriptor is
 * visible to the device before the OWN bit is, and that the OWN bit
 * itself is posted before any subsequent stores.
 */
inline static void
dwc_set_owner(struct dwc_softc *sc, int idx)
{
	wmb();
	sc->txdesc_ring[idx].desc0 |= TDESC0_OWN;
	wmb();
}
86*972adf0fSEmmanuel Vadot 
87*972adf0fSEmmanuel Vadot inline static void
88*972adf0fSEmmanuel Vadot dwc_setup_txdesc(struct dwc_softc *sc, int idx, bus_addr_t paddr,
89*972adf0fSEmmanuel Vadot   uint32_t len, uint32_t flags, bool first, bool last)
90*972adf0fSEmmanuel Vadot {
91*972adf0fSEmmanuel Vadot 	uint32_t desc0, desc1;
92*972adf0fSEmmanuel Vadot 
93*972adf0fSEmmanuel Vadot 	/* Addr/len 0 means we're clearing the descriptor after xmit done. */
94*972adf0fSEmmanuel Vadot 	if (paddr == 0 || len == 0) {
95*972adf0fSEmmanuel Vadot 		desc0 = 0;
96*972adf0fSEmmanuel Vadot 		desc1 = 0;
97*972adf0fSEmmanuel Vadot 		--sc->tx_desccount;
98*972adf0fSEmmanuel Vadot 	} else {
99*972adf0fSEmmanuel Vadot 		if (sc->mactype != DWC_GMAC_EXT_DESC) {
100*972adf0fSEmmanuel Vadot 			desc0 = 0;
101*972adf0fSEmmanuel Vadot 			desc1 = NTDESC1_TCH | len | flags;
102*972adf0fSEmmanuel Vadot 			if (first)
103*972adf0fSEmmanuel Vadot 				desc1 |=  NTDESC1_FS;
104*972adf0fSEmmanuel Vadot 			if (last)
105*972adf0fSEmmanuel Vadot 				desc1 |= NTDESC1_LS | NTDESC1_IC;
106*972adf0fSEmmanuel Vadot 		} else {
107*972adf0fSEmmanuel Vadot 			desc0 = ETDESC0_TCH | flags;
108*972adf0fSEmmanuel Vadot 			if (first)
109*972adf0fSEmmanuel Vadot 				desc0 |= ETDESC0_FS;
110*972adf0fSEmmanuel Vadot 			if (last)
111*972adf0fSEmmanuel Vadot 				desc0 |= ETDESC0_LS | ETDESC0_IC;
112*972adf0fSEmmanuel Vadot 			desc1 = len;
113*972adf0fSEmmanuel Vadot 		}
114*972adf0fSEmmanuel Vadot 		++sc->tx_desccount;
115*972adf0fSEmmanuel Vadot 	}
116*972adf0fSEmmanuel Vadot 
117*972adf0fSEmmanuel Vadot 	sc->txdesc_ring[idx].addr1 = (uint32_t)(paddr);
118*972adf0fSEmmanuel Vadot 	sc->txdesc_ring[idx].desc0 = desc0;
119*972adf0fSEmmanuel Vadot 	sc->txdesc_ring[idx].desc1 = desc1;
120*972adf0fSEmmanuel Vadot }
121*972adf0fSEmmanuel Vadot 
122*972adf0fSEmmanuel Vadot inline static uint32_t
123*972adf0fSEmmanuel Vadot dwc_setup_rxdesc(struct dwc_softc *sc, int idx, bus_addr_t paddr)
124*972adf0fSEmmanuel Vadot {
125*972adf0fSEmmanuel Vadot 	uint32_t nidx;
126*972adf0fSEmmanuel Vadot 
127*972adf0fSEmmanuel Vadot 	sc->rxdesc_ring[idx].addr1 = (uint32_t)paddr;
128*972adf0fSEmmanuel Vadot 	nidx = next_rxidx(sc, idx);
129*972adf0fSEmmanuel Vadot 	sc->rxdesc_ring[idx].addr2 = sc->rxdesc_ring_paddr +
130*972adf0fSEmmanuel Vadot 	    (nidx * sizeof(struct dwc_hwdesc));
131*972adf0fSEmmanuel Vadot 	if (sc->mactype != DWC_GMAC_EXT_DESC)
132*972adf0fSEmmanuel Vadot 		sc->rxdesc_ring[idx].desc1 = NRDESC1_RCH |
133*972adf0fSEmmanuel Vadot 		    MIN(MCLBYTES, NRDESC1_RBS1_MASK);
134*972adf0fSEmmanuel Vadot 	else
135*972adf0fSEmmanuel Vadot 		sc->rxdesc_ring[idx].desc1 = ERDESC1_RCH |
136*972adf0fSEmmanuel Vadot 		    MIN(MCLBYTES, ERDESC1_RBS1_MASK);
137*972adf0fSEmmanuel Vadot 
138*972adf0fSEmmanuel Vadot 	wmb();
139*972adf0fSEmmanuel Vadot 	sc->rxdesc_ring[idx].desc0 = RDESC0_OWN;
140*972adf0fSEmmanuel Vadot 	wmb();
141*972adf0fSEmmanuel Vadot 	return (nidx);
142*972adf0fSEmmanuel Vadot }
143*972adf0fSEmmanuel Vadot 
144*972adf0fSEmmanuel Vadot int
145*972adf0fSEmmanuel Vadot dma1000_setup_txbuf(struct dwc_softc *sc, int idx, struct mbuf **mp)
146*972adf0fSEmmanuel Vadot {
147*972adf0fSEmmanuel Vadot 	struct bus_dma_segment segs[TX_MAP_MAX_SEGS];
148*972adf0fSEmmanuel Vadot 	int error, nsegs;
149*972adf0fSEmmanuel Vadot 	struct mbuf * m;
150*972adf0fSEmmanuel Vadot 	uint32_t flags = 0;
151*972adf0fSEmmanuel Vadot 	int i;
152*972adf0fSEmmanuel Vadot 	int first, last;
153*972adf0fSEmmanuel Vadot 
154*972adf0fSEmmanuel Vadot 	error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag, sc->txbuf_map[idx].map,
155*972adf0fSEmmanuel Vadot 	    *mp, segs, &nsegs, 0);
156*972adf0fSEmmanuel Vadot 	if (error == EFBIG) {
157*972adf0fSEmmanuel Vadot 		/*
158*972adf0fSEmmanuel Vadot 		 * The map may be partially mapped from the first call.
159*972adf0fSEmmanuel Vadot 		 * Make sure to reset it.
160*972adf0fSEmmanuel Vadot 		 */
161*972adf0fSEmmanuel Vadot 		bus_dmamap_unload(sc->txbuf_tag, sc->txbuf_map[idx].map);
162*972adf0fSEmmanuel Vadot 		if ((m = m_defrag(*mp, M_NOWAIT)) == NULL)
163*972adf0fSEmmanuel Vadot 			return (ENOMEM);
164*972adf0fSEmmanuel Vadot 		*mp = m;
165*972adf0fSEmmanuel Vadot 		error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag, sc->txbuf_map[idx].map,
166*972adf0fSEmmanuel Vadot 		    *mp, segs, &nsegs, 0);
167*972adf0fSEmmanuel Vadot 	}
168*972adf0fSEmmanuel Vadot 	if (error != 0)
169*972adf0fSEmmanuel Vadot 		return (ENOMEM);
170*972adf0fSEmmanuel Vadot 
171*972adf0fSEmmanuel Vadot 	if (sc->tx_desccount + nsegs > TX_DESC_COUNT) {
172*972adf0fSEmmanuel Vadot 		bus_dmamap_unload(sc->txbuf_tag, sc->txbuf_map[idx].map);
173*972adf0fSEmmanuel Vadot 		return (ENOMEM);
174*972adf0fSEmmanuel Vadot 	}
175*972adf0fSEmmanuel Vadot 
176*972adf0fSEmmanuel Vadot 	m = *mp;
177*972adf0fSEmmanuel Vadot 
178*972adf0fSEmmanuel Vadot 	if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) {
179*972adf0fSEmmanuel Vadot 		if ((m->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) != 0) {
180*972adf0fSEmmanuel Vadot 			if (sc->mactype != DWC_GMAC_EXT_DESC)
181*972adf0fSEmmanuel Vadot 				flags = NTDESC1_CIC_FULL;
182*972adf0fSEmmanuel Vadot 			else
183*972adf0fSEmmanuel Vadot 				flags = ETDESC0_CIC_FULL;
184*972adf0fSEmmanuel Vadot 		} else {
185*972adf0fSEmmanuel Vadot 			if (sc->mactype != DWC_GMAC_EXT_DESC)
186*972adf0fSEmmanuel Vadot 				flags = NTDESC1_CIC_HDR;
187*972adf0fSEmmanuel Vadot 			else
188*972adf0fSEmmanuel Vadot 				flags = ETDESC0_CIC_HDR;
189*972adf0fSEmmanuel Vadot 		}
190*972adf0fSEmmanuel Vadot 	}
191*972adf0fSEmmanuel Vadot 
192*972adf0fSEmmanuel Vadot 	bus_dmamap_sync(sc->txbuf_tag, sc->txbuf_map[idx].map,
193*972adf0fSEmmanuel Vadot 	    BUS_DMASYNC_PREWRITE);
194*972adf0fSEmmanuel Vadot 
195*972adf0fSEmmanuel Vadot 	sc->txbuf_map[idx].mbuf = m;
196*972adf0fSEmmanuel Vadot 
197*972adf0fSEmmanuel Vadot 	first = sc->tx_desc_head;
198*972adf0fSEmmanuel Vadot 	for (i = 0; i < nsegs; i++) {
199*972adf0fSEmmanuel Vadot 		dwc_setup_txdesc(sc, sc->tx_desc_head,
200*972adf0fSEmmanuel Vadot 		    segs[i].ds_addr, segs[i].ds_len,
201*972adf0fSEmmanuel Vadot 		    (i == 0) ? flags : 0, /* only first desc needs flags */
202*972adf0fSEmmanuel Vadot 		    (i == 0),
203*972adf0fSEmmanuel Vadot 		    (i == nsegs - 1));
204*972adf0fSEmmanuel Vadot 		if (i > 0)
205*972adf0fSEmmanuel Vadot 			dwc_set_owner(sc, sc->tx_desc_head);
206*972adf0fSEmmanuel Vadot 		last = sc->tx_desc_head;
207*972adf0fSEmmanuel Vadot 		sc->tx_desc_head = next_txidx(sc, sc->tx_desc_head);
208*972adf0fSEmmanuel Vadot 	}
209*972adf0fSEmmanuel Vadot 
210*972adf0fSEmmanuel Vadot 	sc->txbuf_map[idx].last_desc_idx = last;
211*972adf0fSEmmanuel Vadot 
212*972adf0fSEmmanuel Vadot 	dwc_set_owner(sc, first);
213*972adf0fSEmmanuel Vadot 
214*972adf0fSEmmanuel Vadot 	return (0);
215*972adf0fSEmmanuel Vadot }
216*972adf0fSEmmanuel Vadot 
217*972adf0fSEmmanuel Vadot static int
218*972adf0fSEmmanuel Vadot dma1000_setup_rxbuf(struct dwc_softc *sc, int idx, struct mbuf *m)
219*972adf0fSEmmanuel Vadot {
220*972adf0fSEmmanuel Vadot 	struct bus_dma_segment seg;
221*972adf0fSEmmanuel Vadot 	int error, nsegs;
222*972adf0fSEmmanuel Vadot 
223*972adf0fSEmmanuel Vadot 	m_adj(m, ETHER_ALIGN);
224*972adf0fSEmmanuel Vadot 
225*972adf0fSEmmanuel Vadot 	error = bus_dmamap_load_mbuf_sg(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
226*972adf0fSEmmanuel Vadot 	    m, &seg, &nsegs, 0);
227*972adf0fSEmmanuel Vadot 	if (error != 0)
228*972adf0fSEmmanuel Vadot 		return (error);
229*972adf0fSEmmanuel Vadot 
230*972adf0fSEmmanuel Vadot 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
231*972adf0fSEmmanuel Vadot 
232*972adf0fSEmmanuel Vadot 	bus_dmamap_sync(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
233*972adf0fSEmmanuel Vadot 	    BUS_DMASYNC_PREREAD);
234*972adf0fSEmmanuel Vadot 
235*972adf0fSEmmanuel Vadot 	sc->rxbuf_map[idx].mbuf = m;
236*972adf0fSEmmanuel Vadot 	dwc_setup_rxdesc(sc, idx, seg.ds_addr);
237*972adf0fSEmmanuel Vadot 
238*972adf0fSEmmanuel Vadot 	return (0);
239*972adf0fSEmmanuel Vadot }
240*972adf0fSEmmanuel Vadot 
241*972adf0fSEmmanuel Vadot static struct mbuf *
242*972adf0fSEmmanuel Vadot dwc_alloc_mbufcl(struct dwc_softc *sc)
243*972adf0fSEmmanuel Vadot {
244*972adf0fSEmmanuel Vadot 	struct mbuf *m;
245*972adf0fSEmmanuel Vadot 
246*972adf0fSEmmanuel Vadot 	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
247*972adf0fSEmmanuel Vadot 	if (m != NULL)
248*972adf0fSEmmanuel Vadot 		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
249*972adf0fSEmmanuel Vadot 
250*972adf0fSEmmanuel Vadot 	return (m);
251*972adf0fSEmmanuel Vadot }
252*972adf0fSEmmanuel Vadot 
253*972adf0fSEmmanuel Vadot static struct mbuf *
254*972adf0fSEmmanuel Vadot dwc_rxfinish_one(struct dwc_softc *sc, struct dwc_hwdesc *desc,
255*972adf0fSEmmanuel Vadot     struct dwc_bufmap *map)
256*972adf0fSEmmanuel Vadot {
257*972adf0fSEmmanuel Vadot 	if_t ifp;
258*972adf0fSEmmanuel Vadot 	struct mbuf *m, *m0;
259*972adf0fSEmmanuel Vadot 	int len;
260*972adf0fSEmmanuel Vadot 	uint32_t rdesc0;
261*972adf0fSEmmanuel Vadot 
262*972adf0fSEmmanuel Vadot 	m = map->mbuf;
263*972adf0fSEmmanuel Vadot 	ifp = sc->ifp;
264*972adf0fSEmmanuel Vadot 	rdesc0 = desc ->desc0;
265*972adf0fSEmmanuel Vadot 
266*972adf0fSEmmanuel Vadot 	if ((rdesc0 & (RDESC0_FS | RDESC0_LS)) !=
267*972adf0fSEmmanuel Vadot 		    (RDESC0_FS | RDESC0_LS)) {
268*972adf0fSEmmanuel Vadot 		/*
269*972adf0fSEmmanuel Vadot 		 * Something very wrong happens. The whole packet should be
270*972adf0fSEmmanuel Vadot 		 * recevied in one descriptr. Report problem.
271*972adf0fSEmmanuel Vadot 		 */
272*972adf0fSEmmanuel Vadot 		device_printf(sc->dev,
273*972adf0fSEmmanuel Vadot 		    "%s: RX descriptor without FIRST and LAST bit set: 0x%08X",
274*972adf0fSEmmanuel Vadot 		    __func__, rdesc0);
275*972adf0fSEmmanuel Vadot 		return (NULL);
276*972adf0fSEmmanuel Vadot 	}
277*972adf0fSEmmanuel Vadot 
278*972adf0fSEmmanuel Vadot 	len = (rdesc0 >> RDESC0_FL_SHIFT) & RDESC0_FL_MASK;
279*972adf0fSEmmanuel Vadot 	if (len < 64) {
280*972adf0fSEmmanuel Vadot 		/*
281*972adf0fSEmmanuel Vadot 		 * Lenght is invalid, recycle old mbuf
282*972adf0fSEmmanuel Vadot 		 * Probably impossible case
283*972adf0fSEmmanuel Vadot 		 */
284*972adf0fSEmmanuel Vadot 		return (NULL);
285*972adf0fSEmmanuel Vadot 	}
286*972adf0fSEmmanuel Vadot 
287*972adf0fSEmmanuel Vadot 	/* Allocate new buffer */
288*972adf0fSEmmanuel Vadot 	m0 = dwc_alloc_mbufcl(sc);
289*972adf0fSEmmanuel Vadot 	if (m0 == NULL) {
290*972adf0fSEmmanuel Vadot 		/* no new mbuf available, recycle old */
291*972adf0fSEmmanuel Vadot 		if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, 1);
292*972adf0fSEmmanuel Vadot 		return (NULL);
293*972adf0fSEmmanuel Vadot 	}
294*972adf0fSEmmanuel Vadot 	/* Do dmasync for newly received packet */
295*972adf0fSEmmanuel Vadot 	bus_dmamap_sync(sc->rxbuf_tag, map->map, BUS_DMASYNC_POSTREAD);
296*972adf0fSEmmanuel Vadot 	bus_dmamap_unload(sc->rxbuf_tag, map->map);
297*972adf0fSEmmanuel Vadot 
298*972adf0fSEmmanuel Vadot 	/* Received packet is valid, process it */
299*972adf0fSEmmanuel Vadot 	m->m_pkthdr.rcvif = ifp;
300*972adf0fSEmmanuel Vadot 	m->m_pkthdr.len = len;
301*972adf0fSEmmanuel Vadot 	m->m_len = len;
302*972adf0fSEmmanuel Vadot 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
303*972adf0fSEmmanuel Vadot 
304*972adf0fSEmmanuel Vadot 	if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0 &&
305*972adf0fSEmmanuel Vadot 	  (rdesc0 & RDESC0_FT) != 0) {
306*972adf0fSEmmanuel Vadot 		m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
307*972adf0fSEmmanuel Vadot 		if ((rdesc0 & RDESC0_ICE) == 0)
308*972adf0fSEmmanuel Vadot 			m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
309*972adf0fSEmmanuel Vadot 		if ((rdesc0 & RDESC0_PCE) == 0) {
310*972adf0fSEmmanuel Vadot 			m->m_pkthdr.csum_flags |=
311*972adf0fSEmmanuel Vadot 				CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
312*972adf0fSEmmanuel Vadot 			m->m_pkthdr.csum_data = 0xffff;
313*972adf0fSEmmanuel Vadot 		}
314*972adf0fSEmmanuel Vadot 	}
315*972adf0fSEmmanuel Vadot 
316*972adf0fSEmmanuel Vadot 	/* Remove trailing FCS */
317*972adf0fSEmmanuel Vadot 	m_adj(m, -ETHER_CRC_LEN);
318*972adf0fSEmmanuel Vadot 
319*972adf0fSEmmanuel Vadot 	DWC_UNLOCK(sc);
320*972adf0fSEmmanuel Vadot 	if_input(ifp, m);
321*972adf0fSEmmanuel Vadot 	DWC_LOCK(sc);
322*972adf0fSEmmanuel Vadot 	return (m0);
323*972adf0fSEmmanuel Vadot }
324*972adf0fSEmmanuel Vadot 
/*
 * Reclaim completed TX mappings.  For each outstanding mbuf map
 * (tx_map_tail..tx_map_head), check that the hardware has released
 * every descriptor the map occupies (OWN bit clear); if so, unload
 * and free the mbuf, clear the descriptors, and advance both the
 * map tail and descriptor tail.  Stops at the first map that is
 * still in flight.
 */
void
dma1000_txfinish_locked(struct dwc_softc *sc)
{
	struct dwc_bufmap *bmap;
	struct dwc_hwdesc *desc;
	if_t ifp;
	int idx, last_idx;
	bool map_finished;

	DWC_ASSERT_LOCKED(sc);

	ifp = sc->ifp;
	/* check if all descriptors of the map are done */
	while (sc->tx_map_tail != sc->tx_map_head) {
		map_finished = true;
		bmap = &sc->txbuf_map[sc->tx_map_tail];
		idx = sc->tx_desc_tail;
		/* One past the last descriptor used by this map. */
		last_idx = next_txidx(sc, bmap->last_desc_idx);
		while (idx != last_idx) {
			desc = &sc->txdesc_ring[idx];
			/* Hardware still owns this descriptor: stop here. */
			if ((desc->desc0 & TDESC0_OWN) != 0) {
				map_finished = false;
				break;
			}
			idx = next_txidx(sc, idx);
		}

		if (!map_finished)
			break;
		bus_dmamap_sync(sc->txbuf_tag, bmap->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->txbuf_tag, bmap->map);
		m_freem(bmap->mbuf);
		bmap->mbuf = NULL;
		sc->tx_mapcount--;
		/* Clear the map's descriptors back to the free pool. */
		while (sc->tx_desc_tail != last_idx) {
			dwc_setup_txdesc(sc, sc->tx_desc_tail, 0, 0, 0, false, false);
			sc->tx_desc_tail = next_txidx(sc, sc->tx_desc_tail);
		}
		sc->tx_map_tail = next_txidx(sc, sc->tx_map_tail);
		/* A map slot is free again; allow new output. */
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
	}

	/* If there are no buffers outstanding, muzzle the watchdog. */
	if (sc->tx_desc_tail == sc->tx_desc_head) {
		sc->tx_watchdog_count = 0;
	}
}
374*972adf0fSEmmanuel Vadot 
/*
 * Harvest received frames.  Walk the RX ring from rx_idx, passing each
 * completed descriptor's mbuf up the stack and rearming the slot with
 * a fresh buffer (or recycling the old one on failure), until a
 * descriptor that is still owned by the hardware is reached.
 */
void
dma1000_rxfinish_locked(struct dwc_softc *sc)
{
	struct mbuf *m;
	int error, idx;
	struct dwc_hwdesc *desc;

	DWC_ASSERT_LOCKED(sc);
	for (;;) {
		idx = sc->rx_idx;
		desc = sc->rxdesc_ring + idx;
		if ((desc->desc0 & RDESC0_OWN) != 0)
			break;

		m = dwc_rxfinish_one(sc, desc, sc->rxbuf_map + idx);
		if (m == NULL) {
			/* Recycle the old mbuf: just hand the slot back. */
			wmb();
			desc->desc0 = RDESC0_OWN;
			wmb();
		} else {
			/* We cannot create hole in RX ring */
			error = dma1000_setup_rxbuf(sc, idx, m);
			if (error != 0)
				panic("dma1000_setup_rxbuf failed:  error %d\n",
				    error);

		}
		sc->rx_idx = next_rxidx(sc, sc->rx_idx);
	}
}
405*972adf0fSEmmanuel Vadot 
406*972adf0fSEmmanuel Vadot /*
407*972adf0fSEmmanuel Vadot  * Start the DMA controller
408*972adf0fSEmmanuel Vadot  */
409*972adf0fSEmmanuel Vadot void
410*972adf0fSEmmanuel Vadot dma1000_start(struct dwc_softc *sc)
411*972adf0fSEmmanuel Vadot {
412*972adf0fSEmmanuel Vadot 	uint32_t reg;
413*972adf0fSEmmanuel Vadot 
414*972adf0fSEmmanuel Vadot 	DWC_ASSERT_LOCKED(sc);
415*972adf0fSEmmanuel Vadot 
416*972adf0fSEmmanuel Vadot 	/* Initializa DMA and enable transmitters */
417*972adf0fSEmmanuel Vadot 	reg = READ4(sc, OPERATION_MODE);
418*972adf0fSEmmanuel Vadot 	reg |= (MODE_TSF | MODE_OSF | MODE_FUF);
419*972adf0fSEmmanuel Vadot 	reg &= ~(MODE_RSF);
420*972adf0fSEmmanuel Vadot 	reg |= (MODE_RTC_LEV32 << MODE_RTC_SHIFT);
421*972adf0fSEmmanuel Vadot 	WRITE4(sc, OPERATION_MODE, reg);
422*972adf0fSEmmanuel Vadot 
423*972adf0fSEmmanuel Vadot 	WRITE4(sc, INTERRUPT_ENABLE, INT_EN_DEFAULT);
424*972adf0fSEmmanuel Vadot 
425*972adf0fSEmmanuel Vadot 	/* Start DMA */
426*972adf0fSEmmanuel Vadot 	reg = READ4(sc, OPERATION_MODE);
427*972adf0fSEmmanuel Vadot 	reg |= (MODE_ST | MODE_SR);
428*972adf0fSEmmanuel Vadot 	WRITE4(sc, OPERATION_MODE, reg);
429*972adf0fSEmmanuel Vadot }
430*972adf0fSEmmanuel Vadot 
431*972adf0fSEmmanuel Vadot /*
432*972adf0fSEmmanuel Vadot  * Stop the DMA controller
433*972adf0fSEmmanuel Vadot  */
434*972adf0fSEmmanuel Vadot void
435*972adf0fSEmmanuel Vadot dma1000_stop(struct dwc_softc *sc)
436*972adf0fSEmmanuel Vadot {
437*972adf0fSEmmanuel Vadot 	uint32_t reg;
438*972adf0fSEmmanuel Vadot 
439*972adf0fSEmmanuel Vadot 	DWC_ASSERT_LOCKED(sc);
440*972adf0fSEmmanuel Vadot 
441*972adf0fSEmmanuel Vadot 	/* Stop DMA TX */
442*972adf0fSEmmanuel Vadot 	reg = READ4(sc, OPERATION_MODE);
443*972adf0fSEmmanuel Vadot 	reg &= ~(MODE_ST);
444*972adf0fSEmmanuel Vadot 	WRITE4(sc, OPERATION_MODE, reg);
445*972adf0fSEmmanuel Vadot 
446*972adf0fSEmmanuel Vadot 	/* Flush TX */
447*972adf0fSEmmanuel Vadot 	reg = READ4(sc, OPERATION_MODE);
448*972adf0fSEmmanuel Vadot 	reg |= (MODE_FTF);
449*972adf0fSEmmanuel Vadot 	WRITE4(sc, OPERATION_MODE, reg);
450*972adf0fSEmmanuel Vadot 
451*972adf0fSEmmanuel Vadot 	/* Stop DMA RX */
452*972adf0fSEmmanuel Vadot 	reg = READ4(sc, OPERATION_MODE);
453*972adf0fSEmmanuel Vadot 	reg &= ~(MODE_SR);
454*972adf0fSEmmanuel Vadot 	WRITE4(sc, OPERATION_MODE, reg);
455*972adf0fSEmmanuel Vadot }
456*972adf0fSEmmanuel Vadot 
457*972adf0fSEmmanuel Vadot /*
458*972adf0fSEmmanuel Vadot  * Create the bus_dma resources
459*972adf0fSEmmanuel Vadot  */
460*972adf0fSEmmanuel Vadot int
461*972adf0fSEmmanuel Vadot dma1000_init(struct dwc_softc *sc)
462*972adf0fSEmmanuel Vadot {
463*972adf0fSEmmanuel Vadot 	struct mbuf *m;
464*972adf0fSEmmanuel Vadot 	int error;
465*972adf0fSEmmanuel Vadot 	int nidx;
466*972adf0fSEmmanuel Vadot 	int idx;
467*972adf0fSEmmanuel Vadot 
468*972adf0fSEmmanuel Vadot 	/*
469*972adf0fSEmmanuel Vadot 	 * Set up TX descriptor ring, descriptors, and dma maps.
470*972adf0fSEmmanuel Vadot 	 */
471*972adf0fSEmmanuel Vadot 	error = bus_dma_tag_create(
472*972adf0fSEmmanuel Vadot 	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
473*972adf0fSEmmanuel Vadot 	    DWC_DESC_RING_ALIGN, 0,	/* alignment, boundary */
474*972adf0fSEmmanuel Vadot 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
475*972adf0fSEmmanuel Vadot 	    BUS_SPACE_MAXADDR,		/* highaddr */
476*972adf0fSEmmanuel Vadot 	    NULL, NULL,			/* filter, filterarg */
477*972adf0fSEmmanuel Vadot 	    TX_DESC_SIZE, 1, 		/* maxsize, nsegments */
478*972adf0fSEmmanuel Vadot 	    TX_DESC_SIZE,		/* maxsegsize */
479*972adf0fSEmmanuel Vadot 	    0,				/* flags */
480*972adf0fSEmmanuel Vadot 	    NULL, NULL,			/* lockfunc, lockarg */
481*972adf0fSEmmanuel Vadot 	    &sc->txdesc_tag);
482*972adf0fSEmmanuel Vadot 	if (error != 0) {
483*972adf0fSEmmanuel Vadot 		device_printf(sc->dev,
484*972adf0fSEmmanuel Vadot 		    "could not create TX ring DMA tag.\n");
485*972adf0fSEmmanuel Vadot 		goto out;
486*972adf0fSEmmanuel Vadot 	}
487*972adf0fSEmmanuel Vadot 
488*972adf0fSEmmanuel Vadot 	error = bus_dmamem_alloc(sc->txdesc_tag, (void**)&sc->txdesc_ring,
489*972adf0fSEmmanuel Vadot 	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
490*972adf0fSEmmanuel Vadot 	    &sc->txdesc_map);
491*972adf0fSEmmanuel Vadot 	if (error != 0) {
492*972adf0fSEmmanuel Vadot 		device_printf(sc->dev,
493*972adf0fSEmmanuel Vadot 		    "could not allocate TX descriptor ring.\n");
494*972adf0fSEmmanuel Vadot 		goto out;
495*972adf0fSEmmanuel Vadot 	}
496*972adf0fSEmmanuel Vadot 
497*972adf0fSEmmanuel Vadot 	error = bus_dmamap_load(sc->txdesc_tag, sc->txdesc_map,
498*972adf0fSEmmanuel Vadot 	    sc->txdesc_ring, TX_DESC_SIZE, dwc_get1paddr,
499*972adf0fSEmmanuel Vadot 	    &sc->txdesc_ring_paddr, 0);
500*972adf0fSEmmanuel Vadot 	if (error != 0) {
501*972adf0fSEmmanuel Vadot 		device_printf(sc->dev,
502*972adf0fSEmmanuel Vadot 		    "could not load TX descriptor ring map.\n");
503*972adf0fSEmmanuel Vadot 		goto out;
504*972adf0fSEmmanuel Vadot 	}
505*972adf0fSEmmanuel Vadot 
506*972adf0fSEmmanuel Vadot 	for (idx = 0; idx < TX_DESC_COUNT; idx++) {
507*972adf0fSEmmanuel Vadot 		nidx = next_txidx(sc, idx);
508*972adf0fSEmmanuel Vadot 		sc->txdesc_ring[idx].addr2 = sc->txdesc_ring_paddr +
509*972adf0fSEmmanuel Vadot 		    (nidx * sizeof(struct dwc_hwdesc));
510*972adf0fSEmmanuel Vadot 	}
511*972adf0fSEmmanuel Vadot 
512*972adf0fSEmmanuel Vadot 	error = bus_dma_tag_create(
513*972adf0fSEmmanuel Vadot 	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
514*972adf0fSEmmanuel Vadot 	    1, 0,			/* alignment, boundary */
515*972adf0fSEmmanuel Vadot 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
516*972adf0fSEmmanuel Vadot 	    BUS_SPACE_MAXADDR,		/* highaddr */
517*972adf0fSEmmanuel Vadot 	    NULL, NULL,			/* filter, filterarg */
518*972adf0fSEmmanuel Vadot 	    MCLBYTES*TX_MAP_MAX_SEGS,	/* maxsize */
519*972adf0fSEmmanuel Vadot 	    TX_MAP_MAX_SEGS,		/* nsegments */
520*972adf0fSEmmanuel Vadot 	    MCLBYTES,			/* maxsegsize */
521*972adf0fSEmmanuel Vadot 	    0,				/* flags */
522*972adf0fSEmmanuel Vadot 	    NULL, NULL,			/* lockfunc, lockarg */
523*972adf0fSEmmanuel Vadot 	    &sc->txbuf_tag);
524*972adf0fSEmmanuel Vadot 	if (error != 0) {
525*972adf0fSEmmanuel Vadot 		device_printf(sc->dev,
526*972adf0fSEmmanuel Vadot 		    "could not create TX ring DMA tag.\n");
527*972adf0fSEmmanuel Vadot 		goto out;
528*972adf0fSEmmanuel Vadot 	}
529*972adf0fSEmmanuel Vadot 
530*972adf0fSEmmanuel Vadot 	for (idx = 0; idx < TX_MAP_COUNT; idx++) {
531*972adf0fSEmmanuel Vadot 		error = bus_dmamap_create(sc->txbuf_tag, BUS_DMA_COHERENT,
532*972adf0fSEmmanuel Vadot 		    &sc->txbuf_map[idx].map);
533*972adf0fSEmmanuel Vadot 		if (error != 0) {
534*972adf0fSEmmanuel Vadot 			device_printf(sc->dev,
535*972adf0fSEmmanuel Vadot 			    "could not create TX buffer DMA map.\n");
536*972adf0fSEmmanuel Vadot 			goto out;
537*972adf0fSEmmanuel Vadot 		}
538*972adf0fSEmmanuel Vadot 	}
539*972adf0fSEmmanuel Vadot 
540*972adf0fSEmmanuel Vadot 	for (idx = 0; idx < TX_DESC_COUNT; idx++)
541*972adf0fSEmmanuel Vadot 		dwc_setup_txdesc(sc, idx, 0, 0, 0, false, false);
542*972adf0fSEmmanuel Vadot 
543*972adf0fSEmmanuel Vadot 	/*
544*972adf0fSEmmanuel Vadot 	 * Set up RX descriptor ring, descriptors, dma maps, and mbufs.
545*972adf0fSEmmanuel Vadot 	 */
546*972adf0fSEmmanuel Vadot 	error = bus_dma_tag_create(
547*972adf0fSEmmanuel Vadot 	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
548*972adf0fSEmmanuel Vadot 	    DWC_DESC_RING_ALIGN, 0,	/* alignment, boundary */
549*972adf0fSEmmanuel Vadot 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
550*972adf0fSEmmanuel Vadot 	    BUS_SPACE_MAXADDR,		/* highaddr */
551*972adf0fSEmmanuel Vadot 	    NULL, NULL,			/* filter, filterarg */
552*972adf0fSEmmanuel Vadot 	    RX_DESC_SIZE, 1, 		/* maxsize, nsegments */
553*972adf0fSEmmanuel Vadot 	    RX_DESC_SIZE,		/* maxsegsize */
554*972adf0fSEmmanuel Vadot 	    0,				/* flags */
555*972adf0fSEmmanuel Vadot 	    NULL, NULL,			/* lockfunc, lockarg */
556*972adf0fSEmmanuel Vadot 	    &sc->rxdesc_tag);
557*972adf0fSEmmanuel Vadot 	if (error != 0) {
558*972adf0fSEmmanuel Vadot 		device_printf(sc->dev,
559*972adf0fSEmmanuel Vadot 		    "could not create RX ring DMA tag.\n");
560*972adf0fSEmmanuel Vadot 		goto out;
561*972adf0fSEmmanuel Vadot 	}
562*972adf0fSEmmanuel Vadot 
563*972adf0fSEmmanuel Vadot 	error = bus_dmamem_alloc(sc->rxdesc_tag, (void **)&sc->rxdesc_ring,
564*972adf0fSEmmanuel Vadot 	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
565*972adf0fSEmmanuel Vadot 	    &sc->rxdesc_map);
566*972adf0fSEmmanuel Vadot 	if (error != 0) {
567*972adf0fSEmmanuel Vadot 		device_printf(sc->dev,
568*972adf0fSEmmanuel Vadot 		    "could not allocate RX descriptor ring.\n");
569*972adf0fSEmmanuel Vadot 		goto out;
570*972adf0fSEmmanuel Vadot 	}
571*972adf0fSEmmanuel Vadot 
572*972adf0fSEmmanuel Vadot 	error = bus_dmamap_load(sc->rxdesc_tag, sc->rxdesc_map,
573*972adf0fSEmmanuel Vadot 	    sc->rxdesc_ring, RX_DESC_SIZE, dwc_get1paddr,
574*972adf0fSEmmanuel Vadot 	    &sc->rxdesc_ring_paddr, 0);
575*972adf0fSEmmanuel Vadot 	if (error != 0) {
576*972adf0fSEmmanuel Vadot 		device_printf(sc->dev,
577*972adf0fSEmmanuel Vadot 		    "could not load RX descriptor ring map.\n");
578*972adf0fSEmmanuel Vadot 		goto out;
579*972adf0fSEmmanuel Vadot 	}
580*972adf0fSEmmanuel Vadot 
581*972adf0fSEmmanuel Vadot 	error = bus_dma_tag_create(
582*972adf0fSEmmanuel Vadot 	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
583*972adf0fSEmmanuel Vadot 	    1, 0,			/* alignment, boundary */
584*972adf0fSEmmanuel Vadot 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
585*972adf0fSEmmanuel Vadot 	    BUS_SPACE_MAXADDR,		/* highaddr */
586*972adf0fSEmmanuel Vadot 	    NULL, NULL,			/* filter, filterarg */
587*972adf0fSEmmanuel Vadot 	    MCLBYTES, 1, 		/* maxsize, nsegments */
588*972adf0fSEmmanuel Vadot 	    MCLBYTES,			/* maxsegsize */
589*972adf0fSEmmanuel Vadot 	    0,				/* flags */
590*972adf0fSEmmanuel Vadot 	    NULL, NULL,			/* lockfunc, lockarg */
591*972adf0fSEmmanuel Vadot 	    &sc->rxbuf_tag);
592*972adf0fSEmmanuel Vadot 	if (error != 0) {
593*972adf0fSEmmanuel Vadot 		device_printf(sc->dev,
594*972adf0fSEmmanuel Vadot 		    "could not create RX buf DMA tag.\n");
595*972adf0fSEmmanuel Vadot 		goto out;
596*972adf0fSEmmanuel Vadot 	}
597*972adf0fSEmmanuel Vadot 
598*972adf0fSEmmanuel Vadot 	for (idx = 0; idx < RX_DESC_COUNT; idx++) {
599*972adf0fSEmmanuel Vadot 		error = bus_dmamap_create(sc->rxbuf_tag, BUS_DMA_COHERENT,
600*972adf0fSEmmanuel Vadot 		    &sc->rxbuf_map[idx].map);
601*972adf0fSEmmanuel Vadot 		if (error != 0) {
602*972adf0fSEmmanuel Vadot 			device_printf(sc->dev,
603*972adf0fSEmmanuel Vadot 			    "could not create RX buffer DMA map.\n");
604*972adf0fSEmmanuel Vadot 			goto out;
605*972adf0fSEmmanuel Vadot 		}
606*972adf0fSEmmanuel Vadot 		if ((m = dwc_alloc_mbufcl(sc)) == NULL) {
607*972adf0fSEmmanuel Vadot 			device_printf(sc->dev, "Could not alloc mbuf\n");
608*972adf0fSEmmanuel Vadot 			error = ENOMEM;
609*972adf0fSEmmanuel Vadot 			goto out;
610*972adf0fSEmmanuel Vadot 		}
611*972adf0fSEmmanuel Vadot 		if ((error = dma1000_setup_rxbuf(sc, idx, m)) != 0) {
612*972adf0fSEmmanuel Vadot 			device_printf(sc->dev,
613*972adf0fSEmmanuel Vadot 			    "could not create new RX buffer.\n");
614*972adf0fSEmmanuel Vadot 			goto out;
615*972adf0fSEmmanuel Vadot 		}
616*972adf0fSEmmanuel Vadot 	}
617*972adf0fSEmmanuel Vadot 
618*972adf0fSEmmanuel Vadot out:
619*972adf0fSEmmanuel Vadot 	if (error != 0)
620*972adf0fSEmmanuel Vadot 		return (ENXIO);
621*972adf0fSEmmanuel Vadot 
622*972adf0fSEmmanuel Vadot 	return (0);
623*972adf0fSEmmanuel Vadot }
624*972adf0fSEmmanuel Vadot 
625*972adf0fSEmmanuel Vadot /*
626*972adf0fSEmmanuel Vadot  * Free the bus_dma resources
627*972adf0fSEmmanuel Vadot  */
628*972adf0fSEmmanuel Vadot void
629*972adf0fSEmmanuel Vadot dma1000_free(struct dwc_softc *sc)
630*972adf0fSEmmanuel Vadot {
631*972adf0fSEmmanuel Vadot 	bus_dmamap_t map;
632*972adf0fSEmmanuel Vadot 	int idx;
633*972adf0fSEmmanuel Vadot 
634*972adf0fSEmmanuel Vadot 	/* Clean up RX DMA resources and free mbufs. */
635*972adf0fSEmmanuel Vadot 	for (idx = 0; idx < RX_DESC_COUNT; ++idx) {
636*972adf0fSEmmanuel Vadot 		if ((map = sc->rxbuf_map[idx].map) != NULL) {
637*972adf0fSEmmanuel Vadot 			bus_dmamap_unload(sc->rxbuf_tag, map);
638*972adf0fSEmmanuel Vadot 			bus_dmamap_destroy(sc->rxbuf_tag, map);
639*972adf0fSEmmanuel Vadot 			m_freem(sc->rxbuf_map[idx].mbuf);
640*972adf0fSEmmanuel Vadot 		}
641*972adf0fSEmmanuel Vadot 	}
642*972adf0fSEmmanuel Vadot 	if (sc->rxbuf_tag != NULL)
643*972adf0fSEmmanuel Vadot 		bus_dma_tag_destroy(sc->rxbuf_tag);
644*972adf0fSEmmanuel Vadot 	if (sc->rxdesc_map != NULL) {
645*972adf0fSEmmanuel Vadot 		bus_dmamap_unload(sc->rxdesc_tag, sc->rxdesc_map);
646*972adf0fSEmmanuel Vadot 		bus_dmamem_free(sc->rxdesc_tag, sc->rxdesc_ring,
647*972adf0fSEmmanuel Vadot 		    sc->rxdesc_map);
648*972adf0fSEmmanuel Vadot 	}
649*972adf0fSEmmanuel Vadot 	if (sc->rxdesc_tag != NULL)
650*972adf0fSEmmanuel Vadot 		bus_dma_tag_destroy(sc->rxdesc_tag);
651*972adf0fSEmmanuel Vadot 
652*972adf0fSEmmanuel Vadot 	/* Clean up TX DMA resources. */
653*972adf0fSEmmanuel Vadot 	for (idx = 0; idx < TX_DESC_COUNT; ++idx) {
654*972adf0fSEmmanuel Vadot 		if ((map = sc->txbuf_map[idx].map) != NULL) {
655*972adf0fSEmmanuel Vadot 			/* TX maps are already unloaded. */
656*972adf0fSEmmanuel Vadot 			bus_dmamap_destroy(sc->txbuf_tag, map);
657*972adf0fSEmmanuel Vadot 		}
658*972adf0fSEmmanuel Vadot 	}
659*972adf0fSEmmanuel Vadot 	if (sc->txbuf_tag != NULL)
660*972adf0fSEmmanuel Vadot 		bus_dma_tag_destroy(sc->txbuf_tag);
661*972adf0fSEmmanuel Vadot 	if (sc->txdesc_map != NULL) {
662*972adf0fSEmmanuel Vadot 		bus_dmamap_unload(sc->txdesc_tag, sc->txdesc_map);
663*972adf0fSEmmanuel Vadot 		bus_dmamem_free(sc->txdesc_tag, sc->txdesc_ring,
664*972adf0fSEmmanuel Vadot 		    sc->txdesc_map);
665*972adf0fSEmmanuel Vadot 	}
666*972adf0fSEmmanuel Vadot 	if (sc->txdesc_tag != NULL)
667*972adf0fSEmmanuel Vadot 		bus_dma_tag_destroy(sc->txdesc_tag);
668*972adf0fSEmmanuel Vadot }
669