/*-
 * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>

#include <machine/bus.h>

#include <dev/extres/clk/clk.h>
#include <dev/extres/hwreset/hwreset.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dev/dwc/if_dwcvar.h>
#include <dev/dwc/dwc1000_reg.h>
#include <dev/dwc/dwc1000_dma.h>

#define	WATCHDOG_TIMEOUT_SECS	5

/* TX descriptors - TDESC0 is almost unified */
#define	TDESC0_OWN		(1U << 31)
#define	TDESC0_IHE		(1U << 16)	/* IP Header Error */
#define	TDESC0_ES		(1U << 15)	/* Error Summary */
#define	TDESC0_JT		(1U << 14)	/* Jabber Timeout */
#define	TDESC0_FF		(1U << 13)	/* Frame Flushed */
#define	TDESC0_PCE		(1U << 12)	/* Payload Checksum Error */
#define	TDESC0_LOC		(1U << 11)	/* Loss of Carrier */
#define	TDESC0_NC		(1U << 10)	/* No Carrier */
#define	TDESC0_LC		(1U <<  9)	/* Late Collision */
#define	TDESC0_EC		(1U <<  8)	/* Excessive Collision */
#define	TDESC0_VF		(1U <<  7)	/* VLAN Frame */
#define	TDESC0_CC_MASK		0xf
#define	TDESC0_CC_SHIFT		3		/* Collision Count */
#define	TDESC0_ED		(1U <<  2)	/* Excessive Deferral */
#define	TDESC0_UF		(1U <<  1)	/* Underflow Error */
#define	TDESC0_DB		(1U <<  0)	/* Deferred Bit */
/* TX descriptors - TDESC0 extended format only */
#define	ETDESC0_IC		(1U << 30)	/* Interrupt on Completion */
#define	ETDESC0_LS		(1U << 29)	/* Last Segment */
#define	ETDESC0_FS		(1U << 28)	/* First Segment */
#define	ETDESC0_DC		(1U << 27)	/* Disable CRC */
#define	ETDESC0_DP		(1U << 26)	/* Disable Padding */
#define	ETDESC0_CIC_NONE	(0U << 22)	/* Checksum Insertion Control */
#define	ETDESC0_CIC_HDR		(1U << 22)
#define	ETDESC0_CIC_SEG		(2U << 22)
#define	ETDESC0_CIC_FULL	(3U << 22)
#define	ETDESC0_TER		(1U << 21)	/* Transmit End of Ring */
#define	ETDESC0_TCH		(1U << 20)	/* Second Address Chained */

/* TX descriptors - TDESC1 normal format */
#define	NTDESC1_IC		(1U << 31)	/* Interrupt on Completion */
#define	NTDESC1_LS		(1U << 30)	/* Last Segment */
#define	NTDESC1_FS		(1U << 29)	/* First Segment */
#define	NTDESC1_CIC_NONE	(0U << 27)	/* Checksum Insertion Control */
#define	NTDESC1_CIC_HDR		(1U << 27)
#define	NTDESC1_CIC_SEG		(2U << 27)
#define	NTDESC1_CIC_FULL	(3U << 27)
#define	NTDESC1_DC		(1U << 26)	/* Disable CRC */
#define	NTDESC1_TER		(1U << 25)	/* Transmit End of Ring */
#define	NTDESC1_TCH		(1U << 24)	/* Second Address Chained */
/* TX descriptors - TDESC1 extended format */
#define	ETDESC1_DP		(1U << 23)	/* Disable Padding */
#define	ETDESC1_TBS2_MASK	0x7ff
#define	ETDESC1_TBS2_SHIFT	11		/* Transmit Buffer 2 Size */
#define	ETDESC1_TBS1_MASK	0x7ff
#define	ETDESC1_TBS1_SHIFT	0		/* Transmit Buffer 1 Size */

/* RX descriptor - RDESC0 is unified */
#define	RDESC0_OWN		(1U << 31)
#define	RDESC0_AFM		(1U << 30)	/* Dest. Address Filter Fail */
#define	RDESC0_FL_MASK		0x3fff
#define	RDESC0_FL_SHIFT		16		/* Frame Length */
#define	RDESC0_ES		(1U << 15)	/* Error Summary */
#define	RDESC0_DE		(1U << 14)	/* Descriptor Error */
#define	RDESC0_SAF		(1U << 13)	/* Source Address Filter Fail */
#define	RDESC0_LE		(1U << 12)	/* Length Error */
#define	RDESC0_OE		(1U << 11)	/* Overflow Error */
#define	RDESC0_VLAN		(1U << 10)	/* VLAN Tag */
#define	RDESC0_FS		(1U <<  9)	/* First Descriptor */
#define	RDESC0_LS		(1U <<  8)	/* Last Descriptor */
#define	RDESC0_ICE		(1U <<  7)	/* IPC Checksum Error */
#define	RDESC0_LC		(1U <<  6)	/* Late Collision */
#define	RDESC0_FT		(1U <<  5)	/* Frame Type */
#define	RDESC0_RWT		(1U <<  4)	/* Receive Watchdog Timeout */
#define	RDESC0_RE		(1U <<  3)	/* Receive Error */
#define	RDESC0_DBE		(1U <<  2)	/* Dribble Bit Error */
#define	RDESC0_CE		(1U <<  1)	/* CRC Error */
#define	RDESC0_PCE		(1U <<  0)	/* Payload Checksum Error */
#define	RDESC0_RXMA		(1U <<  0)	/* Rx MAC Address */

/* RX descriptors - RDESC1 normal format */
#define	NRDESC1_DIC		(1U << 31)	/* Disable Intr on Completion */
#define	NRDESC1_RER		(1U << 25)	/* Receive End of Ring */
#define	NRDESC1_RCH		(1U << 24)	/* Second Address Chained */
#define	NRDESC1_RBS2_MASK	0x7ff
#define	NRDESC1_RBS2_SHIFT	11		/* Receive Buffer 2 Size */
#define	NRDESC1_RBS1_MASK	0x7ff
#define	NRDESC1_RBS1_SHIFT	0		/* Receive Buffer 1 Size */

/* RX descriptors - RDESC1 enhanced format */
#define	ERDESC1_DIC		(1U << 31)	/* Disable Intr on Completion */
#define	ERDESC1_RBS2_MASK	0x7ffff
#define	ERDESC1_RBS2_SHIFT	16		/* Receive Buffer 2 Size */
#define	ERDESC1_RER		(1U << 15)	/* Receive End of Ring */
#define	ERDESC1_RCH		(1U << 14)	/* Second Address Chained */
#define	ERDESC1_RBS1_MASK	0x7ffff
#define	ERDESC1_RBS1_SHIFT	0		/* Receive Buffer 1 Size */

/*
 * The hardware imposes alignment restrictions on various objects involved in
 * DMA transfers.  These values are expressed in bytes (not bits).
 */
#define	DWC_DESC_RING_ALIGN	2048

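/*
 * Ring index helpers.  Both descriptor rings are fixed-size circular
 * buffers, so advancing an index is a simple increment modulo the ring size.
 */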
static inline uint32_t
next_txidx(struct dwc_softc *sc, uint32_t curidx)
{

	return ((curidx + 1) % TX_DESC_COUNT);
}

static inline uint32_t
next_rxidx(struct dwc_softc *sc, uint32_t curidx)
{

	return ((curidx + 1) % RX_DESC_COUNT);
}

static void
dwc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{

	if (error != 0)
		return;
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

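/*
 * Hand a TX descriptor back to the hardware.  The OWN bit must become
 * visible only after every other field of the descriptor has been written,
 * hence the write barriers around the update.
 */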
inline static void
dwc_set_owner(struct dwc_softc *sc, int idx)
{
	wmb();
	sc->txdesc_ring[idx].desc0 |= TDESC0_OWN;
	wmb();
}

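/*
 * Fill in a single TX descriptor.  The normal and extended descriptor
 * layouts keep the control bits in different words (desc1 vs. desc0), so
 * the flags are placed according to sc->mactype.  A zero paddr/len clears
 * a descriptor whose transmission has completed and adjusts the count of
 * in-use descriptors accordingly.
 */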
inline static void
dwc_setup_txdesc(struct dwc_softc *sc, int idx, bus_addr_t paddr,
  uint32_t len, uint32_t flags, bool first, bool last)
{
	uint32_t desc0, desc1;

	/* Addr/len 0 means we're clearing the descriptor after xmit done. */
	if (paddr == 0 || len == 0) {
		desc0 = 0;
		desc1 = 0;
		--sc->tx_desccount;
	} else {
		if (sc->mactype != DWC_GMAC_EXT_DESC) {
			desc0 = 0;
			desc1 = NTDESC1_TCH | len | flags;
			if (first)
				desc1 |= NTDESC1_FS;
			if (last)
				desc1 |= NTDESC1_LS | NTDESC1_IC;
		} else {
			desc0 = ETDESC0_TCH | flags;
			if (first)
				desc0 |= ETDESC0_FS;
			if (last)
				desc0 |= ETDESC0_LS | ETDESC0_IC;
			desc1 = len;
		}
		++sc->tx_desccount;
	}

	sc->txdesc_ring[idx].addr1 = (uint32_t)(paddr);
	sc->txdesc_ring[idx].desc0 = desc0;
	sc->txdesc_ring[idx].desc1 = desc1;
}

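/*
 * Program an RX descriptor with a new buffer address, chain addr2 to the
 * physical address of the next descriptor in the ring, clamp the buffer
 * size field to the mbuf cluster size, and finally hand the descriptor to
 * the DMA engine by setting RDESC0_OWN.  Returns the next ring index.
 */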
inline static uint32_t
dwc_setup_rxdesc(struct dwc_softc *sc, int idx, bus_addr_t paddr)
{
	uint32_t nidx;

	sc->rxdesc_ring[idx].addr1 = (uint32_t)paddr;
	nidx = next_rxidx(sc, idx);
	sc->rxdesc_ring[idx].addr2 = sc->rxdesc_ring_paddr +
	    (nidx * sizeof(struct dwc_hwdesc));
	if (sc->mactype != DWC_GMAC_EXT_DESC)
		sc->rxdesc_ring[idx].desc1 = NRDESC1_RCH |
		    MIN(MCLBYTES, NRDESC1_RBS1_MASK);
	else
		sc->rxdesc_ring[idx].desc1 = ERDESC1_RCH |
		    MIN(MCLBYTES, ERDESC1_RBS1_MASK);

	wmb();
	sc->rxdesc_ring[idx].desc0 = RDESC0_OWN;
	wmb();
	return (nidx);
}

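/*
 * Map an outgoing mbuf chain and attach it to the TX ring.  If the chain
 * needs more segments than the map allows, it is defragmented and loaded
 * again; if the segments will not fit in the free descriptors, the load is
 * undone and ENOMEM returned.  Checksum-offload flags are derived from the
 * packet header and applied to the first descriptor only.  The first
 * descriptor's OWN bit is set last, so the hardware never sees a partially
 * built chain.
 */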
int
dma1000_setup_txbuf(struct dwc_softc *sc, int idx, struct mbuf **mp)
{
	struct bus_dma_segment segs[TX_MAP_MAX_SEGS];
	int error, nsegs;
	struct mbuf *m;
	uint32_t flags = 0;
	int i;
	int first, last;

	error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag, sc->txbuf_map[idx].map,
	    *mp, segs, &nsegs, 0);
	if (error == EFBIG) {
		/*
		 * The map may be partially mapped from the first call.
		 * Make sure to reset it.
		 */
		bus_dmamap_unload(sc->txbuf_tag, sc->txbuf_map[idx].map);
		if ((m = m_defrag(*mp, M_NOWAIT)) == NULL)
			return (ENOMEM);
		*mp = m;
		error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag, sc->txbuf_map[idx].map,
		    *mp, segs, &nsegs, 0);
	}
	if (error != 0)
		return (ENOMEM);

	if (sc->tx_desccount + nsegs > TX_DESC_COUNT) {
		bus_dmamap_unload(sc->txbuf_tag, sc->txbuf_map[idx].map);
		return (ENOMEM);
	}

	m = *mp;

	if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) {
		if ((m->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) != 0) {
			if (sc->mactype != DWC_GMAC_EXT_DESC)
				flags = NTDESC1_CIC_FULL;
			else
				flags = ETDESC0_CIC_FULL;
		} else {
			if (sc->mactype != DWC_GMAC_EXT_DESC)
				flags = NTDESC1_CIC_HDR;
			else
				flags = ETDESC0_CIC_HDR;
		}
	}

	bus_dmamap_sync(sc->txbuf_tag, sc->txbuf_map[idx].map,
	    BUS_DMASYNC_PREWRITE);

	sc->txbuf_map[idx].mbuf = m;

	first = sc->tx_desc_head;
	for (i = 0; i < nsegs; i++) {
		dwc_setup_txdesc(sc, sc->tx_desc_head,
		    segs[i].ds_addr, segs[i].ds_len,
		    (i == 0) ? flags : 0, /* only first desc needs flags */
		    (i == 0),
		    (i == nsegs - 1));
		if (i > 0)
			dwc_set_owner(sc, sc->tx_desc_head);
		last = sc->tx_desc_head;
		sc->tx_desc_head = next_txidx(sc, sc->tx_desc_head);
	}

	sc->txbuf_map[idx].last_desc_idx = last;

	dwc_set_owner(sc, first);

	return (0);
}

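/*
 * Load a single-segment DMA mapping for an RX mbuf cluster (after reserving
 * ETHER_ALIGN leading bytes so the IP header ends up aligned) and publish
 * the buffer to the hardware via dwc_setup_rxdesc().
 */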
static int
dma1000_setup_rxbuf(struct dwc_softc *sc, int idx, struct mbuf *m)
{
	struct bus_dma_segment seg;
	int error, nsegs;

	m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_sg(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
	    m, &seg, &nsegs, 0);
	if (error != 0)
		return (error);

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	bus_dmamap_sync(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
	    BUS_DMASYNC_PREREAD);

	sc->rxbuf_map[idx].mbuf = m;
	dwc_setup_rxdesc(sc, idx, seg.ds_addr);

	return (0);
}

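/*
 * Allocate a packet-header mbuf backed by a cluster and mark the whole
 * cluster as valid data, as expected by the RX descriptor setup.
 */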
static struct mbuf *
dwc_alloc_mbufcl(struct dwc_softc *sc)
{
	struct mbuf *m;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m != NULL)
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

	return (m);
}

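/*
 * Process one completed RX descriptor: validate that the frame is contained
 * in a single descriptor, extract its length, allocate a replacement
 * cluster, sync and unload the old DMA map, record checksum-offload
 * results, trim the trailing FCS, and pass the packet up the stack with the
 * driver lock dropped.  Returns the replacement mbuf, or NULL if the
 * existing buffer must be recycled.
 */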
static struct mbuf *
dwc_rxfinish_one(struct dwc_softc *sc, struct dwc_hwdesc *desc,
    struct dwc_bufmap *map)
{
	if_t ifp;
	struct mbuf *m, *m0;
	int len;
	uint32_t rdesc0;

	m = map->mbuf;
	ifp = sc->ifp;
	rdesc0 = desc->desc0;

	if ((rdesc0 & (RDESC0_FS | RDESC0_LS)) !=
	    (RDESC0_FS | RDESC0_LS)) {
		/*
		 * Something has gone very wrong: the whole packet should be
		 * received in a single descriptor.  Report the problem.
		 */
		device_printf(sc->dev,
		    "%s: RX descriptor without FIRST and LAST bit set: 0x%08X",
		    __func__, rdesc0);
		return (NULL);
	}

	len = (rdesc0 >> RDESC0_FL_SHIFT) & RDESC0_FL_MASK;
	if (len < 64) {
		/*
		 * The length is invalid; recycle the old mbuf.
		 * This case should be impossible in practice.
		 */
		return (NULL);
	}

	/* Allocate new buffer */
	m0 = dwc_alloc_mbufcl(sc);
	if (m0 == NULL) {
		/* no new mbuf available, recycle old */
		if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, 1);
		return (NULL);
	}
	/* Do dmasync for newly received packet */
	bus_dmamap_sync(sc->rxbuf_tag, map->map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->rxbuf_tag, map->map);

	/* Received packet is valid, process it */
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = len;
	m->m_len = len;
	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

	if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0 &&
	    (rdesc0 & RDESC0_FT) != 0) {
		m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
		if ((rdesc0 & RDESC0_ICE) == 0)
			m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
		if ((rdesc0 & RDESC0_PCE) == 0) {
			m->m_pkthdr.csum_flags |=
			    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xffff;
		}
	}

	/* Remove trailing FCS */
	m_adj(m, -ETHER_CRC_LEN);

	DWC_UNLOCK(sc);
	if_input(ifp, m);
	DWC_LOCK(sc);
	return (m0);
}

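/*
 * Reclaim completed transmissions.  For each buffer map at the tail of the
 * ring, check that the hardware has released (cleared OWN in) every
 * descriptor the map covers; only then unload the map, free the mbuf, and
 * clear the descriptors.  The watchdog is disarmed once the ring is empty.
 */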
void
dma1000_txfinish_locked(struct dwc_softc *sc)
{
	struct dwc_bufmap *bmap;
	struct dwc_hwdesc *desc;
	if_t ifp;
	int idx, last_idx;
	bool map_finished;

	DWC_ASSERT_LOCKED(sc);

	ifp = sc->ifp;
	/* check if all descriptors of the map are done */
	while (sc->tx_map_tail != sc->tx_map_head) {
		map_finished = true;
		bmap = &sc->txbuf_map[sc->tx_map_tail];
		idx = sc->tx_desc_tail;
		last_idx = next_txidx(sc, bmap->last_desc_idx);
		while (idx != last_idx) {
			desc = &sc->txdesc_ring[idx];
			if ((desc->desc0 & TDESC0_OWN) != 0) {
				map_finished = false;
				break;
			}
			idx = next_txidx(sc, idx);
		}

		if (!map_finished)
			break;
		bus_dmamap_sync(sc->txbuf_tag, bmap->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->txbuf_tag, bmap->map);
		m_freem(bmap->mbuf);
		bmap->mbuf = NULL;
		sc->tx_mapcount--;
		while (sc->tx_desc_tail != last_idx) {
			dwc_setup_txdesc(sc, sc->tx_desc_tail, 0, 0, 0, false, false);
			sc->tx_desc_tail = next_txidx(sc, sc->tx_desc_tail);
		}
		sc->tx_map_tail = next_txidx(sc, sc->tx_map_tail);
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
	}

	/* If there are no buffers outstanding, muzzle the watchdog. */
	if (sc->tx_desc_tail == sc->tx_desc_head) {
		sc->tx_watchdog_count = 0;
	}
}

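/*
 * Drain the interface send queue into the TX ring.  Stop and set
 * IFF_DRV_OACTIVE when descriptors or buffer maps run short, requeuing any
 * packet that could not be mapped.  If anything was enqueued, kick the DMA
 * engine with a transmit poll demand and arm the TX watchdog.
 */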
void
dma1000_txstart(struct dwc_softc *sc)
{
	int enqueued;
	struct mbuf *m;

	enqueued = 0;

	for (;;) {
		if (sc->tx_desccount > (TX_DESC_COUNT - TX_MAP_MAX_SEGS + 1)) {
			if_setdrvflagbits(sc->ifp, IFF_DRV_OACTIVE, 0);
			break;
		}

		if (sc->tx_mapcount == (TX_MAP_COUNT - 1)) {
			if_setdrvflagbits(sc->ifp, IFF_DRV_OACTIVE, 0);
			break;
		}

		m = if_dequeue(sc->ifp);
		if (m == NULL)
			break;
		if (dma1000_setup_txbuf(sc, sc->tx_map_head, &m) != 0) {
			if_sendq_prepend(sc->ifp, m);
			if_setdrvflagbits(sc->ifp, IFF_DRV_OACTIVE, 0);
			break;
		}
		bpf_mtap_if(sc->ifp, m);
		sc->tx_map_head = next_txidx(sc, sc->tx_map_head);
		sc->tx_mapcount++;
		++enqueued;
	}

	if (enqueued != 0) {
		WRITE4(sc, TRANSMIT_POLL_DEMAND, 0x1);
		sc->tx_watchdog_count = WATCHDOG_TIMEOUT_SECS;
	}
}

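/*
 * Drain the RX ring: process descriptors until one still owned by the
 * hardware is found.  When a frame cannot be passed up, the old buffer is
 * recycled by re-arming the same descriptor, so the ring never contains a
 * hole.
 */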
void
dma1000_rxfinish_locked(struct dwc_softc *sc)
{
	struct mbuf *m;
	int error, idx;
	struct dwc_hwdesc *desc;

	DWC_ASSERT_LOCKED(sc);
	for (;;) {
		idx = sc->rx_idx;
		desc = sc->rxdesc_ring + idx;
		if ((desc->desc0 & RDESC0_OWN) != 0)
			break;

		m = dwc_rxfinish_one(sc, desc, sc->rxbuf_map + idx);
		if (m == NULL) {
			wmb();
			desc->desc0 = RDESC0_OWN;
			wmb();
		} else {
			/* We cannot create a hole in the RX ring */
			error = dma1000_setup_rxbuf(sc, idx, m);
			if (error != 0)
				panic("dma1000_setup_rxbuf failed: error %d\n",
				    error);
		}
		sc->rx_idx = next_rxidx(sc, sc->rx_idx);
	}
}

/*
 * Start the DMA controller
 */
void
dma1000_start(struct dwc_softc *sc)
{
	uint32_t reg;

	DWC_ASSERT_LOCKED(sc);

	/* Initialize DMA and enable transmitters */
	reg = READ4(sc, OPERATION_MODE);
	reg |= (MODE_TSF | MODE_OSF | MODE_FUF);
	reg &= ~(MODE_RSF);
	reg |= (MODE_RTC_LEV32 << MODE_RTC_SHIFT);
	WRITE4(sc, OPERATION_MODE, reg);

	WRITE4(sc, INTERRUPT_ENABLE, INT_EN_DEFAULT);

	/* Start DMA */
	reg = READ4(sc, OPERATION_MODE);
	reg |= (MODE_ST | MODE_SR);
	WRITE4(sc, OPERATION_MODE, reg);
}

/*
 * Stop the DMA controller
 */
void
dma1000_stop(struct dwc_softc *sc)
{
	uint32_t reg;

	DWC_ASSERT_LOCKED(sc);

	/* Stop DMA TX */
	reg = READ4(sc, OPERATION_MODE);
	reg &= ~(MODE_ST);
	WRITE4(sc, OPERATION_MODE, reg);

	/* Flush TX */
	reg = READ4(sc, OPERATION_MODE);
	reg |= (MODE_FTF);
	WRITE4(sc, OPERATION_MODE, reg);

	/* Stop DMA RX */
	reg = READ4(sc, OPERATION_MODE);
	reg &= ~(MODE_SR);
	WRITE4(sc, OPERATION_MODE, reg);
}

/*
 * Create the bus_dma resources
 */
int
dma1000_init(struct dwc_softc *sc)
{
	struct mbuf *m;
	int error;
	int nidx;
	int idx;

	/*
	 * Set up TX descriptor ring, descriptors, and dma maps.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    DWC_DESC_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    TX_DESC_SIZE, 1,		/* maxsize, nsegments */
	    TX_DESC_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->txdesc_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create TX ring DMA tag.\n");
		goto out;
	}

	error = bus_dmamem_alloc(sc->txdesc_tag, (void**)&sc->txdesc_ring,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->txdesc_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate TX descriptor ring.\n");
		goto out;
	}

	error = bus_dmamap_load(sc->txdesc_tag, sc->txdesc_map,
	    sc->txdesc_ring, TX_DESC_SIZE, dwc_get1paddr,
	    &sc->txdesc_ring_paddr, 0);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not load TX descriptor ring map.\n");
		goto out;
	}

	for (idx = 0; idx < TX_DESC_COUNT; idx++) {
		nidx = next_txidx(sc, idx);
		sc->txdesc_ring[idx].addr2 = sc->txdesc_ring_paddr +
		    (nidx * sizeof(struct dwc_hwdesc));
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES*TX_MAP_MAX_SEGS,	/* maxsize */
	    TX_MAP_MAX_SEGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->txbuf_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create TX buffer DMA tag.\n");
		goto out;
	}

	for (idx = 0; idx < TX_MAP_COUNT; idx++) {
		error = bus_dmamap_create(sc->txbuf_tag, BUS_DMA_COHERENT,
		    &sc->txbuf_map[idx].map);
		if (error != 0) {
			device_printf(sc->dev,
			    "could not create TX buffer DMA map.\n");
			goto out;
		}
	}

	for (idx = 0; idx < TX_DESC_COUNT; idx++)
		dwc_setup_txdesc(sc, idx, 0, 0, 0, false, false);

	/*
	 * Set up RX descriptor ring, descriptors, dma maps, and mbufs.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    DWC_DESC_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    RX_DESC_SIZE, 1,		/* maxsize, nsegments */
	    RX_DESC_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rxdesc_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create RX ring DMA tag.\n");
		goto out;
	}

	error = bus_dmamem_alloc(sc->rxdesc_tag, (void **)&sc->rxdesc_ring,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->rxdesc_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate RX descriptor ring.\n");
		goto out;
	}

	error = bus_dmamap_load(sc->rxdesc_tag, sc->rxdesc_map,
	    sc->rxdesc_ring, RX_DESC_SIZE, dwc_get1paddr,
	    &sc->rxdesc_ring_paddr, 0);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not load RX descriptor ring map.\n");
		goto out;
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, 1,		/* maxsize, nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rxbuf_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create RX buf DMA tag.\n");
		goto out;
	}

	for (idx = 0; idx < RX_DESC_COUNT; idx++) {
		error = bus_dmamap_create(sc->rxbuf_tag, BUS_DMA_COHERENT,
		    &sc->rxbuf_map[idx].map);
		if (error != 0) {
			device_printf(sc->dev,
			    "could not create RX buffer DMA map.\n");
			goto out;
		}
		if ((m = dwc_alloc_mbufcl(sc)) == NULL) {
			device_printf(sc->dev, "Could not alloc mbuf\n");
			error = ENOMEM;
			goto out;
		}
		if ((error = dma1000_setup_rxbuf(sc, idx, m)) != 0) {
			device_printf(sc->dev,
			    "could not create new RX buffer.\n");
			goto out;
		}
	}

out:
	if (error != 0)
		return (ENXIO);

	return (0);
}

/*
 * Free the bus_dma resources
 */
void
dma1000_free(struct dwc_softc *sc)
{
	bus_dmamap_t map;
	int idx;

	/* Clean up RX DMA resources and free mbufs. */
	for (idx = 0; idx < RX_DESC_COUNT; ++idx) {
		if ((map = sc->rxbuf_map[idx].map) != NULL) {
			bus_dmamap_unload(sc->rxbuf_tag, map);
			bus_dmamap_destroy(sc->rxbuf_tag, map);
			m_freem(sc->rxbuf_map[idx].mbuf);
		}
	}
	if (sc->rxbuf_tag != NULL)
		bus_dma_tag_destroy(sc->rxbuf_tag);
	if (sc->rxdesc_map != NULL) {
		bus_dmamap_unload(sc->rxdesc_tag, sc->rxdesc_map);
		bus_dmamem_free(sc->rxdesc_tag, sc->rxdesc_ring,
		    sc->rxdesc_map);
	}
	if (sc->rxdesc_tag != NULL)
		bus_dma_tag_destroy(sc->rxdesc_tag);

	/* Clean up TX DMA resources. */
	for (idx = 0; idx < TX_DESC_COUNT; ++idx) {
		if ((map = sc->txbuf_map[idx].map) != NULL) {
			/* TX maps are already unloaded. */
			bus_dmamap_destroy(sc->txbuf_tag, map);
		}
	}
	if (sc->txbuf_tag != NULL)
		bus_dma_tag_destroy(sc->txbuf_tag);
	if (sc->txdesc_map != NULL) {
		bus_dmamap_unload(sc->txdesc_tag, sc->txdesc_map);
		bus_dmamem_free(sc->txdesc_tag, sc->txdesc_ring,
		    sc->txdesc_map);
	}
	if (sc->txdesc_tag != NULL)
		bus_dma_tag_destroy(sc->txdesc_tag);
}