xref: /freebsd/sys/dev/dwc/dwc1000_dma.c (revision 0cb63dcac446df85787cc0a77d4d38d01ff92913)
1972adf0fSEmmanuel Vadot /*-
2972adf0fSEmmanuel Vadot  * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
3972adf0fSEmmanuel Vadot  *
4972adf0fSEmmanuel Vadot  * This software was developed by SRI International and the University of
5972adf0fSEmmanuel Vadot  * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
6972adf0fSEmmanuel Vadot  * ("CTSRD"), as part of the DARPA CRASH research programme.
7972adf0fSEmmanuel Vadot  *
8972adf0fSEmmanuel Vadot  * Redistribution and use in source and binary forms, with or without
9972adf0fSEmmanuel Vadot  * modification, are permitted provided that the following conditions
10972adf0fSEmmanuel Vadot  * are met:
11972adf0fSEmmanuel Vadot  * 1. Redistributions of source code must retain the above copyright
12972adf0fSEmmanuel Vadot  *    notice, this list of conditions and the following disclaimer.
13972adf0fSEmmanuel Vadot  * 2. Redistributions in binary form must reproduce the above copyright
14972adf0fSEmmanuel Vadot  *    notice, this list of conditions and the following disclaimer in the
15972adf0fSEmmanuel Vadot  *    documentation and/or other materials provided with the distribution.
16972adf0fSEmmanuel Vadot  *
17972adf0fSEmmanuel Vadot  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18972adf0fSEmmanuel Vadot  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19972adf0fSEmmanuel Vadot  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20972adf0fSEmmanuel Vadot  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21972adf0fSEmmanuel Vadot  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22972adf0fSEmmanuel Vadot  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23972adf0fSEmmanuel Vadot  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24972adf0fSEmmanuel Vadot  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25972adf0fSEmmanuel Vadot  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26972adf0fSEmmanuel Vadot  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27972adf0fSEmmanuel Vadot  * SUCH DAMAGE.
28972adf0fSEmmanuel Vadot  */
29972adf0fSEmmanuel Vadot 
30972adf0fSEmmanuel Vadot #include <sys/cdefs.h>
31972adf0fSEmmanuel Vadot #include <sys/param.h>
32972adf0fSEmmanuel Vadot #include <sys/systm.h>
33972adf0fSEmmanuel Vadot #include <sys/bus.h>
34972adf0fSEmmanuel Vadot #include <sys/kernel.h>
35972adf0fSEmmanuel Vadot #include <sys/lock.h>
36972adf0fSEmmanuel Vadot #include <sys/malloc.h>
37972adf0fSEmmanuel Vadot #include <sys/mbuf.h>
38972adf0fSEmmanuel Vadot #include <sys/module.h>
39972adf0fSEmmanuel Vadot #include <sys/mutex.h>
40972adf0fSEmmanuel Vadot #include <sys/rman.h>
41972adf0fSEmmanuel Vadot #include <sys/socket.h>
42972adf0fSEmmanuel Vadot 
43972adf0fSEmmanuel Vadot #include <net/bpf.h>
44972adf0fSEmmanuel Vadot #include <net/if.h>
45972adf0fSEmmanuel Vadot #include <net/ethernet.h>
46972adf0fSEmmanuel Vadot #include <net/if_dl.h>
47972adf0fSEmmanuel Vadot #include <net/if_media.h>
48972adf0fSEmmanuel Vadot #include <net/if_types.h>
49972adf0fSEmmanuel Vadot #include <net/if_var.h>
50972adf0fSEmmanuel Vadot 
51972adf0fSEmmanuel Vadot #include <machine/bus.h>
52972adf0fSEmmanuel Vadot 
53972adf0fSEmmanuel Vadot #include <dev/extres/clk/clk.h>
54972adf0fSEmmanuel Vadot #include <dev/extres/hwreset/hwreset.h>
55972adf0fSEmmanuel Vadot 
56972adf0fSEmmanuel Vadot #include <dev/ofw/ofw_bus.h>
57972adf0fSEmmanuel Vadot #include <dev/ofw/ofw_bus_subr.h>
58972adf0fSEmmanuel Vadot 
59972adf0fSEmmanuel Vadot #include <dev/dwc/if_dwcvar.h>
60972adf0fSEmmanuel Vadot #include <dev/dwc/dwc1000_reg.h>
61972adf0fSEmmanuel Vadot #include <dev/dwc/dwc1000_dma.h>
62972adf0fSEmmanuel Vadot 
63afa0f66eSEmmanuel Vadot #define	WATCHDOG_TIMEOUT_SECS	5
64363b7c39SEmmanuel Vadot #define	DMA_RESET_TIMEOUT	100
65fe82f82dSEmmanuel Vadot 
66fe82f82dSEmmanuel Vadot /* TX descriptors - TDESC0 is almost unified */
67fe82f82dSEmmanuel Vadot #define	TDESC0_OWN		(1U << 31)
68fe82f82dSEmmanuel Vadot #define	TDESC0_IHE		(1U << 16)	/* IP Header Error */
69fe82f82dSEmmanuel Vadot #define	TDESC0_ES		(1U << 15)	/* Error Summary */
70fe82f82dSEmmanuel Vadot #define	TDESC0_JT		(1U << 14)	/* Jabber Timeout */
71fe82f82dSEmmanuel Vadot #define	TDESC0_FF		(1U << 13)	/* Frame Flushed */
72fe82f82dSEmmanuel Vadot #define	TDESC0_PCE		(1U << 12)	/* Payload Checksum Error */
73fe82f82dSEmmanuel Vadot #define	TDESC0_LOC		(1U << 11)	/* Loss of Carrier */
74fe82f82dSEmmanuel Vadot #define	TDESC0_NC		(1U << 10)	/* No Carrier */
75fe82f82dSEmmanuel Vadot #define	TDESC0_LC		(1U <<  9)	/* Late Collision */
76fe82f82dSEmmanuel Vadot #define	TDESC0_EC		(1U <<  8)	/* Excessive Collision */
77fe82f82dSEmmanuel Vadot #define	TDESC0_VF		(1U <<  7)	/* VLAN Frame */
78fe82f82dSEmmanuel Vadot #define	TDESC0_CC_MASK		0xf
79fe82f82dSEmmanuel Vadot #define	TDESC0_CC_SHIFT		3		/* Collision Count */
80fe82f82dSEmmanuel Vadot #define	TDESC0_ED		(1U <<  2)	/* Excessive Deferral */
81fe82f82dSEmmanuel Vadot #define	TDESC0_UF		(1U <<  1)	/* Underflow Error */
82fe82f82dSEmmanuel Vadot #define	TDESC0_DB		(1U <<  0)	/* Deferred Bit */
83fe82f82dSEmmanuel Vadot /* TX descriptors - TDESC0 extended format only */
84fe82f82dSEmmanuel Vadot #define	ETDESC0_IC		(1U << 30)	/* Interrupt on Completion */
85fe82f82dSEmmanuel Vadot #define	ETDESC0_LS		(1U << 29)	/* Last Segment */
86fe82f82dSEmmanuel Vadot #define	ETDESC0_FS		(1U << 28)	/* First Segment */
87fe82f82dSEmmanuel Vadot #define	ETDESC0_DC		(1U << 27)	/* Disable CRC */
88fe82f82dSEmmanuel Vadot #define	ETDESC0_DP		(1U << 26)	/* Disable Padding */
89fe82f82dSEmmanuel Vadot #define	ETDESC0_CIC_NONE	(0U << 22)	/* Checksum Insertion Control */
90fe82f82dSEmmanuel Vadot #define	ETDESC0_CIC_HDR		(1U << 22)
91fe82f82dSEmmanuel Vadot #define	ETDESC0_CIC_SEG 	(2U << 22)
92fe82f82dSEmmanuel Vadot #define	ETDESC0_CIC_FULL	(3U << 22)
93fe82f82dSEmmanuel Vadot #define	ETDESC0_TER		(1U << 21)	/* Transmit End of Ring */
94fe82f82dSEmmanuel Vadot #define	ETDESC0_TCH		(1U << 20)	/* Second Address Chained */
95fe82f82dSEmmanuel Vadot 
96fe82f82dSEmmanuel Vadot /* TX descriptors - TDESC1 normal format */
97fe82f82dSEmmanuel Vadot #define	NTDESC1_IC		(1U << 31)	/* Interrupt on Completion */
98fe82f82dSEmmanuel Vadot #define	NTDESC1_LS		(1U << 30)	/* Last Segment */
99fe82f82dSEmmanuel Vadot #define	NTDESC1_FS		(1U << 29)	/* First Segment */
100fe82f82dSEmmanuel Vadot #define	NTDESC1_CIC_NONE	(0U << 27)	/* Checksum Insertion Control */
101fe82f82dSEmmanuel Vadot #define	NTDESC1_CIC_HDR		(1U << 27)
102fe82f82dSEmmanuel Vadot #define	NTDESC1_CIC_SEG 	(2U << 27)
103fe82f82dSEmmanuel Vadot #define	NTDESC1_CIC_FULL	(3U << 27)
104fe82f82dSEmmanuel Vadot #define	NTDESC1_DC		(1U << 26)	/* Disable CRC */
105fe82f82dSEmmanuel Vadot #define	NTDESC1_TER		(1U << 25)	/* Transmit End of Ring */
106fe82f82dSEmmanuel Vadot #define	NTDESC1_TCH		(1U << 24)	/* Second Address Chained */
107fe82f82dSEmmanuel Vadot /* TX descriptors - TDESC1 extended format */
108fe82f82dSEmmanuel Vadot #define	ETDESC1_DP		(1U << 23)	/* Disable Padding */
109fe82f82dSEmmanuel Vadot #define	ETDESC1_TBS2_MASK	0x7ff
110fe82f82dSEmmanuel Vadot #define	ETDESC1_TBS2_SHIFT	11		/* Transmit Buffer 2 Size */
111fe82f82dSEmmanuel Vadot #define	ETDESC1_TBS1_MASK	0x7ff
112fe82f82dSEmmanuel Vadot #define	ETDESC1_TBS1_SHIFT	0		/* Transmit Buffer 1 Size */
113fe82f82dSEmmanuel Vadot 
114fe82f82dSEmmanuel Vadot /* RX descriptor - RDESC0 is unified */
115fe82f82dSEmmanuel Vadot #define	RDESC0_OWN		(1U << 31)
116fe82f82dSEmmanuel Vadot #define	RDESC0_AFM		(1U << 30)	/* Dest. Address Filter Fail */
117fe82f82dSEmmanuel Vadot #define	RDESC0_FL_MASK		0x3fff
118fe82f82dSEmmanuel Vadot #define	RDESC0_FL_SHIFT		16		/* Frame Length */
119fe82f82dSEmmanuel Vadot #define	RDESC0_ES		(1U << 15)	/* Error Summary */
120fe82f82dSEmmanuel Vadot #define	RDESC0_DE		(1U << 14)	/* Descriptor Error */
121fe82f82dSEmmanuel Vadot #define	RDESC0_SAF		(1U << 13)	/* Source Address Filter Fail */
122fe82f82dSEmmanuel Vadot #define	RDESC0_LE		(1U << 12)	/* Length Error */
123fe82f82dSEmmanuel Vadot #define	RDESC0_OE		(1U << 11)	/* Overflow Error */
124fe82f82dSEmmanuel Vadot #define	RDESC0_VLAN		(1U << 10)	/* VLAN Tag */
125fe82f82dSEmmanuel Vadot #define	RDESC0_FS		(1U <<  9)	/* First Descriptor */
126fe82f82dSEmmanuel Vadot #define	RDESC0_LS		(1U <<  8)	/* Last Descriptor */
127fe82f82dSEmmanuel Vadot #define	RDESC0_ICE		(1U <<  7)	/* IPC Checksum Error */
128fe82f82dSEmmanuel Vadot #define	RDESC0_LC		(1U <<  6)	/* Late Collision */
129fe82f82dSEmmanuel Vadot #define	RDESC0_FT		(1U <<  5)	/* Frame Type */
130fe82f82dSEmmanuel Vadot #define	RDESC0_RWT		(1U <<  4)	/* Receive Watchdog Timeout */
131fe82f82dSEmmanuel Vadot #define	RDESC0_RE		(1U <<  3)	/* Receive Error */
132fe82f82dSEmmanuel Vadot #define	RDESC0_DBE		(1U <<  2)	/* Dribble Bit Error */
133fe82f82dSEmmanuel Vadot #define	RDESC0_CE		(1U <<  1)	/* CRC Error */
134fe82f82dSEmmanuel Vadot #define	RDESC0_PCE		(1U <<  0)	/* Payload Checksum Error */
135fe82f82dSEmmanuel Vadot #define	RDESC0_RXMA		(1U <<  0)	/* Rx MAC Address */
136fe82f82dSEmmanuel Vadot 
137fe82f82dSEmmanuel Vadot /* RX descriptors - RDESC1 normal format */
138fe82f82dSEmmanuel Vadot #define	NRDESC1_DIC		(1U << 31)	/* Disable Intr on Completion */
139fe82f82dSEmmanuel Vadot #define	NRDESC1_RER		(1U << 25)	/* Receive End of Ring */
140fe82f82dSEmmanuel Vadot #define	NRDESC1_RCH		(1U << 24)	/* Second Address Chained */
141fe82f82dSEmmanuel Vadot #define	NRDESC1_RBS2_MASK	0x7ff
142fe82f82dSEmmanuel Vadot #define	NRDESC1_RBS2_SHIFT	11		/* Receive Buffer 2 Size */
143fe82f82dSEmmanuel Vadot #define	NRDESC1_RBS1_MASK	0x7ff
144fe82f82dSEmmanuel Vadot #define	NRDESC1_RBS1_SHIFT	0		/* Receive Buffer 1 Size */
145fe82f82dSEmmanuel Vadot 
146fe82f82dSEmmanuel Vadot /* RX descriptors - RDESC1 enhanced format */
147fe82f82dSEmmanuel Vadot #define	ERDESC1_DIC		(1U << 31)	/* Disable Intr on Completion */
148fe82f82dSEmmanuel Vadot #define	ERDESC1_RBS2_MASK	0x7ffff
149fe82f82dSEmmanuel Vadot #define	ERDESC1_RBS2_SHIFT	16		/* Receive Buffer 2 Size */
150fe82f82dSEmmanuel Vadot #define	ERDESC1_RER		(1U << 15)	/* Receive End of Ring */
151fe82f82dSEmmanuel Vadot #define	ERDESC1_RCH		(1U << 14)	/* Second Address Chained */
152fe82f82dSEmmanuel Vadot #define	ERDESC1_RBS1_MASK	0x7ffff
153fe82f82dSEmmanuel Vadot #define	ERDESC1_RBS1_SHIFT	0		/* Receive Buffer 1 Size */
154fe82f82dSEmmanuel Vadot 
155fe82f82dSEmmanuel Vadot /*
156fe82f82dSEmmanuel Vadot  * The hardware imposes alignment restrictions on various objects involved in
157fe82f82dSEmmanuel Vadot  * DMA transfers.  These values are expressed in bytes (not bits).
158fe82f82dSEmmanuel Vadot  */
159fe82f82dSEmmanuel Vadot #define	DWC_DESC_RING_ALIGN	2048
160fe82f82dSEmmanuel Vadot 
161afa0f66eSEmmanuel Vadot static inline uint32_t
162afa0f66eSEmmanuel Vadot next_txidx(struct dwc_softc *sc, uint32_t curidx)
163afa0f66eSEmmanuel Vadot {
164afa0f66eSEmmanuel Vadot 
165afa0f66eSEmmanuel Vadot 	return ((curidx + 1) % TX_DESC_COUNT);
166afa0f66eSEmmanuel Vadot }
167afa0f66eSEmmanuel Vadot 
168972adf0fSEmmanuel Vadot static inline uint32_t
169972adf0fSEmmanuel Vadot next_rxidx(struct dwc_softc *sc, uint32_t curidx)
170972adf0fSEmmanuel Vadot {
171972adf0fSEmmanuel Vadot 
172972adf0fSEmmanuel Vadot 	return ((curidx + 1) % RX_DESC_COUNT);
173972adf0fSEmmanuel Vadot }
174972adf0fSEmmanuel Vadot 
175972adf0fSEmmanuel Vadot static void
176972adf0fSEmmanuel Vadot dwc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
177972adf0fSEmmanuel Vadot {
178972adf0fSEmmanuel Vadot 
179972adf0fSEmmanuel Vadot 	if (error != 0)
180972adf0fSEmmanuel Vadot 		return;
181972adf0fSEmmanuel Vadot 	*(bus_addr_t *)arg = segs[0].ds_addr;
182972adf0fSEmmanuel Vadot }
183972adf0fSEmmanuel Vadot 
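/*
 * Release one TX descriptor: clear its buffer address and control words
 * and decrement the count of descriptors currently in use.
 */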
184972adf0fSEmmanuel Vadot inline static void
1857786911fSEmmanuel Vadot txdesc_clear(struct dwc_softc *sc, int idx)
186972adf0fSEmmanuel Vadot {
1877786911fSEmmanuel Vadot 
1887786911fSEmmanuel Vadot 	sc->tx_desccount--;
1897786911fSEmmanuel Vadot 	sc->txdesc_ring[idx].addr1 = (uint32_t)(0);
1907786911fSEmmanuel Vadot 	sc->txdesc_ring[idx].desc0 = 0;
1917786911fSEmmanuel Vadot 	sc->txdesc_ring[idx].desc1 = 0;
192972adf0fSEmmanuel Vadot }
193972adf0fSEmmanuel Vadot 
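/*
 * Fill one TX descriptor for a single buffer segment.  The field layout
 * differs between the normal and extended descriptor formats; the OWN bit
 * is set last, after a write barrier, so the hardware never sees a
 * half-written descriptor.
 */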
194972adf0fSEmmanuel Vadot inline static void
1957786911fSEmmanuel Vadot txdesc_setup(struct dwc_softc *sc, int idx, bus_addr_t paddr,
196972adf0fSEmmanuel Vadot   uint32_t len, uint32_t flags, bool first, bool last)
197972adf0fSEmmanuel Vadot {
198972adf0fSEmmanuel Vadot 	uint32_t desc0, desc1;
199972adf0fSEmmanuel Vadot 
2004b7975ecSEmmanuel Vadot 	if (!sc->dma_ext_desc) {
201972adf0fSEmmanuel Vadot 		desc0 = 0;
202972adf0fSEmmanuel Vadot 		desc1 = NTDESC1_TCH | len | flags;
203972adf0fSEmmanuel Vadot 		if (first)
204972adf0fSEmmanuel Vadot 			desc1 |=  NTDESC1_FS;
205972adf0fSEmmanuel Vadot 		if (last)
206972adf0fSEmmanuel Vadot 			desc1 |= NTDESC1_LS | NTDESC1_IC;
207972adf0fSEmmanuel Vadot 	} else {
208972adf0fSEmmanuel Vadot 		desc0 = ETDESC0_TCH | flags;
209972adf0fSEmmanuel Vadot 		if (first)
210972adf0fSEmmanuel Vadot 			desc0 |= ETDESC0_FS;
211972adf0fSEmmanuel Vadot 		if (last)
212972adf0fSEmmanuel Vadot 			desc0 |= ETDESC0_LS | ETDESC0_IC;
213972adf0fSEmmanuel Vadot 		desc1 = len;
214972adf0fSEmmanuel Vadot 	}
215972adf0fSEmmanuel Vadot 	++sc->tx_desccount;
216972adf0fSEmmanuel Vadot 	sc->txdesc_ring[idx].addr1 = (uint32_t)(paddr);
217972adf0fSEmmanuel Vadot 	sc->txdesc_ring[idx].desc0 = desc0;
218972adf0fSEmmanuel Vadot 	sc->txdesc_ring[idx].desc1 = desc1;
2197786911fSEmmanuel Vadot 	wmb();
22043cd6bbbSEmmanuel Vadot 	sc->txdesc_ring[idx].desc0 |= TDESC0_OWN;
221*0cb63dcaSEmmanuel Vadot 	wmb();
222972adf0fSEmmanuel Vadot }
223972adf0fSEmmanuel Vadot 
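/*
 * Fill one RX descriptor: point it at the new buffer, chain it to the next
 * descriptor in the ring and hand it to the hardware by setting OWN.
 * Returns the index of the next descriptor.
 */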
224972adf0fSEmmanuel Vadot inline static uint32_t
2257786911fSEmmanuel Vadot rxdesc_setup(struct dwc_softc *sc, int idx, bus_addr_t paddr)
226972adf0fSEmmanuel Vadot {
227972adf0fSEmmanuel Vadot 	uint32_t nidx;
228972adf0fSEmmanuel Vadot 
229972adf0fSEmmanuel Vadot 	sc->rxdesc_ring[idx].addr1 = (uint32_t)paddr;
230972adf0fSEmmanuel Vadot 	nidx = next_rxidx(sc, idx);
231972adf0fSEmmanuel Vadot 	sc->rxdesc_ring[idx].addr2 = sc->rxdesc_ring_paddr +
232972adf0fSEmmanuel Vadot 	    (nidx * sizeof(struct dwc_hwdesc));
2334b7975ecSEmmanuel Vadot 	if (!sc->dma_ext_desc)
234972adf0fSEmmanuel Vadot 		sc->rxdesc_ring[idx].desc1 = NRDESC1_RCH |
235972adf0fSEmmanuel Vadot 		    MIN(MCLBYTES, NRDESC1_RBS1_MASK);
236972adf0fSEmmanuel Vadot 	else
237972adf0fSEmmanuel Vadot 		sc->rxdesc_ring[idx].desc1 = ERDESC1_RCH |
238972adf0fSEmmanuel Vadot 		    MIN(MCLBYTES, ERDESC1_RBS1_MASK);
239972adf0fSEmmanuel Vadot 
240972adf0fSEmmanuel Vadot 	wmb();
24143cd6bbbSEmmanuel Vadot 	sc->rxdesc_ring[idx].desc0 = RDESC0_OWN;
242*0cb63dcaSEmmanuel Vadot 	wmb();
243972adf0fSEmmanuel Vadot 	return (nidx);
244972adf0fSEmmanuel Vadot }
245972adf0fSEmmanuel Vadot 
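/*
 * Map an outgoing mbuf chain for DMA and build one TX descriptor per
 * segment, defragmenting the chain if it needs more segments than the map
 * allows.  Returns ENOMEM if the mbuf cannot be mapped or the ring lacks
 * enough free descriptors.
 */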
246972adf0fSEmmanuel Vadot int
247972adf0fSEmmanuel Vadot dma1000_setup_txbuf(struct dwc_softc *sc, int idx, struct mbuf **mp)
248972adf0fSEmmanuel Vadot {
249972adf0fSEmmanuel Vadot 	struct bus_dma_segment segs[TX_MAP_MAX_SEGS];
250972adf0fSEmmanuel Vadot 	int error, nsegs;
251972adf0fSEmmanuel Vadot 	struct mbuf * m;
252972adf0fSEmmanuel Vadot 	uint32_t flags = 0;
253972adf0fSEmmanuel Vadot 	int i;
2547786911fSEmmanuel Vadot 	int last;
255972adf0fSEmmanuel Vadot 
256972adf0fSEmmanuel Vadot 	error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag, sc->txbuf_map[idx].map,
257972adf0fSEmmanuel Vadot 	    *mp, segs, &nsegs, 0);
258972adf0fSEmmanuel Vadot 	if (error == EFBIG) {
259972adf0fSEmmanuel Vadot 		/*
260972adf0fSEmmanuel Vadot 		 * The map may be partially mapped from the first call.
261972adf0fSEmmanuel Vadot 		 * Make sure to reset it.
262972adf0fSEmmanuel Vadot 		 */
263972adf0fSEmmanuel Vadot 		bus_dmamap_unload(sc->txbuf_tag, sc->txbuf_map[idx].map);
264972adf0fSEmmanuel Vadot 		if ((m = m_defrag(*mp, M_NOWAIT)) == NULL)
265972adf0fSEmmanuel Vadot 			return (ENOMEM);
266972adf0fSEmmanuel Vadot 		*mp = m;
267972adf0fSEmmanuel Vadot 		error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag, sc->txbuf_map[idx].map,
268972adf0fSEmmanuel Vadot 		    *mp, segs, &nsegs, 0);
269972adf0fSEmmanuel Vadot 	}
270972adf0fSEmmanuel Vadot 	if (error != 0)
271972adf0fSEmmanuel Vadot 		return (ENOMEM);
272972adf0fSEmmanuel Vadot 
273972adf0fSEmmanuel Vadot 	if (sc->tx_desccount + nsegs > TX_DESC_COUNT) {
274972adf0fSEmmanuel Vadot 		bus_dmamap_unload(sc->txbuf_tag, sc->txbuf_map[idx].map);
275972adf0fSEmmanuel Vadot 		return (ENOMEM);
276972adf0fSEmmanuel Vadot 	}
277972adf0fSEmmanuel Vadot 
278972adf0fSEmmanuel Vadot 	m = *mp;
279972adf0fSEmmanuel Vadot 
280972adf0fSEmmanuel Vadot 	if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) {
281972adf0fSEmmanuel Vadot 		if ((m->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) != 0) {
2824b7975ecSEmmanuel Vadot 			if (!sc->dma_ext_desc)
283972adf0fSEmmanuel Vadot 				flags = NTDESC1_CIC_FULL;
284972adf0fSEmmanuel Vadot 			else
285972adf0fSEmmanuel Vadot 				flags = ETDESC0_CIC_FULL;
286972adf0fSEmmanuel Vadot 		} else {
2874b7975ecSEmmanuel Vadot 			if (!sc->dma_ext_desc)
288972adf0fSEmmanuel Vadot 				flags = NTDESC1_CIC_HDR;
289972adf0fSEmmanuel Vadot 			else
290972adf0fSEmmanuel Vadot 				flags = ETDESC0_CIC_HDR;
291972adf0fSEmmanuel Vadot 		}
292972adf0fSEmmanuel Vadot 	}
293972adf0fSEmmanuel Vadot 
294972adf0fSEmmanuel Vadot 	bus_dmamap_sync(sc->txbuf_tag, sc->txbuf_map[idx].map,
295972adf0fSEmmanuel Vadot 	    BUS_DMASYNC_PREWRITE);
296972adf0fSEmmanuel Vadot 
297972adf0fSEmmanuel Vadot 	sc->txbuf_map[idx].mbuf = m;
298972adf0fSEmmanuel Vadot 
299972adf0fSEmmanuel Vadot 	for (i = 0; i < nsegs; i++) {
3007786911fSEmmanuel Vadot 		txdesc_setup(sc, sc->tx_desc_head,
301972adf0fSEmmanuel Vadot 		    segs[i].ds_addr, segs[i].ds_len,
302972adf0fSEmmanuel Vadot 		    (i == 0) ? flags : 0, /* only first desc needs flags */
303972adf0fSEmmanuel Vadot 		    (i == 0),
304972adf0fSEmmanuel Vadot 		    (i == nsegs - 1));
305972adf0fSEmmanuel Vadot 		last = sc->tx_desc_head;
306972adf0fSEmmanuel Vadot 		sc->tx_desc_head = next_txidx(sc, sc->tx_desc_head);
307972adf0fSEmmanuel Vadot 	}
308972adf0fSEmmanuel Vadot 
309972adf0fSEmmanuel Vadot 	sc->txbuf_map[idx].last_desc_idx = last;
310972adf0fSEmmanuel Vadot 
311972adf0fSEmmanuel Vadot 	return (0);
312972adf0fSEmmanuel Vadot }
313972adf0fSEmmanuel Vadot 
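/*
 * Map a receive mbuf for DMA and attach it to the RX descriptor at idx.
 */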
314972adf0fSEmmanuel Vadot static int
315972adf0fSEmmanuel Vadot dma1000_setup_rxbuf(struct dwc_softc *sc, int idx, struct mbuf *m)
316972adf0fSEmmanuel Vadot {
317972adf0fSEmmanuel Vadot 	struct bus_dma_segment seg;
318972adf0fSEmmanuel Vadot 	int error, nsegs;
319972adf0fSEmmanuel Vadot 
320972adf0fSEmmanuel Vadot 	m_adj(m, ETHER_ALIGN);
321972adf0fSEmmanuel Vadot 
322972adf0fSEmmanuel Vadot 	error = bus_dmamap_load_mbuf_sg(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
323972adf0fSEmmanuel Vadot 	    m, &seg, &nsegs, 0);
324972adf0fSEmmanuel Vadot 	if (error != 0)
325972adf0fSEmmanuel Vadot 		return (error);
326972adf0fSEmmanuel Vadot 
327972adf0fSEmmanuel Vadot 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
328972adf0fSEmmanuel Vadot 
329972adf0fSEmmanuel Vadot 	bus_dmamap_sync(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
330972adf0fSEmmanuel Vadot 	    BUS_DMASYNC_PREREAD);
331972adf0fSEmmanuel Vadot 
332972adf0fSEmmanuel Vadot 	sc->rxbuf_map[idx].mbuf = m;
3337786911fSEmmanuel Vadot 	rxdesc_setup(sc, idx, seg.ds_addr);
334972adf0fSEmmanuel Vadot 
335972adf0fSEmmanuel Vadot 	return (0);
336972adf0fSEmmanuel Vadot }
337972adf0fSEmmanuel Vadot 
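/*
 * Allocate an mbuf cluster and size it to cover the whole cluster buffer.
 */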
338972adf0fSEmmanuel Vadot static struct mbuf *
339972adf0fSEmmanuel Vadot dwc_alloc_mbufcl(struct dwc_softc *sc)
340972adf0fSEmmanuel Vadot {
341972adf0fSEmmanuel Vadot 	struct mbuf *m;
342972adf0fSEmmanuel Vadot 
343972adf0fSEmmanuel Vadot 	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
344972adf0fSEmmanuel Vadot 	if (m != NULL)
345972adf0fSEmmanuel Vadot 		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
346972adf0fSEmmanuel Vadot 
347972adf0fSEmmanuel Vadot 	return (m);
348972adf0fSEmmanuel Vadot }
349972adf0fSEmmanuel Vadot 
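/*
 * Complete one received frame: validate the descriptor status, allocate a
 * replacement cluster, record checksum offload results and pass the frame
 * up the stack.  Returns the replacement mbuf, or NULL if the old buffer
 * must be recycled.
 */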
350972adf0fSEmmanuel Vadot static struct mbuf *
351972adf0fSEmmanuel Vadot dwc_rxfinish_one(struct dwc_softc *sc, struct dwc_hwdesc *desc,
352972adf0fSEmmanuel Vadot     struct dwc_bufmap *map)
353972adf0fSEmmanuel Vadot {
354972adf0fSEmmanuel Vadot 	if_t ifp;
355972adf0fSEmmanuel Vadot 	struct mbuf *m, *m0;
356972adf0fSEmmanuel Vadot 	int len;
357972adf0fSEmmanuel Vadot 	uint32_t rdesc0;
358972adf0fSEmmanuel Vadot 
359972adf0fSEmmanuel Vadot 	m = map->mbuf;
360972adf0fSEmmanuel Vadot 	ifp = sc->ifp;
361972adf0fSEmmanuel Vadot 	rdesc0 = desc->desc0;
362972adf0fSEmmanuel Vadot 
363972adf0fSEmmanuel Vadot 	if ((rdesc0 & (RDESC0_FS | RDESC0_LS)) !=
364972adf0fSEmmanuel Vadot 		    (RDESC0_FS | RDESC0_LS)) {
365972adf0fSEmmanuel Vadot 		/*
366972adf0fSEmmanuel Vadot 		 * Something has gone very wrong.  The whole packet should be
367972adf0fSEmmanuel Vadot 		 * received in one descriptor.  Report the problem.
368972adf0fSEmmanuel Vadot 		 */
369972adf0fSEmmanuel Vadot 		device_printf(sc->dev,
370972adf0fSEmmanuel Vadot 		    "%s: RX descriptor without FIRST and LAST bit set: 0x%08X",
371972adf0fSEmmanuel Vadot 		    __func__, rdesc0);
372972adf0fSEmmanuel Vadot 		return (NULL);
373972adf0fSEmmanuel Vadot 	}
374972adf0fSEmmanuel Vadot 
375972adf0fSEmmanuel Vadot 	len = (rdesc0 >> RDESC0_FL_SHIFT) & RDESC0_FL_MASK;
376972adf0fSEmmanuel Vadot 	if (len < 64) {
377972adf0fSEmmanuel Vadot 		/*
378972adf0fSEmmanuel Vadot 		 * Length is invalid, recycle the old mbuf.
379972adf0fSEmmanuel Vadot 		 * This is probably an impossible case.
380972adf0fSEmmanuel Vadot 		 */
381972adf0fSEmmanuel Vadot 		return (NULL);
382972adf0fSEmmanuel Vadot 	}
383972adf0fSEmmanuel Vadot 
384972adf0fSEmmanuel Vadot 	/* Allocate new buffer */
385972adf0fSEmmanuel Vadot 	m0 = dwc_alloc_mbufcl(sc);
386972adf0fSEmmanuel Vadot 	if (m0 == NULL) {
387972adf0fSEmmanuel Vadot 		/* no new mbuf available, recycle old */
388972adf0fSEmmanuel Vadot 		if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, 1);
389972adf0fSEmmanuel Vadot 		return (NULL);
390972adf0fSEmmanuel Vadot 	}
391972adf0fSEmmanuel Vadot 	/* Do dmasync for newly received packet */
392972adf0fSEmmanuel Vadot 	bus_dmamap_sync(sc->rxbuf_tag, map->map, BUS_DMASYNC_POSTREAD);
393972adf0fSEmmanuel Vadot 	bus_dmamap_unload(sc->rxbuf_tag, map->map);
394972adf0fSEmmanuel Vadot 
395972adf0fSEmmanuel Vadot 	/* Received packet is valid, process it */
396972adf0fSEmmanuel Vadot 	m->m_pkthdr.rcvif = ifp;
397972adf0fSEmmanuel Vadot 	m->m_pkthdr.len = len;
398972adf0fSEmmanuel Vadot 	m->m_len = len;
399972adf0fSEmmanuel Vadot 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
400972adf0fSEmmanuel Vadot 
401972adf0fSEmmanuel Vadot 	if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0 &&
402972adf0fSEmmanuel Vadot 	  (rdesc0 & RDESC0_FT) != 0) {
403972adf0fSEmmanuel Vadot 		m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
404972adf0fSEmmanuel Vadot 		if ((rdesc0 & RDESC0_ICE) == 0)
405972adf0fSEmmanuel Vadot 			m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
406972adf0fSEmmanuel Vadot 		if ((rdesc0 & RDESC0_PCE) == 0) {
407972adf0fSEmmanuel Vadot 			m->m_pkthdr.csum_flags |=
408972adf0fSEmmanuel Vadot 				CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
409972adf0fSEmmanuel Vadot 			m->m_pkthdr.csum_data = 0xffff;
410972adf0fSEmmanuel Vadot 		}
411972adf0fSEmmanuel Vadot 	}
412972adf0fSEmmanuel Vadot 
413972adf0fSEmmanuel Vadot 	/* Remove trailing FCS */
414972adf0fSEmmanuel Vadot 	m_adj(m, -ETHER_CRC_LEN);
415972adf0fSEmmanuel Vadot 
416972adf0fSEmmanuel Vadot 	DWC_UNLOCK(sc);
417972adf0fSEmmanuel Vadot 	if_input(ifp, m);
418972adf0fSEmmanuel Vadot 	DWC_LOCK(sc);
419972adf0fSEmmanuel Vadot 	return (m0);
420972adf0fSEmmanuel Vadot }
421972adf0fSEmmanuel Vadot 
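/*
 * Reclaim TX descriptors the hardware has finished with, unload and free
 * the associated mbufs, and clear IFF_DRV_OACTIVE so transmission can
 * resume.
 */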
422972adf0fSEmmanuel Vadot void
423972adf0fSEmmanuel Vadot dma1000_txfinish_locked(struct dwc_softc *sc)
424972adf0fSEmmanuel Vadot {
425972adf0fSEmmanuel Vadot 	struct dwc_bufmap *bmap;
426972adf0fSEmmanuel Vadot 	struct dwc_hwdesc *desc;
427972adf0fSEmmanuel Vadot 	if_t ifp;
428972adf0fSEmmanuel Vadot 	int idx, last_idx;
429972adf0fSEmmanuel Vadot 	bool map_finished;
430972adf0fSEmmanuel Vadot 
431972adf0fSEmmanuel Vadot 	DWC_ASSERT_LOCKED(sc);
432972adf0fSEmmanuel Vadot 
433972adf0fSEmmanuel Vadot 	ifp = sc->ifp;
434972adf0fSEmmanuel Vadot 	/* check if all descriptors of the map are done */
435972adf0fSEmmanuel Vadot 	while (sc->tx_map_tail != sc->tx_map_head) {
436972adf0fSEmmanuel Vadot 		map_finished = true;
437972adf0fSEmmanuel Vadot 		bmap = &sc->txbuf_map[sc->tx_map_tail];
438972adf0fSEmmanuel Vadot 		idx = sc->tx_desc_tail;
439972adf0fSEmmanuel Vadot 		last_idx = next_txidx(sc, bmap->last_desc_idx);
440972adf0fSEmmanuel Vadot 		while (idx != last_idx) {
441972adf0fSEmmanuel Vadot 			desc = &sc->txdesc_ring[idx];
442972adf0fSEmmanuel Vadot 			if ((desc->desc0 & TDESC0_OWN) != 0) {
443972adf0fSEmmanuel Vadot 				map_finished = false;
444972adf0fSEmmanuel Vadot 				break;
445972adf0fSEmmanuel Vadot 			}
446972adf0fSEmmanuel Vadot 			idx = next_txidx(sc, idx);
447972adf0fSEmmanuel Vadot 		}
448972adf0fSEmmanuel Vadot 
449972adf0fSEmmanuel Vadot 		if (!map_finished)
450972adf0fSEmmanuel Vadot 			break;
451972adf0fSEmmanuel Vadot 		bus_dmamap_sync(sc->txbuf_tag, bmap->map,
452972adf0fSEmmanuel Vadot 		    BUS_DMASYNC_POSTWRITE);
453972adf0fSEmmanuel Vadot 		bus_dmamap_unload(sc->txbuf_tag, bmap->map);
454972adf0fSEmmanuel Vadot 		m_freem(bmap->mbuf);
455972adf0fSEmmanuel Vadot 		bmap->mbuf = NULL;
456972adf0fSEmmanuel Vadot 		sc->tx_mapcount--;
457972adf0fSEmmanuel Vadot 		while (sc->tx_desc_tail != last_idx) {
4587786911fSEmmanuel Vadot 			txdesc_clear(sc, sc->tx_desc_tail);
459972adf0fSEmmanuel Vadot 			sc->tx_desc_tail = next_txidx(sc, sc->tx_desc_tail);
460972adf0fSEmmanuel Vadot 		}
461972adf0fSEmmanuel Vadot 		sc->tx_map_tail = next_txidx(sc, sc->tx_map_tail);
462972adf0fSEmmanuel Vadot 		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
463972adf0fSEmmanuel Vadot 		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
464972adf0fSEmmanuel Vadot 	}
465972adf0fSEmmanuel Vadot 
466972adf0fSEmmanuel Vadot 	/* If there are no buffers outstanding, muzzle the watchdog. */
467972adf0fSEmmanuel Vadot 	if (sc->tx_desc_tail == sc->tx_desc_head) {
468972adf0fSEmmanuel Vadot 		sc->tx_watchdog_count = 0;
469972adf0fSEmmanuel Vadot 	}
470972adf0fSEmmanuel Vadot }
471972adf0fSEmmanuel Vadot 
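/*
 * Dequeue packets from the interface send queue, place them on the TX ring
 * and kick the DMA engine with a transmit poll demand.
 */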
472972adf0fSEmmanuel Vadot void
473afa0f66eSEmmanuel Vadot dma1000_txstart(struct dwc_softc *sc)
474afa0f66eSEmmanuel Vadot {
475afa0f66eSEmmanuel Vadot 	int enqueued;
476afa0f66eSEmmanuel Vadot 	struct mbuf *m;
477afa0f66eSEmmanuel Vadot 
478afa0f66eSEmmanuel Vadot 	enqueued = 0;
479afa0f66eSEmmanuel Vadot 
480afa0f66eSEmmanuel Vadot 	for (;;) {
481afa0f66eSEmmanuel Vadot 		if (sc->tx_desccount > (TX_DESC_COUNT - TX_MAP_MAX_SEGS + 1)) {
482afa0f66eSEmmanuel Vadot 			if_setdrvflagbits(sc->ifp, IFF_DRV_OACTIVE, 0);
483afa0f66eSEmmanuel Vadot 			break;
484afa0f66eSEmmanuel Vadot 		}
485afa0f66eSEmmanuel Vadot 
486afa0f66eSEmmanuel Vadot 		if (sc->tx_mapcount == (TX_MAP_COUNT - 1)) {
487afa0f66eSEmmanuel Vadot 			if_setdrvflagbits(sc->ifp, IFF_DRV_OACTIVE, 0);
488afa0f66eSEmmanuel Vadot 			break;
489afa0f66eSEmmanuel Vadot 		}
490afa0f66eSEmmanuel Vadot 
491afa0f66eSEmmanuel Vadot 		m = if_dequeue(sc->ifp);
492afa0f66eSEmmanuel Vadot 		if (m == NULL)
493afa0f66eSEmmanuel Vadot 			break;
494afa0f66eSEmmanuel Vadot 		if (dma1000_setup_txbuf(sc, sc->tx_map_head, &m) != 0) {
495afa0f66eSEmmanuel Vadot 			if_sendq_prepend(sc->ifp, m);
496afa0f66eSEmmanuel Vadot 			if_setdrvflagbits(sc->ifp, IFF_DRV_OACTIVE, 0);
497afa0f66eSEmmanuel Vadot 			break;
498afa0f66eSEmmanuel Vadot 		}
499afa0f66eSEmmanuel Vadot 		bpf_mtap_if(sc->ifp, m);
500afa0f66eSEmmanuel Vadot 		sc->tx_map_head = next_txidx(sc, sc->tx_map_head);
501afa0f66eSEmmanuel Vadot 		sc->tx_mapcount++;
502afa0f66eSEmmanuel Vadot 		++enqueued;
503afa0f66eSEmmanuel Vadot 	}
504afa0f66eSEmmanuel Vadot 
505afa0f66eSEmmanuel Vadot 	if (enqueued != 0) {
506afa0f66eSEmmanuel Vadot 		WRITE4(sc, TRANSMIT_POLL_DEMAND, 0x1);
507afa0f66eSEmmanuel Vadot 		sc->tx_watchdog_count = WATCHDOG_TIMEOUT_SECS;
508afa0f66eSEmmanuel Vadot 	}
509afa0f66eSEmmanuel Vadot }
510afa0f66eSEmmanuel Vadot 
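/*
 * Process every RX descriptor the hardware has released, refilling each
 * slot in place so no hole is ever created in the RX ring.
 */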
511afa0f66eSEmmanuel Vadot void
512972adf0fSEmmanuel Vadot dma1000_rxfinish_locked(struct dwc_softc *sc)
513972adf0fSEmmanuel Vadot {
514972adf0fSEmmanuel Vadot 	struct mbuf *m;
515972adf0fSEmmanuel Vadot 	int error, idx;
516972adf0fSEmmanuel Vadot 	struct dwc_hwdesc *desc;
517972adf0fSEmmanuel Vadot 
518972adf0fSEmmanuel Vadot 	DWC_ASSERT_LOCKED(sc);
519972adf0fSEmmanuel Vadot 	for (;;) {
520972adf0fSEmmanuel Vadot 		idx = sc->rx_idx;
521972adf0fSEmmanuel Vadot 		desc = sc->rxdesc_ring + idx;
522972adf0fSEmmanuel Vadot 		if ((desc->desc0 & RDESC0_OWN) != 0)
523972adf0fSEmmanuel Vadot 			break;
524972adf0fSEmmanuel Vadot 
525972adf0fSEmmanuel Vadot 		m = dwc_rxfinish_one(sc, desc, sc->rxbuf_map + idx);
526972adf0fSEmmanuel Vadot 		if (m == NULL) {
527972adf0fSEmmanuel Vadot 			wmb();
52843cd6bbbSEmmanuel Vadot 			desc->desc0 = RDESC0_OWN;
529*0cb63dcaSEmmanuel Vadot 			wmb();
530972adf0fSEmmanuel Vadot 		} else {
531972adf0fSEmmanuel Vadot 			/* We cannot create a hole in the RX ring */
532972adf0fSEmmanuel Vadot 			error = dma1000_setup_rxbuf(sc, idx, m);
533972adf0fSEmmanuel Vadot 			if (error != 0)
534972adf0fSEmmanuel Vadot 				panic("dma1000_setup_rxbuf failed:  error %d\n",
535972adf0fSEmmanuel Vadot 				    error);
536972adf0fSEmmanuel Vadot 
537972adf0fSEmmanuel Vadot 		}
538972adf0fSEmmanuel Vadot 		sc->rx_idx = next_rxidx(sc, sc->rx_idx);
539972adf0fSEmmanuel Vadot 	}
540972adf0fSEmmanuel Vadot }
541972adf0fSEmmanuel Vadot 
542972adf0fSEmmanuel Vadot /*
543972adf0fSEmmanuel Vadot  * Start the DMA controller
544972adf0fSEmmanuel Vadot  */
545972adf0fSEmmanuel Vadot void
546972adf0fSEmmanuel Vadot dma1000_start(struct dwc_softc *sc)
547972adf0fSEmmanuel Vadot {
548972adf0fSEmmanuel Vadot 	uint32_t reg;
549972adf0fSEmmanuel Vadot 
550972adf0fSEmmanuel Vadot 	DWC_ASSERT_LOCKED(sc);
551972adf0fSEmmanuel Vadot 
552972adf0fSEmmanuel Vadot 	/* Initialize DMA and enable transmitters */
553972adf0fSEmmanuel Vadot 	reg = READ4(sc, OPERATION_MODE);
554972adf0fSEmmanuel Vadot 	reg |= (MODE_TSF | MODE_OSF | MODE_FUF);
555972adf0fSEmmanuel Vadot 	reg &= ~(MODE_RSF);
556972adf0fSEmmanuel Vadot 	reg |= (MODE_RTC_LEV32 << MODE_RTC_SHIFT);
557972adf0fSEmmanuel Vadot 	WRITE4(sc, OPERATION_MODE, reg);
558972adf0fSEmmanuel Vadot 
559972adf0fSEmmanuel Vadot 	WRITE4(sc, INTERRUPT_ENABLE, INT_EN_DEFAULT);
560972adf0fSEmmanuel Vadot 
561972adf0fSEmmanuel Vadot 	/* Start DMA */
562972adf0fSEmmanuel Vadot 	reg = READ4(sc, OPERATION_MODE);
563972adf0fSEmmanuel Vadot 	reg |= (MODE_ST | MODE_SR);
564972adf0fSEmmanuel Vadot 	WRITE4(sc, OPERATION_MODE, reg);
565972adf0fSEmmanuel Vadot }
566972adf0fSEmmanuel Vadot 
567972adf0fSEmmanuel Vadot /*
568972adf0fSEmmanuel Vadot  * Stop the DMA controller
569972adf0fSEmmanuel Vadot  */
570972adf0fSEmmanuel Vadot void
571972adf0fSEmmanuel Vadot dma1000_stop(struct dwc_softc *sc)
572972adf0fSEmmanuel Vadot {
573972adf0fSEmmanuel Vadot 	uint32_t reg;
574972adf0fSEmmanuel Vadot 
575972adf0fSEmmanuel Vadot 	DWC_ASSERT_LOCKED(sc);
576972adf0fSEmmanuel Vadot 
577972adf0fSEmmanuel Vadot 	/* Stop DMA TX */
578972adf0fSEmmanuel Vadot 	reg = READ4(sc, OPERATION_MODE);
579972adf0fSEmmanuel Vadot 	reg &= ~(MODE_ST);
580972adf0fSEmmanuel Vadot 	WRITE4(sc, OPERATION_MODE, reg);
581972adf0fSEmmanuel Vadot 
582972adf0fSEmmanuel Vadot 	/* Flush TX */
583972adf0fSEmmanuel Vadot 	reg = READ4(sc, OPERATION_MODE);
584972adf0fSEmmanuel Vadot 	reg |= (MODE_FTF);
585972adf0fSEmmanuel Vadot 	WRITE4(sc, OPERATION_MODE, reg);
586972adf0fSEmmanuel Vadot 
587972adf0fSEmmanuel Vadot 	/* Stop DMA RX */
588972adf0fSEmmanuel Vadot 	reg = READ4(sc, OPERATION_MODE);
589972adf0fSEmmanuel Vadot 	reg &= ~(MODE_SR);
590972adf0fSEmmanuel Vadot 	WRITE4(sc, OPERATION_MODE, reg);
591972adf0fSEmmanuel Vadot }
592972adf0fSEmmanuel Vadot 
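/*
 * Perform a software reset of the DMA engine and wait for it to complete.
 * Returns ENXIO if the reset bit does not clear within the timeout.
 */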
593363b7c39SEmmanuel Vadot int
594363b7c39SEmmanuel Vadot dma1000_reset(struct dwc_softc *sc)
595363b7c39SEmmanuel Vadot {
596363b7c39SEmmanuel Vadot 	uint32_t reg;
597363b7c39SEmmanuel Vadot 	int i;
598363b7c39SEmmanuel Vadot 
599363b7c39SEmmanuel Vadot 	reg = READ4(sc, BUS_MODE);
600363b7c39SEmmanuel Vadot 	reg |= (BUS_MODE_SWR);
601363b7c39SEmmanuel Vadot 	WRITE4(sc, BUS_MODE, reg);
602363b7c39SEmmanuel Vadot 
603363b7c39SEmmanuel Vadot 	for (i = 0; i < DMA_RESET_TIMEOUT; i++) {
604363b7c39SEmmanuel Vadot 		if ((READ4(sc, BUS_MODE) & BUS_MODE_SWR) == 0)
605363b7c39SEmmanuel Vadot 			break;
606363b7c39SEmmanuel Vadot 		DELAY(10);
607363b7c39SEmmanuel Vadot 	}
608363b7c39SEmmanuel Vadot 	if (i >= DMA_RESET_TIMEOUT) {
609363b7c39SEmmanuel Vadot 		return (ENXIO);
610363b7c39SEmmanuel Vadot 	}
611363b7c39SEmmanuel Vadot 
612363b7c39SEmmanuel Vadot 	return (0);
613363b7c39SEmmanuel Vadot }
614363b7c39SEmmanuel Vadot 
615972adf0fSEmmanuel Vadot /*
616972adf0fSEmmanuel Vadot  * Create the bus_dma resources and program the descriptor ring addresses
617972adf0fSEmmanuel Vadot  */
618972adf0fSEmmanuel Vadot int
619972adf0fSEmmanuel Vadot dma1000_init(struct dwc_softc *sc)
620972adf0fSEmmanuel Vadot {
621972adf0fSEmmanuel Vadot 	struct mbuf *m;
622f0a7dd77SEmmanuel Vadot 	uint32_t reg;
623972adf0fSEmmanuel Vadot 	int error;
624972adf0fSEmmanuel Vadot 	int nidx;
625972adf0fSEmmanuel Vadot 	int idx;
626972adf0fSEmmanuel Vadot 
6275d88a52bSEmmanuel Vadot 	reg = BUS_MODE_USP;
6285d88a52bSEmmanuel Vadot 	if (!sc->nopblx8)
6295d88a52bSEmmanuel Vadot 		reg |= BUS_MODE_EIGHTXPBL;
6305d88a52bSEmmanuel Vadot 	reg |= (sc->txpbl << BUS_MODE_PBL_SHIFT);
6315d88a52bSEmmanuel Vadot 	reg |= (sc->rxpbl << BUS_MODE_RPBL_SHIFT);
6325d88a52bSEmmanuel Vadot 	if (sc->fixed_burst)
6335d88a52bSEmmanuel Vadot 		reg |= BUS_MODE_FIXEDBURST;
6345d88a52bSEmmanuel Vadot 	if (sc->mixed_burst)
6355d88a52bSEmmanuel Vadot 		reg |= BUS_MODE_MIXEDBURST;
6365d88a52bSEmmanuel Vadot 	if (sc->aal)
6375d88a52bSEmmanuel Vadot 		reg |= BUS_MODE_AAL;
6385d88a52bSEmmanuel Vadot 
6395d88a52bSEmmanuel Vadot 	WRITE4(sc, BUS_MODE, reg);
6405d88a52bSEmmanuel Vadot 
6414b7975ecSEmmanuel Vadot 	reg = READ4(sc, HW_FEATURE);
6424b7975ecSEmmanuel Vadot 	if (reg & HW_FEATURE_EXT_DESCRIPTOR)
6434b7975ecSEmmanuel Vadot 		sc->dma_ext_desc = true;
6444b7975ecSEmmanuel Vadot 
645972adf0fSEmmanuel Vadot 	/*
646f0a7dd77SEmmanuel Vadot 	 * DMA must be stopped while changing descriptor list addresses.
647f0a7dd77SEmmanuel Vadot 	 */
648f0a7dd77SEmmanuel Vadot 	reg = READ4(sc, OPERATION_MODE);
649f0a7dd77SEmmanuel Vadot 	reg &= ~(MODE_ST | MODE_SR);
650f0a7dd77SEmmanuel Vadot 	WRITE4(sc, OPERATION_MODE, reg);
651f0a7dd77SEmmanuel Vadot 
652f0a7dd77SEmmanuel Vadot 	/*
653972adf0fSEmmanuel Vadot 	 * Set up TX descriptor ring, descriptors, and dma maps.
654972adf0fSEmmanuel Vadot 	 */
655972adf0fSEmmanuel Vadot 	error = bus_dma_tag_create(
656972adf0fSEmmanuel Vadot 	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
657972adf0fSEmmanuel Vadot 	    DWC_DESC_RING_ALIGN, 0,	/* alignment, boundary */
658972adf0fSEmmanuel Vadot 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
659972adf0fSEmmanuel Vadot 	    BUS_SPACE_MAXADDR,		/* highaddr */
660972adf0fSEmmanuel Vadot 	    NULL, NULL,			/* filter, filterarg */
661972adf0fSEmmanuel Vadot 	    TX_DESC_SIZE, 1, 		/* maxsize, nsegments */
662972adf0fSEmmanuel Vadot 	    TX_DESC_SIZE,		/* maxsegsize */
663972adf0fSEmmanuel Vadot 	    0,				/* flags */
664972adf0fSEmmanuel Vadot 	    NULL, NULL,			/* lockfunc, lockarg */
665972adf0fSEmmanuel Vadot 	    &sc->txdesc_tag);
666972adf0fSEmmanuel Vadot 	if (error != 0) {
667972adf0fSEmmanuel Vadot 		device_printf(sc->dev,
668972adf0fSEmmanuel Vadot 		    "could not create TX ring DMA tag.\n");
669972adf0fSEmmanuel Vadot 		goto out;
670972adf0fSEmmanuel Vadot 	}
671972adf0fSEmmanuel Vadot 
672972adf0fSEmmanuel Vadot 	error = bus_dmamem_alloc(sc->txdesc_tag, (void**)&sc->txdesc_ring,
673972adf0fSEmmanuel Vadot 	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
674972adf0fSEmmanuel Vadot 	    &sc->txdesc_map);
675972adf0fSEmmanuel Vadot 	if (error != 0) {
676972adf0fSEmmanuel Vadot 		device_printf(sc->dev,
677972adf0fSEmmanuel Vadot 		    "could not allocate TX descriptor ring.\n");
678972adf0fSEmmanuel Vadot 		goto out;
679972adf0fSEmmanuel Vadot 	}
680972adf0fSEmmanuel Vadot 
681972adf0fSEmmanuel Vadot 	error = bus_dmamap_load(sc->txdesc_tag, sc->txdesc_map,
682972adf0fSEmmanuel Vadot 	    sc->txdesc_ring, TX_DESC_SIZE, dwc_get1paddr,
683972adf0fSEmmanuel Vadot 	    &sc->txdesc_ring_paddr, 0);
684972adf0fSEmmanuel Vadot 	if (error != 0) {
685972adf0fSEmmanuel Vadot 		device_printf(sc->dev,
686972adf0fSEmmanuel Vadot 		    "could not load TX descriptor ring map.\n");
687972adf0fSEmmanuel Vadot 		goto out;
688972adf0fSEmmanuel Vadot 	}
689972adf0fSEmmanuel Vadot 
690972adf0fSEmmanuel Vadot 	for (idx = 0; idx < TX_DESC_COUNT; idx++) {
691972adf0fSEmmanuel Vadot 		nidx = next_txidx(sc, idx);
692972adf0fSEmmanuel Vadot 		sc->txdesc_ring[idx].addr2 = sc->txdesc_ring_paddr +
693972adf0fSEmmanuel Vadot 		    (nidx * sizeof(struct dwc_hwdesc));
694972adf0fSEmmanuel Vadot 	}
695972adf0fSEmmanuel Vadot 
696972adf0fSEmmanuel Vadot 	error = bus_dma_tag_create(
697972adf0fSEmmanuel Vadot 	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
698972adf0fSEmmanuel Vadot 	    1, 0,			/* alignment, boundary */
699972adf0fSEmmanuel Vadot 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
700972adf0fSEmmanuel Vadot 	    BUS_SPACE_MAXADDR,		/* highaddr */
701972adf0fSEmmanuel Vadot 	    NULL, NULL,			/* filter, filterarg */
702972adf0fSEmmanuel Vadot 	    MCLBYTES*TX_MAP_MAX_SEGS,	/* maxsize */
703972adf0fSEmmanuel Vadot 	    TX_MAP_MAX_SEGS,		/* nsegments */
704972adf0fSEmmanuel Vadot 	    MCLBYTES,			/* maxsegsize */
705972adf0fSEmmanuel Vadot 	    0,				/* flags */
706972adf0fSEmmanuel Vadot 	    NULL, NULL,			/* lockfunc, lockarg */
707972adf0fSEmmanuel Vadot 	    &sc->txbuf_tag);
708972adf0fSEmmanuel Vadot 	if (error != 0) {
709972adf0fSEmmanuel Vadot 		device_printf(sc->dev,
710972adf0fSEmmanuel Vadot 		    "could not create TX buffer DMA tag.\n");
711972adf0fSEmmanuel Vadot 		goto out;
712972adf0fSEmmanuel Vadot 	}
713972adf0fSEmmanuel Vadot 
714972adf0fSEmmanuel Vadot 	for (idx = 0; idx < TX_MAP_COUNT; idx++) {
715972adf0fSEmmanuel Vadot 		error = bus_dmamap_create(sc->txbuf_tag, BUS_DMA_COHERENT,
716972adf0fSEmmanuel Vadot 		    &sc->txbuf_map[idx].map);
717972adf0fSEmmanuel Vadot 		if (error != 0) {
718972adf0fSEmmanuel Vadot 			device_printf(sc->dev,
719972adf0fSEmmanuel Vadot 			    "could not create TX buffer DMA map.\n");
720972adf0fSEmmanuel Vadot 			goto out;
721972adf0fSEmmanuel Vadot 		}
722972adf0fSEmmanuel Vadot 	}
723972adf0fSEmmanuel Vadot 
724972adf0fSEmmanuel Vadot 	for (idx = 0; idx < TX_DESC_COUNT; idx++)
7257786911fSEmmanuel Vadot 		txdesc_clear(sc, idx);
726972adf0fSEmmanuel Vadot 
727f0a7dd77SEmmanuel Vadot 	WRITE4(sc, TX_DESCR_LIST_ADDR, sc->txdesc_ring_paddr);
728f0a7dd77SEmmanuel Vadot 
729972adf0fSEmmanuel Vadot 	/*
730972adf0fSEmmanuel Vadot 	 * Set up RX descriptor ring, descriptors, dma maps, and mbufs.
731972adf0fSEmmanuel Vadot 	 */
732972adf0fSEmmanuel Vadot 	error = bus_dma_tag_create(
733972adf0fSEmmanuel Vadot 	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
734972adf0fSEmmanuel Vadot 	    DWC_DESC_RING_ALIGN, 0,	/* alignment, boundary */
735972adf0fSEmmanuel Vadot 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
736972adf0fSEmmanuel Vadot 	    BUS_SPACE_MAXADDR,		/* highaddr */
737972adf0fSEmmanuel Vadot 	    NULL, NULL,			/* filter, filterarg */
738972adf0fSEmmanuel Vadot 	    RX_DESC_SIZE, 1, 		/* maxsize, nsegments */
739972adf0fSEmmanuel Vadot 	    RX_DESC_SIZE,		/* maxsegsize */
740972adf0fSEmmanuel Vadot 	    0,				/* flags */
741972adf0fSEmmanuel Vadot 	    NULL, NULL,			/* lockfunc, lockarg */
742972adf0fSEmmanuel Vadot 	    &sc->rxdesc_tag);
743972adf0fSEmmanuel Vadot 	if (error != 0) {
744972adf0fSEmmanuel Vadot 		device_printf(sc->dev,
745972adf0fSEmmanuel Vadot 		    "could not create RX ring DMA tag.\n");
746972adf0fSEmmanuel Vadot 		goto out;
747972adf0fSEmmanuel Vadot 	}
748972adf0fSEmmanuel Vadot 
749972adf0fSEmmanuel Vadot 	error = bus_dmamem_alloc(sc->rxdesc_tag, (void **)&sc->rxdesc_ring,
750972adf0fSEmmanuel Vadot 	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
751972adf0fSEmmanuel Vadot 	    &sc->rxdesc_map);
752972adf0fSEmmanuel Vadot 	if (error != 0) {
753972adf0fSEmmanuel Vadot 		device_printf(sc->dev,
754972adf0fSEmmanuel Vadot 		    "could not allocate RX descriptor ring.\n");
755972adf0fSEmmanuel Vadot 		goto out;
756972adf0fSEmmanuel Vadot 	}
757972adf0fSEmmanuel Vadot 
758972adf0fSEmmanuel Vadot 	error = bus_dmamap_load(sc->rxdesc_tag, sc->rxdesc_map,
759972adf0fSEmmanuel Vadot 	    sc->rxdesc_ring, RX_DESC_SIZE, dwc_get1paddr,
760972adf0fSEmmanuel Vadot 	    &sc->rxdesc_ring_paddr, 0);
761972adf0fSEmmanuel Vadot 	if (error != 0) {
762972adf0fSEmmanuel Vadot 		device_printf(sc->dev,
763972adf0fSEmmanuel Vadot 		    "could not load RX descriptor ring map.\n");
764972adf0fSEmmanuel Vadot 		goto out;
765972adf0fSEmmanuel Vadot 	}
766972adf0fSEmmanuel Vadot 
767972adf0fSEmmanuel Vadot 	error = bus_dma_tag_create(
768972adf0fSEmmanuel Vadot 	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
769972adf0fSEmmanuel Vadot 	    1, 0,			/* alignment, boundary */
770972adf0fSEmmanuel Vadot 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
771972adf0fSEmmanuel Vadot 	    BUS_SPACE_MAXADDR,		/* highaddr */
772972adf0fSEmmanuel Vadot 	    NULL, NULL,			/* filter, filterarg */
773972adf0fSEmmanuel Vadot 	    MCLBYTES, 1, 		/* maxsize, nsegments */
774972adf0fSEmmanuel Vadot 	    MCLBYTES,			/* maxsegsize */
775972adf0fSEmmanuel Vadot 	    0,				/* flags */
776972adf0fSEmmanuel Vadot 	    NULL, NULL,			/* lockfunc, lockarg */
777972adf0fSEmmanuel Vadot 	    &sc->rxbuf_tag);
778972adf0fSEmmanuel Vadot 	if (error != 0) {
779972adf0fSEmmanuel Vadot 		device_printf(sc->dev,
780972adf0fSEmmanuel Vadot 		    "could not create RX buf DMA tag.\n");
781972adf0fSEmmanuel Vadot 		goto out;
782972adf0fSEmmanuel Vadot 	}
783972adf0fSEmmanuel Vadot 
784972adf0fSEmmanuel Vadot 	for (idx = 0; idx < RX_DESC_COUNT; idx++) {
785972adf0fSEmmanuel Vadot 		error = bus_dmamap_create(sc->rxbuf_tag, BUS_DMA_COHERENT,
786972adf0fSEmmanuel Vadot 		    &sc->rxbuf_map[idx].map);
787972adf0fSEmmanuel Vadot 		if (error != 0) {
788972adf0fSEmmanuel Vadot 			device_printf(sc->dev,
789972adf0fSEmmanuel Vadot 			    "could not create RX buffer DMA map.\n");
790972adf0fSEmmanuel Vadot 			goto out;
791972adf0fSEmmanuel Vadot 		}
792972adf0fSEmmanuel Vadot 		if ((m = dwc_alloc_mbufcl(sc)) == NULL) {
793972adf0fSEmmanuel Vadot 			device_printf(sc->dev, "Could not alloc mbuf\n");
794972adf0fSEmmanuel Vadot 			error = ENOMEM;
795972adf0fSEmmanuel Vadot 			goto out;
796972adf0fSEmmanuel Vadot 		}
797972adf0fSEmmanuel Vadot 		if ((error = dma1000_setup_rxbuf(sc, idx, m)) != 0) {
798972adf0fSEmmanuel Vadot 			device_printf(sc->dev,
799972adf0fSEmmanuel Vadot 			    "could not create new RX buffer.\n");
800972adf0fSEmmanuel Vadot 			goto out;
801972adf0fSEmmanuel Vadot 		}
802972adf0fSEmmanuel Vadot 	}
803f0a7dd77SEmmanuel Vadot 	WRITE4(sc, RX_DESCR_LIST_ADDR, sc->rxdesc_ring_paddr);
804972adf0fSEmmanuel Vadot 
805972adf0fSEmmanuel Vadot out:
806972adf0fSEmmanuel Vadot 	if (error != 0)
807972adf0fSEmmanuel Vadot 		return (ENXIO);
808972adf0fSEmmanuel Vadot 
809972adf0fSEmmanuel Vadot 	return (0);
810972adf0fSEmmanuel Vadot }
811972adf0fSEmmanuel Vadot 
812972adf0fSEmmanuel Vadot /*
813972adf0fSEmmanuel Vadot  * Free the bus_dma resources
814972adf0fSEmmanuel Vadot  */
815972adf0fSEmmanuel Vadot void
816972adf0fSEmmanuel Vadot dma1000_free(struct dwc_softc *sc)
817972adf0fSEmmanuel Vadot {
818972adf0fSEmmanuel Vadot 	bus_dmamap_t map;
819972adf0fSEmmanuel Vadot 	int idx;
820972adf0fSEmmanuel Vadot 
821972adf0fSEmmanuel Vadot 	/* Clean up RX DMA resources and free mbufs. */
822972adf0fSEmmanuel Vadot 	for (idx = 0; idx < RX_DESC_COUNT; ++idx) {
823972adf0fSEmmanuel Vadot 		if ((map = sc->rxbuf_map[idx].map) != NULL) {
824972adf0fSEmmanuel Vadot 			bus_dmamap_unload(sc->rxbuf_tag, map);
825972adf0fSEmmanuel Vadot 			bus_dmamap_destroy(sc->rxbuf_tag, map);
826972adf0fSEmmanuel Vadot 			m_freem(sc->rxbuf_map[idx].mbuf);
827972adf0fSEmmanuel Vadot 		}
828972adf0fSEmmanuel Vadot 	}
829972adf0fSEmmanuel Vadot 	if (sc->rxbuf_tag != NULL)
830972adf0fSEmmanuel Vadot 		bus_dma_tag_destroy(sc->rxbuf_tag);
831972adf0fSEmmanuel Vadot 	if (sc->rxdesc_map != NULL) {
832972adf0fSEmmanuel Vadot 		bus_dmamap_unload(sc->rxdesc_tag, sc->rxdesc_map);
833972adf0fSEmmanuel Vadot 		bus_dmamem_free(sc->rxdesc_tag, sc->rxdesc_ring,
834972adf0fSEmmanuel Vadot 		    sc->rxdesc_map);
835972adf0fSEmmanuel Vadot 	}
836972adf0fSEmmanuel Vadot 	if (sc->rxdesc_tag != NULL)
837972adf0fSEmmanuel Vadot 		bus_dma_tag_destroy(sc->rxdesc_tag);
838972adf0fSEmmanuel Vadot 
839972adf0fSEmmanuel Vadot 	/* Clean up TX DMA resources. */
840972adf0fSEmmanuel Vadot 	for (idx = 0; idx < TX_DESC_COUNT; ++idx) {
841972adf0fSEmmanuel Vadot 		if ((map = sc->txbuf_map[idx].map) != NULL) {
842972adf0fSEmmanuel Vadot 			/* TX maps are already unloaded. */
843972adf0fSEmmanuel Vadot 			bus_dmamap_destroy(sc->txbuf_tag, map);
844972adf0fSEmmanuel Vadot 		}
845972adf0fSEmmanuel Vadot 	}
846972adf0fSEmmanuel Vadot 	if (sc->txbuf_tag != NULL)
847972adf0fSEmmanuel Vadot 		bus_dma_tag_destroy(sc->txbuf_tag);
848972adf0fSEmmanuel Vadot 	if (sc->txdesc_map != NULL) {
849972adf0fSEmmanuel Vadot 		bus_dmamap_unload(sc->txdesc_tag, sc->txdesc_map);
850972adf0fSEmmanuel Vadot 		bus_dmamem_free(sc->txdesc_tag, sc->txdesc_ring,
851972adf0fSEmmanuel Vadot 		    sc->txdesc_map);
852972adf0fSEmmanuel Vadot 	}
853972adf0fSEmmanuel Vadot 	if (sc->txdesc_tag != NULL)
854972adf0fSEmmanuel Vadot 		bus_dma_tag_destroy(sc->txdesc_tag);
855972adf0fSEmmanuel Vadot }
85629776aa4SEmmanuel Vadot 
85729776aa4SEmmanuel Vadot /*
85829776aa4SEmmanuel Vadot  * Interrupt function: handle RX/TX completions and fatal bus errors
85929776aa4SEmmanuel Vadot  */
86029776aa4SEmmanuel Vadot 
86129776aa4SEmmanuel Vadot int
86229776aa4SEmmanuel Vadot dma1000_intr(struct dwc_softc *sc)
86329776aa4SEmmanuel Vadot {
86429776aa4SEmmanuel Vadot 	uint32_t reg;
86529776aa4SEmmanuel Vadot 	int rv;
86629776aa4SEmmanuel Vadot 
86729776aa4SEmmanuel Vadot 	DWC_ASSERT_LOCKED(sc);
86829776aa4SEmmanuel Vadot 
86929776aa4SEmmanuel Vadot 	rv = 0;
87029776aa4SEmmanuel Vadot 	reg = READ4(sc, DMA_STATUS);
87129776aa4SEmmanuel Vadot 	if (reg & DMA_STATUS_NIS) {
87229776aa4SEmmanuel Vadot 		if (reg & DMA_STATUS_RI)
87329776aa4SEmmanuel Vadot 			dma1000_rxfinish_locked(sc);
87429776aa4SEmmanuel Vadot 
87529776aa4SEmmanuel Vadot 		if (reg & DMA_STATUS_TI) {
87629776aa4SEmmanuel Vadot 			dma1000_txfinish_locked(sc);
87729776aa4SEmmanuel Vadot 			dma1000_txstart(sc);
87829776aa4SEmmanuel Vadot 		}
87929776aa4SEmmanuel Vadot 	}
88029776aa4SEmmanuel Vadot 
88129776aa4SEmmanuel Vadot 	if (reg & DMA_STATUS_AIS) {
88229776aa4SEmmanuel Vadot 		if (reg & DMA_STATUS_FBI) {
88329776aa4SEmmanuel Vadot 			/* Fatal bus error */
88429776aa4SEmmanuel Vadot 			rv = EIO;
88529776aa4SEmmanuel Vadot 		}
88629776aa4SEmmanuel Vadot 	}
88729776aa4SEmmanuel Vadot 
88829776aa4SEmmanuel Vadot 	WRITE4(sc, DMA_STATUS, reg & DMA_STATUS_INTR_MASK);
88929776aa4SEmmanuel Vadot 	return (rv);
89029776aa4SEmmanuel Vadot }
891