xref: /freebsd/sys/dev/dwc/dwc1000_dma.c (revision 4f8f43b06ed07e96a250855488cc531799d5b78f)
/*-
 * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/socket.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>

#include <machine/bus.h>

#include <dev/extres/clk/clk.h>
#include <dev/extres/hwreset/hwreset.h>

#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <dev/dwc/if_dwcvar.h>
#include <dev/dwc/dwc1000_reg.h>
#include <dev/dwc/dwc1000_dma.h>

#define	WATCHDOG_TIMEOUT_SECS	5
#define	DMA_RESET_TIMEOUT	100

/* TX descriptors - TDESC0 is almost unified */
#define	TDESC0_OWN		(1U << 31)
#define	TDESC0_IHE		(1U << 16)	/* IP Header Error */
#define	TDESC0_ES		(1U << 15)	/* Error Summary */
#define	TDESC0_JT		(1U << 14)	/* Jabber Timeout */
#define	TDESC0_FF		(1U << 13)	/* Frame Flushed */
#define	TDESC0_PCE		(1U << 12)	/* Payload Checksum Error */
#define	TDESC0_LOC		(1U << 11)	/* Loss of Carrier */
#define	TDESC0_NC		(1U << 10)	/* No Carrier */
#define	TDESC0_LC		(1U <<  9)	/* Late Collision */
#define	TDESC0_EC		(1U <<  8)	/* Excessive Collision */
#define	TDESC0_VF		(1U <<  7)	/* VLAN Frame */
#define	TDESC0_CC_MASK		0xf
#define	TDESC0_CC_SHIFT		3		/* Collision Count */
#define	TDESC0_ED		(1U <<  2)	/* Excessive Deferral */
#define	TDESC0_UF		(1U <<  1)	/* Underflow Error */
#define	TDESC0_DB		(1U <<  0)	/* Deferred Bit */
/* TX descriptors - TDESC0 extended format only */
#define	ETDESC0_IC		(1U << 30)	/* Interrupt on Completion */
#define	ETDESC0_LS		(1U << 29)	/* Last Segment */
#define	ETDESC0_FS		(1U << 28)	/* First Segment */
#define	ETDESC0_DC		(1U << 27)	/* Disable CRC */
#define	ETDESC0_DP		(1U << 26)	/* Disable Padding */
#define	ETDESC0_CIC_NONE	(0U << 22)	/* Checksum Insertion Control */
#define	ETDESC0_CIC_HDR		(1U << 22)
#define	ETDESC0_CIC_SEG		(2U << 22)
#define	ETDESC0_CIC_FULL	(3U << 22)
#define	ETDESC0_TER		(1U << 21)	/* Transmit End of Ring */
#define	ETDESC0_TCH		(1U << 20)	/* Second Address Chained */

/* TX descriptors - TDESC1 normal format */
#define	NTDESC1_IC		(1U << 31)	/* Interrupt on Completion */
#define	NTDESC1_LS		(1U << 30)	/* Last Segment */
#define	NTDESC1_FS		(1U << 29)	/* First Segment */
#define	NTDESC1_CIC_NONE	(0U << 27)	/* Checksum Insertion Control */
#define	NTDESC1_CIC_HDR		(1U << 27)
#define	NTDESC1_CIC_SEG		(2U << 27)
#define	NTDESC1_CIC_FULL	(3U << 27)
#define	NTDESC1_DC		(1U << 26)	/* Disable CRC */
#define	NTDESC1_TER		(1U << 25)	/* Transmit End of Ring */
#define	NTDESC1_TCH		(1U << 24)	/* Second Address Chained */
/* TX descriptors - TDESC1 extended format */
#define	ETDESC1_DP		(1U << 23)	/* Disable Padding */
#define	ETDESC1_TBS2_MASK	0x7ff
#define	ETDESC1_TBS2_SHIFT	11		/* Transmit Buffer 2 Size */
#define	ETDESC1_TBS1_MASK	0x7ff
#define	ETDESC1_TBS1_SHIFT	0		/* Transmit Buffer 1 Size */

/* RX descriptor - RDESC0 is unified */
#define	RDESC0_OWN		(1U << 31)
#define	RDESC0_AFM		(1U << 30)	/* Dest. Address Filter Fail */
#define	RDESC0_FL_MASK		0x3fff
#define	RDESC0_FL_SHIFT		16		/* Frame Length */
#define	RDESC0_ES		(1U << 15)	/* Error Summary */
#define	RDESC0_DE		(1U << 14)	/* Descriptor Error */
#define	RDESC0_SAF		(1U << 13)	/* Source Address Filter Fail */
#define	RDESC0_LE		(1U << 12)	/* Length Error */
#define	RDESC0_OE		(1U << 11)	/* Overflow Error */
#define	RDESC0_VLAN		(1U << 10)	/* VLAN Tag */
#define	RDESC0_FS		(1U <<  9)	/* First Descriptor */
#define	RDESC0_LS		(1U <<  8)	/* Last Descriptor */
#define	RDESC0_ICE		(1U <<  7)	/* IPC Checksum Error */
#define	RDESC0_LC		(1U <<  6)	/* Late Collision */
#define	RDESC0_FT		(1U <<  5)	/* Frame Type */
#define	RDESC0_RWT		(1U <<  4)	/* Receive Watchdog Timeout */
#define	RDESC0_RE		(1U <<  3)	/* Receive Error */
#define	RDESC0_DBE		(1U <<  2)	/* Dribble Bit Error */
#define	RDESC0_CE		(1U <<  1)	/* CRC Error */
#define	RDESC0_PCE		(1U <<  0)	/* Payload Checksum Error */
#define	RDESC0_RXMA		(1U <<  0)	/* Rx MAC Address */

/* RX descriptors - RDESC1 normal format */
#define	NRDESC1_DIC		(1U << 31)	/* Disable Intr on Completion */
#define	NRDESC1_RER		(1U << 25)	/* Receive End of Ring */
#define	NRDESC1_RCH		(1U << 24)	/* Second Address Chained */
#define	NRDESC1_RBS2_MASK	0x7ff
#define	NRDESC1_RBS2_SHIFT	11		/* Receive Buffer 2 Size */
#define	NRDESC1_RBS1_MASK	0x7ff
#define	NRDESC1_RBS1_SHIFT	0		/* Receive Buffer 1 Size */

/* RX descriptors - RDESC1 enhanced format */
#define	ERDESC1_DIC		(1U << 31)	/* Disable Intr on Completion */
#define	ERDESC1_RBS2_MASK	0x7ffff
#define	ERDESC1_RBS2_SHIFT	16		/* Receive Buffer 2 Size */
#define	ERDESC1_RER		(1U << 15)	/* Receive End of Ring */
#define	ERDESC1_RCH		(1U << 14)	/* Second Address Chained */
#define	ERDESC1_RBS1_MASK	0x7ffff
#define	ERDESC1_RBS1_SHIFT	0		/* Receive Buffer 1 Size */

/*
 * The hardware imposes alignment restrictions on various objects involved in
 * DMA transfers.  These values are expressed in bytes (not bits).
 */
#define	DWC_DESC_RING_ALIGN	2048

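/*
 * Ring index helpers: advance an index by one, wrapping around at the end
 * of the ring.
 */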
static inline uint32_t
next_txidx(struct dwc_softc *sc, uint32_t curidx)
{

	return ((curidx + 1) % TX_DESC_COUNT);
}

static inline uint32_t
next_rxidx(struct dwc_softc *sc, uint32_t curidx)
{

	return ((curidx + 1) % RX_DESC_COUNT);
}

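/*
 * bus_dmamap_load() callback; stashes the physical address of the single
 * segment into the bus_addr_t pointed to by arg.
 */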
static void
dwc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{

	if (error != 0)
		return;
	*(bus_addr_t *)arg = segs[0].ds_addr;
}

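/*
 * Release a TX descriptor back to the free pool: zero its buffer pointer
 * and control words and decrement the in-use descriptor count.
 */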
inline static void
txdesc_clear(struct dwc_softc *sc, int idx)
{

	sc->tx_desccount--;
	sc->txdesc_ring[idx].addr1 = (uint32_t)(0);
	sc->txdesc_ring[idx].desc0 = 0;
	sc->txdesc_ring[idx].desc1 = 0;
}

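/*
 * Fill in a TX descriptor for one segment of a frame.  The control bits
 * live in desc1 for normal descriptors and in desc0 for extended ones.
 * The OWN bit is set last, after a write barrier, so the DMA engine never
 * sees a half-initialized descriptor.
 */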
inline static void
txdesc_setup(struct dwc_softc *sc, int idx, bus_addr_t paddr,
  uint32_t len, uint32_t flags, bool first, bool last)
{
	uint32_t desc0, desc1;

	if (!sc->dma_ext_desc) {
		desc0 = 0;
		desc1 = NTDESC1_TCH | len | flags;
		if (first)
			desc1 |= NTDESC1_FS;
		if (last)
			desc1 |= NTDESC1_LS | NTDESC1_IC;
	} else {
		desc0 = ETDESC0_TCH | flags;
		if (first)
			desc0 |= ETDESC0_FS;
		if (last)
			desc0 |= ETDESC0_LS | ETDESC0_IC;
		desc1 = len;
	}
	++sc->tx_desccount;
	sc->txdesc_ring[idx].addr1 = (uint32_t)(paddr);
	sc->txdesc_ring[idx].desc0 = desc0;
	sc->txdesc_ring[idx].desc1 = desc1;
	wmb();
	sc->txdesc_ring[idx].desc0 |= TDESC0_OWN;
	wmb();
}

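/*
 * Fill in an RX descriptor: point it at the new buffer, chain it to the
 * next descriptor, and hand it to the hardware by setting the OWN bit
 * after a write barrier.  Returns the index of the next descriptor.
 */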
inline static uint32_t
rxdesc_setup(struct dwc_softc *sc, int idx, bus_addr_t paddr)
{
	uint32_t nidx;

	sc->rxdesc_ring[idx].addr1 = (uint32_t)paddr;
	nidx = next_rxidx(sc, idx);
	sc->rxdesc_ring[idx].addr2 = sc->rxdesc_ring_paddr +
	    (nidx * sizeof(struct dwc_hwdesc));
	if (!sc->dma_ext_desc)
		sc->rxdesc_ring[idx].desc1 = NRDESC1_RCH |
		    MIN(MCLBYTES, NRDESC1_RBS1_MASK);
	else
		sc->rxdesc_ring[idx].desc1 = ERDESC1_RCH |
		    MIN(MCLBYTES, ERDESC1_RBS1_MASK);

	wmb();
	sc->rxdesc_ring[idx].desc0 = RDESC0_OWN;
	wmb();
	return (nidx);
}

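/*
 * Map an mbuf chain for TX, defragmenting it if it has too many segments,
 * and fill in one descriptor per DMA segment.  On success the mbuf is owned
 * by the map until dma1000_txfinish_locked() reclaims it.
 */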
int
dma1000_setup_txbuf(struct dwc_softc *sc, int idx, struct mbuf **mp)
{
	struct bus_dma_segment segs[TX_MAP_MAX_SEGS];
	int error, nsegs;
	struct mbuf *m;
	uint32_t flags = 0;
	int i;
	int last;

	error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag, sc->txbuf_map[idx].map,
	    *mp, segs, &nsegs, 0);
	if (error == EFBIG) {
		/*
		 * The map may be partially mapped from the first call.
		 * Make sure to reset it.
		 */
		bus_dmamap_unload(sc->txbuf_tag, sc->txbuf_map[idx].map);
		if ((m = m_defrag(*mp, M_NOWAIT)) == NULL)
			return (ENOMEM);
		*mp = m;
		error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag,
		    sc->txbuf_map[idx].map, *mp, segs, &nsegs, 0);
	}
	if (error != 0)
		return (ENOMEM);

	if (sc->tx_desccount + nsegs > TX_DESC_COUNT) {
		bus_dmamap_unload(sc->txbuf_tag, sc->txbuf_map[idx].map);
		return (ENOMEM);
	}

	m = *mp;

	if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) {
		if ((m->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) != 0) {
			if (!sc->dma_ext_desc)
				flags = NTDESC1_CIC_FULL;
			else
				flags = ETDESC0_CIC_FULL;
		} else {
			if (!sc->dma_ext_desc)
				flags = NTDESC1_CIC_HDR;
			else
				flags = ETDESC0_CIC_HDR;
		}
	}

	bus_dmamap_sync(sc->txbuf_tag, sc->txbuf_map[idx].map,
	    BUS_DMASYNC_PREWRITE);

	sc->txbuf_map[idx].mbuf = m;

	for (i = 0; i < nsegs; i++) {
		txdesc_setup(sc, sc->tx_desc_head,
		    segs[i].ds_addr, segs[i].ds_len,
		    (i == 0) ? flags : 0, /* only first desc needs flags */
		    (i == 0),
		    (i == nsegs - 1));
		last = sc->tx_desc_head;
		sc->tx_desc_head = next_txidx(sc, sc->tx_desc_head);
	}

	sc->txbuf_map[idx].last_desc_idx = last;

	return (0);
}

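/*
 * Map a freshly allocated mbuf cluster for RX and attach it to the
 * descriptor at the given index.
 */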
static int
dma1000_setup_rxbuf(struct dwc_softc *sc, int idx, struct mbuf *m)
{
	struct bus_dma_segment seg;
	int error, nsegs;

	m_adj(m, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_sg(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
	    m, &seg, &nsegs, 0);
	if (error != 0)
		return (error);

	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	bus_dmamap_sync(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
	    BUS_DMASYNC_PREREAD);

	sc->rxbuf_map[idx].mbuf = m;
	rxdesc_setup(sc, idx, seg.ds_addr);

	return (0);
}

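/* Allocate an mbuf cluster and size it to its full external buffer. */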
static struct mbuf *
dwc_alloc_mbufcl(struct dwc_softc *sc)
{
	struct mbuf *m;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m != NULL)
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

	return (m);
}

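/*
 * Pass one received frame up the stack.  Returns a replacement mbuf to
 * install in the ring slot, or NULL if the frame was bad or no replacement
 * cluster could be allocated; in either case the caller recycles the old
 * buffer.
 */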
static struct mbuf *
dwc_rxfinish_one(struct dwc_softc *sc, struct dwc_hwdesc *desc,
    struct dwc_bufmap *map)
{
	if_t ifp;
	struct mbuf *m, *m0;
	int len;
	uint32_t rdesc0;

	m = map->mbuf;
	ifp = sc->ifp;
	rdesc0 = desc->desc0;

	if ((rdesc0 & (RDESC0_FS | RDESC0_LS)) !=
	    (RDESC0_FS | RDESC0_LS)) {
		/*
		 * Something went very wrong: the whole packet should have
		 * been received in a single descriptor.  Report the problem.
		 */
		device_printf(sc->dev,
		    "%s: RX descriptor without FIRST and LAST bit set: 0x%08X\n",
		    __func__, rdesc0);
		return (NULL);
	}

	len = (rdesc0 >> RDESC0_FL_SHIFT) & RDESC0_FL_MASK;
	if (len < 64) {
		/*
		 * Length is invalid; recycle the old mbuf.
		 * Probably an impossible case.
		 */
		return (NULL);
	}

	/* Allocate new buffer */
	m0 = dwc_alloc_mbufcl(sc);
	if (m0 == NULL) {
		/* no new mbuf available, recycle old */
		if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, 1);
		return (NULL);
	}
	/* Do dmasync for newly received packet */
	bus_dmamap_sync(sc->rxbuf_tag, map->map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->rxbuf_tag, map->map);

	/* Received packet is valid, process it */
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = len;
	m->m_len = len;
	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

	if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0 &&
	    (rdesc0 & RDESC0_FT) != 0) {
		m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
		if ((rdesc0 & RDESC0_ICE) == 0)
			m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
		if ((rdesc0 & RDESC0_PCE) == 0) {
			m->m_pkthdr.csum_flags |=
			    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xffff;
		}
	}

	/* Remove trailing FCS */
	m_adj(m, -ETHER_CRC_LEN);

	DWC_UNLOCK(sc);
	if_input(ifp, m);
	DWC_LOCK(sc);
	return (m0);
}

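/*
 * Reclaim completed TX buffers: walk the maps from tail to head, and for
 * each map whose descriptors have all been released by the hardware,
 * unload and free the mbuf and return its descriptors to the free pool.
 */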
void
dma1000_txfinish_locked(struct dwc_softc *sc)
{
	struct dwc_bufmap *bmap;
	struct dwc_hwdesc *desc;
	if_t ifp;
	int idx, last_idx;
	bool map_finished;

	DWC_ASSERT_LOCKED(sc);

	ifp = sc->ifp;
	/* check if all descriptors of the map are done */
	while (sc->tx_map_tail != sc->tx_map_head) {
		map_finished = true;
		bmap = &sc->txbuf_map[sc->tx_map_tail];
		idx = sc->tx_desc_tail;
		last_idx = next_txidx(sc, bmap->last_desc_idx);
		while (idx != last_idx) {
			desc = &sc->txdesc_ring[idx];
			if ((desc->desc0 & TDESC0_OWN) != 0) {
				map_finished = false;
				break;
			}
			idx = next_txidx(sc, idx);
		}

		if (!map_finished)
			break;
		bus_dmamap_sync(sc->txbuf_tag, bmap->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->txbuf_tag, bmap->map);
		m_freem(bmap->mbuf);
		bmap->mbuf = NULL;
		sc->tx_mapcount--;
		while (sc->tx_desc_tail != last_idx) {
			txdesc_clear(sc, sc->tx_desc_tail);
			sc->tx_desc_tail = next_txidx(sc, sc->tx_desc_tail);
		}
		sc->tx_map_tail = next_txidx(sc, sc->tx_map_tail);
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
	}

	/* If there are no buffers outstanding, muzzle the watchdog. */
	if (sc->tx_desc_tail == sc->tx_desc_head) {
		sc->tx_watchdog_count = 0;
	}
}

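/*
 * Dequeue frames from the interface send queue and hand them to the DMA
 * engine until descriptors or maps run out, then poke the transmit poll
 * demand register to (re)start transmission.
 */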
void
dma1000_txstart(struct dwc_softc *sc)
{
	int enqueued;
	struct mbuf *m;

	enqueued = 0;

	for (;;) {
		if (sc->tx_desccount > (TX_DESC_COUNT - TX_MAP_MAX_SEGS + 1)) {
			if_setdrvflagbits(sc->ifp, IFF_DRV_OACTIVE, 0);
			break;
		}

		if (sc->tx_mapcount == (TX_MAP_COUNT - 1)) {
			if_setdrvflagbits(sc->ifp, IFF_DRV_OACTIVE, 0);
			break;
		}

		m = if_dequeue(sc->ifp);
		if (m == NULL)
			break;
		if (dma1000_setup_txbuf(sc, sc->tx_map_head, &m) != 0) {
			if_sendq_prepend(sc->ifp, m);
			if_setdrvflagbits(sc->ifp, IFF_DRV_OACTIVE, 0);
			break;
		}
		bpf_mtap_if(sc->ifp, m);
		sc->tx_map_head = next_txidx(sc, sc->tx_map_head);
		sc->tx_mapcount++;
		++enqueued;
	}

	if (enqueued != 0) {
		WRITE4(sc, TRANSMIT_POLL_DEMAND, 0x1);
		sc->tx_watchdog_count = WATCHDOG_TIMEOUT_SECS;
	}
}

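/*
 * Harvest received frames: process descriptors until one still owned by
 * the hardware is found, replacing or recycling the buffer in each slot.
 */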
void
dma1000_rxfinish_locked(struct dwc_softc *sc)
{
	struct mbuf *m;
	int error, idx;
	struct dwc_hwdesc *desc;

	DWC_ASSERT_LOCKED(sc);
	for (;;) {
		idx = sc->rx_idx;
		desc = sc->rxdesc_ring + idx;
		if ((desc->desc0 & RDESC0_OWN) != 0)
			break;

		m = dwc_rxfinish_one(sc, desc, sc->rxbuf_map + idx);
		if (m == NULL) {
			wmb();
			desc->desc0 = RDESC0_OWN;
			wmb();
		} else {
			/* We cannot create a hole in the RX ring */
			error = dma1000_setup_rxbuf(sc, idx, m);
			if (error != 0)
				panic("dma1000_setup_rxbuf failed: error %d\n",
				    error);
		}
		sc->rx_idx = next_rxidx(sc, sc->rx_idx);
	}
}

/*
 * Start the DMA controller
 */
void
dma1000_start(struct dwc_softc *sc)
{
	uint32_t reg;

	DWC_ASSERT_LOCKED(sc);

	/* Initialize DMA and enable transmitters */
	reg = READ4(sc, OPERATION_MODE);
	reg |= (MODE_TSF | MODE_OSF | MODE_FUF);
	reg &= ~(MODE_RSF);
	reg |= (MODE_RTC_LEV32 << MODE_RTC_SHIFT);
	WRITE4(sc, OPERATION_MODE, reg);

	WRITE4(sc, INTERRUPT_ENABLE, INT_EN_DEFAULT);

	/* Start DMA */
	reg = READ4(sc, OPERATION_MODE);
	reg |= (MODE_ST | MODE_SR);
	WRITE4(sc, OPERATION_MODE, reg);
}

/*
 * Stop the DMA controller
 */
void
dma1000_stop(struct dwc_softc *sc)
{
	uint32_t reg;

	DWC_ASSERT_LOCKED(sc);

	/* Stop DMA TX */
	reg = READ4(sc, OPERATION_MODE);
	reg &= ~(MODE_ST);
	WRITE4(sc, OPERATION_MODE, reg);

	/* Flush TX */
	reg = READ4(sc, OPERATION_MODE);
	reg |= (MODE_FTF);
	WRITE4(sc, OPERATION_MODE, reg);

	/* Stop DMA RX */
	reg = READ4(sc, OPERATION_MODE);
	reg &= ~(MODE_SR);
	WRITE4(sc, OPERATION_MODE, reg);
}

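/*
 * Issue a software reset of the DMA engine and wait for it to complete.
 */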
int
dma1000_reset(struct dwc_softc *sc)
{
	uint32_t reg;
	int i;

	reg = READ4(sc, BUS_MODE);
	reg |= (BUS_MODE_SWR);
	WRITE4(sc, BUS_MODE, reg);

	for (i = 0; i < DMA_RESET_TIMEOUT; i++) {
		if ((READ4(sc, BUS_MODE) & BUS_MODE_SWR) == 0)
			break;
		DELAY(10);
	}
	if (i >= DMA_RESET_TIMEOUT) {
		return (ENXIO);
	}

	return (0);
}

/*
 * Create the bus_dma resources
 */
int
dma1000_init(struct dwc_softc *sc)
{
	struct mbuf *m;
	uint32_t reg;
	int error;
	int nidx;
	int idx;

	reg = BUS_MODE_USP;
	if (!sc->nopblx8)
		reg |= BUS_MODE_EIGHTXPBL;
	reg |= (sc->txpbl << BUS_MODE_PBL_SHIFT);
	reg |= (sc->rxpbl << BUS_MODE_RPBL_SHIFT);
	if (sc->fixed_burst)
		reg |= BUS_MODE_FIXEDBURST;
	if (sc->mixed_burst)
		reg |= BUS_MODE_MIXEDBURST;
	if (sc->aal)
		reg |= BUS_MODE_AAL;

	WRITE4(sc, BUS_MODE, reg);

	reg = READ4(sc, HW_FEATURE);
	if (reg & HW_FEATURE_EXT_DESCRIPTOR)
		sc->dma_ext_desc = true;

	/*
	 * DMA must be stopped while changing descriptor list addresses.
	 */
	reg = READ4(sc, OPERATION_MODE);
	reg &= ~(MODE_ST | MODE_SR);
	WRITE4(sc, OPERATION_MODE, reg);

	/*
	 * Set up TX descriptor ring, descriptors, and dma maps.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    DWC_DESC_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    TX_DESC_SIZE, 1,		/* maxsize, nsegments */
	    TX_DESC_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->txdesc_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create TX ring DMA tag.\n");
		goto out;
	}

	error = bus_dmamem_alloc(sc->txdesc_tag, (void **)&sc->txdesc_ring,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->txdesc_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate TX descriptor ring.\n");
		goto out;
	}

	error = bus_dmamap_load(sc->txdesc_tag, sc->txdesc_map,
	    sc->txdesc_ring, TX_DESC_SIZE, dwc_get1paddr,
	    &sc->txdesc_ring_paddr, 0);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not load TX descriptor ring map.\n");
		goto out;
	}

	for (idx = 0; idx < TX_DESC_COUNT; idx++) {
		nidx = next_txidx(sc, idx);
		sc->txdesc_ring[idx].addr2 = sc->txdesc_ring_paddr +
		    (nidx * sizeof(struct dwc_hwdesc));
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES*TX_MAP_MAX_SEGS,	/* maxsize */
	    TX_MAP_MAX_SEGS,		/* nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->txbuf_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create TX buffer DMA tag.\n");
		goto out;
	}

	for (idx = 0; idx < TX_MAP_COUNT; idx++) {
		error = bus_dmamap_create(sc->txbuf_tag, BUS_DMA_COHERENT,
		    &sc->txbuf_map[idx].map);
		if (error != 0) {
			device_printf(sc->dev,
			    "could not create TX buffer DMA map.\n");
			goto out;
		}
	}

	for (idx = 0; idx < TX_DESC_COUNT; idx++)
		txdesc_clear(sc, idx);

	WRITE4(sc, TX_DESCR_LIST_ADDR, sc->txdesc_ring_paddr);

	/*
	 * Set up RX descriptor ring, descriptors, dma maps, and mbufs.
	 */
	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    DWC_DESC_RING_ALIGN, 0,	/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    RX_DESC_SIZE, 1,		/* maxsize, nsegments */
	    RX_DESC_SIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rxdesc_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create RX ring DMA tag.\n");
		goto out;
	}

	error = bus_dmamem_alloc(sc->rxdesc_tag, (void **)&sc->rxdesc_ring,
	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &sc->rxdesc_map);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not allocate RX descriptor ring.\n");
		goto out;
	}

	error = bus_dmamap_load(sc->rxdesc_tag, sc->rxdesc_map,
	    sc->rxdesc_ring, RX_DESC_SIZE, dwc_get1paddr,
	    &sc->rxdesc_ring_paddr, 0);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not load RX descriptor ring map.\n");
		goto out;
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MCLBYTES, 1,		/* maxsize, nsegments */
	    MCLBYTES,			/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->rxbuf_tag);
	if (error != 0) {
		device_printf(sc->dev,
		    "could not create RX buf DMA tag.\n");
		goto out;
	}

	for (idx = 0; idx < RX_DESC_COUNT; idx++) {
		error = bus_dmamap_create(sc->rxbuf_tag, BUS_DMA_COHERENT,
		    &sc->rxbuf_map[idx].map);
		if (error != 0) {
			device_printf(sc->dev,
			    "could not create RX buffer DMA map.\n");
			goto out;
		}
		if ((m = dwc_alloc_mbufcl(sc)) == NULL) {
			device_printf(sc->dev, "could not allocate mbuf.\n");
			error = ENOMEM;
			goto out;
		}
		if ((error = dma1000_setup_rxbuf(sc, idx, m)) != 0) {
			device_printf(sc->dev,
			    "could not create new RX buffer.\n");
			goto out;
		}
	}
	WRITE4(sc, RX_DESCR_LIST_ADDR, sc->rxdesc_ring_paddr);

out:
	if (error != 0)
		return (ENXIO);

	return (0);
}

/*
 * Free the bus_dma resources
 */
void
dma1000_free(struct dwc_softc *sc)
{
	bus_dmamap_t map;
	int idx;

	/* Clean up RX DMA resources and free mbufs. */
	for (idx = 0; idx < RX_DESC_COUNT; ++idx) {
		if ((map = sc->rxbuf_map[idx].map) != NULL) {
			bus_dmamap_unload(sc->rxbuf_tag, map);
			bus_dmamap_destroy(sc->rxbuf_tag, map);
			m_freem(sc->rxbuf_map[idx].mbuf);
		}
	}
	if (sc->rxbuf_tag != NULL)
		bus_dma_tag_destroy(sc->rxbuf_tag);
	if (sc->rxdesc_map != NULL) {
		bus_dmamap_unload(sc->rxdesc_tag, sc->rxdesc_map);
		bus_dmamem_free(sc->rxdesc_tag, sc->rxdesc_ring,
		    sc->rxdesc_map);
	}
	if (sc->rxdesc_tag != NULL)
		bus_dma_tag_destroy(sc->rxdesc_tag);

	/* Clean up TX DMA resources. */
	for (idx = 0; idx < TX_DESC_COUNT; ++idx) {
		if ((map = sc->txbuf_map[idx].map) != NULL) {
			/* TX maps are already unloaded. */
			bus_dmamap_destroy(sc->txbuf_tag, map);
		}
	}
	if (sc->txbuf_tag != NULL)
		bus_dma_tag_destroy(sc->txbuf_tag);
	if (sc->txdesc_map != NULL) {
		bus_dmamap_unload(sc->txdesc_tag, sc->txdesc_map);
		bus_dmamem_free(sc->txdesc_tag, sc->txdesc_ring,
		    sc->txdesc_map);
	}
	if (sc->txdesc_tag != NULL)
		bus_dma_tag_destroy(sc->txdesc_tag);
}

/*
 * Interrupt function
 */
int
dma1000_intr(struct dwc_softc *sc)
{
	uint32_t reg;
	int rv;

	DWC_ASSERT_LOCKED(sc);

	rv = 0;
	reg = READ4(sc, DMA_STATUS);
	if (reg & DMA_STATUS_NIS) {
		if (reg & DMA_STATUS_RI)
			dma1000_rxfinish_locked(sc);

		if (reg & DMA_STATUS_TI) {
			dma1000_txfinish_locked(sc);
			dma1000_txstart(sc);
		}
	}

	if (reg & DMA_STATUS_AIS) {
		if (reg & DMA_STATUS_FBI) {
			/* Fatal bus error */
			rv = EIO;
		}
	}

	WRITE4(sc, DMA_STATUS, reg & DMA_STATUS_INTR_MASK);
	return (rv);
}
891