xref: /freebsd/sys/dev/dwc/dwc1000_dma.c (revision e125371fb6ff22d452c5ae90d3787432b8bfa0d1)
1 /*-
2  * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
3  *
4  * This software was developed by SRI International and the University of
5  * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
6  * ("CTSRD"), as part of the DARPA CRASH research programme.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/bus.h>
33 #include <sys/kernel.h>
34 #include <sys/lock.h>
35 #include <sys/malloc.h>
36 #include <sys/mbuf.h>
37 #include <sys/module.h>
38 #include <sys/mutex.h>
39 #include <sys/rman.h>
40 #include <sys/socket.h>
41 
42 #include <net/bpf.h>
43 #include <net/if.h>
44 #include <net/ethernet.h>
45 #include <net/if_dl.h>
46 #include <net/if_media.h>
47 #include <net/if_types.h>
48 #include <net/if_var.h>
49 
50 #include <machine/bus.h>
51 
52 #include <dev/clk/clk.h>
53 #include <dev/hwreset/hwreset.h>
54 
55 #include <dev/ofw/ofw_bus.h>
56 #include <dev/ofw/ofw_bus_subr.h>
57 
58 #include <dev/dwc/if_dwcvar.h>
59 #include <dev/dwc/dwc1000_reg.h>
60 #include <dev/dwc/dwc1000_dma.h>
61 
62 #define	WATCHDOG_TIMEOUT_SECS	5
63 #define	DMA_RESET_TIMEOUT	100
64 
65 /* TX descriptors - TDESC0 is almost unified */
66 #define	TDESC0_OWN		(1U << 31)
67 #define	TDESC0_IHE		(1U << 16)	/* IP Header Error */
68 #define	TDESC0_ES		(1U << 15)	/* Error Summary */
69 #define	TDESC0_JT		(1U << 14)	/* Jabber Timeout */
70 #define	TDESC0_FF		(1U << 13)	/* Frame Flushed */
71 #define	TDESC0_PCE		(1U << 12)	/* Payload Checksum Error */
72 #define	TDESC0_LOC		(1U << 11)	/* Loss of Carrier */
73 #define	TDESC0_NC		(1U << 10)	/* No Carrier */
74 #define	TDESC0_LC		(1U <<  9)	/* Late Collision */
75 #define	TDESC0_EC		(1U <<  8)	/* Excessive Collision */
76 #define	TDESC0_VF		(1U <<  7)	/* VLAN Frame */
77 #define	TDESC0_CC_MASK		0xf
78 #define	TDESC0_CC_SHIFT		3		/* Collision Count */
79 #define	TDESC0_ED		(1U <<  2)	/* Excessive Deferral */
80 #define	TDESC0_UF		(1U <<  1)	/* Underflow Error */
81 #define	TDESC0_DB		(1U <<  0)	/* Deferred Bit */
82 /* TX descriptors - TDESC0 extended format only */
83 #define	ETDESC0_IC		(1U << 30)	/* Interrupt on Completion */
84 #define	ETDESC0_LS		(1U << 29)	/* Last Segment */
85 #define	ETDESC0_FS		(1U << 28)	/* First Segment */
86 #define	ETDESC0_DC		(1U << 27)	/* Disable CRC */
87 #define	ETDESC0_DP		(1U << 26)	/* Disable Padding */
88 #define	ETDESC0_CIC_NONE	(0U << 22)	/* Checksum Insertion Control */
89 #define	ETDESC0_CIC_HDR		(1U << 22)
90 #define	ETDESC0_CIC_SEG 	(2U << 22)
91 #define	ETDESC0_CIC_FULL	(3U << 22)
92 #define	ETDESC0_TER		(1U << 21)	/* Transmit End of Ring */
93 #define	ETDESC0_TCH		(1U << 20)	/* Second Address Chained */
94 
95 /* TX descriptors - TDESC1 normal format */
96 #define	NTDESC1_IC		(1U << 31)	/* Interrupt on Completion */
97 #define	NTDESC1_LS		(1U << 30)	/* Last Segment */
98 #define	NTDESC1_FS		(1U << 29)	/* First Segment */
99 #define	NTDESC1_CIC_NONE	(0U << 27)	/* Checksum Insertion Control */
100 #define	NTDESC1_CIC_HDR		(1U << 27)
101 #define	NTDESC1_CIC_SEG 	(2U << 27)
102 #define	NTDESC1_CIC_FULL	(3U << 27)
103 #define	NTDESC1_DC		(1U << 26)	/* Disable CRC */
104 #define	NTDESC1_TER		(1U << 25)	/* Transmit End of Ring */
105 #define	NTDESC1_TCH		(1U << 24)	/* Second Address Chained */
106 /* TX descriptors - TDESC1 extended format */
107 #define	ETDESC1_DP		(1U << 23)	/* Disable Padding */
108 #define	ETDESC1_TBS2_MASK	0x7ff
109 #define	ETDESC1_TBS2_SHIFT	11		/* Transmit Buffer 2 Size */
110 #define	ETDESC1_TBS1_MASK	0x7ff
111 #define	ETDESC1_TBS1_SHIFT	0		/* Transmit Buffer 1 Size */
112 
113 /* RX descriptor - RDESC0 is unified */
114 #define	RDESC0_OWN		(1U << 31)
115 #define	RDESC0_AFM		(1U << 30)	/* Dest. Address Filter Fail */
116 #define	RDESC0_FL_MASK		0x3fff
117 #define	RDESC0_FL_SHIFT		16		/* Frame Length */
118 #define	RDESC0_ES		(1U << 15)	/* Error Summary */
119 #define	RDESC0_DE		(1U << 14)	/* Descriptor Error */
120 #define	RDESC0_SAF		(1U << 13)	/* Source Address Filter Fail */
121 #define	RDESC0_LE		(1U << 12)	/* Length Error */
122 #define	RDESC0_OE		(1U << 11)	/* Overflow Error */
123 #define	RDESC0_VLAN		(1U << 10)	/* VLAN Tag */
124 #define	RDESC0_FS		(1U <<  9)	/* First Descriptor */
125 #define	RDESC0_LS		(1U <<  8)	/* Last Descriptor */
126 #define	RDESC0_ICE		(1U <<  7)	/* IPC Checksum Error */
127 #define	RDESC0_LC		(1U <<  6)	/* Late Collision */
128 #define	RDESC0_FT		(1U <<  5)	/* Frame Type */
129 #define	RDESC0_RWT		(1U <<  4)	/* Receive Watchdog Timeout */
130 #define	RDESC0_RE		(1U <<  3)	/* Receive Error */
131 #define	RDESC0_DBE		(1U <<  2)	/* Dribble Bit Error */
132 #define	RDESC0_CE		(1U <<  1)	/* CRC Error */
133 #define	RDESC0_PCE		(1U <<  0)	/* Payload Checksum Error */
134 #define	RDESC0_RXMA		(1U <<  0)	/* Rx MAC Address */
135 
136 /* RX descriptors - RDESC1 normal format */
137 #define	NRDESC1_DIC		(1U << 31)	/* Disable Intr on Completion */
138 #define	NRDESC1_RER		(1U << 25)	/* Receive End of Ring */
139 #define	NRDESC1_RCH		(1U << 24)	/* Second Address Chained */
140 #define	NRDESC1_RBS2_MASK	0x7ff
141 #define	NRDESC1_RBS2_SHIFT	11		/* Receive Buffer 2 Size */
142 #define	NRDESC1_RBS1_MASK	0x7ff
143 #define	NRDESC1_RBS1_SHIFT	0		/* Receive Buffer 1 Size */
144 
145 /* RX descriptors - RDESC1 enhanced format */
146 #define	ERDESC1_DIC		(1U << 31)	/* Disable Intr on Completion */
147 #define	ERDESC1_RBS2_MASK	0x7ffff
148 #define	ERDESC1_RBS2_SHIFT	16		/* Receive Buffer 2 Size */
149 #define	ERDESC1_RER		(1U << 15)	/* Receive End of Ring */
150 #define	ERDESC1_RCH		(1U << 14)	/* Second Address Chained */
151 #define	ERDESC1_RBS1_MASK	0x7ffff
152 #define	ERDESC1_RBS1_SHIFT	0		/* Receive Buffer 1 Size */
153 
154 /*
155  * The hardware imposes alignment restrictions on various objects involved in
156  * DMA transfers.  These values are expressed in bytes (not bits).
157  */
158 #define	DWC_DESC_RING_ALIGN	2048
159 
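/*
 * Ring index helpers: advance a descriptor index with wrap-around at the
 * ring size (TX_DESC_COUNT / RX_DESC_COUNT, defined elsewhere, likely in
 * if_dwcvar.h).  The softc argument is currently unused and is kept only
 * for symmetry with the other per-ring helpers.
 */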
160 static inline uint32_t
161 next_txidx(struct dwc_softc *sc, uint32_t curidx)
162 {
163 
164 	return ((curidx + 1) % TX_DESC_COUNT);
165 }
166 
167 static inline uint32_t
168 next_rxidx(struct dwc_softc *sc, uint32_t curidx)
169 {
170 
171 	return ((curidx + 1) % RX_DESC_COUNT);
172 }
173 
174 static void
175 dwc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
176 {
177 
178 	if (error != 0)
179 		return;
180 	*(bus_addr_t *)arg = segs[0].ds_addr;
181 }
182 
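/*
 * TX descriptor helpers.  txdesc_clear() returns a descriptor to the
 * driver and decrements the in-use count.  txdesc_setup() fills in the
 * buffer address, length and flags first, issues a write barrier, and only
 * then sets TDESC0_OWN, so the DMA engine never observes a partially
 * written descriptor.
 */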
183 inline static void
184 txdesc_clear(struct dwc_softc *sc, int idx)
185 {
186 
187 	sc->tx_desccount--;
188 	sc->txdesc_ring[idx].addr1 = (uint32_t)(0);
189 	sc->txdesc_ring[idx].desc0 = 0;
190 	sc->txdesc_ring[idx].desc1 = 0;
191 }
192 
193 inline static void
194 txdesc_setup(struct dwc_softc *sc, int idx, bus_addr_t paddr,
195   uint32_t len, uint32_t flags, bool first, bool last)
196 {
197 	uint32_t desc0, desc1;
198 
199 	if (!sc->dma_ext_desc) {
200 		desc0 = 0;
201 		desc1 = NTDESC1_TCH | len | flags;
202 		if (first)
203 			desc1 |=  NTDESC1_FS;
204 		if (last)
205 			desc1 |= NTDESC1_LS | NTDESC1_IC;
206 	} else {
207 		desc0 = ETDESC0_TCH | flags;
208 		if (first)
209 			desc0 |= ETDESC0_FS;
210 		if (last)
211 			desc0 |= ETDESC0_LS | ETDESC0_IC;
212 		desc1 = len;
213 	}
214 	++sc->tx_desccount;
215 	sc->txdesc_ring[idx].addr1 = (uint32_t)(paddr);
216 	sc->txdesc_ring[idx].desc0 = desc0;
217 	sc->txdesc_ring[idx].desc1 = desc1;
218 	wmb();
219 	sc->txdesc_ring[idx].desc0 |= TDESC0_OWN;
220 	wmb();
221 }
222 
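/*
 * rxdesc_setup() re-arms one RX descriptor: addr2 is chained to the
 * physical address of the next descriptor in the ring, the buffer size is
 * clamped to the RBS1 field width, and RDESC0_OWN is set last (after a
 * write barrier) to hand the descriptor back to the hardware.  The index
 * of the next descriptor is returned.
 */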
223 inline static uint32_t
224 rxdesc_setup(struct dwc_softc *sc, int idx, bus_addr_t paddr)
225 {
226 	uint32_t nidx;
227 
228 	sc->rxdesc_ring[idx].addr1 = (uint32_t)paddr;
229 	nidx = next_rxidx(sc, idx);
230 	sc->rxdesc_ring[idx].addr2 = sc->rxdesc_ring_paddr +
231 	    (nidx * sizeof(struct dwc_hwdesc));
232 	if (!sc->dma_ext_desc)
233 		sc->rxdesc_ring[idx].desc1 = NRDESC1_RCH |
234 		    MIN(MCLBYTES, NRDESC1_RBS1_MASK);
235 	else
236 		sc->rxdesc_ring[idx].desc1 = ERDESC1_RCH |
237 		    MIN(MCLBYTES, ERDESC1_RBS1_MASK);
238 
239 	wmb();
240 	sc->rxdesc_ring[idx].desc0 = RDESC0_OWN;
241 	wmb();
242 	return (nidx);
243 }
244 
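/*
 * Map an outgoing mbuf chain for DMA and build one TX descriptor per
 * segment; checksum-insertion flags are applied to the first descriptor
 * only.  On EFBIG the chain is defragmented once and re-loaded; if the
 * ring cannot hold all segments, the map is unloaded and ENOMEM is
 * returned so the caller can retry later.  A rough calling pattern
 * (a sketch mirroring dma1000_txstart() below):
 *
 *	if (dma1000_setup_txbuf(sc, sc->tx_map_head, &m) != 0) {
 *		if_sendq_prepend(sc->ifp, m);
 *		return;
 *	}
 *	sc->tx_map_head = next_txidx(sc, sc->tx_map_head);
 *	sc->tx_mapcount++;
 */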
245 int
246 dma1000_setup_txbuf(struct dwc_softc *sc, int idx, struct mbuf **mp)
247 {
248 	struct bus_dma_segment segs[TX_MAP_MAX_SEGS];
249 	int error, nsegs;
250 	struct mbuf *m;
251 	uint32_t flags = 0;
252 	int i;
253 	int last;
254 
255 	error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag, sc->txbuf_map[idx].map,
256 	    *mp, segs, &nsegs, 0);
257 	if (error == EFBIG) {
258 		/*
259 		 * The map may be partially mapped from the first call.
260 		 * Make sure to reset it.
261 		 */
262 		bus_dmamap_unload(sc->txbuf_tag, sc->txbuf_map[idx].map);
263 		if ((m = m_defrag(*mp, M_NOWAIT)) == NULL)
264 			return (ENOMEM);
265 		*mp = m;
266 		error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag, sc->txbuf_map[idx].map,
267 		    *mp, segs, &nsegs, 0);
268 	}
269 	if (error != 0)
270 		return (ENOMEM);
271 
272 	if (sc->tx_desccount + nsegs > TX_DESC_COUNT) {
273 		bus_dmamap_unload(sc->txbuf_tag, sc->txbuf_map[idx].map);
274 		return (ENOMEM);
275 	}
276 
277 	m = *mp;
278 
279 	if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) {
280 		if ((m->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) != 0) {
281 			if (!sc->dma_ext_desc)
282 				flags = NTDESC1_CIC_FULL;
283 			else
284 				flags = ETDESC0_CIC_FULL;
285 		} else {
286 			if (!sc->dma_ext_desc)
287 				flags = NTDESC1_CIC_HDR;
288 			else
289 				flags = ETDESC0_CIC_HDR;
290 		}
291 	}
292 
293 	bus_dmamap_sync(sc->txbuf_tag, sc->txbuf_map[idx].map,
294 	    BUS_DMASYNC_PREWRITE);
295 
296 	sc->txbuf_map[idx].mbuf = m;
297 
298 	for (i = 0; i < nsegs; i++) {
299 		txdesc_setup(sc, sc->tx_desc_head,
300 		    segs[i].ds_addr, segs[i].ds_len,
301 		    (i == 0) ? flags : 0, /* only first desc needs flags */
302 		    (i == 0),
303 		    (i == nsegs - 1));
304 		last = sc->tx_desc_head;
305 		sc->tx_desc_head = next_txidx(sc, sc->tx_desc_head);
306 	}
307 
308 	sc->txbuf_map[idx].last_desc_idx = last;
309 
310 	return (0);
311 }
312 
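/*
 * Load a fresh receive mbuf into the given RX slot.  The data pointer is
 * bumped by ETHER_ALIGN so the IP header ends up 32-bit aligned, the
 * single resulting DMA segment is synced for reading, and the descriptor
 * is handed back to the hardware via rxdesc_setup().
 */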
313 static int
314 dma1000_setup_rxbuf(struct dwc_softc *sc, int idx, struct mbuf *m)
315 {
316 	struct bus_dma_segment seg;
317 	int error, nsegs;
318 
319 	m_adj(m, ETHER_ALIGN);
320 
321 	error = bus_dmamap_load_mbuf_sg(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
322 	    m, &seg, &nsegs, 0);
323 	if (error != 0)
324 		return (error);
325 
326 	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
327 
328 	bus_dmamap_sync(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
329 	    BUS_DMASYNC_PREREAD);
330 
331 	sc->rxbuf_map[idx].mbuf = m;
332 	rxdesc_setup(sc, idx, seg.ds_addr);
333 
334 	return (0);
335 }
336 
337 static struct mbuf *
338 dwc_alloc_mbufcl(struct dwc_softc *sc)
339 {
340 	struct mbuf *m;
341 
342 	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
343 	if (m != NULL)
344 		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
345 
346 	return (m);
347 }
348 
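/*
 * Complete one received frame.  The descriptor must carry both FS and LS
 * (multi-descriptor frames are not supported) and a plausible length;
 * otherwise NULL is returned and the caller recycles the old mbuf.  On
 * success a replacement cluster is allocated, hardware checksum results
 * are translated into mbuf csum flags, the trailing FCS is trimmed, and
 * the frame is passed to if_input() with the softc lock dropped.  The
 * replacement mbuf is returned so the caller can refill the slot.
 */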
349 static struct mbuf *
350 dwc_rxfinish_one(struct dwc_softc *sc, struct dwc_hwdesc *desc,
351     struct dwc_bufmap *map)
352 {
353 	if_t ifp;
354 	struct mbuf *m, *m0;
355 	int len;
356 	uint32_t rdesc0;
357 
358 	m = map->mbuf;
359 	ifp = sc->ifp;
360 	rdesc0 = desc->desc0;
361 
362 	if ((rdesc0 & (RDESC0_FS | RDESC0_LS)) !=
363 		    (RDESC0_FS | RDESC0_LS)) {
364 		/*
365 		 * Something went very wrong: the whole packet should have been
366 		 * received in a single descriptor.  Report the problem.
367 		 */
368 		device_printf(sc->dev,
369 		    "%s: RX descriptor without FIRST and LAST bit set: 0x%08X\n",
370 		    __func__, rdesc0);
371 		return (NULL);
372 	}
373 
374 	len = (rdesc0 >> RDESC0_FL_SHIFT) & RDESC0_FL_MASK;
375 	if (len < 64) {
376 		/*
377 		 * Length is invalid; recycle the old mbuf.
378 		 * This is probably an impossible case.
379 		 */
380 		return (NULL);
381 	}
382 
383 	/* Allocate new buffer */
384 	m0 = dwc_alloc_mbufcl(sc);
385 	if (m0 == NULL) {
386 		/* no new mbuf available, recycle old */
387 		if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, 1);
388 		return (NULL);
389 	}
390 	/* Sync the DMA map for the newly received packet */
391 	bus_dmamap_sync(sc->rxbuf_tag, map->map, BUS_DMASYNC_POSTREAD);
392 	bus_dmamap_unload(sc->rxbuf_tag, map->map);
393 
394 	/* Received packet is valid, process it */
395 	m->m_pkthdr.rcvif = ifp;
396 	m->m_pkthdr.len = len;
397 	m->m_len = len;
398 	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
399 
400 	if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0 &&
401 	  (rdesc0 & RDESC0_FT) != 0) {
402 		m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
403 		if ((rdesc0 & RDESC0_ICE) == 0)
404 			m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
405 		if ((rdesc0 & RDESC0_PCE) == 0) {
406 			m->m_pkthdr.csum_flags |=
407 				CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
408 			m->m_pkthdr.csum_data = 0xffff;
409 		}
410 	}
411 
412 	/* Remove trailing FCS */
413 	m_adj(m, -ETHER_CRC_LEN);
414 
415 	DWC_UNLOCK(sc);
416 	if_input(ifp, m);
417 	DWC_LOCK(sc);
418 	return (m0);
419 }
420 
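/*
 * Reclaim completed transmissions.  Each TX map covers one mbuf and a run
 * of descriptors; the map is only torn down once the hardware has cleared
 * TDESC0_OWN on every descriptor it covers.  The mbuf is then freed, the
 * descriptors are cleared, IFF_DRV_OACTIVE is lifted and the output packet
 * counter is bumped.  The TX watchdog is disarmed once the ring is empty.
 */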
421 void
422 dma1000_txfinish_locked(struct dwc_softc *sc)
423 {
424 	struct dwc_bufmap *bmap;
425 	struct dwc_hwdesc *desc;
426 	if_t ifp;
427 	int idx, last_idx;
428 	bool map_finished;
429 
430 	DWC_ASSERT_LOCKED(sc);
431 
432 	ifp = sc->ifp;
433 	/* check if all descriptors of the map are done */
434 	while (sc->tx_map_tail != sc->tx_map_head) {
435 		map_finished = true;
436 		bmap = &sc->txbuf_map[sc->tx_map_tail];
437 		idx = sc->tx_desc_tail;
438 		last_idx = next_txidx(sc, bmap->last_desc_idx);
439 		while (idx != last_idx) {
440 			desc = &sc->txdesc_ring[idx];
441 			if ((desc->desc0 & TDESC0_OWN) != 0) {
442 				map_finished = false;
443 				break;
444 			}
445 			idx = next_txidx(sc, idx);
446 		}
447 
448 		if (!map_finished)
449 			break;
450 		bus_dmamap_sync(sc->txbuf_tag, bmap->map,
451 		    BUS_DMASYNC_POSTWRITE);
452 		bus_dmamap_unload(sc->txbuf_tag, bmap->map);
453 		m_freem(bmap->mbuf);
454 		bmap->mbuf = NULL;
455 		sc->tx_mapcount--;
456 		while (sc->tx_desc_tail != last_idx) {
457 			txdesc_clear(sc, sc->tx_desc_tail);
458 			sc->tx_desc_tail = next_txidx(sc, sc->tx_desc_tail);
459 		}
460 		sc->tx_map_tail = next_txidx(sc, sc->tx_map_tail);
461 		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
462 		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
463 	}
464 
465 	/* If there are no buffers outstanding, muzzle the watchdog. */
466 	if (sc->tx_desc_tail == sc->tx_desc_head) {
467 		sc->tx_watchdog_count = 0;
468 	}
469 }
470 
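/*
 * Pull packets off the interface send queue while descriptor and map slots
 * remain, map each with dma1000_setup_txbuf(), and finally write the
 * transmit poll demand register so the DMA engine re-reads the ring.  The
 * TX watchdog is armed whenever at least one packet was enqueued.
 */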
471 void
472 dma1000_txstart(struct dwc_softc *sc)
473 {
474 	int enqueued;
475 	struct mbuf *m;
476 
477 	enqueued = 0;
478 
479 	for (;;) {
480 		if (sc->tx_desccount > (TX_DESC_COUNT - TX_MAP_MAX_SEGS  + 1)) {
481 			if_setdrvflagbits(sc->ifp, IFF_DRV_OACTIVE, 0);
482 			break;
483 		}
484 
485 		if (sc->tx_mapcount == (TX_MAP_COUNT - 1)) {
486 			if_setdrvflagbits(sc->ifp, IFF_DRV_OACTIVE, 0);
487 			break;
488 		}
489 
490 		m = if_dequeue(sc->ifp);
491 		if (m == NULL)
492 			break;
493 		if (dma1000_setup_txbuf(sc, sc->tx_map_head, &m) != 0) {
494 			if_sendq_prepend(sc->ifp, m);
495 			if_setdrvflagbits(sc->ifp, IFF_DRV_OACTIVE, 0);
496 			break;
497 		}
498 		bpf_mtap_if(sc->ifp, m);
499 		sc->tx_map_head = next_txidx(sc, sc->tx_map_head);
500 		sc->tx_mapcount++;
501 		++enqueued;
502 	}
503 
504 	if (enqueued != 0) {
505 		WRITE4(sc, TRANSMIT_POLL_DEMAND, 0x1);
506 		sc->tx_watchdog_count = WATCHDOG_TIMEOUT_SECS;
507 	}
508 }
509 
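/*
 * Drain the RX ring: walk descriptors until one still owned by the
 * hardware is found.  Frames that could not be processed have their
 * descriptor re-armed with the old mbuf; otherwise the slot is refilled
 * with the replacement cluster returned by dwc_rxfinish_one().
 */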
510 void
511 dma1000_rxfinish_locked(struct dwc_softc *sc)
512 {
513 	struct mbuf *m;
514 	int error, idx;
515 	struct dwc_hwdesc *desc;
516 
517 	DWC_ASSERT_LOCKED(sc);
518 	for (;;) {
519 		idx = sc->rx_idx;
520 		desc = sc->rxdesc_ring + idx;
521 		if ((desc->desc0 & RDESC0_OWN) != 0)
522 			break;
523 
524 		m = dwc_rxfinish_one(sc, desc, sc->rxbuf_map + idx);
525 		if (m == NULL) {
526 			wmb();
527 			desc->desc0 = RDESC0_OWN;
528 			wmb();
529 		} else {
530 			/* We cannot create a hole in the RX ring */
531 			error = dma1000_setup_rxbuf(sc, idx, m);
532 			if (error != 0)
533 				panic("dma1000_setup_rxbuf failed: error %d\n",
534 				    error);
535 
536 		}
537 		sc->rx_idx = next_rxidx(sc, sc->rx_idx);
538 	}
539 }
540 
541 /*
542  * Start the DMA controller
543  */
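/*
 * The OPERATION_MODE bits programmed here follow the usual DesignWare GMAC
 * layout (see dwc1000_reg.h): TX store-and-forward (TSF), operate on
 * second frame (OSF) and forward undersized good frames (FUF) are enabled,
 * RX store-and-forward (RSF) is disabled, and the RX FIFO threshold is set
 * to level 32 before interrupts are unmasked and both DMA engines started.
 */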
544 void
545 dma1000_start(struct dwc_softc *sc)
546 {
547 	uint32_t reg;
548 
549 	DWC_ASSERT_LOCKED(sc);
550 
551 	/* Initialize DMA and enable transmitters */
552 	reg = READ4(sc, OPERATION_MODE);
553 	reg |= (MODE_TSF | MODE_OSF | MODE_FUF);
554 	reg &= ~(MODE_RSF);
555 	reg |= (MODE_RTC_LEV32 << MODE_RTC_SHIFT);
556 	WRITE4(sc, OPERATION_MODE, reg);
557 
558 	WRITE4(sc, INTERRUPT_ENABLE, INT_EN_DEFAULT);
559 
560 	/* Start DMA */
561 	reg = READ4(sc, OPERATION_MODE);
562 	reg |= (MODE_ST | MODE_SR);
563 	WRITE4(sc, OPERATION_MODE, reg);
564 }
565 
566 /*
567  * Stop the DMA controller
568  */
569 void
570 dma1000_stop(struct dwc_softc *sc)
571 {
572 	uint32_t reg;
573 
574 	DWC_ASSERT_LOCKED(sc);
575 
576 	/* Stop DMA TX */
577 	reg = READ4(sc, OPERATION_MODE);
578 	reg &= ~(MODE_ST);
579 	WRITE4(sc, OPERATION_MODE, reg);
580 
581 	/* Flush TX */
582 	reg = READ4(sc, OPERATION_MODE);
583 	reg |= (MODE_FTF);
584 	WRITE4(sc, OPERATION_MODE, reg);
585 
586 	/* Stop DMA RX */
587 	reg = READ4(sc, OPERATION_MODE);
588 	reg &= ~(MODE_SR);
589 	WRITE4(sc, OPERATION_MODE, reg);
590 }
591 
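/*
 * Issue a software reset of the DMA engine and poll for the self-clearing
 * BUS_MODE_SWR bit, waiting 10us per iteration for up to DMA_RESET_TIMEOUT
 * iterations.  ENXIO is returned if the bit never clears, which can happen
 * when the controller's clocks are not running.
 */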
592 int
593 dma1000_reset(struct dwc_softc *sc)
594 {
595 	uint32_t reg;
596 	int i;
597 
598 	reg = READ4(sc, BUS_MODE);
599 	reg |= (BUS_MODE_SWR);
600 	WRITE4(sc, BUS_MODE, reg);
601 
602 	for (i = 0; i < DMA_RESET_TIMEOUT; i++) {
603 		if ((READ4(sc, BUS_MODE) & BUS_MODE_SWR) == 0)
604 			break;
605 		DELAY(10);
606 	}
607 	if (i >= DMA_RESET_TIMEOUT) {
608 		return (ENXIO);
609 	}
610 
611 	return (0);
612 }
613 
614 /*
615  * Create the bus_dma resources
616  */
617 int
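/*
 * Besides creating the tags, maps and descriptor rings, this also programs
 * BUS_MODE from the burst/PBL knobs in the softc (nopblx8, txpbl, rxpbl,
 * fixed_burst, mixed_burst, aal), which are presumably filled in by the
 * attach code from the device tree, and detects extended-descriptor
 * support from HW_FEATURE.
 */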
618 dma1000_init(struct dwc_softc *sc)
619 {
620 	struct mbuf *m;
621 	uint32_t reg;
622 	int error;
623 	int nidx;
624 	int idx;
625 
626 	reg = BUS_MODE_USP;
627 	if (!sc->nopblx8)
628 		reg |= BUS_MODE_EIGHTXPBL;
629 	reg |= (sc->txpbl << BUS_MODE_PBL_SHIFT);
630 	reg |= (sc->rxpbl << BUS_MODE_RPBL_SHIFT);
631 	if (sc->fixed_burst)
632 		reg |= BUS_MODE_FIXEDBURST;
633 	if (sc->mixed_burst)
634 		reg |= BUS_MODE_MIXEDBURST;
635 	if (sc->aal)
636 		reg |= BUS_MODE_AAL;
637 
638 	WRITE4(sc, BUS_MODE, reg);
639 
640 	reg = READ4(sc, HW_FEATURE);
641 	if (reg & HW_FEATURE_EXT_DESCRIPTOR)
642 		sc->dma_ext_desc = true;
643 
644 	/*
645 	 * DMA must be stopped while changing descriptor list addresses.
646 	 */
647 	reg = READ4(sc, OPERATION_MODE);
648 	reg &= ~(MODE_ST | MODE_SR);
649 	WRITE4(sc, OPERATION_MODE, reg);
650 
651 	/*
652 	 * Set up TX descriptor ring, descriptors, and dma maps.
653 	 */
654 	error = bus_dma_tag_create(
655 	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
656 	    DWC_DESC_RING_ALIGN, 0,	/* alignment, boundary */
657 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
658 	    BUS_SPACE_MAXADDR,		/* highaddr */
659 	    NULL, NULL,			/* filter, filterarg */
660 	    TX_DESC_SIZE, 1, 		/* maxsize, nsegments */
661 	    TX_DESC_SIZE,		/* maxsegsize */
662 	    0,				/* flags */
663 	    NULL, NULL,			/* lockfunc, lockarg */
664 	    &sc->txdesc_tag);
665 	if (error != 0) {
666 		device_printf(sc->dev,
667 		    "could not create TX ring DMA tag.\n");
668 		goto out;
669 	}
670 
671 	error = bus_dmamem_alloc(sc->txdesc_tag, (void**)&sc->txdesc_ring,
672 	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
673 	    &sc->txdesc_map);
674 	if (error != 0) {
675 		device_printf(sc->dev,
676 		    "could not allocate TX descriptor ring.\n");
677 		goto out;
678 	}
679 
680 	error = bus_dmamap_load(sc->txdesc_tag, sc->txdesc_map,
681 	    sc->txdesc_ring, TX_DESC_SIZE, dwc_get1paddr,
682 	    &sc->txdesc_ring_paddr, 0);
683 	if (error != 0) {
684 		device_printf(sc->dev,
685 		    "could not load TX descriptor ring map.\n");
686 		goto out;
687 	}
688 
689 	for (idx = 0; idx < TX_DESC_COUNT; idx++) {
690 		nidx = next_txidx(sc, idx);
691 		sc->txdesc_ring[idx].addr2 = sc->txdesc_ring_paddr +
692 		    (nidx * sizeof(struct dwc_hwdesc));
693 	}
694 
695 	error = bus_dma_tag_create(
696 	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
697 	    1, 0,			/* alignment, boundary */
698 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
699 	    BUS_SPACE_MAXADDR,		/* highaddr */
700 	    NULL, NULL,			/* filter, filterarg */
701 	    MCLBYTES*TX_MAP_MAX_SEGS,	/* maxsize */
702 	    TX_MAP_MAX_SEGS,		/* nsegments */
703 	    MCLBYTES,			/* maxsegsize */
704 	    0,				/* flags */
705 	    NULL, NULL,			/* lockfunc, lockarg */
706 	    &sc->txbuf_tag);
707 	if (error != 0) {
708 		device_printf(sc->dev,
709 		    "could not create TX ring DMA tag.\n");
710 		goto out;
711 	}
712 
713 	for (idx = 0; idx < TX_MAP_COUNT; idx++) {
714 		error = bus_dmamap_create(sc->txbuf_tag, BUS_DMA_COHERENT,
715 		    &sc->txbuf_map[idx].map);
716 		if (error != 0) {
717 			device_printf(sc->dev,
718 			    "could not create TX buffer DMA map.\n");
719 			goto out;
720 		}
721 	}
722 
723 	for (idx = 0; idx < TX_DESC_COUNT; idx++)
724 		txdesc_clear(sc, idx);
725 
726 	WRITE4(sc, TX_DESCR_LIST_ADDR, sc->txdesc_ring_paddr);
727 
728 	/*
729 	 * Set up RX descriptor ring, descriptors, dma maps, and mbufs.
730 	 */
731 	error = bus_dma_tag_create(
732 	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
733 	    DWC_DESC_RING_ALIGN, 0,	/* alignment, boundary */
734 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
735 	    BUS_SPACE_MAXADDR,		/* highaddr */
736 	    NULL, NULL,			/* filter, filterarg */
737 	    RX_DESC_SIZE, 1, 		/* maxsize, nsegments */
738 	    RX_DESC_SIZE,		/* maxsegsize */
739 	    0,				/* flags */
740 	    NULL, NULL,			/* lockfunc, lockarg */
741 	    &sc->rxdesc_tag);
742 	if (error != 0) {
743 		device_printf(sc->dev,
744 		    "could not create RX ring DMA tag.\n");
745 		goto out;
746 	}
747 
748 	error = bus_dmamem_alloc(sc->rxdesc_tag, (void **)&sc->rxdesc_ring,
749 	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
750 	    &sc->rxdesc_map);
751 	if (error != 0) {
752 		device_printf(sc->dev,
753 		    "could not allocate RX descriptor ring.\n");
754 		goto out;
755 	}
756 
757 	error = bus_dmamap_load(sc->rxdesc_tag, sc->rxdesc_map,
758 	    sc->rxdesc_ring, RX_DESC_SIZE, dwc_get1paddr,
759 	    &sc->rxdesc_ring_paddr, 0);
760 	if (error != 0) {
761 		device_printf(sc->dev,
762 		    "could not load RX descriptor ring map.\n");
763 		goto out;
764 	}
765 
766 	error = bus_dma_tag_create(
767 	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
768 	    1, 0,			/* alignment, boundary */
769 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
770 	    BUS_SPACE_MAXADDR,		/* highaddr */
771 	    NULL, NULL,			/* filter, filterarg */
772 	    MCLBYTES, 1, 		/* maxsize, nsegments */
773 	    MCLBYTES,			/* maxsegsize */
774 	    0,				/* flags */
775 	    NULL, NULL,			/* lockfunc, lockarg */
776 	    &sc->rxbuf_tag);
777 	if (error != 0) {
778 		device_printf(sc->dev,
779 		    "could not create RX buf DMA tag.\n");
780 		goto out;
781 	}
782 
783 	for (idx = 0; idx < RX_DESC_COUNT; idx++) {
784 		error = bus_dmamap_create(sc->rxbuf_tag, BUS_DMA_COHERENT,
785 		    &sc->rxbuf_map[idx].map);
786 		if (error != 0) {
787 			device_printf(sc->dev,
788 			    "could not create RX buffer DMA map.\n");
789 			goto out;
790 		}
791 		if ((m = dwc_alloc_mbufcl(sc)) == NULL) {
792 			device_printf(sc->dev, "Could not alloc mbuf\n");
793 			error = ENOMEM;
794 			goto out;
795 		}
796 		if ((error = dma1000_setup_rxbuf(sc, idx, m)) != 0) {
797 			device_printf(sc->dev,
798 			    "could not create new RX buffer.\n");
799 			goto out;
800 		}
801 	}
802 	WRITE4(sc, RX_DESCR_LIST_ADDR, sc->rxdesc_ring_paddr);
803 
804 out:
805 	if (error != 0)
806 		return (ENXIO);
807 
808 	return (0);
809 }
810 
811 /*
812  * Free the bus_dma resources
813  */
814 void
815 dma1000_free(struct dwc_softc *sc)
816 {
817 	bus_dmamap_t map;
818 	int idx;
819 
820 	/* Clean up RX DMA resources and free mbufs. */
821 	for (idx = 0; idx < RX_DESC_COUNT; ++idx) {
822 		if ((map = sc->rxbuf_map[idx].map) != NULL) {
823 			bus_dmamap_unload(sc->rxbuf_tag, map);
824 			bus_dmamap_destroy(sc->rxbuf_tag, map);
825 			m_freem(sc->rxbuf_map[idx].mbuf);
826 		}
827 	}
828 	if (sc->rxbuf_tag != NULL)
829 		bus_dma_tag_destroy(sc->rxbuf_tag);
830 	if (sc->rxdesc_map != NULL) {
831 		bus_dmamap_unload(sc->rxdesc_tag, sc->rxdesc_map);
832 		bus_dmamem_free(sc->rxdesc_tag, sc->rxdesc_ring,
833 		    sc->rxdesc_map);
834 	}
835 	if (sc->rxdesc_tag != NULL)
836 		bus_dma_tag_destroy(sc->rxdesc_tag);
837 
838 	/* Clean up TX DMA resources. */
839 	for (idx = 0; idx < TX_DESC_COUNT; ++idx) {
840 		if ((map = sc->txbuf_map[idx].map) != NULL) {
841 			/* TX maps are already unloaded. */
842 			bus_dmamap_destroy(sc->txbuf_tag, map);
843 		}
844 	}
845 	if (sc->txbuf_tag != NULL)
846 		bus_dma_tag_destroy(sc->txbuf_tag);
847 	if (sc->txdesc_map != NULL) {
848 		bus_dmamap_unload(sc->txdesc_tag, sc->txdesc_map);
849 		bus_dmamem_free(sc->txdesc_tag, sc->txdesc_ring,
850 		    sc->txdesc_map);
851 	}
852 	if (sc->txdesc_tag != NULL)
853 		bus_dma_tag_destroy(sc->txdesc_tag);
854 }
855 
856 /*
857  * Interrupt function
858  */
859 
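/*
 * Service the DMA interrupt status.  RX and TX completion are handled
 * inline; a fatal bus error is reported back as EIO so the caller can
 * decide to reinitialize the controller.  The interrupt bits read from
 * DMA_STATUS are acknowledged by writing them back.
 */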
860 int
861 dma1000_intr(struct dwc_softc *sc)
862 {
863 	uint32_t reg;
864 	int rv;
865 
866 	DWC_ASSERT_LOCKED(sc);
867 
868 	rv = 0;
869 	reg = READ4(sc, DMA_STATUS);
870 	if (reg & DMA_STATUS_NIS) {
871 		if (reg & DMA_STATUS_RI)
872 			dma1000_rxfinish_locked(sc);
873 
874 		if (reg & DMA_STATUS_TI) {
875 			dma1000_txfinish_locked(sc);
876 			dma1000_txstart(sc);
877 		}
878 	}
879 
880 	if (reg & DMA_STATUS_AIS) {
881 		if (reg & DMA_STATUS_FBI) {
882 			/* Fatal bus error */
883 			rv = EIO;
884 		}
885 	}
886 
887 	WRITE4(sc, DMA_STATUS, reg & DMA_STATUS_INTR_MASK);
888 	return (rv);
889 }
890